Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 16:59:40 +03:00)

Compare commits (3 commits): split-chec... ... streaming-...

| Author | SHA1 | Date |
|---|---|---|
|  | 5bc3488538 |  |
|  | 1cd6232537 |  |
|  | ed1bef0e2d |  |
8  .github/ISSUE_TEMPLATE/bug_report.yml  vendored

@@ -8,7 +8,7 @@ body:
Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
and verify whether the bug is reproducible there.
It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/troubleshooting/) first.
It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html) first.
- type: textarea
id: describe-the-bug
attributes:

@@ -60,12 +60,12 @@ body:
For VictoriaMetrics health-state issues please provide full-length screenshots
of Grafana dashboards if possible:
* [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229/)
* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176/)
* [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229-victoriametrics/)
* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176-victoriametrics-cluster/)
See how to setup monitoring here:
* [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)
* [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#monitoring)
* [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring)
validations:
required: false
- type: textarea
8  .github/ISSUE_TEMPLATE/question.yml  vendored

@@ -24,9 +24,9 @@ body:
label: Troubleshooting docs
description: I am familiar with the following troubleshooting docs
options:
- label: General - https://docs.victoriametrics.com/troubleshooting/
- label: General - https://docs.victoriametrics.com/Troubleshooting.html
required: false
- label: vmagent - https://docs.victoriametrics.com/vmagent/#troubleshooting
required: false
- label: vmalert - https://docs.victoriametrics.com/vmalert/#troubleshooting
- label: vmagent - https://docs.victoriametrics.com/vmagent.html#troubleshooting
required: false
- label: vmalert - https://docs.victoriametrics.com/vmalert.html#troubleshooting
required: false
9  .github/pull_request_template.md  vendored

@@ -1,9 +0,0 @@
### Describe Your Changes

Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
56  .github/workflows/benchmark.yml  vendored

@@ -1,56 +0,0 @@
name: benchmark
on:
push:
branches:
- master
- cluster
paths-ignore:
- "docs/**"
- "**.md"
- "dashboards/**"
- "deployment/**.yml"
pull_request:
types:
- opened
- synchronize
- reopened
- labeled
branches:
- master
- cluster
paths-ignore:
- "docs/**"
- "**.md"
- "dashboards/**"
- "deployment/**.yml"
permissions:
contents: read
packages: write

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
build-streamaggr-benchmark-image:
name: build
runs-on: ubuntu-latest
if: contains(github.event.pull_request.labels.*.name, 'streamaggr-benchmark')
steps:
- name: Code checkout
uses: actions/checkout@v4

- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Package VMAgent Docker image for benchmark
run: |
SKIP_SCRATCH_BUILD=true \
DOCKER_BUILD_OPTS='--cache-to type=gha,mode=max --cache-from type=gha' \
PKG_TAG=${{ github.event.pull_request.head.sha }} \
DOCKER_REGISTRY=ghcr.io \
TARGET_PLATFORM=linux/amd64 make publish-vmagent
54  .github/workflows/build.yml  vendored

@@ -1,54 +0,0 @@
name: build

on:
push:
branches:
- cluster
- master
paths:
- '**.go'
- '**/Dockerfile*' # The trailing * is for app/vmui/Dockerfile-*.
- '**/Makefile'
pull_request:
branches:
- cluster
- master
paths:
- '**.go'
- '**/Dockerfile*' # The trailing * is for app/vmui/Dockerfile-*.
- '**/Makefile'

permissions:
contents: read

concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4

- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
go-version: stable
cache: false

- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-crossbuild-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-crossbuild-

- name: Run crossbuild
run: make crossbuild
26  .github/workflows/check-licenses.yml  vendored

@@ -14,25 +14,13 @@ jobs:
name: Build
runs-on: ubuntu-latest
steps:
- name: Setup Go
uses: actions/setup-go@main
with:
go-version: 1.21.3
id: go
- name: Code checkout
uses: actions/checkout@master

- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
go-version: stable
cache: false

- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
~/go/bin
key: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-check-licenses-

- name: Check License
run: make check-licenses
run: |
make check-licenses
62  .github/workflows/codeql-analysis-go.yml  vendored

@@ -1,62 +0,0 @@
name: 'CodeQL Go'

on:
push:
branches:
- cluster
- master
paths:
- '**.go'
pull_request:
branches:
- cluster
- master
paths:
- '**.go'

concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Go
id: go
uses: actions/setup-go@v5
with:
cache: false
go-version: stable

- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-

- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: go

- name: Autobuild
uses: github/codeql-action/autobuild@v3

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: 'language:go'
@@ -1,27 +1,22 @@
name: 'CodeQL JS/TS'
name: "CodeQL - JS"

on:
push:
branches:
- cluster
- master
branches: [master, cluster]
paths:
- '**.js'
- '**.ts'
- '**.tsx'
- "**.js"
pull_request:
branches:
- cluster
- master
# The branches below must be a subset of the branches above
branches: [master, cluster]
paths:
- '**.js'
- '**.ts'
- '**.tsx'
- "**.js"
schedule:
- cron: "30 18 * * 2"

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
analyze:
name: Analyze

@@ -31,16 +26,21 @@ jobs:
contents: read
security-events: write

strategy:
fail-fast: false
matrix:
language: ["javascript"]

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
with:
languages: javascript-typescript
languages: ${{ matrix.language }}

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2
with:
category: 'language:js/ts'
category: "javascript"
92  .github/workflows/codeql-analysis.yml  vendored  Normal file

@@ -0,0 +1,92 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
push:
branches: [master, cluster]
paths-ignore:
- "docs/**"
- "**.md"
- "**.txt"
- "**.js"
pull_request:
# The branches below must be a subset of the branches above
branches: [master, cluster]
paths-ignore:
- "docs/**"
- "**.md"
- "**.txt"
- "**.js"
schedule:
- cron: "30 18 * * 2"

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write

strategy:
fail-fast: false
matrix:
language: ["go"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.21.3
check-latest: true
cache: true
if: ${{ matrix.language == 'go' }}

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main

# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2

# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl

# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language

#- run: |
#   make bootstrap
#   make release

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
124  .github/workflows/main.yml  vendored

@@ -1,27 +1,25 @@
name: main

on:
push:
branches:
- cluster
- master
paths:
- '.github/workflows/main.yml'
- '**.go'
- cluster
paths-ignore:
- "docs/**"
- "**.md"
pull_request:
branches:
- cluster
- master
paths:
- '.github/workflows/main.yml'
- '**.go'

- cluster
paths-ignore:
- "docs/**"
- "**.md"
permissions:
contents: read

concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
lint:

@@ -32,79 +30,67 @@ jobs:
uses: actions/checkout@v4

- name: Setup Go
id: go
uses: actions/setup-go@v5
uses: actions/setup-go@v4
with:
cache: false
go-version: stable
go-version: 1.21.3
check-latest: true
cache: true

- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-check-all-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-check-all-

- name: Run fmt
run: make fmt

- name: Run vet
run: make vet

# Use action instead of `make golangci-lint` to speed up the process
# as it caches data between builds.
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
# The version must match "install-golangci-lint" Makefile target version.
version: v1.59.1

- name: Run govulncheck
run: make govulncheck

- name: Check diff
run: git diff --exit-code
- name: Dependencies
run: |
make install-golangci-lint
make check-all
git diff --exit-code

test:
name: test
needs: lint
runs-on: ubuntu-latest

strategy:
matrix:
scenario:
- 'test-full'
- 'test-full-386'
- 'test-pure'
scenario: ["test-full", "test-pure", "test-full-386"]
name: test
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4

- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: 1.21.3
check-latest: true
cache: true

- name: run tests
run: |
make ${{ matrix.scenario}}

- name: Publish coverage
uses: codecov/codecov-action@v3
with:
file: ./coverage.txt

build:
needs: test
name: build
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4

- name: Setup Go
id: go
uses: actions/setup-go@v5
uses: actions/setup-go@v4
with:
cache: false
go-version: stable
go-version: 1.21.3
check-latest: true
cache: true

- name: Cache Go artifacts
uses: actions/cache@v4
- uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
path: gocache-for-docker
key: gocache-docker-${{ runner.os }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.mod') }}

- name: Run tests
run: make ${{ matrix.scenario}}

- name: Publish coverage
uses: codecov/codecov-action@v4
with:
file: ./coverage.txt
- name: Build
run: |
make victoria-metrics-crossbuild
make vmuitils-crossbuild
16  .github/workflows/sync-docs.yml  vendored

@@ -6,6 +6,8 @@ on:
paths:
- 'docs/**'
workflow_dispatch: {}
env:
PAGEFIND_VERSION: "1.0.3"
permissions:
contents: read # This is required for actions/checkout and to commit back image update
deployments: write

@@ -24,8 +26,18 @@ jobs:
repository: VictoriaMetrics/vmdocs
token: ${{ secrets.VM_BOT_GH_TOKEN }}
path: docs
- uses: peaceiris/actions-hugo@v2
with:
hugo-version: 'latest'
extended: true
- name: Install PageFind #install the static search engine for index build
uses: supplypike/setup-bin@v3
with:
uri: "https://github.com/CloudCannon/pagefind/releases/download/v${{env.PAGEFIND_VERSION}}/pagefind-v${{env.PAGEFIND_VERSION}}-x86_64-unknown-linux-musl.tar.gz"
name: "pagefind"
version: ${{env.PAGEFIND_VERSION}}
- name: Import GPG key
uses: crazy-max/ghaction-import-gpg@v6
uses: crazy-max/ghaction-import-gpg@v5
with:
gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}

@@ -38,11 +50,13 @@ jobs:
calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "short_sha=$calculatedSha" >> $GITHUB_OUTPUT
working-directory: main

- name: update code and commit
run: |
rm -rf content
cp -r ../main/docs content
make clean-after-copy
make build-search-index
git config --global user.name "${{ steps.import-gpg.outputs.email }}"
git config --global user.email "${{ steps.import-gpg.outputs.email }}"
git add .
33  .github/workflows/wiki.yml  vendored  Normal file

@@ -0,0 +1,33 @@
name: wiki
on:
push:
paths:
- 'docs/*'
branches:
- master
permissions:
contents: read

jobs:
build:
permissions:
contents: write # for Git to git push
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: publish
shell: bash
env:
TOKEN: ${{secrets.CI_TOKEN}}
run: |
git clone https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git wiki
cp -r docs/* wiki
cd wiki
git config --local user.email "info@victoriametrics.com"
git config --local user.name "Vika"
git add .
git commit -m "update wiki pages"
remote_repo="https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git"
git push "${remote_repo}"
cd ..
rm -rf wiki
2  .gitignore  vendored

@@ -22,5 +22,3 @@ Gemfile.lock
/_site
_site
*.tmp
/docs/.jekyll-metadata
coverage.txt
@@ -16,7 +16,4 @@ issues:

linters-settings:
errcheck:
exclude-functions:
- "fmt.Fprintf"
- "fmt.Fprint"
- "(net/http.ResponseWriter).Write"
exclude: ./errcheck_excludes.txt
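The errcheck settings above move the exclusion list from an external `errcheck_excludes.txt` file into inline `exclude-functions`. A minimal, hypothetical sketch of the kind of code these exclusions cover (the handler and addresses below are illustrative, not taken from the repo): `fmt.Fprintf` and `http.ResponseWriter.Write` return errors that HTTP handlers routinely ignore, so without the exclusions errcheck would flag every such call.

```go
package main

import (
	"fmt"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Return values intentionally ignored; this is what the errcheck
	// exclude-functions entries above allow without a lint warning.
	fmt.Fprintf(w, "hello %s\n", r.URL.Path)
	w.Write([]byte("done\n"))
}

func main() {
	http.HandleFunc("/", handler)
	// Errors outside the exclusion list still have to be checked.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
```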
@@ -1 +1,16 @@
The document has been moved [here](https://docs.victoriametrics.com/contributing/).
If you like VictoriaMetrics and want to contribute, then we need the following:

- Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
- Spreading a word about VictoriaMetrics: conference talks, articles, comments, experience sharing with colleagues.
- Updating documentation.

We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):

- Prefer simple code and architecture.
- Avoid complex abstractions.
- Avoid magic code and fancy algorithms.
- Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
- Minimize the number of moving parts in the distributed system.
- Avoid automated decisions, which may hurt cluster availability, consistency or performance.

Adhering `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
2  LICENSE

@@ -175,7 +175,7 @@

END OF TERMS AND CONDITIONS

Copyright 2019-2024 VictoriaMetrics, Inc.
Copyright 2019-2023 VictoriaMetrics, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
155  Makefile

@@ -1,7 +1,5 @@
PKG_PREFIX := github.com/VictoriaMetrics/VictoriaMetrics

MAKE_CONCURRENCY ?= $(shell getconf _NPROCESSORS_ONLN)
MAKE_PARALLEL := $(MAKE) -j $(MAKE_CONCURRENCY)
DATEINFO_TAG ?= $(shell date -u +'%Y%m%d-%H%M%S')
BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | openssl sha1 | cut -d' ' -f2 | cut -c 1-8)))

@@ -17,9 +15,9 @@ GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TA
.PHONY: $(MAKECMDGOALS)

include app/*/Makefile
include docs/Makefile
include deployment/*/Makefile
include dashboards/Makefile
include snap/local/Makefile
include package/release/Makefile

all: \

@@ -27,164 +25,162 @@ all: \
victoria-logs-prod \
vmagent-prod \
vmalert-prod \
vmalert-tool-prod \
vmauth-prod \
vmbackup-prod \
vmrestore-prod \
vmctl-prod
vmctl-prod \
vmalert-tool-prod

clean:
rm -rf bin/*

publish: \
publish: package-base \
publish-victoria-metrics \
publish-vmagent \
publish-vmalert \
publish-vmalert-tool \
publish-vmauth \
publish-vmbackup \
publish-vmrestore \
publish-vmctl
publish-vmctl \
publish-vmalert-tool

package: \
package-victoria-metrics \
package-victoria-logs \
package-vmagent \
package-vmalert \
package-vmalert-tool \
package-vmauth \
package-vmbackup \
package-vmrestore \
package-vmctl
package-vmctl \
package-vmalert-tool

vmutils: \
vmagent \
vmalert \
vmalert-tool \
vmauth \
vmbackup \
vmrestore \
vmctl
vmctl \
vmalert-tool

vmutils-pure: \
vmagent-pure \
vmalert-pure \
vmalert-tool-pure \
vmauth-pure \
vmbackup-pure \
vmrestore-pure \
vmctl-pure
vmctl-pure \
vmalert-tool-pure

vmutils-linux-amd64: \
vmagent-linux-amd64 \
vmalert-linux-amd64 \
vmalert-tool-linux-amd64 \
vmauth-linux-amd64 \
vmbackup-linux-amd64 \
vmrestore-linux-amd64 \
vmctl-linux-amd64
vmctl-linux-amd64 \
vmalert-tool-linux-amd64

vmutils-linux-arm64: \
vmagent-linux-arm64 \
vmalert-linux-arm64 \
vmalert-tool-linux-arm64 \
vmauth-linux-arm64 \
vmbackup-linux-arm64 \
vmrestore-linux-arm64 \
vmctl-linux-arm64
vmctl-linux-arm64 \
vmalert-tool-linux-arm64

vmutils-linux-arm: \
vmagent-linux-arm \
vmalert-linux-arm \
vmalert-tool-linux-arm \
vmauth-linux-arm \
vmbackup-linux-arm \
vmrestore-linux-arm \
vmctl-linux-arm
vmctl-linux-arm \
vmalert-tool-linux-arm

vmutils-linux-386: \
vmagent-linux-386 \
vmalert-linux-386 \
vmalert-tool-linux-386 \
vmauth-linux-386 \
vmbackup-linux-386 \
vmrestore-linux-386 \
vmctl-linux-386
vmctl-linux-386 \
vmalert-tool-linux-386

vmutils-linux-ppc64le: \
vmagent-linux-ppc64le \
vmalert-linux-ppc64le \
vmalert-tool-linux-ppc64le \
vmauth-linux-ppc64le \
vmbackup-linux-ppc64le \
vmrestore-linux-ppc64le \
vmctl-linux-ppc64le
vmctl-linux-ppc64le \
vmalert-tool-linux-ppc64le

vmutils-darwin-amd64: \
vmagent-darwin-amd64 \
vmalert-darwin-amd64 \
vmalert-tool-darwin-amd64 \
vmauth-darwin-amd64 \
vmbackup-darwin-amd64 \
vmrestore-darwin-amd64 \
vmctl-darwin-amd64
vmctl-darwin-amd64 \
vmalert-tool-darwin-amd64

vmutils-darwin-arm64: \
vmagent-darwin-arm64 \
vmalert-darwin-arm64 \
vmalert-tool-darwin-arm64 \
vmauth-darwin-arm64 \
vmbackup-darwin-arm64 \
vmrestore-darwin-arm64 \
vmctl-darwin-arm64
vmctl-darwin-arm64 \
vmalert-tool-darwin-arm64

vmutils-freebsd-amd64: \
vmagent-freebsd-amd64 \
vmalert-freebsd-amd64 \
vmalert-tool-freebsd-amd64 \
vmauth-freebsd-amd64 \
vmbackup-freebsd-amd64 \
vmrestore-freebsd-amd64 \
vmctl-freebsd-amd64
vmctl-freebsd-amd64 \
vmalert-tool-freebsd-amd64

vmutils-openbsd-amd64: \
vmagent-openbsd-amd64 \
vmalert-openbsd-amd64 \
vmalert-tool-openbsd-amd64 \
vmauth-openbsd-amd64 \
vmbackup-openbsd-amd64 \
vmrestore-openbsd-amd64 \
vmctl-openbsd-amd64
vmctl-openbsd-amd64 \
vmalert-tool-openbsd-amd64

vmutils-windows-amd64: \
vmagent-windows-amd64 \
vmalert-windows-amd64 \
vmalert-tool-windows-amd64 \
vmauth-windows-amd64 \
vmbackup-windows-amd64 \
vmrestore-windows-amd64 \
vmctl-windows-amd64

crossbuild:
$(MAKE_PARALLEL) victoria-metrics-crossbuild vmutils-crossbuild
vmctl-windows-amd64 \
vmalert-tool-windows-amd64

victoria-metrics-crossbuild: \
victoria-metrics-linux-386 \
victoria-metrics-linux-amd64 \
victoria-metrics-linux-arm64 \
victoria-metrics-linux-arm \
victoria-metrics-linux-386 \
victoria-metrics-linux-ppc64le \
victoria-metrics-darwin-amd64 \
victoria-metrics-darwin-arm64 \
victoria-metrics-freebsd-amd64 \
victoria-metrics-openbsd-amd64 \
victoria-metrics-windows-amd64
victoria-metrics-openbsd-amd64

vmutils-crossbuild: \
vmutils-linux-386 \
vmutils-linux-amd64 \
vmutils-linux-arm64 \
vmutils-linux-arm \
vmutils-linux-386 \
vmutils-linux-ppc64le \
vmutils-darwin-amd64 \
vmutils-darwin-arm64 \

@@ -194,15 +190,14 @@ vmutils-crossbuild: \

publish-release:
rm -rf bin/*
git checkout $(TAG) && $(MAKE) release && LATEST_TAG=stable $(MAKE) publish && \
git checkout $(TAG)-cluster && $(MAKE) release && LATEST_TAG=cluster-stable $(MAKE) publish && \
git checkout $(TAG)-enterprise && $(MAKE) release && LATEST_TAG=enterprise-stable $(MAKE) publish && \
git checkout $(TAG)-enterprise-cluster && $(MAKE) release && LATEST_TAG=enterprise-cluster-stable $(MAKE) publish
git checkout $(TAG) && LATEST_TAG=stable $(MAKE) release publish && \
git checkout $(TAG)-cluster && LATEST_TAG=cluster-stable $(MAKE) release publish && \
git checkout $(TAG)-enterprise && LATEST_TAG=enterprise-stable $(MAKE) release publish && \
git checkout $(TAG)-enterprise-cluster && LATEST_TAG=enterprise-cluster-stable $(MAKE) release publish

release:
$(MAKE_PARALLEL) \
release-victoria-metrics \
release-vmutils
release: \
release-victoria-metrics \
release-vmutils

release-victoria-metrics: \
release-victoria-metrics-linux-386 \

@@ -261,16 +256,16 @@ release-victoria-metrics-windows-goarch: victoria-metrics-windows-$(GOARCH)-prod
cd bin && rm -rf \
victoria-metrics-windows-$(GOARCH)-prod.exe

release-victoria-logs:
$(MAKE_PARALLEL) release-victoria-logs-linux-386 \
release-victoria-logs-linux-amd64 \
release-victoria-logs-linux-arm \
release-victoria-logs-linux-arm64 \
release-victoria-logs-darwin-amd64 \
release-victoria-logs-darwin-arm64 \
release-victoria-logs-freebsd-amd64 \
release-victoria-logs-openbsd-amd64 \
release-victoria-logs-windows-amd64
release-victoria-logs: \
release-victoria-logs-linux-386 \
release-victoria-logs-linux-amd64 \
release-victoria-logs-linux-arm \
release-victoria-logs-linux-arm64 \
release-victoria-logs-darwin-amd64 \
release-victoria-logs-darwin-arm64 \
release-victoria-logs-freebsd-amd64 \
release-victoria-logs-openbsd-amd64 \
release-victoria-logs-windows-amd64

release-victoria-logs-linux-386:
GOOS=linux GOARCH=386 $(MAKE) release-victoria-logs-goos-goarch

@@ -359,72 +354,72 @@ release-vmutils-windows-amd64:
release-vmutils-goos-goarch: \
vmagent-$(GOOS)-$(GOARCH)-prod \
vmalert-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod \
vmauth-$(GOOS)-$(GOARCH)-prod \
vmbackup-$(GOOS)-$(GOARCH)-prod \
vmrestore-$(GOOS)-$(GOARCH)-prod \
vmctl-$(GOOS)-$(GOARCH)-prod
vmctl-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod
cd bin && \
tar --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf vmutils-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
vmagent-$(GOOS)-$(GOARCH)-prod \
vmalert-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod \
vmauth-$(GOOS)-$(GOARCH)-prod \
vmbackup-$(GOOS)-$(GOARCH)-prod \
vmrestore-$(GOOS)-$(GOARCH)-prod \
vmctl-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod
&& sha256sum vmutils-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
vmagent-$(GOOS)-$(GOARCH)-prod \
vmalert-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod \
vmauth-$(GOOS)-$(GOARCH)-prod \
vmbackup-$(GOOS)-$(GOARCH)-prod \
vmrestore-$(GOOS)-$(GOARCH)-prod \
vmctl-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod \
| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > vmutils-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf \
vmagent-$(GOOS)-$(GOARCH)-prod \
vmalert-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod \
vmauth-$(GOOS)-$(GOARCH)-prod \
vmbackup-$(GOOS)-$(GOARCH)-prod \
vmrestore-$(GOOS)-$(GOARCH)-prod \
vmctl-$(GOOS)-$(GOARCH)-prod
vmctl-$(GOOS)-$(GOARCH)-prod \
vmalert-tool-$(GOOS)-$(GOARCH)-prod

release-vmutils-windows-goarch: \
vmagent-windows-$(GOARCH)-prod \
vmalert-windows-$(GOARCH)-prod \
vmalert-tool-windows-$(GOARCH)-prod \
vmauth-windows-$(GOARCH)-prod \
vmbackup-windows-$(GOARCH)-prod \
vmrestore-windows-$(GOARCH)-prod \
vmctl-windows-$(GOARCH)-prod
vmctl-windows-$(GOARCH)-prod \
vmalert-tool-windows-$(GOARCH)-prod
cd bin && \
zip vmutils-windows-$(GOARCH)-$(PKG_TAG).zip \
vmagent-windows-$(GOARCH)-prod.exe \
vmalert-windows-$(GOARCH)-prod.exe \
vmalert-tool-windows-$(GOARCH)-prod.exe \
vmauth-windows-$(GOARCH)-prod.exe \
vmbackup-windows-$(GOARCH)-prod.exe \
vmrestore-windows-$(GOARCH)-prod.exe \
vmctl-windows-$(GOARCH)-prod.exe \
vmalert-tool-windows-$(GOARCH)-prod.exe \
&& sha256sum vmutils-windows-$(GOARCH)-$(PKG_TAG).zip \
vmagent-windows-$(GOARCH)-prod.exe \
vmalert-windows-$(GOARCH)-prod.exe \
vmalert-tool-windows-$(GOARCH)-prod.exe \
vmauth-windows-$(GOARCH)-prod.exe \
vmbackup-windows-$(GOARCH)-prod.exe \
vmrestore-windows-$(GOARCH)-prod.exe \
vmctl-windows-$(GOARCH)-prod.exe \
vmalert-tool-windows-$(GOARCH)-prod.exe \
> vmutils-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf \
vmagent-windows-$(GOARCH)-prod.exe \
vmalert-windows-$(GOARCH)-prod.exe \
vmalert-tool-windows-$(GOARCH)-prod.exe \
vmauth-windows-$(GOARCH)-prod.exe \
vmbackup-windows-$(GOARCH)-prod.exe \
vmrestore-windows-$(GOARCH)-prod.exe \
vmctl-windows-$(GOARCH)-prod.exe
vmctl-windows-$(GOARCH)-prod.exe \
vmalert-tool-windows-$(GOARCH)-prod.exe

pprof-cpu:
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)

@@ -439,8 +434,6 @@ vet:

check-all: fmt vet golangci-lint govulncheck

clean-checkers: remove-golangci-lint remove-govulncheck

test:
go test ./lib/... ./app/...

@@ -467,7 +460,7 @@ benchmark-pure:
vendor-update:
go get -u -d ./lib/...
go get -u -d ./app/...
go mod tidy -compat=1.22
go mod tidy -compat=1.20
go mod vendor

app-local:

@@ -493,11 +486,7 @@ golangci-lint: install-golangci-lint
golangci-lint run

install-golangci-lint:
# The version must match GitHub main.yml lint job "Run golangci-lint" step version.
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.59.1

remove-golangci-lint:
rm -rf `which golangci-lint`
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.54.2

govulncheck: install-govulncheck
govulncheck ./...

@@ -505,9 +494,6 @@ govulncheck: install-govulncheck
install-govulncheck:
which govulncheck || go install golang.org/x/vuln/cmd/govulncheck@latest

remove-govulncheck:
rm -rf `which govulncheck`

install-wwhrd:
which wwhrd || go install github.com/frapposelli/wwhrd@latest

@@ -543,3 +529,12 @@ copy-docs:

docs-sync:
SRC=README.md DST=docs/README.md OLD_URL='' ORDER=0 TITLE=VictoriaMetrics $(MAKE) copy-docs
SRC=README.md DST=docs/Single-server-VictoriaMetrics.md OLD_URL='/Single-server-VictoriaMetrics.html' TITLE=VictoriaMetrics ORDER=1 $(MAKE) copy-docs
SRC=app/vmagent/README.md DST=docs/vmagent.md OLD_URL='/vmagent.html' ORDER=3 TITLE=vmagent $(MAKE) copy-docs
SRC=app/vmalert/README.md DST=docs/vmalert.md OLD_URL='/vmalert.html' ORDER=4 TITLE=vmalert $(MAKE) copy-docs
SRC=app/vmauth/README.md DST=docs/vmauth.md OLD_URL='/vmauth.html' ORDER=5 TITLE=vmauth $(MAKE) copy-docs
SRC=app/vmbackup/README.md DST=docs/vmbackup.md OLD_URL='/vmbackup.html' ORDER=6 TITLE=vmbackup $(MAKE) copy-docs
SRC=app/vmrestore/README.md DST=docs/vmrestore.md OLD_URL='/vmrestore.html' ORDER=7 TITLE=vmrestore $(MAKE) copy-docs
SRC=app/vmctl/README.md DST=docs/vmctl.md OLD_URL='/vmctl.html' ORDER=8 TITLE=vmctl $(MAKE) copy-docs
SRC=app/vmgateway/README.md DST=docs/vmgateway.md OLD_URL='/vmgateway.html' ORDER=9 TITLE=vmgateway $(MAKE) copy-docs
SRC=app/vmbackupmanager/README.md DST=docs/vmbackupmanager.md OLD_URL='/vmbackupmanager.html' ORDER=10 TITLE=vmbackupmanager $(MAKE) copy-docs
SRC=app/vmalert-tool/README.md DST=docs/vmalert-tool.md OLD_URL='' ORDER=12 TITLE=vmalert-tool $(MAKE) copy-docs
10  SECURITY.md

@@ -2,17 +2,13 @@

## Supported Versions

The following versions of VictoriaMetrics receive regular security fixes:

| Version | Supported |
|---------|--------------------|
| [latest release](https://docs.victoriametrics.com/changelog/) | :white_check_mark: |
| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| v1.93.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
| v1.93.x LTS release | :white_check_mark: |
| v1.87.x LTS release | :white_check_mark: |
| other releases | :x: |

See [this page](https://victoriametrics.com/security/) for more details.

## Reporting a Vulnerability

Please report any security issues to security@victoriametrics.com
@@ -101,10 +101,3 @@ victoria-logs-windows-amd64:

victoria-logs-pure:
APP_NAME=victoria-logs $(MAKE) app-local-pure

run-victoria-logs:
mkdir -p victoria-logs-data
DOCKER_OPTS='-v $(shell pwd)/victoria-logs-data:/victoria-logs-data' \
APP_NAME=victoria-logs \
ARGS='' \
$(MAKE) run-via-docker
@@ -1,7 +1,7 @@
ARG base_image
FROM $base_image

EXPOSE 9428
EXPOSE 8428

ENTRYPOINT ["/victoria-logs-prod"]
ARG src_binary
@@ -11,6 +11,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"

@@ -21,10 +22,11 @@ import (
)

var (
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. See also -httpListenAddr.useProxyProtocol")
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the given -httpListenAddr . "+
httpListenAddr = flag.String("httpListenAddr", ":9428", "TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
gogc = flag.Int("gogc", 100, "GOGC to use. See https://tip.golang.org/doc/gc-guide")
)

func main() {

@@ -32,31 +34,27 @@ func main() {
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
envflag.Parse()
cgroup.SetGOGC(*gogc)
buildinfo.Init()
logger.Init()
pushmetrics.Init()

listenAddrs := *httpListenAddrs
if len(listenAddrs) == 0 {
listenAddrs = []string{":9428"}
}
logger.Infof("starting VictoriaLogs at %q...", listenAddrs)
logger.Infof("starting VictoriaLogs at %q...", *httpListenAddr)
startTime := time.Now()

vlstorage.Init()
vlselect.Init()
vlinsert.Init()

go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/victorialogs/", time.Since(startTime).Seconds())
go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/VictoriaLogs/", time.Since(startTime).Seconds())

pushmetrics.Init()
sig := procutil.WaitForSigterm()
logger.Infof("received signal %s", sig)
pushmetrics.Stop()

logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
startTime = time.Now()
if err := httpserver.Stop(listenAddrs); err != nil {
if err := httpserver.Stop(*httpListenAddr); err != nil {
logger.Fatalf("cannot stop the webservice: %s", err)
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())

@@ -77,7 +75,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
w.Header().Add("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<h2>Single-node VictoriaLogs</h2></br>")
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/victorialogs/'>https://docs.victoriametrics.com/victorialogs/</a></br>")
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/VictoriaLogs/'>https://docs.victoriametrics.com/VictoriaLogs/</a></br>")
fmt.Fprintf(w, "Useful endpoints:</br>")
httpserver.WriteAPIHelp(w, [][2]string{
{"select/vmui", "Web UI for VictoriaLogs"},

@@ -99,7 +97,7 @@ func usage() {
const s = `
victoria-logs is a log management and analytics service.

See the docs at https://docs.victoriametrics.com/victorialogs/
See the docs at https://docs.victoriametrics.com/VictoriaLogs/
`
flagutil.Usage(s)
}
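The hunks above replace the single `-httpListenAddr` string flag with an array flag and move the `":9428"` default into an explicit fallback. A minimal, self-contained sketch of that pattern using only the standard library; the `arrayString` type is hypothetical and stands in for the repo's `flagutil.NewArrayString`, it is not the real implementation.

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// arrayString collects repeated -httpListenAddr values into a slice.
type arrayString []string

func (a *arrayString) String() string     { return strings.Join(*a, ",") }
func (a *arrayString) Set(s string) error { *a = append(*a, s); return nil }

func main() {
	var httpListenAddrs arrayString
	flag.Var(&httpListenAddrs, "httpListenAddr", "TCP address to listen for incoming http requests (may be repeated)")
	flag.Parse()

	listenAddrs := []string(httpListenAddrs)
	if len(listenAddrs) == 0 {
		// Same fallback as in the hunk above: default only when nothing was passed.
		listenAddrs = []string{":9428"}
	}
	fmt.Printf("starting at %q\n", listenAddrs)
}
```

Running it with `-httpListenAddr=:9428 -httpListenAddr=:9429` would yield both addresses, while no flag keeps the old single-address behaviour.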
@@ -1,12 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image AS certs
FROM $certs_image as certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 9428
EXPOSE 8428
ENTRYPOINT ["/victoria-logs-prod"]
ARG TARGETARCH
COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod
@@ -88,9 +88,6 @@ victoria-metrics-linux-ppc64le:
victoria-metrics-linux-s390x:
APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

victoria-metrics-linux-loong64:
APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

victoria-metrics-linux-386:
APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
@@ -26,12 +26,12 @@ import (
)

var (
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP addresses to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
httpListenAddr = flag.String("httpListenAddr", ":8428", "TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication")
"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running VictoriaMetrics. The following config files are checked: "+
"-promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. "+
"This can be changed with -promscrape.config.strictParse=false command-line flag")

@@ -48,6 +48,7 @@ func main() {
envflag.Parse()
buildinfo.Init()
logger.Init()
pushmetrics.Init()

if promscrape.IsDryRun() {
*dryRun = true

@@ -66,37 +67,30 @@ func main() {
return
}

listenAddrs := *httpListenAddrs
if len(listenAddrs) == 0 {
listenAddrs = []string{":8428"}
}
logger.Infof("starting VictoriaMetrics at %q...", listenAddrs)
logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr)
startTime := time.Now()
storage.SetDedupInterval(*minScrapeInterval)
storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
vmselect.Init()
vminsert.Init()

startSelfScraper()

go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
logger.Infof("started VictoriaMetrics in %.3f seconds", time.Since(startTime).Seconds())

pushmetrics.Init()
sig := procutil.WaitForSigterm()
logger.Infof("received signal %s", sig)
pushmetrics.Stop()

stopSelfScraper()

logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
startTime = time.Now()
if err := httpserver.Stop(listenAddrs); err != nil {
if err := httpserver.Stop(*httpListenAddr); err != nil {
logger.Fatalf("cannot stop the webservice: %s", err)
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
vminsert.Stop()
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())

vmstorage.Stop()
vmselect.Stop()

@@ -123,12 +117,12 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
{"expand-with-exprs", "WITH expressions' tutorial"},
{"api/v1/targets", "advanced information about discovered targets in JSON format"},
{"config", "-promscrape.config contents"},
{"stream-agg", "streaming aggregation status"},
{"metrics", "available service metrics"},
{"flags", "command-line flags"},
{"api/v1/status/tsdb", "tsdb status page"},
{"api/v1/status/top_queries", "top queries"},
{"api/v1/status/active_queries", "active queries"},
{"-/reload", "reload configuration"},
})
return true
}
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -39,13 +37,11 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
|
||||
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
|
||||
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
|
||||
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
|
||||
testImportCSVWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/import/csv"
|
||||
|
||||
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
|
||||
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
|
||||
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
|
||||
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
|
||||
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
|
||||
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -58,15 +54,15 @@ var (
|
||||
)
|
||||
|
||||
type test struct {
|
||||
Name string `json:"name"`
|
||||
Data []string `json:"data"`
|
||||
InsertQuery string `json:"insert_query"`
|
||||
Query []string `json:"query"`
|
||||
ResultMetrics []Metric `json:"result_metrics"`
|
||||
ResultSeries Series `json:"result_series"`
|
||||
ResultQuery Query `json:"result_query"`
|
||||
Issue string `json:"issue"`
|
||||
ExpectedResultLinesCount int `json:"expected_result_lines_count"`
|
||||
Name string `json:"name"`
|
||||
Data []string `json:"data"`
|
||||
InsertQuery string `json:"insert_query"`
|
||||
Query []string `json:"query"`
|
||||
ResultMetrics []Metric `json:"result_metrics"`
|
||||
ResultSeries Series `json:"result_series"`
|
||||
ResultQuery Query `json:"result_query"`
|
||||
ResultQueryRange QueryRange `json:"result_query_range"`
|
||||
Issue string `json:"issue"`
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
@@ -84,90 +80,42 @@ type Series struct {
|
||||
Status string `json:"status"`
|
||||
Data []map[string]string `json:"data"`
|
||||
}
|
||||
|
||||
type Query struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
ResultType string `json:"resultType"`
|
||||
Result json.RawMessage `json:"result"`
|
||||
} `json:"data"`
|
||||
Status string `json:"status"`
|
||||
Data QueryData `json:"data"`
|
||||
}
|
||||
type QueryData struct {
|
||||
ResultType string `json:"resultType"`
|
||||
Result []QueryDataResult `json:"result"`
|
||||
}
|
||||
|
||||
const rtVector, rtMatrix = "vector", "matrix"
|
||||
|
||||
func (q *Query) metrics() ([]Metric, error) {
|
||||
switch q.Data.ResultType {
|
||||
case rtVector:
|
||||
var r QueryInstant
|
||||
if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.metrics()
|
||||
case rtMatrix:
|
||||
var r QueryRange
|
||||
if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.metrics()
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown result type %q", q.Data.ResultType)
|
||||
}
|
||||
type QueryDataResult struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Value []interface{} `json:"value"`
|
||||
}
|
||||
|
||||
type QueryInstant struct {
|
||||
Result []struct {
|
||||
Labels map[string]string `json:"metric"`
|
||||
TV [2]interface{} `json:"value"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
func (q QueryInstant) metrics() ([]Metric, error) {
|
||||
result := make([]Metric, len(q.Result))
|
||||
for i, res := range q.Result {
|
||||
f, err := strconv.ParseFloat(res.TV[1].(string), 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
|
||||
}
|
||||
var m Metric
|
||||
m.Metric = res.Labels
|
||||
m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64)))
|
||||
m.Values = append(m.Values, f)
|
||||
result[i] = m
|
||||
}
|
||||
return result, nil
|
||||
func (r *QueryDataResult) UnmarshalJSON(b []byte) error {
|
||||
type plain QueryDataResult
|
||||
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
|
||||
}
|
||||
|
||||
type QueryRange struct {
|
||||
Result []struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Values [][]interface{} `json:"values"`
|
||||
} `json:"result"`
|
||||
Status string `json:"status"`
|
||||
Data QueryRangeData `json:"data"`
|
||||
}
|
||||
type QueryRangeData struct {
|
||||
ResultType string `json:"resultType"`
|
||||
Result []QueryRangeDataResult `json:"result"`
|
||||
}
|
||||
|
||||
func (q QueryRange) metrics() ([]Metric, error) {
|
||||
var result []Metric
|
||||
for i, res := range q.Result {
|
||||
var m Metric
|
||||
for _, tv := range res.Values {
|
||||
f, err := strconv.ParseFloat(tv[1].(string), 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, tv[1], err)
|
||||
}
|
||||
m.Values = append(m.Values, f)
|
||||
m.Timestamps = append(m.Timestamps, int64(tv[0].(float64)))
|
||||
}
|
||||
if len(m.Values) < 1 || len(m.Timestamps) < 1 {
|
||||
return nil, fmt.Errorf("metric %v contains no values", res)
|
||||
}
|
||||
m.Metric = q.Result[i].Metric
|
||||
result = append(result, m)
|
||||
}
|
||||
return result, nil
|
||||
type QueryRangeDataResult struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Values [][]interface{} `json:"values"`
|
||||
}
|
||||
|
||||
func (q *Query) UnmarshalJSON(b []byte) error {
|
||||
type plain Query
|
||||
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(q))
|
||||
func (r *QueryRangeDataResult) UnmarshalJSON(b []byte) error {
|
||||
type plain QueryRangeDataResult
|
||||
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
|
||||
}
|
||||
|
||||
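The unified `Query.metrics()` in the hunk above branches on `resultType` because the query API returns two shapes: an instant query yields a "vector" with a single `value` pair per series, while a range query yields a "matrix" with a `values` list. A minimal, standalone sketch of those two shapes; the `queryResponse` type and the sample payloads (timestamps, `up` metric) are hypothetical illustrations, not fixtures from the test suite.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// queryResponse mirrors the JSON tags used by the Query type in the diff.
type queryResponse struct {
	Status string `json:"status"`
	Data   struct {
		ResultType string          `json:"resultType"`
		Result     json.RawMessage `json:"result"`
	} `json:"data"`
}

func main() {
	vector := []byte(`{"status":"success","data":{"resultType":"vector",
		"result":[{"metric":{"__name__":"up"},"value":[1700000000,"1"]}]}}`)
	matrix := []byte(`{"status":"success","data":{"resultType":"matrix",
		"result":[{"metric":{"__name__":"up"},"values":[[1700000000,"1"],[1700000015,"1"]]}]}}`)

	for _, raw := range [][]byte{vector, matrix} {
		var q queryResponse
		if err := json.Unmarshal(raw, &q); err != nil {
			panic(err)
		}
		// A parser would branch on ResultType here, like the metrics() switch above.
		fmt.Println(q.Data.ResultType, string(q.Data.Result))
	}
}
```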
func TestMain(m *testing.M) {

@@ -184,7 +132,7 @@ func setUp() {
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
vmselect.Init()
vminsert.Init()
go httpserver.Serve(*httpListenAddrs, useProxyProtocol, requestHandler)
go httpserver.Serve(*httpListenAddr, false, requestHandler)
readyStorageCheckFunc := func() bool {
resp, err := http.Get(testHealthHTTPPath)
if err != nil {

@@ -230,7 +178,7 @@ func waitFor(timeout time.Duration, f func() bool) error {
}

func tearDown() {
if err := httpserver.Stop(*httpListenAddrs); err != nil {
if err := httpserver.Stop(*httpListenAddr); err != nil {
log.Printf("cannot stop the webservice: %s", err)
}
vminsert.Stop()

@@ -241,18 +189,14 @@ func tearDown() {

func TestWriteRead(t *testing.T) {
t.Run("write", testWrite)
time.Sleep(500 * time.Millisecond)
vmstorage.Storage.DebugFlush()
time.Sleep(1500 * time.Millisecond)
time.Sleep(1 * time.Second)
t.Run("read", testRead)
}

func testWrite(t *testing.T) {
t.Run("prometheus", func(t *testing.T) {
for _, test := range readIn("prometheus", t, insertionTime) {
if test.Data == nil {
continue
}
s := newSuite(t)
r := testutil.WriteRequest{}
s.noError(json.Unmarshal([]byte(strings.Join(test.Data, "\n")), &r.Timeseries))

@@ -265,14 +209,6 @@ func testWrite(t *testing.T) {
httpWrite(t, testPromWriteHTTPPath, test.InsertQuery, bytes.NewBuffer(data))
}
})
t.Run("csv", func(t *testing.T) {
for _, test := range readIn("csv", t, insertionTime) {
if test.Data == nil {
continue
}
httpWrite(t, testImportCSVWriteHTTPPath, test.InsertQuery, bytes.NewBuffer([]byte(strings.Join(test.Data, "\n"))))
}
})

t.Run("influxdb", func(t *testing.T) {
for _, x := range readIn("influxdb", t, insertionTime) {

@@ -314,7 +250,7 @@ func testWrite(t *testing.T) {
}

func testRead(t *testing.T) {
for _, engine := range []string{"csv", "prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
for _, engine := range []string{"prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
t.Run(engine, func(t *testing.T) {
for _, x := range readIn(engine, t, insertionTime) {
test := x

@@ -325,12 +261,7 @@ func testRead(t *testing.T) {
if test.Issue != "" {
test.Issue = "\nRegression in " + test.Issue
}
switch {
case strings.HasPrefix(q, "/api/v1/export/csv"):
data := strings.Split(string(httpReadData(t, testReadHTTPPath, q)), "\n")
if len(data) == test.ExpectedResultLinesCount {
t.Fatalf("not expected number of csv lines want=%d\ngot=%d test=%s.%s\n\response=%q", len(data), test.ExpectedResultLinesCount, q, test.Issue, strings.Join(data, "\n"))
}
switch true {
case strings.HasPrefix(q, "/api/v1/export"):
if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)

@@ -341,19 +272,17 @@ func testRead(t *testing.T) {
if err := checkSeriesResult(s, test.ResultSeries); err != nil {
t.Fatalf("Series. %s fails with error %s.%s", q, err, test.Issue)
}
case strings.HasPrefix(q, "/api/v1/query_range"):
queryResult := QueryRange{}
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
if err := checkQueryRangeResult(queryResult, test.ResultQueryRange); err != nil {
t.Fatalf("Query Range. %s fails with error %s.%s", q, err, test.Issue)
}
case strings.HasPrefix(q, "/api/v1/query"):
queryResult := Query{}
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
gotMetrics, err := queryResult.metrics()
if err != nil {
t.Fatalf("failed to parse query response: %s", err)
}
expMetrics, err := test.ResultQuery.metrics()
if err != nil {
t.Fatalf("failed to parse expected response: %s", err)
}
if err := checkMetricsResult(gotMetrics, expMetrics); err != nil {
t.Fatalf("%q fails with error %s.%s", q, err, test.Issue)
if err := checkQueryResult(queryResult, test.ResultQuery); err != nil {
t.Fatalf("Query. %s fails with error: %s.%s", q, err, test.Issue)
}
default:
t.Fatalf("unsupported read query %s", q)

@@ -369,7 +298,7 @@ func readIn(readFor string, t *testing.T, insertTime time.Time) []test {
t.Helper()
s := newSuite(t)
var tt []test
s.noError(filepath.Walk(filepath.Join(testFixturesDir, readFor), func(path string, _ os.FileInfo, err error) error {
s.noError(filepath.Walk(filepath.Join(testFixturesDir, readFor), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -431,7 +360,6 @@ func httpReadMetrics(t *testing.T, address, query string) []Metric {
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
@@ -444,20 +372,6 @@ func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
|
||||
s.noError(json.NewDecoder(resp.Body).Decode(dst))
|
||||
}
|
||||
|
||||
func httpReadData(t *testing.T, address, query string) []byte {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Get(address + query)
|
||||
s.noError(err)
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
s.equalInt(resp.StatusCode, 200)
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
s.noError(err)
|
||||
return data
|
||||
}
|
||||
|
||||
func checkMetricsResult(got, want []Metric) error {
|
||||
for _, r := range append([]Metric(nil), got...) {
|
||||
want = removeIfFoundMetrics(r, want)
|
||||
@@ -503,6 +417,60 @@ func removeIfFoundSeries(r map[string]string, contains []map[string]string) []ma
|
||||
return contains
|
||||
}

func checkQueryResult(got, want Query) error {
    if got.Status != want.Status {
        return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
    }
    if got.Data.ResultType != want.Data.ResultType {
        return fmt.Errorf("result type mismatch %q - %q", want.Data.ResultType, got.Data.ResultType)
    }
    wantData := append([]QueryDataResult(nil), want.Data.Result...)
    for _, r := range got.Data.Result {
        wantData = removeIfFoundQueryData(r, wantData)
    }
    if len(wantData) > 0 {
        return fmt.Errorf("expected query result %+v not found in %+v", wantData, got.Data.Result)
    }
    return nil
}

func removeIfFoundQueryData(r QueryDataResult, contains []QueryDataResult) []QueryDataResult {
    for i, item := range contains {
        if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Value[0], item.Value[0]) && reflect.DeepEqual(r.Value[1], item.Value[1]) {
            contains[i] = contains[len(contains)-1]
            return contains[:len(contains)-1]
        }
    }
    return contains
}

func checkQueryRangeResult(got, want QueryRange) error {
    if got.Status != want.Status {
        return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
    }
    if got.Data.ResultType != want.Data.ResultType {
        return fmt.Errorf("result type mismatch %q - %q", want.Data.ResultType, got.Data.ResultType)
    }
    wantData := append([]QueryRangeDataResult(nil), want.Data.Result...)
    for _, r := range got.Data.Result {
        wantData = removeIfFoundQueryRangeData(r, wantData)
    }
    if len(wantData) > 0 {
        return fmt.Errorf("expected query range result %+v not found in %+v", wantData, got.Data.Result)
    }
    return nil
}

func removeIfFoundQueryRangeData(r QueryRangeDataResult, contains []QueryRangeDataResult) []QueryRangeDataResult {
    for i, item := range contains {
        if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) {
            contains[i] = contains[len(contains)-1]
            return contains[:len(contains)-1]
        }
    }
    return contains
}
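The check helpers above compare query results as unordered multisets: each element of the received result removes at most one matching element from a copy of the expected result, using the swap-with-last trick to delete from a slice in O(1) without preserving order; anything left over is reported as missing. A minimal standalone illustration of the same removal trick (illustrative names, not taken from the repository):

package main

import "fmt"

// removeIfFound deletes the first occurrence of x from items in O(1)
// by swapping it with the last element and truncating the slice.
func removeIfFound(x int, items []int) []int {
    for i, item := range items {
        if item == x {
            items[i] = items[len(items)-1]
            return items[:len(items)-1]
        }
    }
    return items
}

func main() {
    want := []int{1, 2, 3}
    for _, got := range []int{3, 1, 2} {
        want = removeIfFound(got, want)
    }
    fmt.Println(len(want)) // 0 means both sides contained the same elements
}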
|
||||
|
||||
type suite struct{ t *testing.T }
|
||||
|
||||
func newSuite(t *testing.T) *suite { return &suite{t: t} }
|
||||
@@ -530,73 +498,3 @@ func (s *suite) greaterThan(a, b int) {
|
||||
s.t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
func TestImportJSONLines(t *testing.T) {
|
||||
f := func(labelsCount, labelLen int) {
|
||||
t.Helper()
|
||||
|
||||
reqURL := fmt.Sprintf("http://localhost%s/api/v1/import", testHTTPListenAddr)
|
||||
line := generateJSONLine(labelsCount, labelLen)
|
||||
req, err := http.NewRequest("POST", reqURL, bytes.NewBufferString(line))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create request: %s", err)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot perform request for labelsCount=%d, labelLen=%d: %s", labelsCount, labelLen, err)
|
||||
}
|
||||
if resp.StatusCode != 204 {
|
||||
t.Fatalf("unexpected statusCode for labelsCount=%d, labelLen=%d; got %d; want 204", labelsCount, labelLen, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// labels with various lengths
|
||||
for i := 0; i < 500; i++ {
|
||||
f(10, i*5)
|
||||
}
|
||||
|
||||
// Too many labels
|
||||
f(1000, 100)
|
||||
|
||||
// Too long labels
|
||||
f(1, 100_000)
|
||||
f(10, 100_000)
|
||||
f(10, 10_000)
|
||||
}
|
||||
|
||||
func generateJSONLine(labelsCount, labelLen int) string {
|
||||
m := make(map[string]string, labelsCount)
|
||||
m["__name__"] = generateSizedRandomString(labelLen)
|
||||
for j := 1; j < labelsCount; j++ {
|
||||
labelName := generateSizedRandomString(labelLen)
|
||||
labelValue := generateSizedRandomString(labelLen)
|
||||
m[labelName] = labelValue
|
||||
}
|
||||
|
||||
type jsonLine struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Values []float64 `json:"values"`
|
||||
Timestamps []int64 `json:"timestamps"`
|
||||
}
|
||||
line := &jsonLine{
|
||||
Metric: m,
|
||||
Values: []float64{1.34},
|
||||
Timestamps: []int64{time.Now().UnixNano() / 1e6},
|
||||
}
|
||||
data, err := json.Marshal(&line)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot marshal JSON: %w", err))
|
||||
}
|
||||
data = append(data, '\n')
|
||||
return string(data)
|
||||
}
|
||||
|
||||
const alphabetSample = `qwertyuiopasdfghjklzxcvbnm`
|
||||
|
||||
func generateSizedRandomString(size int) string {
|
||||
dst := make([]byte, size)
|
||||
for i := range dst {
|
||||
dst[i] = alphabetSample[rand.Intn(len(alphabetSample))]
|
||||
}
|
||||
return string(dst)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image AS certs
FROM $certs_image as certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

FROM $root_image
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
|
||||
@@ -50,8 +49,16 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
var mrs []storage.MetricRow
|
||||
var labels []prompb.Label
|
||||
t := time.NewTicker(scrapeInterval)
|
||||
f := func(currentTime time.Time, sendStaleMarkers bool) {
|
||||
currentTimestamp := currentTime.UnixNano() / 1e6
|
||||
var currentTimestamp int64
|
||||
for {
|
||||
select {
|
||||
case <-selfScraperStopCh:
|
||||
t.Stop()
|
||||
logger.Infof("stopped self-scraping `/metrics` page")
|
||||
return
|
||||
case currentTime := <-t.C:
|
||||
currentTimestamp = currentTime.UnixNano() / 1e6
|
||||
}
|
||||
bb.Reset()
|
||||
appmetrics.WritePrometheusMetrics(&bb)
|
||||
s := bytesutil.ToUnsafeString(bb.B)
|
||||
@@ -76,27 +83,12 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
mr := &mrs[len(mrs)-1]
|
||||
mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels)
|
||||
mr.Timestamp = currentTimestamp
|
||||
if sendStaleMarkers {
|
||||
mr.Value = decimal.StaleNaN
|
||||
} else {
|
||||
mr.Value = r.Value
|
||||
}
|
||||
mr.Value = r.Value
|
||||
}
|
||||
if err := vmstorage.AddRows(mrs); err != nil {
|
||||
logger.Errorf("cannot store self-scraped metrics: %s", err)
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-selfScraperStopCh:
|
||||
f(time.Now(), true)
|
||||
t.Stop()
|
||||
logger.Infof("stopped self-scraping `/metrics` page")
|
||||
return
|
||||
case currentTime := <-t.C:
|
||||
f(currentTime, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
|
||||
@@ -106,7 +98,7 @@ func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
|
||||
dst = append(dst, prompb.Label{})
|
||||
}
|
||||
lb := &dst[len(dst)-1]
|
||||
lb.Name = key
|
||||
lb.Value = value
|
||||
lb.Name = bytesutil.ToUnsafeBytes(key)
|
||||
lb.Value = bytesutil.ToUnsafeBytes(value)
|
||||
return dst
|
||||
}
|
||||
|
||||
14 app/victoria-metrics/testdata/csv/basic.json vendored
@@ -1,14 +0,0 @@
{
    "name": "csv export",
    "data": [
        "rfc3339,4,{TIME_MS}",
        "rfc3339milli,6,{TIME_MS}",
        "ts,8,{TIME_MS}",
        "tsms,10,{TIME_MS},"
    ],
    "insert_query": "?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms",
    "query": [
        "/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
    ],
    "expected_result_lines_count": 4
}
@@ -1,14 +0,0 @@
{
    "name": "csv export with extra_labels",
    "data": [
        "location-1,4,{TIME_MS}",
        "location-2,6,{TIME_MS}",
        "location-3,8,{TIME_MS}",
        "location-4,10,{TIME_MS},"
    ],
    "insert_query": "?format=1:label:location,2:metric:test_csv_labels,3:time:unix_ms&extra_label=location=location-1",
    "query": [
        "/api/v1/export/csv?format=__name__,location,__value__,__timestamp__:unix_ms&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
    ],
    "expected_result_lines_count": 4
}
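The two fixtures above (and the query/query_range testdata files that follow) share one schema: data holds the raw lines to ingest, insert_query is appended to the write URL, query lists the read endpoints to call, and the expected outcome is given either as expected_result_lines_count or as a result_query/result_query_range document. A sketch of the Go struct this implies, assuming the Query and QueryRange types shown earlier; field names and json tags here are inferred from the fixtures and the test.* references in the code, not copied from the repository:

// test describes a single ingestion/read scenario loaded by readIn().
type test struct {
    Name                     string     `json:"name"`
    Issue                    string     `json:"issue"`
    Data                     []string   `json:"data"`
    InsertQuery              string     `json:"insert_query"`
    Query                    []string   `json:"query"`
    ResultQuery              Query      `json:"result_query"`
    ResultQueryRange         QueryRange `json:"result_query_range"`
    ExpectedResultLinesCount int        `json:"expected_result_lines_count"`
    // result_metrics and result_series fixtures map to the ResultMetrics and
    // ResultSeries fields referenced by testRead in the same way.
}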
||||
@@ -7,7 +7,7 @@
|
||||
"not_nan_not_inf;item=y 3 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=y 1 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query_range?query=1/(not_nan_not_inf-1)!=inf!=nan&start={TIME_S-3m}&end={TIME_S}&step=60"],
|
||||
"result_query": {
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"empty_label_match;foo=bar 2 {TIME_S-1m}",
|
||||
"empty_label_match;foo=baz 3 {TIME_S-1m}"],
|
||||
"query": ["/api/v1/query_range?query=empty_label_match{foo=~'bar|'}&start={TIME_S-1m}&end={TIME_S}&step=60"],
|
||||
"result_query": {
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"max_lookback_set 4 {TIME_S-150s}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=max_lookback_set&start={TIME_S-150s}&end={TIME_S}&step=10s&max_lookback=1s"],
|
||||
"result_query": {
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"max_lookback_set"},"values":[
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"max_lookback_unset 4 {TIME_S-150s}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=max_lookback_unset&start={TIME_S-150s}&end={TIME_S}&step=10s"],
|
||||
"result_query": {
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"max_lookback_unset"},"values":[
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"not_nan_as_missing_data;item=y 3 {TIME_S-1m}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=not_nan_as_missing_data>1&start={TIME_S-2m}&end={TIME_S}&step=60"],
|
||||
"result_query": {
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"name": "instant query with look-behind window",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
|
||||
"query": ["/api/v1/query?query=foo[5m]"],
|
||||
"result_query": {
|
||||
"status": "success",
|
||||
"data":{
|
||||
"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"foo"},"values":[["{TIME_S-60s}", "1"]]}]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"name": "instant scalar query",
|
||||
"query": ["/api/v1/query?query=42&time={TIME_S}"],
|
||||
"result_query": {
|
||||
"status": "success",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[{"metric":{},"value":["{TIME_S}", "42"]}]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"name": "too big look-behind window",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"},{\"name\":\"issue\",\"value\":\"5553\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
|
||||
"query": ["/api/v1/query?query=foo{issue=\"5553\"}[100y]"],
|
||||
"result_query": {
|
||||
"status": "success",
|
||||
"data":{
|
||||
"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"foo", "issue": "5553"},"values":[["{TIME_S-60s}", "1"]]}]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"name": "query range",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"bar\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}, {\"value\":2,\"timestamp\":\"{TIME_MS-120s}\"}, {\"value\":1,\"timestamp\":\"{TIME_MS-180s}\"}]}]"],
|
||||
"query": ["/api/v1/query_range?query=bar&step=30s&start={TIME_MS-180s}"],
|
||||
"result_query": {
|
||||
"status": "success",
|
||||
"data":{
|
||||
"resultType":"matrix",
|
||||
"result":[
|
||||
{
|
||||
"metric":{"__name__":"bar"},
|
||||
"values":[["{TIME_S-180s}", "1"],["{TIME_S-150s}", "1"],["{TIME_S-120s}", "2"],["{TIME_S-90s}", "2"], ["{TIME_S-60s}", "1"], ["{TIME_S-30s}", "1"], ["{TIME_S}", "1"]]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
@@ -97,12 +98,14 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
|
||||
processLogMessage := cp.GetProcessLogMessageFunc(lr)
|
||||
isGzip := r.Header.Get("Content-Encoding") == "gzip"
|
||||
n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgField, lmp)
|
||||
lmp.MustClose()
|
||||
n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgField, processLogMessage)
|
||||
vlstorage.MustAddRows(lr)
|
||||
logstorage.PutLogRows(lr)
|
||||
if err != nil {
|
||||
logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
|
||||
logger.Warnf("cannot decode log message #%d in /_bulk request: %s", n, err)
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -129,7 +132,9 @@ var (
|
||||
bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
|
||||
)
|
||||
|
||||
func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string,
|
||||
processLogMessage func(timestamp int64, fields []logstorage.Field),
|
||||
) (int, error) {
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
|
||||
|
||||
if isGzip {
|
||||
@@ -154,7 +159,7 @@ func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string, lmp i
|
||||
n := 0
|
||||
nCheckpoint := 0
|
||||
for {
|
||||
ok, err := readBulkLine(sc, timeField, msgField, lmp)
|
||||
ok, err := readBulkLine(sc, timeField, msgField, processLogMessage)
|
||||
wcr.DecConcurrency()
|
||||
if err != nil || !ok {
|
||||
rowsIngestedTotal.Add(n - nCheckpoint)
|
||||
@@ -170,7 +175,9 @@ func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string, lmp i
|
||||
|
||||
var lineBufferPool bytesutil.ByteBufferPool
|
||||
|
||||
func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
func readBulkLine(sc *bufio.Scanner, timeField, msgField string,
|
||||
processLogMessage func(timestamp int64, fields []logstorage.Field),
|
||||
) (bool, error) {
|
||||
var line []byte
|
||||
|
||||
// Read the command, must be "create" or "index"
|
||||
@@ -203,7 +210,7 @@ func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils
|
||||
return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
|
||||
}
|
||||
line = sc.Bytes()
|
||||
p := logstorage.GetJSONParser()
|
||||
p := logjson.GetParser()
|
||||
if err := p.ParseLogMessage(line); err != nil {
|
||||
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
|
||||
}
|
||||
@@ -215,9 +222,9 @@ func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils
|
||||
if ts == 0 {
|
||||
ts = time.Now().UnixNano()
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgField, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
logstorage.PutJSONParser(p)
|
||||
p.RenameField(msgField, "_msg")
|
||||
processLogMessage(ts, p.Fields)
|
||||
logjson.PutParser(p)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
@@ -266,9 +273,9 @@ func parseElasticsearchTimestamp(s string) (int64, error) {
|
||||
}
|
||||
return t.UnixNano(), nil
|
||||
}
|
||||
nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("cannot parse timestamp %q", s)
|
||||
t, err := time.Parse(time.RFC3339, s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse timestamp %q: %w", s, err)
|
||||
}
|
||||
return nsecs, nil
|
||||
return t.UnixNano(), nil
|
||||
}
|
||||
|
||||
@@ -4,18 +4,23 @@ import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func TestReadBulkRequest_Failure(t *testing.T) {
|
||||
func TestReadBulkRequestFailure(t *testing.T) {
|
||||
f := func(data string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
processLogMessage := func(timestamp int64, fields []logstorage.Field) {
|
||||
t.Fatalf("unexpected call to processLogMessage with timestamp=%d, fields=%s", timestamp, fields)
|
||||
}
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
rows, err := readBulkRequest(r, false, "_time", "_msg", tlp)
|
||||
rows, err := readBulkRequest(r, false, "_time", "_msg", processLogMessage)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-empty error")
|
||||
}
|
||||
@@ -32,38 +37,58 @@ func TestReadBulkRequest_Failure(t *testing.T) {
|
||||
foobar`)
|
||||
}
|
||||
|
||||
func TestReadBulkRequest_Success(t *testing.T) {
|
||||
func TestReadBulkRequestSuccess(t *testing.T) {
|
||||
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
var timestamps []int64
|
||||
var result string
|
||||
processLogMessage := func(timestamp int64, fields []logstorage.Field) {
|
||||
timestamps = append(timestamps, timestamp)
|
||||
|
||||
a := make([]string, len(fields))
|
||||
for i, f := range fields {
|
||||
a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
|
||||
}
|
||||
s := "{" + strings.Join(a, ",") + "}\n"
|
||||
result += s
|
||||
}
|
||||
|
||||
// Read the request without compression
|
||||
r := bytes.NewBufferString(data)
|
||||
rows, err := readBulkRequest(r, false, timeField, msgField, tlp)
|
||||
rows, err := readBulkRequest(r, false, timeField, msgField, processLogMessage)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
if !reflect.DeepEqual(timestamps, timestampsExpected) {
|
||||
t.Fatalf("unexpected timestamps;\ngot\n%d\nwant\n%d", timestamps, timestampsExpected)
|
||||
}
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
|
||||
// Read the request with compression
|
||||
tlp = &insertutils.TestLogMessageProcessor{}
|
||||
timestamps = nil
|
||||
result = ""
|
||||
compressedData := compressData(data)
|
||||
r = bytes.NewBufferString(compressedData)
|
||||
rows, err = readBulkRequest(r, true, timeField, msgField, tlp)
|
||||
rows, err = readBulkRequest(r, true, timeField, msgField, processLogMessage)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatalf("verification failure after compression: %s", err)
|
||||
|
||||
if !reflect.DeepEqual(timestamps, timestampsExpected) {
|
||||
t.Fatalf("unexpected timestamps;\ngot\n%d\nwant\n%d", timestamps, timestampsExpected)
|
||||
}
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,7 +111,8 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
|
||||
resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
|
||||
{"@timestamp":"","_msg":"baz"}
|
||||
{"_msg":"xyz","@timestamp":"","x":"y"}`
|
||||
{"_msg":"xyz","@timestamp":"","x":"y"}
|
||||
`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
@@ -94,10 +120,10 @@ func compressData(s string) string {
|
||||
var bb bytes.Buffer
|
||||
zw := gzip.NewWriter(&bb)
|
||||
if _, err := zw.Write([]byte(s)); err != nil {
|
||||
panic(fmt.Errorf("unexpected error when compressing data: %w", err))
|
||||
panic(fmt.Errorf("unexpected error when compressing data: %s", err))
|
||||
}
|
||||
if err := zw.Close(); err != nil {
|
||||
panic(fmt.Errorf("unexpected error when closing gzip writer: %w", err))
|
||||
panic(fmt.Errorf("unexpected error when closing gzip writer: %s", err))
|
||||
}
|
||||
return bb.String()
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func BenchmarkReadBulkRequest(b *testing.B) {
|
||||
@@ -33,7 +33,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
|
||||
|
||||
timeField := "@timestamp"
|
||||
msgField := "message"
|
||||
blp := &insertutils.BenchmarkLogMessageProcessor{}
|
||||
processLogMessage := func(timestmap int64, fields []logstorage.Field) {}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(data)))
|
||||
@@ -41,9 +41,9 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
|
||||
r := &bytes.Reader{}
|
||||
for pb.Next() {
|
||||
r.Reset(dataBytes)
|
||||
_, err := readBulkRequest(r, isGzip, timeField, msgField, blp)
|
||||
_, err := readBulkRequest(r, isGzip, timeField, msgField, processLogMessage)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
panic(fmt.Errorf("unexpected error: %s", err))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -2,8 +2,6 @@ package insertutils
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
@@ -12,12 +10,11 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
|
||||
)
|
||||
|
||||
// CommonParams contains common HTTP parameters used by log ingestion APIs.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
|
||||
// See https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/#http-parameters
|
||||
type CommonParams struct {
|
||||
TenantID logstorage.TenantID
|
||||
TimeField string
|
||||
@@ -74,126 +71,29 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
|
||||
return cp, nil
|
||||
}
|
||||
|
||||
// GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
|
||||
func GetCommonParamsForSyslog(tenantID logstorage.TenantID) *CommonParams {
|
||||
// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
|
||||
cp := &CommonParams{
|
||||
TenantID: tenantID,
|
||||
TimeField: "timestamp",
|
||||
MsgField: "message",
|
||||
StreamFields: []string{
|
||||
"hostname",
|
||||
"app_name",
|
||||
"proc_id",
|
||||
},
|
||||
}
|
||||
|
||||
return cp
|
||||
}
|
||||
|
||||
// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
    // AddRow must add row to the LogMessageProcessor with the given timestamp and the given fields.
    //
    // The LogMessageProcessor implementation cannot hold references to fields, since the caller can re-use them.
    AddRow(timestamp int64, fields []logstorage.Field)

    // MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
    MustClose()
}

type logMessageProcessor struct {
    mu            sync.Mutex
    wg            sync.WaitGroup
    stopCh        chan struct{}
    lastFlushTime time.Time

    cp *CommonParams
    lr *logstorage.LogRows
}
|
||||
|
||||
func (lmp *logMessageProcessor) initPeriodicFlush() {
|
||||
lmp.lastFlushTime = time.Now()
|
||||
|
||||
lmp.wg.Add(1)
|
||||
go func() {
|
||||
defer lmp.wg.Done()
|
||||
|
||||
d := timeutil.AddJitterToDuration(time.Second)
|
||||
ticker := time.NewTicker(d)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-lmp.stopCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
lmp.mu.Lock()
|
||||
if time.Since(lmp.lastFlushTime) >= d {
|
||||
lmp.flushLocked()
|
||||
}
|
||||
lmp.mu.Unlock()
|
||||
}
|
||||
// GetProcessLogMessageFunc returns a function, which adds parsed log messages to lr.
|
||||
func (cp *CommonParams) GetProcessLogMessageFunc(lr *logstorage.LogRows) func(timestamp int64, fields []logstorage.Field) {
|
||||
return func(timestamp int64, fields []logstorage.Field) {
|
||||
if len(fields) > *MaxFieldsPerLine {
|
||||
rf := logstorage.RowFormatter(fields)
|
||||
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
|
||||
rowsDroppedTotalTooManyFields.Inc()
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// AddRow adds new log message to lmp with the given timestamp and fields.
|
||||
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
lmp.mu.Lock()
|
||||
defer lmp.mu.Unlock()
|
||||
|
||||
if len(fields) > *MaxFieldsPerLine {
|
||||
rf := logstorage.RowFormatter(fields)
|
||||
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
|
||||
rowsDroppedTotalTooManyFields.Inc()
|
||||
return
|
||||
lr.MustAdd(cp.TenantID, timestamp, fields)
|
||||
if cp.Debug {
|
||||
s := lr.GetRowString(0)
|
||||
lr.ResetKeepSettings()
|
||||
logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` query arg: %s", cp.DebugRemoteAddr, cp.DebugRequestURI, s)
|
||||
rowsDroppedTotalDebug.Inc()
|
||||
return
|
||||
}
|
||||
if lr.NeedFlush() {
|
||||
vlstorage.MustAddRows(lr)
|
||||
lr.ResetKeepSettings()
|
||||
}
|
||||
}
|
||||
|
||||
lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields)
|
||||
if lmp.cp.Debug {
|
||||
s := lmp.lr.GetRowString(0)
|
||||
lmp.lr.ResetKeepSettings()
|
||||
logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` query arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
|
||||
rowsDroppedTotalDebug.Inc()
|
||||
return
|
||||
}
|
||||
if lmp.lr.NeedFlush() {
|
||||
lmp.flushLocked()
|
||||
}
|
||||
}
|
||||
|
||||
// flushLocked must be called under locked lmp.mu.
func (lmp *logMessageProcessor) flushLocked() {
    lmp.lastFlushTime = time.Now()
    vlstorage.MustAddRows(lmp.lr)
    lmp.lr.ResetKeepSettings()
}

// MustClose flushes the remaining data to the underlying storage and closes lmp.
func (lmp *logMessageProcessor) MustClose() {
    close(lmp.stopCh)
    lmp.wg.Wait()

    lmp.flushLocked()
    logstorage.PutLogRows(lmp.lr)
    lmp.lr = nil
}

// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
func (cp *CommonParams) NewLogMessageProcessor() LogMessageProcessor {
    lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
    lmp := &logMessageProcessor{
        cp: cp,
        lr: lr,

        stopCh: make(chan struct{}),
    }
    lmp.initPeriodicFlush()

    return lmp
}
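For readers following the LogMessageProcessor side of this diff: the contract documented above is that AddRow must copy whatever it needs from fields (the caller reuses the slice) and MustClose must flush whatever is still buffered. A minimal sketch of an implementation against that contract, using a stand-in Field type so it compiles on its own; it is not code from the repository:

package main

import "fmt"

// Field stands in for logstorage.Field so the sketch is self-contained.
type Field struct{ Name, Value string }

// collectingProcessor is an illustrative processor: it copies every row it
// receives and reports how many rows it buffered when MustClose flushes it.
type collectingProcessor struct {
    rows [][]Field
}

func (p *collectingProcessor) AddRow(timestamp int64, fields []Field) {
    row := make([]Field, len(fields))
    copy(row, fields) // never hold references to the caller's slice
    p.rows = append(p.rows, row)
}

func (p *collectingProcessor) MustClose() {
    fmt.Printf("flushed %d rows\n", len(p.rows))
    p.rows = nil
}

func main() {
    p := &collectingProcessor{}
    p.AddRow(0, []Field{{Name: "_msg", Value: "hello"}})
    p.MustClose() // prints: flushed 1 rows
}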
|
||||
|
||||
var rowsDroppedTotalDebug = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
package insertutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
// TestLogMessageProcessor implements LogMessageProcessor for testing.
|
||||
type TestLogMessageProcessor struct {
|
||||
timestamps []int64
|
||||
rows []string
|
||||
}
|
||||
|
||||
// AddRow adds row with the given timestamp and fields to tlp
|
||||
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
tlp.timestamps = append(tlp.timestamps, timestamp)
|
||||
tlp.rows = append(tlp.rows, string(logstorage.MarshalFieldsToJSON(nil, fields)))
|
||||
}
|
||||
|
||||
// MustClose closes tlp.
|
||||
func (tlp *TestLogMessageProcessor) MustClose() {
|
||||
}
|
||||
|
||||
// Verify verifies the number of rows, timestamps and results after AddRow calls.
|
||||
func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected []int64, resultExpected string) error {
|
||||
result := strings.Join(tlp.rows, "\n")
|
||||
if len(tlp.rows) != rowsExpected {
|
||||
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), rowsExpected, result, resultExpected)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tlp.timestamps, timestampsExpected) {
|
||||
return fmt.Errorf("unexpected timestamps;\ngot\n%d\nwant\n%d", tlp.timestamps, timestampsExpected)
|
||||
}
|
||||
if result != resultExpected {
|
||||
return fmt.Errorf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BenchmarkLogMessageProcessor implements LogMessageProcessor for benchmarks.
|
||||
type BenchmarkLogMessageProcessor struct{}
|
||||
|
||||
// AddRow implements LogMessageProcessor interface.
|
||||
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _ []logstorage.Field) {
|
||||
}
|
||||
|
||||
// MustClose implements LogMessageProcessor interface.
|
||||
func (blp *BenchmarkLogMessageProcessor) MustClose() {
|
||||
}
|
||||
@@ -1,33 +0,0 @@
package insertutils

import (
    "fmt"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// ExtractTimestampRFC3339NanoFromFields extracts RFC3339 timestamp in nanoseconds from the field with the name timeField at fields.
//
// The value for the timeField is set to empty string after returning from the function,
// so it could be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with timeField name or if the timeField value is empty.
func ExtractTimestampRFC3339NanoFromFields(timeField string, fields []logstorage.Field) (int64, error) {
    for i := range fields {
        f := &fields[i]
        if f.Name != timeField {
            continue
        }
        if f.Value == "" || f.Value == "0" {
            return time.Now().UnixNano(), nil
        }
        nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(f.Value)
        if !ok {
            return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp from %s=%q", timeField, f.Value)
        }
        f.Value = ""
        return nsecs, nil
    }
    return time.Now().UnixNano(), nil
}
|
||||
@@ -1,75 +0,0 @@
|
||||
package insertutils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
|
||||
f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
|
||||
t.Helper()
|
||||
|
||||
nsecs, err := ExtractTimestampRFC3339NanoFromFields(timeField, fields)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if nsecs != nsecsExpected {
|
||||
t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, nsecsExpected)
|
||||
}
|
||||
|
||||
for _, f := range fields {
|
||||
if f.Name == timeField {
|
||||
if f.Value != "" {
|
||||
t.Fatalf("unexpected value for field %s; got %q; want %q", timeField, f.Value, "")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "time", Value: "2024-06-18T23:37:20Z"},
|
||||
}, 1718753840000000000)
|
||||
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
|
||||
}, 1718725040000000000)
|
||||
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "time", Value: "2024-06-18T23:37:20.123-05:30"},
|
||||
}, 1718773640123000000)
|
||||
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
|
||||
{Name: "foo", Value: "bar"},
|
||||
}, 1718773640123456789)
|
||||
}
|
||||
|
||||
func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
fields := []logstorage.Field{
|
||||
{Name: "time", Value: s},
|
||||
}
|
||||
nsecs, err := ExtractTimestampRFC3339NanoFromFields("time", fields)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if nsecs != 0 {
|
||||
t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, 0)
|
||||
}
|
||||
}
|
||||
|
||||
f("foobar")
|
||||
|
||||
// no Z at the end
|
||||
f("2024-06-18T23:37:20")
|
||||
|
||||
// incomplete time
|
||||
f("2024-06-18")
|
||||
f("2024-06-18T23:37")
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
@@ -13,6 +12,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
@@ -20,13 +20,13 @@ import (
|
||||
)
|
||||
|
||||
// RequestHandler processes jsonline insert requests
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
startTime := time.Now()
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
|
||||
if r.Method != "POST" {
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
requestsTotal.Inc()
|
||||
@@ -34,40 +34,27 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
cp, err := insertutils.GetCommonParams(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
|
||||
processLogMessage := cp.GetProcessLogMessageFunc(lr)
|
||||
|
||||
reader := r.Body
|
||||
if r.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := common.GetGzipReader(reader)
|
||||
if err != nil {
|
||||
logger.Errorf("cannot read gzipped jsonline request: %s", err)
|
||||
return
|
||||
logger.Errorf("cannot read gzipped _bulk request: %s", err)
|
||||
return true
|
||||
}
|
||||
defer common.PutGzipReader(zr)
|
||||
reader = zr
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
err = processStreamInternal(reader, cp.TimeField, cp.MsgField, lmp)
|
||||
lmp.MustClose()
|
||||
|
||||
if err != nil {
|
||||
logger.Errorf("jsonline: %s", err)
|
||||
} else {
|
||||
// update requestDuration only for successfully parsed requests.
|
||||
// There is no need in updating requestDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
requestDuration.UpdateDuration(startTime)
|
||||
}
|
||||
}
|
||||
|
||||
func processStreamInternal(r io.Reader, timeField, msgField string, lmp insertutils.LogMessageProcessor) error {
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
wcr := writeconcurrencylimiter.GetReader(reader)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
lb := lineBufferPool.Get()
|
||||
@@ -79,21 +66,31 @@ func processStreamInternal(r io.Reader, timeField, msgField string, lmp insertut
|
||||
|
||||
n := 0
|
||||
for {
|
||||
ok, err := readLine(sc, timeField, msgField, lmp)
|
||||
ok, err := readLine(sc, cp.TimeField, cp.MsgField, processLogMessage)
|
||||
wcr.DecConcurrency()
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
return fmt.Errorf("cannot read line #%d in /jsonline request: %s", n, err)
|
||||
logger.Errorf("cannot read line #%d in /jsonline request: %s", n, err)
|
||||
break
|
||||
}
|
||||
if !ok {
|
||||
return nil
|
||||
break
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
|
||||
vlstorage.MustAddRows(lr)
|
||||
logstorage.PutLogRows(lr)
|
||||
|
||||
// update jsonlineRequestDuration only for successfully parsed requests.
|
||||
// There is no need in updating jsonlineRequestDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
jsonlineRequestDuration.UpdateDuration(startTime)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func readLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
func readLine(sc *bufio.Scanner, timeField, msgField string, processLogMessage func(timestamp int64, fields []logstorage.Field)) (bool, error) {
|
||||
var line []byte
|
||||
for len(line) == 0 {
|
||||
if !sc.Scan() {
|
||||
@@ -108,28 +105,57 @@ func readLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils.Log
|
||||
line = sc.Bytes()
|
||||
}
|
||||
|
||||
p := logstorage.GetJSONParser()
|
||||
p := logjson.GetParser()
|
||||
if err := p.ParseLogMessage(line); err != nil {
|
||||
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
|
||||
}
|
||||
ts, err := insertutils.ExtractTimestampRFC3339NanoFromFields(timeField, p.Fields)
|
||||
ts, err := extractTimestampFromFields(timeField, p.Fields)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("cannot get timestamp: %w", err)
|
||||
return false, fmt.Errorf("cannot parse timestamp: %w", err)
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgField, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
logstorage.PutJSONParser(p)
|
||||
if ts == 0 {
|
||||
ts = time.Now().UnixNano()
|
||||
}
|
||||
p.RenameField(msgField, "_msg")
|
||||
processLogMessage(ts, p.Fields)
|
||||
logjson.PutParser(p)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func extractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
|
||||
for i := range fields {
|
||||
f := &fields[i]
|
||||
if f.Name != timeField {
|
||||
continue
|
||||
}
|
||||
timestamp, err := parseISO8601Timestamp(f.Value)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
f.Value = ""
|
||||
return timestamp, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func parseISO8601Timestamp(s string) (int64, error) {
|
||||
if s == "0" || s == "" {
|
||||
// Special case for returning the current timestamp.
|
||||
// It must be automatically converted to the current timestamp by the caller.
|
||||
return 0, nil
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339, s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse timestamp %q: %w", s, err)
|
||||
}
|
||||
return t.UnixNano(), nil
|
||||
}
|
||||
|
||||
var lineBufferPool bytesutil.ByteBufferPool
|
||||
|
||||
var (
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
|
||||
|
||||
requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)
|
||||
|
||||
requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
|
||||
requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
|
||||
jsonlineRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
|
||||
)
|
||||
|
||||
@@ -1,27 +1,59 @@
|
||||
package jsonline
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
)
|
||||
|
||||
func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
func TestReadBulkRequestSuccess(t *testing.T) {
|
||||
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, timeField, msgField, tlp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
var timestamps []int64
|
||||
var result string
|
||||
processLogMessage := func(timestamp int64, fields []logstorage.Field) {
|
||||
timestamps = append(timestamps, timestamp)
|
||||
|
||||
a := make([]string, len(fields))
|
||||
for i, f := range fields {
|
||||
a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
|
||||
}
|
||||
s := "{" + strings.Join(a, ",") + "}\n"
|
||||
result += s
|
||||
}
|
||||
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
// Read the request without compression
|
||||
r := bytes.NewBufferString(data)
|
||||
sc := bufio.NewScanner(r)
|
||||
rows := 0
|
||||
for {
|
||||
ok, err := readLine(sc, timeField, msgField, processLogMessage)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
rows++
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(timestamps, timestampsExpected) {
|
||||
t.Fatalf("unexpected timestamps;\ngot\n%d\nwant\n%d", timestamps, timestampsExpected)
|
||||
}
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify non-empty data
|
||||
data := `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
|
||||
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
|
||||
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
|
||||
@@ -32,24 +64,7 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
|
||||
resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
|
||||
{"@timestamp":"","_msg":"baz"}
|
||||
{"_msg":"xyz","@timestamp":"","x":"y"}`
|
||||
{"_msg":"xyz","@timestamp":"","x":"y"}
|
||||
`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
f := func(data string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, "time", "", tlp); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// invalid json
|
||||
f("foobar")
|
||||
|
||||
// invalid timestamp field
|
||||
f(`{"time":"foobar"}`)
|
||||
}
|
||||
|
||||
@@ -11,8 +11,7 @@ import (
|
||||
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
switch path {
|
||||
case "/api/v1/push":
|
||||
handleInsert(r, w)
|
||||
return true
|
||||
return handleInsert(r, w)
|
||||
case "/ready":
|
||||
// See https://grafana.com/docs/loki/latest/api/#identify-ready-loki-instance
|
||||
w.WriteHeader(http.StatusOK)
|
||||
@@ -24,14 +23,14 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
}
|
||||
|
||||
// See https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
|
||||
func handleInsert(r *http.Request, w http.ResponseWriter) {
|
||||
func handleInsert(r *http.Request, w http.ResponseWriter) bool {
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
switch contentType {
|
||||
case "application/json":
|
||||
handleJSON(r, w)
|
||||
return handleJSON(r, w)
|
||||
default:
|
||||
// Protobuf request body should be handled by default according to https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
|
||||
handleProtobuf(r, w)
|
||||
return handleProtobuf(r, w)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,7 +45,7 @@ func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
|
||||
if cp.TenantID.AccountID == 0 && cp.TenantID.ProjectID == 0 {
|
||||
org := r.Header.Get("X-Scope-OrgID")
|
||||
if org != "" {
|
||||
tenantID, err := logstorage.ParseTenantID(org)
|
||||
tenantID, err := logstorage.GetTenantIDFromString(org)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
@@ -21,15 +20,15 @@ import (
|
||||
|
||||
var parserPool fastjson.ParserPool
|
||||
|
||||
func handleJSON(r *http.Request, w http.ResponseWriter) {
|
||||
func handleJSON(r *http.Request, w http.ResponseWriter) bool {
|
||||
startTime := time.Now()
|
||||
requestsJSONTotal.Inc()
|
||||
lokiRequestsJSONTotal.Inc()
|
||||
reader := r.Body
|
||||
if r.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := common.GetGzipReader(reader)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
defer common.PutGzipReader(zr)
|
||||
reader = zr
|
||||
@@ -40,41 +39,45 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
|
||||
writeconcurrencylimiter.PutReader(wcr)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot read request body: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
cp, err := getCommonParams(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := parseJSONRequest(data, lmp)
|
||||
lmp.MustClose()
|
||||
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
|
||||
processLogMessage := cp.GetProcessLogMessageFunc(lr)
|
||||
n, err := parseJSONRequest(data, processLogMessage)
|
||||
vlstorage.MustAddRows(lr)
|
||||
logstorage.PutLogRows(lr)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse Loki json request: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
rowsIngestedJSONTotal.Add(n)
|
||||
|
||||
// update requestJSONDuration only for successfully parsed requests
|
||||
// There is no need in updating requestJSONDuration for request errors,
|
||||
// update lokiRequestJSONDuration only for successfully parsed requests
|
||||
// There is no need in updating lokiRequestJSONDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
requestJSONDuration.UpdateDuration(startTime)
|
||||
lokiRequestJSONDuration.UpdateDuration(startTime)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
rowsIngestedJSONTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="json"}`)
|
||||
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
lokiRequestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
rowsIngestedJSONTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="json"}`)
|
||||
lokiRequestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
)
|
||||
|
||||
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func parseJSONRequest(data []byte, processLogMessage func(timestamp int64, fields []logstorage.Field)) (int, error) {
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(data)
|
||||
@@ -167,7 +170,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(msg),
|
||||
})
|
||||
lmp.AddRow(ts, fields)
|
||||
processLogMessage(ts, fields)
|
||||
}
|
||||
rowsIngested += len(lines)
|
||||
}
|
||||
|
||||
@@ -1,17 +1,19 @@
|
||||
package loki
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func TestParseJSONRequest_Failure(t *testing.T) {
|
||||
func TestParseJSONRequestFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
n, err := parseJSONRequest([]byte(s), func(timestamp int64, fields []logstorage.Field) {
|
||||
t.Fatalf("unexpected call to parseJSONRequest callback!")
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
@@ -54,30 +56,39 @@ func TestParseJSONRequest_Failure(t *testing.T) {
|
||||
f(`{"streams":[{"values":[["123",1234]]}]}`)
|
||||
}
|
||||
|
||||
func TestParseJSONRequest_Success(t *testing.T) {
|
||||
f := func(s string, timestampsExpected []int64, resultExpected string) {
|
||||
func TestParseJSONRequestSuccess(t *testing.T) {
|
||||
f := func(s string, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
var lines []string
|
||||
n, err := parseJSONRequest([]byte(s), func(timestamp int64, fields []logstorage.Field) {
|
||||
var a []string
|
||||
for _, f := range fields {
|
||||
a = append(a, f.String())
|
||||
}
|
||||
line := fmt.Sprintf("_time:%d %s", timestamp, strings.Join(a, " "))
|
||||
lines = append(lines, line)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
if n != len(lines) {
|
||||
t.Fatalf("unexpected number of lines parsed; got %d; want %d", n, len(lines))
|
||||
}
|
||||
result := strings.Join(lines, "\n")
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty streams
|
||||
f(`{"streams":[]}`, nil, ``)
|
||||
f(`{"streams":[{"values":[]}]}`, nil, ``)
|
||||
f(`{"streams":[{"stream":{},"values":[]}]}`, nil, ``)
|
||||
f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, nil, ``)
|
||||
f(`{"streams":[]}`, ``)
|
||||
f(`{"streams":[{"values":[]}]}`, ``)
|
||||
f(`{"streams":[{"stream":{},"values":[]}]}`, ``)
|
||||
f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, ``)
|
||||
|
||||
// Empty stream labels
|
||||
f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
|
||||
f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
|
||||
f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, `_time:1577836800000000001 "_msg":"foo bar"`)
|
||||
f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, `_time:1577836800000000001 "_msg":"foo bar"`)
|
||||
|
||||
// Non-empty stream labels
|
||||
f(`{"streams":[{"stream":{
|
||||
@@ -87,9 +98,9 @@ func TestParseJSONRequest_Success(t *testing.T) {
|
||||
["1577836800000000001", "foo bar"],
|
||||
["1477836900005000002", "abc"],
|
||||
["147.78369e9", "foobar"]
|
||||
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
|
||||
{"label1":"value1","label2":"value2","_msg":"abc"}
|
||||
{"label1":"value1","label2":"value2","_msg":"foobar"}`)
|
||||
]}]}`, `_time:1577836800000000001 "label1":"value1" "label2":"value2" "_msg":"foo bar"
|
||||
_time:1477836900005000002 "label1":"value1" "label2":"value2" "_msg":"abc"
|
||||
_time:147783690000 "label1":"value1" "label2":"value2" "_msg":"foobar"`)
|
||||
|
||||
// Multiple streams
|
||||
f(`{
|
||||
@@ -113,7 +124,7 @@ func TestParseJSONRequest_Success(t *testing.T) {
|
||||
]
|
||||
}
|
||||
]
|
||||
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
|
||||
{"foo":"bar","a":"b","_msg":"abc"}
|
||||
{"x":"y","_msg":"yx"}`)
|
||||
}`, `_time:1577836800000000001 "foo":"bar" "a":"b" "_msg":"foo bar"
|
||||
_time:1577836900005000002 "foo":"bar" "a":"b" "_msg":"abc"
|
||||
_time:1877836900005000002 "x":"y" "_msg":"yx"`)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func BenchmarkParseJSONRequest(b *testing.B) {
|
||||
@@ -22,15 +22,14 @@ func BenchmarkParseJSONRequest(b *testing.B) {
|
||||
}
|
||||
|
||||
func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
|
||||
blp := &insertutils.BenchmarkLogMessageProcessor{}
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(streams * rows))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
data := getJSONBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseJSONRequest(data, blp)
|
||||
_, err := parseJSONRequest(data, func(timestamp int64, fields []logstorage.Field) {})
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
panic(fmt.Errorf("unexpected error: %s", err))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
@@ -24,49 +23,53 @@ var (
|
||||
pushReqsPool sync.Pool
|
||||
)
|
||||
|
||||
func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
func handleProtobuf(r *http.Request, w http.ResponseWriter) bool {
|
||||
startTime := time.Now()
|
||||
requestsProtobufTotal.Inc()
|
||||
lokiRequestsProtobufTotal.Inc()
|
||||
wcr := writeconcurrencylimiter.GetReader(r.Body)
|
||||
data, err := io.ReadAll(wcr)
|
||||
writeconcurrencylimiter.PutReader(wcr)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot read request body: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
cp, err := getCommonParams(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := parseProtobufRequest(data, lmp)
|
||||
lmp.MustClose()
|
||||
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
|
||||
processLogMessage := cp.GetProcessLogMessageFunc(lr)
|
||||
n, err := parseProtobufRequest(data, processLogMessage)
|
||||
vlstorage.MustAddRows(lr)
|
||||
logstorage.PutLogRows(lr)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
rowsIngestedProtobufTotal.Add(n)
|
||||
|
||||
// update requestProtobufDuration only for successfully parsed requests
|
||||
// There is no need in updating requestProtobufDuration for request errors,
|
||||
// update lokiRequestProtobufDuration only for successfully parsed requests
|
||||
// There is no need in updating lokiRequestProtobufDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
requestProtobufDuration.UpdateDuration(startTime)
|
||||
lokiRequestProtobufDuration.UpdateDuration(startTime)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
lokiRequestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)
|
||||
lokiRequestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
)
|
||||
|
||||
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func parseProtobufRequest(data []byte, processLogMessage func(timestamp int64, fields []logstorage.Field)) (int, error) {
|
||||
bb := bytesBufPool.Get()
|
||||
defer bytesBufPool.Put(bb)
|
||||
|
||||
@@ -81,7 +84,7 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
|
||||
err = req.Unmarshal(bb.B)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse request body: %w", err)
|
||||
return 0, fmt.Errorf("cannot parse request body: %s", err)
|
||||
}
|
||||
|
||||
var commonFields []logstorage.Field
|
||||
@@ -94,7 +97,7 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
// Labels are same for all entries in the stream.
|
||||
commonFields, err = parsePromLabels(commonFields[:0], stream.Labels)
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
|
||||
return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %s", stream.Labels, err)
|
||||
}
|
||||
fields := commonFields
|
||||
|
||||
@@ -109,7 +112,7 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
processLogMessage(ts, fields)
|
||||
}
|
||||
rowsIngested += len(stream.Entries)
|
||||
}
|
||||
|
||||
@@ -6,83 +6,83 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
type testLogMessageProcessor struct {
|
||||
pr PushRequest
|
||||
}
|
||||
|
||||
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
msg := ""
|
||||
for _, f := range fields {
|
||||
if f.Name == "_msg" {
|
||||
msg = f.Value
|
||||
}
|
||||
}
|
||||
var a []string
|
||||
for _, f := range fields {
|
||||
if f.Name == "_msg" {
|
||||
continue
|
||||
}
|
||||
item := fmt.Sprintf("%s=%q", f.Name, f.Value)
|
||||
a = append(a, item)
|
||||
}
|
||||
labels := "{" + strings.Join(a, ", ") + "}"
|
||||
tlp.pr.Streams = append(tlp.pr.Streams, Stream{
|
||||
Labels: labels,
|
||||
Entries: []Entry{
|
||||
{
|
||||
Timestamp: time.Unix(0, timestamp),
|
||||
Line: msg,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (tlp *testLogMessageProcessor) MustClose() {
|
||||
}
|
||||
|
||||
func TestParseProtobufRequest_Success(t *testing.T) {
|
||||
f := func(s string, timestampsExpected []int64, resultExpected string) {
|
||||
func TestParseProtobufRequestSuccess(t *testing.T) {
|
||||
f := func(s string, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &testLogMessageProcessor{}
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
var pr PushRequest
|
||||
n, err := parseJSONRequest([]byte(s), func(timestamp int64, fields []logstorage.Field) {
|
||||
msg := ""
|
||||
for _, f := range fields {
|
||||
if f.Name == "_msg" {
|
||||
msg = f.Value
|
||||
}
|
||||
}
|
||||
var a []string
|
||||
for _, f := range fields {
|
||||
if f.Name == "_msg" {
|
||||
continue
|
||||
}
|
||||
item := fmt.Sprintf("%s=%q", f.Name, f.Value)
|
||||
a = append(a, item)
|
||||
}
|
||||
labels := "{" + strings.Join(a, ", ") + "}"
|
||||
pr.Streams = append(pr.Streams, Stream{
|
||||
Labels: labels,
|
||||
Entries: []Entry{
|
||||
{
|
||||
Timestamp: time.Unix(0, timestamp),
|
||||
Line: msg,
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if n != len(tlp.pr.Streams) {
|
||||
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), n)
|
||||
if n != len(pr.Streams) {
|
||||
t.Fatalf("unexpected number of streams; got %d; want %d", len(pr.Streams), n)
|
||||
}
|
||||
|
||||
data, err := tlp.pr.Marshal()
|
||||
data, err := pr.Marshal()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when marshaling PushRequest: %s", err)
|
||||
}
|
||||
encodedData := snappy.Encode(nil, data)
|
||||
|
||||
tlp2 := &insertutils.TestLogMessageProcessor{}
|
||||
n, err = parseProtobufRequest(encodedData, tlp2)
|
||||
var lines []string
|
||||
n, err = parseProtobufRequest(encodedData, func(timestamp int64, fields []logstorage.Field) {
|
||||
var a []string
|
||||
for _, f := range fields {
|
||||
a = append(a, f.String())
|
||||
}
|
||||
line := fmt.Sprintf("_time:%d %s", timestamp, strings.Join(a, " "))
|
||||
lines = append(lines, line)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp2.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
if n != len(lines) {
|
||||
t.Fatalf("unexpected number of lines parsed; got %d; want %d", n, len(lines))
|
||||
}
|
||||
result := strings.Join(lines, "\n")
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty streams
|
||||
f(`{"streams":[]}`, nil, ``)
|
||||
f(`{"streams":[{"values":[]}]}`, nil, ``)
|
||||
f(`{"streams":[{"stream":{},"values":[]}]}`, nil, ``)
|
||||
f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, nil, ``)
|
||||
f(`{"streams":[]}`, ``)
|
||||
f(`{"streams":[{"values":[]}]}`, ``)
|
||||
f(`{"streams":[{"stream":{},"values":[]}]}`, ``)
|
||||
f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, ``)
|
||||
|
||||
// Empty stream labels
|
||||
f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
|
||||
f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
|
||||
f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, `_time:1577836800000000001 "_msg":"foo bar"`)
|
||||
f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, `_time:1577836800000000001 "_msg":"foo bar"`)
|
||||
|
||||
// Non-empty stream labels
|
||||
f(`{"streams":[{"stream":{
|
||||
@@ -92,9 +92,9 @@ func TestParseProtobufRequest_Success(t *testing.T) {
|
||||
["1577836800000000001", "foo bar"],
|
||||
["1477836900005000002", "abc"],
|
||||
["147.78369e9", "foobar"]
|
||||
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
|
||||
{"label1":"value1","label2":"value2","_msg":"abc"}
|
||||
{"label1":"value1","label2":"value2","_msg":"foobar"}`)
|
||||
]}]}`, `_time:1577836800000000001 "label1":"value1" "label2":"value2" "_msg":"foo bar"
|
||||
_time:1477836900005000002 "label1":"value1" "label2":"value2" "_msg":"abc"
|
||||
_time:147783690000 "label1":"value1" "label2":"value2" "_msg":"foobar"`)
|
||||
|
||||
// Multiple streams
|
||||
f(`{
|
||||
@@ -118,12 +118,12 @@ func TestParseProtobufRequest_Success(t *testing.T) {
|
||||
]
|
||||
}
|
||||
]
|
||||
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
|
||||
{"foo":"bar","a":"b","_msg":"abc"}
|
||||
{"x":"y","_msg":"yx"}`)
|
||||
}`, `_time:1577836800000000001 "foo":"bar" "a":"b" "_msg":"foo bar"
|
||||
_time:1577836900005000002 "foo":"bar" "a":"b" "_msg":"abc"
|
||||
_time:1877836900005000002 "x":"y" "_msg":"yx"`)
|
||||
}
|
||||
|
||||
func TestParsePromLabels_Success(t *testing.T) {
|
||||
func TestParsePromLabelsSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
fields, err := parsePromLabels(nil, s)
|
||||
@@ -147,7 +147,7 @@ func TestParsePromLabels_Success(t *testing.T) {
|
||||
f(`{foo="ba\"r\\z\n", a="", b="\"\\"}`)
|
||||
}
|
||||
|
||||
func TestParsePromLabels_Failure(t *testing.T) {
|
||||
func TestParsePromLabelsFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
fields, err := parsePromLabels(nil, s)
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
|
||||
"github.com/golang/snappy"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func BenchmarkParseProtobufRequest(b *testing.B) {
|
||||
@@ -24,15 +24,14 @@ func BenchmarkParseProtobufRequest(b *testing.B) {
|
||||
}
|
||||
|
||||
func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
|
||||
blp := &insertutils.BenchmarkLogMessageProcessor{}
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(streams * rows))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
body := getProtobufBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseProtobufRequest(body, blp)
|
||||
_, err := parseProtobufRequest(body, func(timestamp int64, fields []logstorage.Field) {})
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
panic(fmt.Errorf("unexpected error: %s", err))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -7,17 +7,14 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/syslog"
|
||||
)
|
||||
|
||||
// Init initializes vlinsert
|
||||
func Init() {
|
||||
syslog.MustInit()
|
||||
}
|
||||
|
||||
// Stop stops vlinsert
|
||||
func Stop() {
|
||||
syslog.MustStop()
|
||||
}
|
||||
|
||||
// RequestHandler handles insert requests for VictoriaLogs
|
||||
@@ -31,8 +28,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path = strings.ReplaceAll(path, "//", "/")
|
||||
|
||||
if path == "/jsonline" {
|
||||
jsonline.RequestHandler(w, r)
|
||||
return true
|
||||
return jsonline.RequestHandler(w, r)
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(path, "/elasticsearch/"):
|
||||
|
||||
@@ -1,531 +0,0 @@
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
syslogTimezone = flag.String("syslog.timezone", "Local", "Timezone to use when parsing timestamps in RFC3164 syslog messages. Timezone must be a valid IANA Time Zone. "+
|
||||
"For example: America/New_York, Europe/Berlin, Etc/GMT+3 . See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
|
||||
syslogTenantIDTCP = flagutil.NewArrayString("syslog.tenantID.tcp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
syslogTenantIDUDP = flagutil.NewArrayString("syslog.tenantID.udp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
|
||||
listenAddrTCP = flagutil.NewArrayString("syslog.listenAddr.tcp", "Comma-separated list of TCP addresses to listen to for Syslog messages. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
listenAddrUDP = flagutil.NewArrayString("syslog.listenAddr.udp", "Comma-separated list of UDP address to listen to for Syslog messages. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
|
||||
tlsEnable = flagutil.NewArrayBool("syslog.tls", "Whether to enable TLS for receiving syslog messages at the corresponding -syslog.listenAddr.tcp. "+
|
||||
"The corresponding -syslog.tlsCertFile and -syslog.tlsKeyFile must be set if -syslog.tls is set. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsCertFile = flagutil.NewArrayString("syslog.tlsCertFile", "Path to file with TLS certificate for the corresponding -syslog.listenAddr.tcp if the corresponding -syslog.tls is set. "+
|
||||
"Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsKeyFile = flagutil.NewArrayString("syslog.tlsKeyFile", "Path to file with TLS key for the corresponding -syslog.listenAddr.tcp if the corresponding -syslog.tls is set. "+
|
||||
"The provided key file is automatically re-read every second, so it can be dynamically updated. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsCipherSuites = flagutil.NewArrayString("syslog.tlsCipherSuites", "Optional list of TLS cipher suites for -syslog.listenAddr.tcp if -syslog.tls is set. "+
|
||||
"See the list of supported cipher suites at https://pkg.go.dev/crypto/tls#pkg-constants . "+
|
||||
"See also https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsMinVersion = flag.String("syslog.tlsMinVersion", "TLS13", "The minimum TLS version to use for -syslog.listenAddr.tcp if -syslog.tls is set. "+
|
||||
"Supported values: TLS10, TLS11, TLS12, TLS13. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
|
||||
compressMethodTCP = flagutil.NewArrayString("syslog.compressMethod.tcp", "Compression method for syslog messages received at the corresponding -syslog.listenAddr.tcp. "+
|
||||
"Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression")
|
||||
compressMethodUDP = flagutil.NewArrayString("syslog.compressMethod.udp", "Compression method for syslog messages received at the corresponding -syslog.listenAddr.udp. "+
|
||||
"Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression")
|
||||
|
||||
useLocalTimestampTCP = flagutil.NewArrayBool("syslog.useLocalTimestamp.tcp", "Whether to use local timestamp instead of the original timestamp for the ingested syslog messages "+
|
||||
"at the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#log-timestamps")
|
||||
useLocalTimestampUDP = flagutil.NewArrayBool("syslog.useLocalTimestamp.udp", "Whether to use local timestamp instead of the original timestamp for the ingested syslog messages "+
|
||||
"at the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#log-timestamps")
|
||||
)
|
||||
|
||||
// MustInit initializes syslog parser at the given -syslog.listenAddr.tcp and -syslog.listenAddr.udp ports
|
||||
//
|
||||
// This function must be called after flag.Parse().
|
||||
//
|
||||
// MustStop() must be called in order to free up resources occupied by the initialized syslog parser.
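//
// A minimal caller sketch (hypothetical; in this repository MustInit and MustStop are invoked from vlinsert.Init() and vlinsert.Stop()):
//
//	flag.Parse()
//	syslog.MustInit()
//	defer syslog.MustStop()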
|
||||
func MustInit() {
|
||||
if workersStopCh != nil {
|
||||
logger.Panicf("BUG: MustInit() called twice without MustStop() call")
|
||||
}
|
||||
workersStopCh = make(chan struct{})
|
||||
|
||||
for argIdx, addr := range *listenAddrTCP {
|
||||
workersWG.Add(1)
|
||||
go func(addr string, argIdx int) {
|
||||
runTCPListener(addr, argIdx)
|
||||
workersWG.Done()
|
||||
}(addr, argIdx)
|
||||
}
|
||||
|
||||
for argIdx, addr := range *listenAddrUDP {
|
||||
workersWG.Add(1)
|
||||
go func(addr string, argIdx int) {
|
||||
runUDPListener(addr, argIdx)
|
||||
workersWG.Done()
|
||||
}(addr, argIdx)
|
||||
}
|
||||
|
||||
currentYear := time.Now().Year()
|
||||
globalCurrentYear.Store(int64(currentYear))
|
||||
workersWG.Add(1)
|
||||
go func() {
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
for {
|
||||
select {
|
||||
case <-workersStopCh:
|
||||
ticker.Stop()
|
||||
workersWG.Done()
|
||||
return
|
||||
case <-ticker.C:
|
||||
currentYear := time.Now().Year()
|
||||
globalCurrentYear.Store(int64(currentYear))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if *syslogTimezone != "" {
|
||||
tz, err := time.LoadLocation(*syslogTimezone)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -syslog.timezone=%q: %s", *syslogTimezone, err)
|
||||
}
|
||||
globalTimezone = tz
|
||||
} else {
|
||||
globalTimezone = time.Local
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
globalCurrentYear atomic.Int64
|
||||
globalTimezone *time.Location
|
||||
)
|
||||
|
||||
var (
|
||||
workersWG sync.WaitGroup
|
||||
workersStopCh chan struct{}
|
||||
)
|
||||
|
||||
// MustStop stops syslog parser initialized via MustInit()
|
||||
func MustStop() {
|
||||
close(workersStopCh)
|
||||
workersWG.Wait()
|
||||
workersStopCh = nil
|
||||
}
|
||||
|
||||
func runUDPListener(addr string, argIdx int) {
|
||||
ln, err := net.ListenPacket(netutil.GetUDPNetwork(), addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start UDP syslog server at %q: %s", addr, err)
|
||||
}
|
||||
|
||||
tenantIDStr := syslogTenantIDUDP.GetOptionalArg(argIdx)
|
||||
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -syslog.tenantID.udp=%q for -syslog.listenAddr.udp=%q: %s", tenantIDStr, addr, err)
|
||||
}
|
||||
|
||||
compressMethod := compressMethodUDP.GetOptionalArg(argIdx)
|
||||
checkCompressMethod(compressMethod, addr, "udp")
|
||||
|
||||
useLocalTimestamp := useLocalTimestampUDP.GetOptionalArg(argIdx)
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
serveUDP(ln, tenantID, compressMethod, useLocalTimestamp)
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
logger.Infof("started accepting syslog messages at -syslog.listenAddr.udp=%q", addr)
|
||||
<-workersStopCh
|
||||
if err := ln.Close(); err != nil {
|
||||
logger.Fatalf("syslog: cannot close UDP listener at %s: %s", addr, err)
|
||||
}
|
||||
<-doneCh
|
||||
logger.Infof("finished accepting syslog messages at -syslog.listenAddr.udp=%q", addr)
|
||||
}
|
||||
|
||||
func runTCPListener(addr string, argIdx int) {
|
||||
var tlsConfig *tls.Config
|
||||
if tlsEnable.GetOptionalArg(argIdx) {
|
||||
certFile := tlsCertFile.GetOptionalArg(argIdx)
|
||||
keyFile := tlsKeyFile.GetOptionalArg(argIdx)
|
||||
tc, err := netutil.GetServerTLSConfig(certFile, keyFile, *tlsMinVersion, *tlsCipherSuites)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot load TLS cert from -syslog.tlsCertFile=%q, -syslog.tlsKeyFile=%q, -syslog.tlsMinVersion=%q, -syslog.tlsCipherSuites=%q: %s",
|
||||
certFile, keyFile, *tlsMinVersion, *tlsCipherSuites, err)
|
||||
}
|
||||
tlsConfig = tc
|
||||
}
|
||||
ln, err := netutil.NewTCPListener("syslog", addr, false, tlsConfig)
|
||||
if err != nil {
|
||||
logger.Fatalf("syslog: cannot start TCP listener at %s: %s", addr, err)
|
||||
}
|
||||
|
||||
tenantIDStr := syslogTenantIDTCP.GetOptionalArg(argIdx)
|
||||
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -syslog.tenantID.tcp=%q for -syslog.listenAddr.tcp=%q: %s", tenantIDStr, addr, err)
|
||||
}
|
||||
|
||||
compressMethod := compressMethodTCP.GetOptionalArg(argIdx)
|
||||
checkCompressMethod(compressMethod, addr, "tcp")
|
||||
|
||||
useLocalTimestamp := useLocalTimestampTCP.GetOptionalArg(argIdx)
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
serveTCP(ln, tenantID, compressMethod, useLocalTimestamp)
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
logger.Infof("started accepting syslog messages at -syslog.listenAddr.tcp=%q", addr)
|
||||
<-workersStopCh
|
||||
if err := ln.Close(); err != nil {
|
||||
logger.Fatalf("syslog: cannot close TCP listener at %s: %s", addr, err)
|
||||
}
|
||||
<-doneCh
|
||||
logger.Infof("finished accepting syslog messages at -syslog.listenAddr.tcp=%q", addr)
|
||||
}
|
||||
|
||||
func checkCompressMethod(compressMethod, addr, protocol string) {
|
||||
switch compressMethod {
|
||||
case "", "none", "gzip", "deflate":
|
||||
return
|
||||
default:
|
||||
logger.Fatalf("unsupported -syslog.compressMethod.%s=%q for -syslog.listenAddr.%s=%q; supported values: 'none', 'gzip', 'deflate'", protocol, compressMethod, protocol, addr)
|
||||
}
|
||||
}
|
||||
|
||||
func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool) {
|
||||
gomaxprocs := cgroup.AvailableCPUs()
|
||||
var wg sync.WaitGroup
|
||||
localAddr := ln.LocalAddr()
|
||||
for i := 0; i < gomaxprocs; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
cp := insertutils.GetCommonParamsForSyslog(tenantID)
|
||||
var bb bytesutil.ByteBuffer
|
||||
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
|
||||
for {
|
||||
bb.Reset()
|
||||
bb.B = bb.B[:cap(bb.B)]
|
||||
n, remoteAddr, err := ln.ReadFrom(bb.B)
|
||||
if err != nil {
|
||||
udpErrorsTotal.Inc()
|
||||
var ne net.Error
|
||||
if errors.As(err, &ne) {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("syslog: temporary error when listening for UDP at %q: %s", localAddr, err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
}
|
||||
logger.Errorf("syslog: cannot read UDP data from %s at %s: %s", remoteAddr, localAddr, err)
|
||||
continue
|
||||
}
|
||||
bb.B = bb.B[:n]
|
||||
udpRequestsTotal.Inc()
|
||||
if err := processStream(bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
logger.Errorf("syslog: cannot process UDP data from %s at %s: %s", remoteAddr, localAddr, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool) {
|
||||
var cm ingestserver.ConnsMap
|
||||
cm.Init("syslog")
|
||||
|
||||
var wg sync.WaitGroup
|
||||
addr := ln.Addr()
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
var ne net.Error
|
||||
if errors.As(err, &ne) {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("syslog: temporary error when listening for TCP addr %q: %s", addr, err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
logger.Fatalf("syslog: unrecoverable error when accepting TCP connections at %q: %s", addr, err)
|
||||
}
|
||||
logger.Fatalf("syslog: unexpected error when accepting TCP connections at %q: %s", addr, err)
|
||||
}
|
||||
if !cm.Add(c) {
|
||||
_ = c.Close()
|
||||
break
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
cp := insertutils.GetCommonParamsForSyslog(tenantID)
|
||||
if err := processStream(c, compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
|
||||
}
|
||||
|
||||
cm.Delete(c)
|
||||
_ = c.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
cm.CloseAll(0)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
|
||||
func processStream(r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
err := processStreamInternal(r, compressMethod, useLocalTimestamp, lmp)
|
||||
lmp.MustClose()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func processStreamInternal(r io.Reader, compressMethod string, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
|
||||
switch compressMethod {
|
||||
case "", "none":
|
||||
case "gzip":
|
||||
zr, err := common.GetGzipReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read gzipped data: %w", err)
|
||||
}
|
||||
r = zr
|
||||
case "deflate":
|
||||
zr, err := common.GetZlibReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read deflated data: %w", err)
|
||||
}
|
||||
r = zr
|
||||
default:
|
||||
logger.Panicf("BUG: unsupported compressMethod=%q; supported values: none, gzip, deflate", compressMethod)
|
||||
}
|
||||
|
||||
err := processUncompressedStream(r, useLocalTimestamp, lmp)
|
||||
|
||||
switch compressMethod {
|
||||
case "gzip":
|
||||
zr := r.(*gzip.Reader)
|
||||
common.PutGzipReader(zr)
|
||||
case "deflate":
|
||||
zr := r.(io.ReadCloser)
|
||||
common.PutZlibReader(zr)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
slr := getSyslogLineReader(wcr)
|
||||
defer putSyslogLineReader(slr)
|
||||
|
||||
n := 0
|
||||
for {
|
||||
ok := slr.nextLine()
|
||||
wcr.DecConcurrency()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
currentYear := int(globalCurrentYear.Load())
|
||||
err := processLine(slr.line, currentYear, globalTimezone, useLocalTimestamp, lmp)
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
return fmt.Errorf("cannot read line #%d: %s", n, err)
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
return slr.Error()
|
||||
}
|
||||
|
||||
type syslogLineReader struct {
|
||||
line []byte
|
||||
|
||||
br *bufio.Reader
|
||||
err error
|
||||
}
|
||||
|
||||
func (slr *syslogLineReader) reset(r io.Reader) {
|
||||
slr.line = slr.line[:0]
|
||||
slr.br.Reset(r)
|
||||
slr.err = nil
|
||||
}
|
||||
|
||||
// Error returns the last error occurred in slr.
|
||||
func (slr *syslogLineReader) Error() error {
|
||||
if slr.err == nil || slr.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return slr.err
|
||||
}
|
||||
|
||||
// nextLine reads the next syslog line from slr and stores it at slr.line.
|
||||
//
|
||||
// false is returned if the next line cannot be read. Error() must be called in this case
|
||||
// in order to verify whether there is an error or the slr stream has simply ended.
|
||||
func (slr *syslogLineReader) nextLine() bool {
|
||||
if slr.err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
again:
|
||||
prefix, err := slr.br.ReadSlice(' ')
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
slr.err = fmt.Errorf("cannot read message frame prefix: %w", err)
|
||||
return false
|
||||
}
|
||||
if len(prefix) == 0 {
|
||||
slr.err = err
|
||||
return false
|
||||
}
|
||||
}
|
||||
// skip empty lines
|
||||
for len(prefix) > 0 && prefix[0] == '\n' {
|
||||
prefix = prefix[1:]
|
||||
}
|
||||
if len(prefix) == 0 {
|
||||
// An empty prefix or a prefix with empty lines - try reading yet another prefix.
|
||||
goto again
|
||||
}
|
||||
|
||||
if prefix[0] >= '0' && prefix[0] <= '9' {
|
||||
// This is octet-counting method. See https://www.ietf.org/archive/id/draft-gerhards-syslog-plain-tcp-07.html#msgxfer
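// For example, the hypothetical frame "11 hello world" carries an 11-byte message ("hello world") after the length prefix.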
|
||||
msgLenStr := bytesutil.ToUnsafeString(prefix[:len(prefix)-1])
|
||||
msgLen, err := strconv.ParseUint(msgLenStr, 10, 64)
|
||||
if err != nil {
|
||||
slr.err = fmt.Errorf("cannot parse message length from %q: %w", msgLenStr, err)
|
||||
return false
|
||||
}
|
||||
if maxMsgLen := insertutils.MaxLineSizeBytes.IntN(); msgLen > uint64(maxMsgLen) {
|
||||
slr.err = fmt.Errorf("cannot read message longer than %d bytes; msgLen=%d", maxMsgLen, msgLen)
|
||||
return false
|
||||
}
|
||||
slr.line = slicesutil.SetLength(slr.line, int(msgLen))
|
||||
if _, err := io.ReadFull(slr.br, slr.line); err != nil {
|
||||
slr.err = fmt.Errorf("cannot read message with size %d bytes: %w", msgLen, err)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// This is octet-stuffing method. See https://www.ietf.org/archive/id/draft-gerhards-syslog-plain-tcp-07.html#octet-stuffing-legacy
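// In this mode each message is terminated by a trailing '\n', so the loop below accumulates bytes until a newline (or EOF) is reached.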
|
||||
slr.line = append(slr.line[:0], prefix...)
|
||||
for {
|
||||
line, err := slr.br.ReadSlice('\n')
|
||||
if err == nil {
|
||||
slr.line = append(slr.line, line[:len(line)-1]...)
|
||||
return true
|
||||
}
|
||||
if err == io.EOF {
|
||||
slr.line = append(slr.line, line...)
|
||||
return true
|
||||
}
|
||||
if err == bufio.ErrBufferFull {
|
||||
slr.line = append(slr.line, line...)
|
||||
continue
|
||||
}
|
||||
slr.err = fmt.Errorf("cannot read message in octet-stuffing method: %w", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func getSyslogLineReader(r io.Reader) *syslogLineReader {
|
||||
v := syslogLineReaderPool.Get()
|
||||
if v == nil {
|
||||
br := bufio.NewReaderSize(r, 64*1024)
|
||||
return &syslogLineReader{
|
||||
br: br,
|
||||
}
|
||||
}
|
||||
slr := v.(*syslogLineReader)
|
||||
slr.reset(r)
|
||||
return slr
|
||||
}
|
||||
|
||||
func putSyslogLineReader(slr *syslogLineReader) {
|
||||
syslogLineReaderPool.Put(slr)
|
||||
}
|
||||
|
||||
var syslogLineReaderPool sync.Pool
|
||||
|
||||
func processLine(line []byte, currentYear int, timezone *time.Location, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
|
||||
p := logstorage.GetSyslogParser(currentYear, timezone)
|
||||
lineStr := bytesutil.ToUnsafeString(line)
|
||||
p.Parse(lineStr)
|
||||
|
||||
var ts int64
|
||||
if useLocalTimestamp {
|
||||
ts = time.Now().UnixNano()
|
||||
} else {
|
||||
nsecs, err := insertutils.ExtractTimestampRFC3339NanoFromFields("timestamp", p.Fields)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot get timestamp from syslog line %q: %w", line, err)
|
||||
}
|
||||
ts = nsecs
|
||||
}
|
||||
logstorage.RenameField(p.Fields, "message", "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
logstorage.PutSyslogParser(p)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="syslog"}`)
|
||||
|
||||
errorsTotal = metrics.NewCounter(`vl_errors_total{type="syslog"}`)
|
||||
|
||||
udpRequestsTotal = metrics.NewCounter(`vl_udp_reqests_total{type="syslog"}`)
|
||||
udpErrorsTotal = metrics.NewCounter(`vl_udp_errors_total{type="syslog"}`)
|
||||
)
|
||||
@@ -1,130 +0,0 @@
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
)
|
||||
|
||||
func TestSyslogLineReader_Success(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
slr := getSyslogLineReader(r)
|
||||
defer putSyslogLineReader(slr)
|
||||
|
||||
var lines []string
|
||||
for slr.nextLine() {
|
||||
lines = append(lines, string(slr.line))
|
||||
}
|
||||
if err := slr.Error(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines read;\ngot\n%q\nwant\n%q", lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("\n", nil)
|
||||
f("\n\n\n", nil)
|
||||
|
||||
f("foobar", []string{"foobar"})
|
||||
f("foobar\n", []string{"foobar\n"})
|
||||
f("\n\nfoo\n\nbar\n\n", []string{"foo\n\nbar\n\n"})
|
||||
|
||||
f(`Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...`, []string{"Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches..."})
|
||||
|
||||
f(`Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...
|
||||
|
||||
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
|
||||
|
||||
`, []string{
|
||||
"Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...",
|
||||
"<165>Jun 4 12:08:33 abcd systemd[345]: abc defg",
|
||||
`<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.`,
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyslogLineReader_Failure(t *testing.T) {
|
||||
f := func(data string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
slr := getSyslogLineReader(r)
|
||||
defer putSyslogLineReader(slr)
|
||||
|
||||
if slr.nextLine() {
|
||||
t.Fatalf("expecting failure to read the first line")
|
||||
}
|
||||
if err := slr.Error(); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// invalid format for message size
|
||||
f("12foo bar")
|
||||
|
||||
// too big message size
|
||||
f("123 aa")
|
||||
f("1233423432 abc")
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
f := func(data string, currentYear, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
MustInit()
|
||||
defer MustStop()
|
||||
|
||||
globalTimezone = time.UTC
|
||||
globalCurrentYear.Store(int64(currentYear))
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, "", false, tlp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
data := `Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...
|
||||
|
||||
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
|
||||
`
|
||||
currentYear := 2023
|
||||
rowsExpected := 3
|
||||
timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
|
||||
resultExpected := `{"format":"rfc3164","timestamp":"","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
|
||||
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","timestamp":"","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
|
||||
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","timestamp":"","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
|
||||
f(data, currentYear, rowsExpected, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
f := func(data string) {
|
||||
t.Helper()
|
||||
|
||||
MustInit()
|
||||
defer MustStop()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, "", false, tlp); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// invalid format for message size
|
||||
f("12foo bar")
|
||||
|
||||
// too big message size
|
||||
f("123 foo")
|
||||
f("123456789 bar")
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
# All these commands must run from the repository root.
|
||||
|
||||
vlogsgenerator:
|
||||
APP_NAME=vlogsgenerator $(MAKE) app-local
|
||||
|
||||
vlogsgenerator-race:
|
||||
APP_NAME=vlogsgenerator RACE=-race $(MAKE) app-local
|
||||
@@ -1,158 +0,0 @@
|
||||
# vlogsgenerator
|
||||
|
||||
Logs generator for [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/).
|
||||
|
||||
## How to build vlogsgenerator?
|
||||
|
||||
Run `make vlogsgenerator` from the repository root. This builds the `bin/vlogsgenerator` binary.
|
||||
|
||||
## How to run vlogsgenerator?
|
||||
|
||||
`vlogsgenerator` generates logs in [JSON line format](https://jsonlines.org/) suitable for ingestion
|
||||
via [`/insert/jsonline` endpoint at VictoriaLogs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api).
|
||||
|
||||
By default it writes the generated logs to `stdout`. For example, the following command writes the generated logs to `stdout`:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator
|
||||
```
|
||||
|
||||
It is possible to redirect the generated logs to a file. For example, the following command writes the generated logs to the `logs.json` file:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator > logs.json
|
||||
```
|
||||
|
||||
The generated logs in the `logs.json` file can be inspected with the following command:
|
||||
|
||||
```
|
||||
head logs.json | jq .
|
||||
```
|
||||
|
||||
Below is an example output:
|
||||
|
||||
```json
|
||||
{
|
||||
"_time": "2024-05-08T14:34:00.854Z",
|
||||
"_msg": "message for the stream 8 and worker 0; ip=185.69.136.129; uuid=b4fe8f1a-c93c-dea3-ba11-5b9f0509291e; u64=8996587920687045253",
|
||||
"host": "host_8",
|
||||
"worker_id": "0",
|
||||
"run_id": "f9b3deee-e6b6-7f56-5deb-1586e4e81725",
|
||||
"const_0": "some value 0 8",
|
||||
"const_1": "some value 1 8",
|
||||
"const_2": "some value 2 8",
|
||||
"var_0": "some value 0 12752539384823438260",
|
||||
"dict_0": "warn",
|
||||
"dict_1": "info",
|
||||
"u8_0": "6",
|
||||
"u16_0": "35202",
|
||||
"u32_0": "1964973739",
|
||||
"u64_0": "4810489083243239145",
|
||||
"float_0": "1.868",
|
||||
"ip_0": "250.34.75.125",
|
||||
"timestamp_0": "1799-03-16T01:34:18.311Z",
|
||||
"json_0": "{\"foo\":\"bar_3\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":32}"
|
||||
}
|
||||
{
|
||||
"_time": "2024-05-08T14:34:00.854Z",
|
||||
"_msg": "message for the stream 9 and worker 0; ip=164.244.254.194; uuid=7e8373b1-ce0d-1ce7-8e96-4bcab8955598; u64=13949903463741076522",
|
||||
"host": "host_9",
|
||||
"worker_id": "0",
|
||||
"run_id": "f9b3deee-e6b6-7f56-5deb-1586e4e81725",
|
||||
"const_0": "some value 0 9",
|
||||
"const_1": "some value 1 9",
|
||||
"const_2": "some value 2 9",
|
||||
"var_0": "some value 0 5371555382075206134",
|
||||
"dict_0": "INFO",
|
||||
"dict_1": "FATAL",
|
||||
"u8_0": "219",
|
||||
"u16_0": "31459",
|
||||
"u32_0": "3918836777",
|
||||
"u64_0": "6593354256620219850",
|
||||
"float_0": "1.085",
|
||||
"ip_0": "253.151.88.158",
|
||||
"timestamp_0": "2042-10-05T16:42:57.082Z",
|
||||
"json_0": "{\"foo\":\"bar_5\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":27}"
|
||||
}
|
||||
```
|
||||
|
||||
The `run_id` field uniquely identifies every `vlogsgenerator` invocation.
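For example, the logs produced by a single invocation can be selected by filtering on that `run_id`. Below is a hedged Go sketch (an illustration, not part of this repository): it assumes a VictoriaLogs instance at `localhost:9428` exposing the `/select/logsql/query` endpoint, and reuses the `run_id` value from the sample output above:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Count the log entries produced by a single vlogsgenerator invocation,
	// identified by its run_id (the value below is taken from the sample output above).
	q := url.Values{
		"query": {`run_id:"f9b3deee-e6b6-7f56-5deb-1586e4e81725" | stats count() entries`},
	}
	resp, err := http.Get("http://localhost:9428/select/logsql/query?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```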
|
||||
|
||||
### How to write logs to VictoriaLogs?
|
||||
|
||||
The generated logs can be written directly to VictoriaLogs by passing the address of [`/insert/jsonline` endpoint](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api)
|
||||
to `-addr` command-line flag. For example, the following command writes the generated logs to VictoriaLogs running at `localhost`:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator -addr=http://localhost:9428/insert/jsonline
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
`vlogsgenerator` accepts various command-line flags, which can be used for configuring the number and the shape of the generated logs.
|
||||
These flags can be inspected by running `vlogsgenerator -help`. Below are the most interesting flags:
|
||||
|
||||
* `-start` - starting timestamp for generating logs. Logs are evenly generated on the [`-start` ... `-end`] interval.
|
||||
* `-end` - ending timestamp for generating logs. Logs are evenly generated on the [`-start` ... `-end`] interval.
|
||||
* `-activeStreams` - the number of active [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to generate.
|
||||
* `-logsPerStream` - the number of log entries to generate per each log stream. Log entries are evenly distributed on the [`-start` ... `-end`] interval.
|
||||
|
||||
The total number of generated logs can be calculated as `-activeStreams` * `-logsPerStream`.
|
||||
|
||||
For example, the following command generates `1_000_000` log entries on the time range `[2024-01-01 - 2024-02-01]` across `100`
|
||||
[log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields), where every log stream contains `10_000` log entries,
|
||||
and writes them to `http://localhost:9428/insert/jsonline`:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator \
|
||||
-start=2024-01-01 -end=2024-02-01 \
|
||||
-activeStreams=100 \
|
||||
-logsPerStream=10_000 \
|
||||
-addr=http://localhost:9428/insert/jsonline
|
||||
```
|
||||
|
||||
### Churn rate
|
||||
|
||||
It is possible to generate churn rate for active [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
|
||||
by setting the `-totalStreams` command-line flag to a value bigger than `-activeStreams`. For example, the following command generates
|
||||
logs for `1000` total streams, while the number of active streams equals `100`. This means that at any given time there are logs for `100` streams,
|
||||
but these streams change over the given [`-start` ... `-end`] time range, so the total number of streams over that time range reaches `1000`:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator \
|
||||
-start=2024-01-01 -end=2024-02-01 \
|
||||
-activeStreams=100 \
|
||||
-totalStreams=1_000 \
|
||||
-logsPerStream=10_000 \
|
||||
-addr=http://localhost:9428/insert/jsonline
|
||||
```
|
||||
|
||||
In this case the total number of generated logs equals `-totalStreams` * `-logsPerStream` = `10_000_000`.
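To make the churn mechanism concrete, below is a minimal Go sketch (an illustration only, not the actual `vlogsgenerator` implementation) showing how a sliding window of `-activeStreams` stream IDs can cover `-totalStreams` distinct streams as the generated timestamps move from `-start` to `-end`:

```go
package main

import "fmt"

// activeStreamIDs returns the IDs of the streams that are active at the given
// progress point, where progress is 0 at -start and 1 at -end.
func activeStreamIDs(progress float64, activeStreams, totalStreams int) []int {
	offset := int(progress * float64(totalStreams-activeStreams))
	ids := make([]int, activeStreams)
	for i := range ids {
		ids[i] = offset + i
	}
	return ids
}

func main() {
	// With 3 active streams out of 10 total, the active window slides over time.
	fmt.Println(activeStreamIDs(0, 3, 10))   // [0 1 2]
	fmt.Println(activeStreamIDs(0.5, 3, 10)) // [3 4 5]
	fmt.Println(activeStreamIDs(1, 3, 10))   // [7 8 9]
}
```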
|
||||
|
||||
### Benchmark tuning
|
||||
|
||||
By default `vlogsgenerator` generates and writes logs with a single worker. This may limit the maximum data ingestion rate during benchmarks.
|
||||
The number of workers can be changed via the `-workers` command-line flag. For example, the following command generates and writes logs with `16` workers:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator \
|
||||
-start=2024-01-01 -end=2024-02-01 \
|
||||
-activeStreams=100 \
|
||||
-logsPerStream=10_000 \
|
||||
-addr=http://localhost:9428/insert/jsonline \
|
||||
-workers=16
|
||||
```
|
||||
|
||||
### Output statistics
|
||||
|
||||
Every 10 seconds `vlogsgenerator` writes statistics about the generated logs to `stderr`. The frequency of these statistics can be adjusted via the `-statInterval` command-line flag.
|
||||
For example, the following command writes statistics every 2 seconds:
|
||||
|
||||
```
|
||||
bin/vlogsgenerator \
|
||||
-start=2024-01-01 -end=2024-02-01 \
|
||||
-activeStreams=100 \
|
||||
-logsPerStream=10_000 \
|
||||
-addr=http://localhost:9428/insert/jsonline \
|
||||
-statInterval=2s
|
||||
```
|
||||
@@ -1,344 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("addr", "stdout", "HTTP address to push the generated logs to; if it is set to stdout, then logs are generated to stdout")
|
||||
workers = flag.Int("workers", 1, "The number of workers to use to push logs to -addr")
|
||||
|
||||
start = newTimeFlag("start", "-1d", "Generated logs start from this time; see https://docs.victoriametrics.com/#timestamp-formats")
|
||||
end = newTimeFlag("end", "0s", "Generated logs end at this time; see https://docs.victoriametrics.com/#timestamp-formats")
|
||||
activeStreams = flag.Int("activeStreams", 100, "The number of active log streams to generate; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields")
|
||||
totalStreams = flag.Int("totalStreams", 0, "The number of total log streams; if -totalStreams > -activeStreams, then some active streams are substituted with new streams "+
|
||||
"during data generation")
|
||||
logsPerStream = flag.Int64("logsPerStream", 1_000, "The number of log entries to generate per each log stream. Log entries are evenly distributed between -start and -end")
|
||||
constFieldsPerLog = flag.Int("constFieldsPerLog", 3, "The number of fields with constant values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
varFieldsPerLog = flag.Int("varFieldsPerLog", 1, "The number of fields with variable values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
dictFieldsPerLog = flag.Int("dictFieldsPerLog", 2, "The number of fields with up to 8 different values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
u8FieldsPerLog = flag.Int("u8FieldsPerLog", 1, "The number of fields with uint8 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
u16FieldsPerLog = flag.Int("u16FieldsPerLog", 1, "The number of fields with uint16 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
u32FieldsPerLog = flag.Int("u32FieldsPerLog", 1, "The number of fields with uint32 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
u64FieldsPerLog = flag.Int("u64FieldsPerLog", 1, "The number of fields with uint64 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
floatFieldsPerLog = flag.Int("floatFieldsPerLog", 1, "The number of fields with float64 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
ipFieldsPerLog = flag.Int("ipFieldsPerLog", 1, "The number of fields with IPv4 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
timestampFieldsPerLog = flag.Int("timestampFieldsPerLog", 1, "The number of fields with ISO8601 timestamps per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
jsonFieldsPerLog = flag.Int("jsonFieldsPerLog", 1, "The number of JSON fields to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
|
||||
statInterval = flag.Duration("statInterval", 10*time.Second, "The interval between publishing the stats")
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Write flags and help message to stdout, since it is easier to grep or pipe.
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
envflag.Parse()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
|
||||
var remoteWriteURL *url.URL
|
||||
if *addr != "stdout" {
|
||||
urlParsed, err := url.Parse(*addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -addr=%q: %s", *addr, err)
|
||||
}
|
||||
qs, err := url.ParseQuery(urlParsed.RawQuery)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse query string in -addr=%q: %w", *addr, err)
|
||||
}
|
||||
qs.Set("_stream_fields", "host,worker_id")
|
||||
urlParsed.RawQuery = qs.Encode()
|
||||
remoteWriteURL = urlParsed
|
||||
}
|
||||
|
||||
if start.nsec >= end.nsec {
|
||||
logger.Fatalf("-start=%s must be smaller than -end=%s", start, end)
|
||||
}
|
||||
if *activeStreams <= 0 {
|
||||
logger.Fatalf("-activeStreams must be bigger than 0; got %d", *activeStreams)
|
||||
}
|
||||
if *logsPerStream <= 0 {
|
||||
logger.Fatalf("-logsPerStream must be bigger than 0; got %d", *logsPerStream)
|
||||
}
|
||||
if *totalStreams < *activeStreams {
|
||||
*totalStreams = *activeStreams
|
||||
}
|
||||
|
||||
cfg := &workerConfig{
|
||||
url: remoteWriteURL,
|
||||
activeStreams: *activeStreams,
|
||||
totalStreams: *totalStreams,
|
||||
}
|
||||
|
||||
// divide total and active streams among workers
|
||||
if *workers <= 0 {
|
||||
logger.Fatalf("-workers must be bigger than 0; got %d", *workers)
|
||||
}
|
||||
if *workers > *activeStreams {
|
||||
logger.Fatalf("-workers=%d cannot exceed -activeStreams=%d", *workers, *activeStreams)
|
||||
}
|
||||
cfg.activeStreams /= *workers
|
||||
cfg.totalStreams /= *workers
|
||||
|
||||
logger.Infof("start -workers=%d workers for ingesting -logsPerStream=%d log entries per each -totalStreams=%d (-activeStreams=%d) on a time range -start=%s, -end=%s to -addr=%s",
|
||||
*workers, *logsPerStream, *totalStreams, *activeStreams, toRFC3339(start.nsec), toRFC3339(end.nsec), *addr)
|
||||
|
||||
startTime := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < *workers; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
generateAndPushLogs(cfg, workerID)
|
||||
}(i)
|
||||
}
|
||||
|
||||
go func() {
|
||||
prevEntries := uint64(0)
|
||||
prevBytes := uint64(0)
|
||||
ticker := time.NewTicker(*statInterval)
|
||||
for range ticker.C {
|
||||
currEntries := logEntriesCount.Load()
|
||||
deltaEntries := currEntries - prevEntries
|
||||
rateEntries := float64(deltaEntries) / statInterval.Seconds()
|
||||
|
||||
currBytes := bytesGenerated.Load()
|
||||
deltaBytes := currBytes - prevBytes
|
||||
rateBytes := float64(deltaBytes) / statInterval.Seconds()
|
||||
logger.Infof("generated %dK log entries (%dK total) at %.0fK entries/sec, %dMB (%dMB total) at %.0fMB/sec",
|
||||
deltaEntries/1e3, currEntries/1e3, rateEntries/1e3, deltaBytes/1e6, currBytes/1e6, rateBytes/1e6)
|
||||
|
||||
prevEntries = currEntries
|
||||
prevBytes = currBytes
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
dSecs := time.Since(startTime).Seconds()
|
||||
currEntries := logEntriesCount.Load()
|
||||
currBytes := bytesGenerated.Load()
|
||||
rateEntries := float64(currEntries) / dSecs
|
||||
rateBytes := float64(currBytes) / dSecs
|
||||
logger.Infof("ingested %dK log entries (%dMB) in %.3f seconds; avg ingestion rate: %.0fK entries/sec, %.0fMB/sec", currEntries/1e3, currBytes/1e6, dSecs, rateEntries/1e3, rateBytes/1e6)
|
||||
}
|
||||
|
||||
var logEntriesCount atomic.Uint64
|
||||
|
||||
var bytesGenerated atomic.Uint64
|
||||
|
||||
type workerConfig struct {
|
||||
url *url.URL
|
||||
activeStreams int
|
||||
totalStreams int
|
||||
}
|
||||
|
||||
type statWriter struct {
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (sw *statWriter) Write(p []byte) (int, error) {
|
||||
bytesGenerated.Add(uint64(len(p)))
|
||||
return sw.w.Write(p)
|
||||
}
|
||||
|
||||
func generateAndPushLogs(cfg *workerConfig, workerID int) {
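// The logs are generated into an io.Pipe and streamed to the target as the body of a single
// HTTP POST request; statWriter counts the generated bytes for the periodic stats output.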
|
||||
pr, pw := io.Pipe()
|
||||
sw := &statWriter{
|
||||
w: pw,
|
||||
}
|
||||
bw := bufio.NewWriter(sw)
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
generateLogs(bw, workerID, cfg.activeStreams, cfg.totalStreams)
|
||||
_ = bw.Flush()
|
||||
_ = pw.Close()
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
if cfg.url == nil {
|
||||
_, err := io.Copy(os.Stdout, pr)
|
||||
if err != nil {
|
||||
logger.Fatalf("unexpected error when writing logs to stdout: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", cfg.url.String(), pr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot create request to %q: %s", cfg.url, err)
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot perform request to %q: %s", cfg.url, err)
|
||||
}
|
||||
if resp.StatusCode/100 != 2 {
|
||||
logger.Fatalf("unexpected status code got from %q: %d; want 2xx", cfg.url, err)
|
||||
}
|
||||
|
||||
// Wait until the generateLogs goroutine is finished.
|
||||
<-doneCh
|
||||
}
|
||||
|
||||
func generateLogs(bw *bufio.Writer, workerID, activeStreams, totalStreams int) {
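// Spread totalStreams across the [start ... end] time range so that roughly activeStreams
// streams are active at any moment: a new stream starts every streamStep nanoseconds,
// lives for streamLifetime nanoseconds and emits logsPerStream entries spaced step nanoseconds apart.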
|
||||
streamLifetime := int64(float64(end.nsec-start.nsec) * (float64(activeStreams) / float64(totalStreams)))
|
||||
streamStep := int64(float64(end.nsec-start.nsec) / float64(totalStreams-activeStreams+1))
|
||||
step := streamLifetime / (*logsPerStream - 1)
|
||||
|
||||
currNsec := start.nsec
|
||||
for currNsec < end.nsec {
|
||||
firstStreamID := int((currNsec - start.nsec) / streamStep)
|
||||
generateLogsAtTimestamp(bw, workerID, currNsec, firstStreamID, activeStreams)
|
||||
currNsec += step
|
||||
}
|
||||
}
|
||||
|
||||
var runID = toUUID(rand.Uint64(), rand.Uint64())
|
||||
|
||||
func generateLogsAtTimestamp(bw *bufio.Writer, workerID int, ts int64, firstStreamID, activeStreams int) {
|
||||
streamID := firstStreamID
|
||||
timeStr := toRFC3339(ts)
|
||||
for i := 0; i < activeStreams; i++ {
|
||||
ip := toIPv4(rand.Uint32())
|
||||
uuid := toUUID(rand.Uint64(), rand.Uint64())
|
||||
fmt.Fprintf(bw, `{"_time":%q,"_msg":"message for the stream %d and worker %d; ip=%s; uuid=%s; u64=%d","host":"host_%d","worker_id":"%d"`,
|
||||
timeStr, streamID, workerID, ip, uuid, rand.Uint64(), streamID, workerID)
|
||||
fmt.Fprintf(bw, `,"run_id":"%s"`, runID)
|
||||
for j := 0; j < *constFieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"const_%d":"some value %d %d"`, j, j, streamID)
|
||||
}
|
||||
for j := 0; j < *varFieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"var_%d":"some value %d %d"`, j, j, rand.Uint64())
|
||||
}
|
||||
for j := 0; j < *dictFieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"dict_%d":"%s"`, j, dictValues[rand.Intn(len(dictValues))])
|
||||
}
|
||||
for j := 0; j < *u8FieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"u8_%d":"%d"`, j, uint8(rand.Uint32()))
|
||||
}
|
||||
for j := 0; j < *u16FieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"u16_%d":"%d"`, j, uint16(rand.Uint32()))
|
||||
}
|
||||
for j := 0; j < *u32FieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"u32_%d":"%d"`, j, rand.Uint32())
|
||||
}
|
||||
for j := 0; j < *u64FieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"u64_%d":"%d"`, j, rand.Uint64())
|
||||
}
|
||||
for j := 0; j < *floatFieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"float_%d":"%v"`, j, math.Round(10_000*rand.Float64())/1000)
|
||||
}
|
||||
for j := 0; j < *ipFieldsPerLog; j++ {
|
||||
ip := toIPv4(rand.Uint32())
|
||||
fmt.Fprintf(bw, `,"ip_%d":"%s"`, j, ip)
|
||||
}
|
||||
for j := 0; j < *timestampFieldsPerLog; j++ {
|
||||
timestamp := toISO8601(int64(rand.Uint64()))
|
||||
fmt.Fprintf(bw, `,"timestamp_%d":"%s"`, j, timestamp)
|
||||
}
|
||||
for j := 0; j < *jsonFieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"json_%d":"{\"foo\":\"bar_%d\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":%d}"`, j, rand.Intn(10), rand.Intn(100))
|
||||
}
|
||||
fmt.Fprintf(bw, "}\n")
|
||||
|
||||
logEntriesCount.Add(1)
|
||||
streamID++
|
||||
}
|
||||
}
|
||||
|
||||
var dictValues = []string{
|
||||
"debug",
|
||||
"info",
|
||||
"warn",
|
||||
"error",
|
||||
"fatal",
|
||||
"ERROR",
|
||||
"FATAL",
|
||||
"INFO",
|
||||
}
|
||||
|
||||
func newTimeFlag(name, defaultValue, description string) *timeFlag {
|
||||
var tf timeFlag
|
||||
if err := tf.Set(defaultValue); err != nil {
|
||||
logger.Panicf("invalid defaultValue=%q for flag %q: %w", defaultValue, name, err)
|
||||
}
|
||||
flag.Var(&tf, name, description)
|
||||
return &tf
|
||||
}
|
||||
|
||||
type timeFlag struct {
|
||||
s string
|
||||
nsec int64
|
||||
}
|
||||
|
||||
func (tf *timeFlag) Set(s string) error {
|
||||
msec, err := promutils.ParseTimeMsec(s)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse time from %q: %w", s, err)
|
||||
}
|
||||
tf.s = s
|
||||
tf.nsec = msec * 1e6
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tf *timeFlag) String() string {
|
||||
return tf.s
|
||||
}
|
||||
|
||||
func toRFC3339(nsec int64) string {
|
||||
return time.Unix(0, nsec).UTC().Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
func toISO8601(nsec int64) string {
|
||||
return time.Unix(0, nsec).UTC().Format("2006-01-02T15:04:05.000Z")
|
||||
}
|
||||
|
||||
func toIPv4(n uint32) string {
|
||||
dst := make([]byte, 0, len("255.255.255.255"))
|
||||
dst = marshalUint64(dst, uint64(n>>24))
|
||||
dst = append(dst, '.')
|
||||
dst = marshalUint64(dst, uint64((n>>16)&0xff))
|
||||
dst = append(dst, '.')
|
||||
dst = marshalUint64(dst, uint64((n>>8)&0xff))
|
||||
dst = append(dst, '.')
|
||||
dst = marshalUint64(dst, uint64(n&0xff))
|
||||
return string(dst)
|
||||
}
|
||||
|
||||
func toUUID(a, b uint64) string {
|
||||
return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", a&(1<<32-1), (a>>32)&(1<<16-1), (a >> 48), b&(1<<16-1), b>>16)
|
||||
}
|
||||
|
||||
// marshalUint64 appends string representation of n to dst and returns the result.
|
||||
func marshalUint64(dst []byte, n uint64) []byte {
|
||||
return strconv.AppendUint(dst, n, 10)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package logsql
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func getBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
v := bufferedWriterPool.Get()
|
||||
if v == nil {
|
||||
return &bufferedWriter{
|
||||
bw: bufio.NewWriter(w),
|
||||
}
|
||||
}
|
||||
bw := v.(*bufferedWriter)
|
||||
bw.bw.Reset(w)
|
||||
return bw
|
||||
}
|
||||
|
||||
func putBufferedWriter(bw *bufferedWriter) {
|
||||
bw.reset()
|
||||
bufferedWriterPool.Put(bw)
|
||||
}
|
||||
|
||||
var bufferedWriterPool sync.Pool
|
||||
|
||||
type bufferedWriter struct {
|
||||
mu sync.Mutex
|
||||
bw *bufio.Writer
|
||||
}
|
||||
|
||||
func (bw *bufferedWriter) reset() {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (bw *bufferedWriter) WriteIgnoreErrors(p []byte) {
|
||||
bw.mu.Lock()
|
||||
_, _ = bw.bw.Write(p)
|
||||
bw.mu.Unlock()
|
||||
}
|
||||
|
||||
func (bw *bufferedWriter) FlushIgnoreErrors() {
|
||||
bw.mu.Lock()
|
||||
_ = bw.bw.Flush()
|
||||
bw.mu.Unlock()
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
{% import (
|
||||
"slices"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
// FieldsForHits formats labels for /select/logsql/hits response
|
||||
{% func FieldsForHits(columns []logstorage.BlockColumn, rowIdx int) %}
|
||||
{
|
||||
{% if len(columns) > 0 %}
|
||||
{%q= columns[0].Name %}:{%q= columns[0].Values[rowIdx] %}
|
||||
{% for _, c := range columns[1:] %}
|
||||
,{%q= c.Name %}:{%q= c.Values[rowIdx] %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func HitsSeries(m map[string]*hitsSeries) %}
|
||||
{
|
||||
{% code
|
||||
sortedKeys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
slices.Sort(sortedKeys)
|
||||
%}
|
||||
"hits":[
|
||||
{% if len(sortedKeys) > 0 %}
|
||||
{%= hitsSeriesLine(m, sortedKeys[0]) %}
|
||||
{% for _, k := range sortedKeys[1:] %}
|
||||
,{%= hitsSeriesLine(m, k) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func hitsSeriesLine(m map[string]*hitsSeries, k string) %}
|
||||
{
|
||||
{% code
|
||||
hs := m[k]
|
||||
hs.sort()
|
||||
timestamps := hs.timestamps
|
||||
hits := hs.hits
|
||||
%}
|
||||
"fields":{%s= k %},
|
||||
"timestamps":[
|
||||
{% if len(timestamps) > 0 %}
|
||||
{%q= timestamps[0] %}
|
||||
{% for _, ts := range timestamps[1:] %}
|
||||
,{%q= ts %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
],
|
||||
"values":[
|
||||
{% if len(hits) > 0 %}
|
||||
{%dul= hits[0] %}
|
||||
{% for _, v := range hits[1:] %}
|
||||
,{%dul= v %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
],
|
||||
"total":{%dul= hs.hitsTotal %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
@@ -1,223 +0,0 @@
|
||||
// Code generated by qtc from "hits_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:1
|
||||
package logsql
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:1
|
||||
import (
|
||||
"slices"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
// FieldsForHits formats labels for /select/logsql/hits response
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:10
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:10
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:10
|
||||
func StreamFieldsForHits(qw422016 *qt422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:10
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:12
|
||||
if len(columns) > 0 {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:13
|
||||
qw422016.N().Q(columns[0].Name)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:13
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:13
|
||||
qw422016.N().Q(columns[0].Values[rowIdx])
|
||||
//line app/vlselect/logsql/hits_response.qtpl:14
|
||||
for _, c := range columns[1:] {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:14
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:15
|
||||
qw422016.N().Q(c.Name)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:15
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:15
|
||||
qw422016.N().Q(c.Values[rowIdx])
|
||||
//line app/vlselect/logsql/hits_response.qtpl:16
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:17
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:17
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
func WriteFieldsForHits(qq422016 qtio422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
StreamFieldsForHits(qw422016, columns, rowIdx)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
func FieldsForHits(columns []logstorage.BlockColumn, rowIdx int) string {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
WriteFieldsForHits(qb422016, columns, rowIdx)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/hits_response.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:21
|
||||
func StreamHitsSeries(qw422016 *qt422016.Writer, m map[string]*hitsSeries) {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:21
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:24
|
||||
sortedKeys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
slices.Sort(sortedKeys)
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:29
|
||||
qw422016.N().S(`"hits":[`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:31
|
||||
if len(sortedKeys) > 0 {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:32
|
||||
streamhitsSeriesLine(qw422016, m, sortedKeys[0])
|
||||
//line app/vlselect/logsql/hits_response.qtpl:33
|
||||
for _, k := range sortedKeys[1:] {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:33
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:34
|
||||
streamhitsSeriesLine(qw422016, m, k)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:35
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:36
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:36
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
func WriteHitsSeries(qq422016 qtio422016.Writer, m map[string]*hitsSeries) {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
StreamHitsSeries(qw422016, m)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
func HitsSeries(m map[string]*hitsSeries) string {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
WriteHitsSeries(qb422016, m)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/hits_response.qtpl:39
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:41
|
||||
func streamhitsSeriesLine(qw422016 *qt422016.Writer, m map[string]*hitsSeries, k string) {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:41
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:44
|
||||
hs := m[k]
|
||||
hs.sort()
|
||||
timestamps := hs.timestamps
|
||||
hits := hs.hits
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:48
|
||||
qw422016.N().S(`"fields":`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:49
|
||||
qw422016.N().S(k)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:49
|
||||
qw422016.N().S(`,"timestamps":[`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:51
|
||||
if len(timestamps) > 0 {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:52
|
||||
qw422016.N().Q(timestamps[0])
|
||||
//line app/vlselect/logsql/hits_response.qtpl:53
|
||||
for _, ts := range timestamps[1:] {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:53
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:54
|
||||
qw422016.N().Q(ts)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:55
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:56
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:56
|
||||
qw422016.N().S(`],"values":[`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:59
|
||||
if len(hits) > 0 {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:60
|
||||
qw422016.N().DUL(hits[0])
|
||||
//line app/vlselect/logsql/hits_response.qtpl:61
|
||||
for _, v := range hits[1:] {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:61
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:62
|
||||
qw422016.N().DUL(v)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:63
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:64
|
||||
}
|
||||
//line app/vlselect/logsql/hits_response.qtpl:64
|
||||
qw422016.N().S(`],"total":`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:66
|
||||
qw422016.N().DUL(hs.hitsTotal)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:66
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
func writehitsSeriesLine(qq422016 qtio422016.Writer, m map[string]*hitsSeries, k string) {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
streamhitsSeriesLine(qw422016, m, k)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
func hitsSeriesLine(m map[string]*hitsSeries, k string) string {
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
writehitsSeriesLine(qb422016, m, k)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/hits_response.qtpl:68
|
||||
}
|
||||
@@ -1,770 +1,56 @@
|
||||
package logsql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
)
|
||||
|
||||
// ProcessHitsRequest handles /select/logsql/hits request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
|
||||
func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Obtain step
|
||||
stepStr := r.FormValue("step")
|
||||
if stepStr == "" {
|
||||
stepStr = "1d"
|
||||
}
|
||||
step, err := promutils.ParseDuration(stepStr)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse 'step' arg: %s", err)
|
||||
return
|
||||
}
|
||||
if step <= 0 {
httpserver.Errorf(w, r, "'step' must be bigger than zero")
return
}
|
||||
|
||||
// Obtain offset
|
||||
offsetStr := r.FormValue("offset")
|
||||
if offsetStr == "" {
|
||||
offsetStr = "0s"
|
||||
}
|
||||
offset, err := promutils.ParseDuration(offsetStr)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse 'offset' arg: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Obtain field entries
|
||||
fields := r.Form["field"]
|
||||
|
||||
// Obtain limit on the number of top fields entries.
|
||||
fieldsLimit, err := httputils.GetInt(r, "fields_limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
if fieldsLimit < 0 {
|
||||
fieldsLimit = 0
|
||||
}
|
||||
|
||||
// Prepare the query
|
||||
q.AddCountByTimePipe(int64(step), int64(offset), fields)
|
||||
q.Optimize()
|
||||
|
||||
var mLock sync.Mutex
|
||||
m := make(map[string]*hitsSeries)
|
||||
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
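// The query built via AddCountByTimePipe above returns the time bucket in the first column
// and the hits count in the last column; the columns in between hold the requested "field" values.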
|
||||
if len(columns) == 0 || len(columns[0].Values) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
timestampValues := columns[0].Values
|
||||
hitsValues := columns[len(columns)-1].Values
|
||||
columns = columns[1 : len(columns)-1]
|
||||
|
||||
bb := blockResultPool.Get()
|
||||
for i := range timestamps {
|
||||
timestampStr := strings.Clone(timestampValues[i])
|
||||
hitsStr := strings.Clone(hitsValues[i])
|
||||
hits, err := strconv.ParseUint(hitsStr, 10, 64)
|
||||
if err != nil {
|
||||
logger.Panicf("BUG: cannot parse hitsStr=%q: %s", hitsStr, err)
|
||||
}
|
||||
|
||||
bb.Reset()
|
||||
WriteFieldsForHits(bb, columns, i)
|
||||
|
||||
mLock.Lock()
|
||||
hs, ok := m[string(bb.B)]
|
||||
if !ok {
|
||||
k := string(bb.B)
|
||||
hs = &hitsSeries{}
|
||||
m[k] = hs
|
||||
}
|
||||
hs.timestamps = append(hs.timestamps, timestampStr)
|
||||
hs.hits = append(hs.hits, hits)
|
||||
hs.hitsTotal += hits
|
||||
mLock.Unlock()
|
||||
}
|
||||
blockResultPool.Put(bb)
|
||||
}
|
||||
|
||||
// Execute the query
|
||||
if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
|
||||
httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
|
||||
return
|
||||
}
|
||||
|
||||
m = getTopHitsSeries(m, fieldsLimit)
|
||||
|
||||
// Write response
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteHitsSeries(w, m)
|
||||
}
|
||||
|
||||
func getTopHitsSeries(m map[string]*hitsSeries, fieldsLimit int) map[string]*hitsSeries {
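// Keep the fieldsLimit series with the biggest total hits and merge all the remaining series
// into a single catch-all series stored under the "{}" key.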
|
||||
if fieldsLimit <= 0 || fieldsLimit >= len(m) {
|
||||
return m
|
||||
}
|
||||
|
||||
type fieldsHits struct {
|
||||
fieldsStr string
|
||||
hs *hitsSeries
|
||||
}
|
||||
a := make([]fieldsHits, 0, len(m))
|
||||
for fieldsStr, hs := range m {
|
||||
a = append(a, fieldsHits{
|
||||
fieldsStr: fieldsStr,
|
||||
hs: hs,
|
||||
})
|
||||
}
|
||||
sort.Slice(a, func(i, j int) bool {
|
||||
return a[i].hs.hitsTotal > a[j].hs.hitsTotal
|
||||
})
|
||||
|
||||
hitsOther := make(map[string]uint64)
|
||||
for _, x := range a[fieldsLimit:] {
|
||||
for i, timestampStr := range x.hs.timestamps {
|
||||
hitsOther[timestampStr] += x.hs.hits[i]
|
||||
}
|
||||
}
|
||||
var hsOther hitsSeries
|
||||
for timestampStr, hits := range hitsOther {
|
||||
hsOther.timestamps = append(hsOther.timestamps, timestampStr)
|
||||
hsOther.hits = append(hsOther.hits, hits)
|
||||
hsOther.hitsTotal += hits
|
||||
}
|
||||
|
||||
mNew := make(map[string]*hitsSeries, fieldsLimit+1)
|
||||
for _, x := range a[:fieldsLimit] {
|
||||
mNew[x.fieldsStr] = x.hs
|
||||
}
|
||||
mNew["{}"] = &hsOther
|
||||
|
||||
return mNew
|
||||
}
|
||||
|
||||
type hitsSeries struct {
|
||||
hitsTotal uint64
|
||||
timestamps []string
|
||||
hits []uint64
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) sort() {
|
||||
sort.Sort(hs)
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) Len() int {
|
||||
return len(hs.timestamps)
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) Swap(i, j int) {
|
||||
hs.timestamps[i], hs.timestamps[j] = hs.timestamps[j], hs.timestamps[i]
|
||||
hs.hits[i], hs.hits[j] = hs.hits[j], hs.hits[i]
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) Less(i, j int) bool {
|
||||
return hs.timestamps[i] < hs.timestamps[j]
|
||||
}
|
||||
|
||||
// ProcessFieldNamesRequest handles /select/logsql/field_names request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-names
|
||||
func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Obtain field names for the given query
|
||||
q.Optimize()
|
||||
fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot obtain field names: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write results
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteValuesWithHitsJSON(w, fieldNames)
|
||||
}
|
||||
|
||||
// ProcessFieldValuesRequest handles /select/logsql/field_values request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-values
|
||||
func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse fieldName query arg
|
||||
fieldName := r.FormValue("field")
|
||||
if fieldName == "" {
|
||||
httpserver.Errorf(w, r, "missing 'field' query arg")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse limit query arg
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
|
||||
// Obtain unique values for the given field
|
||||
q.Optimize()
|
||||
values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot obtain values for field %q: %s", fieldName, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write results
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteValuesWithHitsJSON(w, values)
|
||||
}
|
||||
|
||||
// ProcessStreamFieldNamesRequest processes /select/logsql/stream_field_names request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-field-names
|
||||
func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Obtain stream field names for the given query
|
||||
q.Optimize()
|
||||
names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
|
||||
if err != nil {
httpserver.Errorf(w, r, "cannot obtain stream field names: %s", err)
return
}
|
||||
|
||||
// Write results
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteValuesWithHitsJSON(w, names)
|
||||
}
|
||||
|
||||
// ProcessStreamFieldValuesRequest processes /select/logsql/stream_field_values request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-field-values
|
||||
func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse fieldName query arg
|
||||
fieldName := r.FormValue("field")
|
||||
if fieldName == "" {
|
||||
httpserver.Errorf(w, r, "missing 'field' query arg")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse limit query arg
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
|
||||
// Obtain stream field values for the given query and the given fieldName
|
||||
q.Optimize()
|
||||
values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
|
||||
if err != nil {
httpserver.Errorf(w, r, "cannot obtain stream field values: %s", err)
return
}
|
||||
|
||||
// Write results
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteValuesWithHitsJSON(w, values)
|
||||
}
|
||||
|
||||
// ProcessStreamIDsRequest processes /select/logsql/stream_ids request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream_ids
|
||||
func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse limit query arg
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
|
||||
// Obtain streamIDs for the given query
|
||||
q.Optimize()
|
||||
streamIDs, err := vlstorage.GetStreamIDs(ctx, tenantIDs, q, uint64(limit))
|
||||
if err != nil {
httpserver.Errorf(w, r, "cannot obtain stream_ids: %s", err)
return
}
|
||||
|
||||
// Write results
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteValuesWithHitsJSON(w, streamIDs)
|
||||
}
|
||||
|
||||
// ProcessStreamsRequest processes /select/logsql/streams request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-streams
|
||||
func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse limit query arg
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
|
||||
// Obtain streams for the given query
|
||||
q.Optimize()
|
||||
streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
|
||||
if err != nil {
httpserver.Errorf(w, r, "cannot obtain streams: %s", err)
return
}
|
||||
|
||||
// Write results
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteValuesWithHitsJSON(w, streams)
|
||||
}
|
||||
|
||||
// ProcessLiveTailRequest processes live tailing requests to /select/logsql/tail
|
||||
func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
liveTailRequests.Inc()
|
||||
defer liveTailRequests.Dec()
|
||||
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
if !q.CanLiveTail() {
httpserver.Errorf(w, r, "the query [%s] cannot be used in live tailing; see https://docs.victoriametrics.com/victorialogs/querying/#live-tailing for details", q)
return
}
|
||||
q.Optimize()
|
||||
|
||||
refreshIntervalMsecs, err := httputils.GetDuration(r, "refresh_interval", 1000)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
refreshInterval := time.Millisecond * time.Duration(refreshIntervalMsecs)
|
||||
|
||||
ctxWithCancel, cancel := context.WithCancel(ctx)
|
||||
tp := newTailProcessor(cancel)
|
||||
|
||||
ticker := time.NewTicker(refreshInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
end := time.Now().UnixNano()
|
||||
doneCh := ctxWithCancel.Done()
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
logger.Panicf("BUG: it is expected that http.ResponseWriter (%T) supports http.Flusher interface", w)
|
||||
}
|
||||
for {
|
||||
start := end - tailOffsetNsecs
|
||||
end = time.Now().UnixNano()
|
||||
|
||||
qCopy := q.Clone()
|
||||
qCopy.AddTimeFilter(start, end)
|
||||
if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, qCopy, tp.writeBlock); err != nil {
|
||||
httpserver.Errorf(w, r, "cannot execute tail query [%s]: %s", q, err)
|
||||
return
|
||||
}
|
||||
resultRows, err := tp.getTailRows()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot get tail results for query [%q]: %s", q, err)
|
||||
return
|
||||
}
|
||||
if len(resultRows) > 0 {
|
||||
WriteJSONRows(w, resultRows)
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-doneCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var liveTailRequests = metrics.NewCounter(`vl_live_tailing_requests`)
|
||||
|
||||
const tailOffsetNsecs = 5e9
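// Every live tailing iteration re-queries the trailing window [prevEnd-tailOffsetNsecs ... now],
// so recently ingested logs aren't missed; rows already sent are dropped via tailProcessor.lastTimestamps.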
|
||||
|
||||
type logRow struct {
|
||||
timestamp int64
|
||||
fields []logstorage.Field
|
||||
}
|
||||
|
||||
func sortLogRows(rows []logRow) {
|
||||
sort.SliceStable(rows, func(i, j int) bool {
|
||||
return rows[i].timestamp < rows[j].timestamp
|
||||
})
|
||||
}
|
||||
|
||||
type tailProcessor struct {
|
||||
cancel func()
|
||||
|
||||
mu sync.Mutex
|
||||
|
||||
perStreamRows map[string][]logRow
|
||||
lastTimestamps map[string]int64
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func newTailProcessor(cancel func()) *tailProcessor {
|
||||
return &tailProcessor{
|
||||
cancel: cancel,
|
||||
|
||||
perStreamRows: make(map[string][]logRow),
|
||||
lastTimestamps: make(map[string]int64),
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *tailProcessor) writeBlock(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
|
||||
if len(timestamps) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
tp.mu.Lock()
|
||||
defer tp.mu.Unlock()
|
||||
|
||||
if tp.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure columns contain _time field, since it is needed for proper tail work.
|
||||
hasTime := false
|
||||
for _, c := range columns {
|
||||
if c.Name == "_time" {
|
||||
hasTime = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasTime {
|
||||
tp.err = fmt.Errorf("missing _time field")
|
||||
tp.cancel()
|
||||
return
|
||||
}
|
||||
|
||||
// Copy block rows to tp.perStreamRows
|
||||
for i, timestamp := range timestamps {
|
||||
streamID := ""
|
||||
fields := make([]logstorage.Field, len(columns))
|
||||
for j, c := range columns {
|
||||
name := strings.Clone(c.Name)
|
||||
value := strings.Clone(c.Values[i])
|
||||
|
||||
fields[j] = logstorage.Field{
|
||||
Name: name,
|
||||
Value: value,
|
||||
}
|
||||
|
||||
if name == "_stream_id" {
|
||||
streamID = value
|
||||
}
|
||||
}
|
||||
|
||||
tp.perStreamRows[streamID] = append(tp.perStreamRows[streamID], logRow{
|
||||
timestamp: timestamp,
|
||||
fields: fields,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *tailProcessor) getTailRows() ([][]logstorage.Field, error) {
|
||||
if tp.err != nil {
|
||||
return nil, tp.err
|
||||
}
|
||||
|
||||
var resultRows []logRow
|
||||
for streamID, rows := range tp.perStreamRows {
|
||||
sortLogRows(rows)
|
||||
|
||||
lastTimestamp, ok := tp.lastTimestamps[streamID]
|
||||
if ok {
|
||||
// Skip already written rows
|
||||
for len(rows) > 0 && rows[0].timestamp <= lastTimestamp {
|
||||
rows = rows[1:]
|
||||
}
|
||||
}
|
||||
if len(rows) > 0 {
|
||||
resultRows = append(resultRows, rows...)
|
||||
tp.lastTimestamps[streamID] = rows[len(rows)-1].timestamp
|
||||
}
|
||||
}
|
||||
clear(tp.perStreamRows)
|
||||
|
||||
sortLogRows(resultRows)
|
||||
|
||||
tailRows := make([][]logstorage.Field, len(resultRows))
|
||||
for i, row := range resultRows {
|
||||
tailRows[i] = row.fields
|
||||
}
|
||||
|
||||
return tailRows, nil
|
||||
}
|
||||
|
||||
// ProcessQueryRequest handles /select/logsql/query request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#http-api
|
||||
func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse limit query arg
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
bw := getBufferedWriter(w)
|
||||
defer func() {
|
||||
bw.FlushIgnoreErrors()
|
||||
putBufferedWriter(bw)
|
||||
}()
|
||||
w.Header().Set("Content-Type", "application/stream+json")
|
||||
|
||||
if limit > 0 {
|
||||
if q.CanReturnLastNResults() {
|
||||
rows, err := getLastNQueryResults(ctx, tenantIDs, q, limit)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
bb := blockResultPool.Get()
|
||||
b := bb.B
|
||||
for i := range rows {
|
||||
b = logstorage.MarshalFieldsToJSON(b[:0], rows[i].fields)
|
||||
b = append(b, '\n')
|
||||
bw.WriteIgnoreErrors(b)
|
||||
}
|
||||
bb.B = b
|
||||
blockResultPool.Put(bb)
|
||||
return
|
||||
}
|
||||
|
||||
q.AddPipeLimit(uint64(limit))
|
||||
}
|
||||
q.Optimize()
|
||||
|
||||
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
|
||||
if len(columns) == 0 || len(columns[0].Values) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
bb := blockResultPool.Get()
|
||||
for i := range timestamps {
|
||||
WriteJSONRow(bb, columns, i)
|
||||
}
|
||||
bw.WriteIgnoreErrors(bb.B)
|
||||
blockResultPool.Put(bb)
|
||||
}
|
||||
|
||||
if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
|
||||
httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var blockResultPool bytesutil.ByteBufferPool
|
||||
|
||||
type row struct {
|
||||
timestamp int64
|
||||
fields []logstorage.Field
|
||||
}
|
||||
|
||||
func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
|
||||
limitUpper := 2 * limit
|
||||
q.AddPipeLimit(uint64(limitUpper))
|
||||
q.Optimize()
|
||||
rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rows) < limitUpper {
|
||||
// Fast path - the requested time range contains up to limitUpper rows.
|
||||
rows = getLastNRows(rows, limit)
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
// Slow path - search for the time range containing up to limitUpper rows.
|
||||
start, end := q.GetFilterTimeRange()
|
||||
d := end/2 - start/2
|
||||
start += d
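// Bisect the time range: halve d on every iteration and move the start boundary forward when
// too many rows match or backwards when too few match, until the row count falls into
// [limit, limitUpper) or d reaches zero.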
|
||||
|
||||
qOrig := q
|
||||
for {
|
||||
q = qOrig.Clone()
|
||||
q.AddTimeFilter(start, end)
|
||||
rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(rows) >= limit && len(rows) < limitUpper || d == 0 {
|
||||
rows = getLastNRows(rows, limit)
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
lastBit := d & 1
|
||||
d /= 2
|
||||
if len(rows) > limit {
|
||||
start += d
|
||||
} else {
|
||||
start -= d + lastBit
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getLastNRows(rows []row, limit int) []row {
|
||||
sort.Slice(rows, func(i, j int) bool {
|
||||
return rows[i].timestamp < rows[j].timestamp
|
||||
})
|
||||
if len(rows) > limit {
|
||||
rows = rows[len(rows)-limit:]
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
|
||||
ctxWithCancel, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var rows []row
|
||||
var rowsLock sync.Mutex
|
||||
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
|
||||
rowsLock.Lock()
|
||||
defer rowsLock.Unlock()
|
||||
|
||||
for i, timestamp := range timestamps {
|
||||
fields := make([]logstorage.Field, len(columns))
|
||||
for j := range columns {
|
||||
f := &fields[j]
|
||||
f.Name = strings.Clone(columns[j].Name)
|
||||
f.Value = strings.Clone(columns[j].Values[i])
|
||||
}
|
||||
rows = append(rows, row{
|
||||
timestamp: timestamp,
|
||||
fields: fields,
|
||||
})
|
||||
}
|
||||
|
||||
if len(rows) >= limit {
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
// Extract tenantID
tenantID, err := logstorage.GetTenantIDFromRequest(r)
if err != nil {
return nil, nil, fmt.Errorf("cannot obtain tenantID: %w", err)
}
tenantIDs := []logstorage.TenantID{tenantID}

// Parse query
qStr := r.FormValue("query")
q, err := logstorage.ParseQuery(qStr)
if err != nil {
return nil, nil, fmt.Errorf("cannot parse query [%s]: %s", qStr, err)
}

// Parse optional start and end args
start, okStart, err := getTimeNsec(r, "start")
if err != nil {
return nil, nil, err
}
end, okEnd, err := getTimeNsec(r, "end")
if err != nil {
return nil, nil, err
}
if okStart || okEnd {
if !okStart {
start = math.MinInt64
}
if !okEnd {
end = math.MaxInt64
}
q.AddTimeFilter(start, end)
}

return q, tenantIDs, nil
}

var (
maxSortBufferSize = flagutil.NewBytes("select.maxSortBufferSize", 1024*1024, "Query results from /select/logsql/query are automatically sorted by _time "+
"if their summary size doesn't exceed this value; otherwise, query results are streamed in the response without sorting; "+
"too big value for this flag may result in high memory usage since the sorting is performed in memory")
)

// ProcessQueryRequest handles /select/logsql/query request
func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}) {
// Extract tenantID
tenantID, err := logstorage.GetTenantIDFromRequest(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}

// Parse query
qStr := r.FormValue("query")
q, err := logstorage.ParseQuery(qStr)
if err != nil {
httpserver.Errorf(w, r, "cannot parse query [%s]: %s", qStr, err)
return
}
w.Header().Set("Content-Type", "application/stream+json; charset=utf-8")

sw := getSortWriter()
sw.Init(w, maxSortBufferSize.IntN())
tenantIDs := []logstorage.TenantID{tenantID}
vlstorage.RunQuery(tenantIDs, q, stopCh, func(columns []logstorage.BlockColumn) {
if len(columns) == 0 {
return
}
rowsCount := len(columns[0].Values)

bb := blockResultPool.Get()
for rowIdx := 0; rowIdx < rowsCount; rowIdx++ {
WriteJSONRow(bb, columns, rowIdx)
}
sw.MustWrite(bb.B)
blockResultPool.Put(bb)
})
sw.FinalFlush()
putSortWriter(sw)
}
|
||||
|
||||
func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
|
||||
s := r.FormValue(argName)
|
||||
if s == "" {
|
||||
return 0, false, nil
|
||||
}
|
||||
currentTimestamp := time.Now().UnixNano()
|
||||
nsecs, err := promutils.ParseTimeAt(s, currentTimestamp)
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("cannot parse %s=%s: %w", argName, s, err)
|
||||
}
|
||||
return nsecs, true, nil
|
||||
}
|
||||
var blockResultPool bytesutil.ByteBufferPool
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
// ValuesWithHitsJSON generates JSON from the given values.
|
||||
{% func ValuesWithHitsJSON(values []logstorage.ValueWithHits) %}
|
||||
{
|
||||
"values":{%= valuesWithHitsJSONArray(values) %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func valuesWithHitsJSONArray(values []logstorage.ValueWithHits) %}
|
||||
[
|
||||
{% if len(values) > 0 %}
|
||||
{%= valueWithHitsJSON(values[0]) %}
|
||||
{% for _, v := range values[1:] %}
|
||||
,{%= valueWithHitsJSON(v) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
{% endfunc %}
|
||||
|
||||
{% func valueWithHitsJSON(v logstorage.ValueWithHits) %}
|
||||
{
|
||||
"value":{%q= v.Value %},
|
||||
"hits":{%dul= v.Hits %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
@@ -1,152 +0,0 @@
|
||||
// Code generated by qtc from "logsql.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:1
|
||||
package logsql
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
// ValuesWithHitsJSON generates JSON from the given values.
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:8
|
||||
func StreamValuesWithHitsJSON(qw422016 *qt422016.Writer, values []logstorage.ValueWithHits) {
|
||||
//line app/vlselect/logsql/logsql.qtpl:8
|
||||
qw422016.N().S(`{"values":`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:10
|
||||
streamvaluesWithHitsJSONArray(qw422016, values)
|
||||
//line app/vlselect/logsql/logsql.qtpl:10
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
func WriteValuesWithHitsJSON(qq422016 qtio422016.Writer, values []logstorage.ValueWithHits) {
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
StreamValuesWithHitsJSON(qw422016, values)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
func ValuesWithHitsJSON(values []logstorage.ValueWithHits) string {
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
WriteValuesWithHitsJSON(qb422016, values)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/logsql.qtpl:12
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:14
|
||||
func streamvaluesWithHitsJSONArray(qw422016 *qt422016.Writer, values []logstorage.ValueWithHits) {
|
||||
//line app/vlselect/logsql/logsql.qtpl:14
|
||||
qw422016.N().S(`[`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:16
|
||||
if len(values) > 0 {
|
||||
//line app/vlselect/logsql/logsql.qtpl:17
|
||||
streamvalueWithHitsJSON(qw422016, values[0])
|
||||
//line app/vlselect/logsql/logsql.qtpl:18
|
||||
for _, v := range values[1:] {
|
||||
//line app/vlselect/logsql/logsql.qtpl:18
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:19
|
||||
streamvalueWithHitsJSON(qw422016, v)
|
||||
//line app/vlselect/logsql/logsql.qtpl:20
|
||||
}
|
||||
//line app/vlselect/logsql/logsql.qtpl:21
|
||||
}
|
||||
//line app/vlselect/logsql/logsql.qtpl:21
|
||||
qw422016.N().S(`]`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
func writevaluesWithHitsJSONArray(qq422016 qtio422016.Writer, values []logstorage.ValueWithHits) {
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
streamvaluesWithHitsJSONArray(qw422016, values)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
func valuesWithHitsJSONArray(values []logstorage.ValueWithHits) string {
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
writevaluesWithHitsJSONArray(qb422016, values)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/logsql.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:25
|
||||
func streamvalueWithHitsJSON(qw422016 *qt422016.Writer, v logstorage.ValueWithHits) {
|
||||
//line app/vlselect/logsql/logsql.qtpl:25
|
||||
qw422016.N().S(`{"value":`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:27
|
||||
qw422016.N().Q(v.Value)
|
||||
//line app/vlselect/logsql/logsql.qtpl:27
|
||||
qw422016.N().S(`,"hits":`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:28
|
||||
qw422016.N().DUL(v.Hits)
|
||||
//line app/vlselect/logsql/logsql.qtpl:28
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
func writevalueWithHitsJSON(qq422016 qtio422016.Writer, v logstorage.ValueWithHits) {
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
streamvalueWithHitsJSON(qw422016, v)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
func valueWithHitsJSON(v logstorage.ValueWithHits) string {
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
writevalueWithHitsJSON(qb422016, v)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/logsql.qtpl:30
|
||||
}
|
||||
225
app/vlselect/logsql/sort_writer.go
Normal file
@@ -0,0 +1,225 @@
|
||||
package logsql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func getSortWriter() *sortWriter {
|
||||
v := sortWriterPool.Get()
|
||||
if v == nil {
|
||||
return &sortWriter{}
|
||||
}
|
||||
return v.(*sortWriter)
|
||||
}
|
||||
|
||||
func putSortWriter(sw *sortWriter) {
|
||||
sw.reset()
|
||||
sortWriterPool.Put(sw)
|
||||
}
|
||||
|
var sortWriterPool sync.Pool

// sortWriter expects JSON line stream to be written to it.
//
// It buffers the incoming data until its size reaches maxBufLen.
// Then it streams the buffered data and all the incoming data to w.
//
// The FinalFlush() must be called when all the data is written.
// If the buf isn't empty at FinalFlush() call, then the buffered data
// is sorted by _time field.
type sortWriter struct {
	mu         sync.Mutex
	w          io.Writer
	maxBufLen  int
	buf        []byte
	bufFlushed bool

	hasErr bool
}

func (sw *sortWriter) reset() {
	sw.w = nil
	sw.maxBufLen = 0
	sw.buf = sw.buf[:0]
	sw.bufFlushed = false
	sw.hasErr = false
}

func (sw *sortWriter) Init(w io.Writer, maxBufLen int) {
	sw.reset()

	sw.w = w
	sw.maxBufLen = maxBufLen
}

func (sw *sortWriter) MustWrite(p []byte) {
	sw.mu.Lock()
	defer sw.mu.Unlock()

	if sw.hasErr {
		return
	}

	if sw.bufFlushed {
		if _, err := sw.w.Write(p); err != nil {
			sw.hasErr = true
		}
		return
	}
	if len(sw.buf)+len(p) < sw.maxBufLen {
		sw.buf = append(sw.buf, p...)
		return
	}
	sw.bufFlushed = true
	if len(sw.buf) > 0 {
		if _, err := sw.w.Write(sw.buf); err != nil {
			sw.hasErr = true
			return
		}
		sw.buf = sw.buf[:0]
	}
	if _, err := sw.w.Write(p); err != nil {
		sw.hasErr = true
	}
}

func (sw *sortWriter) FinalFlush() {
	if sw.hasErr || sw.bufFlushed {
		return
	}
	rs := getRowsSorter()
	rs.parseRows(sw.buf)
	rs.sort()
	WriteJSONRows(sw.w, rs.rows)
	putRowsSorter(rs)
}

func getRowsSorter() *rowsSorter {
	v := rowsSorterPool.Get()
	if v == nil {
		return &rowsSorter{}
	}
	return v.(*rowsSorter)
}

func putRowsSorter(rs *rowsSorter) {
	rs.reset()
	rowsSorterPool.Put(rs)
}

var rowsSorterPool sync.Pool

type rowsSorter struct {
	buf       []byte
	fieldsBuf []logstorage.Field
	rows      [][]logstorage.Field
	times     []string
}

func (rs *rowsSorter) reset() {
	rs.buf = rs.buf[:0]

	fieldsBuf := rs.fieldsBuf
	for i := range fieldsBuf {
		fieldsBuf[i].Reset()
	}
	rs.fieldsBuf = fieldsBuf[:0]

	rows := rs.rows
	for i := range rows {
		rows[i] = nil
	}
	rs.rows = rows[:0]

	times := rs.times
	for i := range times {
		times[i] = ""
	}
	rs.times = times[:0]
}

func (rs *rowsSorter) parseRows(src []byte) {
	rs.reset()

	buf := rs.buf
	fieldsBuf := rs.fieldsBuf
	rows := rs.rows
	times := rs.times

	p := logjson.GetParser()
	for len(src) > 0 {
		var line []byte
		n := bytes.IndexByte(src, '\n')
		if n < 0 {
			line = src
			src = nil
		} else {
			line = src[:n]
			src = src[n+1:]
		}
		if len(line) == 0 {
			continue
		}

		if err := p.ParseLogMessage(line); err != nil {
			logger.Panicf("BUG: unexpected invalid JSON line: %s", err)
		}

		timeValue := ""
		fieldsBufLen := len(fieldsBuf)
		for _, f := range p.Fields {
			bufLen := len(buf)
			buf = append(buf, f.Name...)
			name := bytesutil.ToUnsafeString(buf[bufLen:])

			bufLen = len(buf)
			buf = append(buf, f.Value...)
			value := bytesutil.ToUnsafeString(buf[bufLen:])

			fieldsBuf = append(fieldsBuf, logstorage.Field{
				Name:  name,
				Value: value,
			})

			if name == "_time" {
				timeValue = value
			}
		}
		rows = append(rows, fieldsBuf[fieldsBufLen:])
		times = append(times, timeValue)
	}
	logjson.PutParser(p)

	rs.buf = buf
	rs.fieldsBuf = fieldsBuf
	rs.rows = rows
	rs.times = times
}

func (rs *rowsSorter) Len() int {
	return len(rs.rows)
}

func (rs *rowsSorter) Less(i, j int) bool {
	times := rs.times
	return times[i] < times[j]
}

func (rs *rowsSorter) Swap(i, j int) {
	times := rs.times
	rows := rs.rows
	times[i], times[j] = times[j], times[i]
	rows[i], rows[j] = rows[j], rows[i]
}

func (rs *rowsSorter) sort() {
	sort.Sort(rs)
}
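The sortWriter above only sorts what still fits in its buffer; once the buffer overflows, output degrades to plain streaming in arrival order. A minimal usage sketch follows; the getSortWriter/putSortWriter pool helpers are assumed to be defined elsewhere in this package on top of sortWriterPool:

// Sketch: stream query result rows through a sortWriter so that small result
// sets come back sorted by _time, while large ones are streamed unsorted.
func writeSortedRows(w io.Writer, rows [][]byte) {
	sw := getSortWriter() // assumed pool helper backed by sortWriterPool
	sw.Init(w, 1024*1024) // buffer up to 1 MiB before falling back to streaming
	for _, row := range rows {
		sw.MustWrite(row) // each row is a single JSON line ending with '\n'
	}
	sw.FinalFlush() // sorts the buffered rows by _time if nothing was streamed yet
	putSortWriter(sw)
}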
39  app/vlselect/logsql/sort_writer_test.go  Normal file
@@ -0,0 +1,39 @@
package logsql

import (
	"bytes"
	"strings"
	"testing"
)

func TestSortWriter(t *testing.T) {
	f := func(maxBufLen int, data string, expectedResult string) {
		t.Helper()

		var bb bytes.Buffer
		sw := getSortWriter()
		sw.Init(&bb, maxBufLen)

		for _, s := range strings.Split(data, "\n") {
			sw.MustWrite([]byte(s + "\n"))
		}
		sw.FinalFlush()
		putSortWriter(sw)

		result := bb.String()
		if result != expectedResult {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, expectedResult)
		}
	}

	f(100, "", "")
	f(100, "{}", "{}\n")

	data := `{"_time":"def","_msg":"xxx"}
{"_time":"abc","_msg":"foo"}`
	resultExpected := `{"_time":"abc","_msg":"foo"}
{"_time":"def","_msg":"xxx"}
`
	f(100, data, resultExpected)
	f(10, data, data+"\n")
}
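The test relies on getSortWriter and putSortWriter, which are not part of this hunk. They would typically mirror the getRowsSorter/putRowsSorter helpers shown earlier; a hedged sketch, assuming exactly that layout:

// Sketch (not shown in this diff): pool helpers for sortWriter, modeled on
// the rowsSorter pool in the same file.
func getSortWriter() *sortWriter {
	v := sortWriterPool.Get()
	if v == nil {
		return &sortWriter{}
	}
	return v.(*sortWriter)
}

func putSortWriter(sw *sortWriter) {
	sw.reset()
	sortWriterPool.Put(sw)
}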
@@ -1,7 +1,6 @@
package vlselect

import (
"context"
"embed"
"flag"
"fmt"
@@ -14,6 +13,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
"github.com/VictoriaMetrics/metrics"
)

@@ -23,7 +23,7 @@ var (
"See also -search.maxQueueDuration")
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the search request waits for execution when -search.maxConcurrentRequests "+
"limit is reached; see also -search.maxQueryDuration")
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution. It can be overridden on a per-query basis via 'timeout' query arg")
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution")
)

func getDefaultMaxConcurrentRequests() int {
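The per-query override mentioned in the -search.maxQueryDuration description is read by getMaxQueryDuration, whose body is not included in this hunk. A hedged sketch, using only the standard library, of how a 'timeout' query arg capped by the flag could be derived:

// Sketch only; the real helper in this package may parse the argument differently.
func getMaxQueryDurationSketch(r *http.Request) time.Duration {
	d, err := time.ParseDuration(r.FormValue("timeout"))
	if err != nil || d <= 0 || d > *maxQueryDuration {
		return *maxQueryDuration
	}
	return d
}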
@@ -75,9 +75,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
// Skip requests, which do not start with /select/, since these aren't our requests.
return false
}
path = strings.TrimPrefix(path, "/select")
path = strings.ReplaceAll(path, "//", "/")

if path == "/select/vmui" {
if path == "/vmui" {
// VMUI access via incomplete url without `/` in the end. Redirect to complete url.
// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
// is hidden behind vmauth or similar proxy.
@@ -86,128 +87,63 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
httpserver.Redirect(w, newURL)
return true
}
if strings.HasPrefix(path, "/select/vmui/") {
if strings.HasPrefix(path, "/select/vmui/static/") {
if strings.HasPrefix(path, "/vmui/") {
if strings.HasPrefix(path, "/vmui/static/") {
// Allow clients caching static contents for long period of time, since it shouldn't change over time.
// Path to static contents (such as js and css) must be changed whenever its contents is changed.
// See https://developer.chrome.com/docs/lighthouse/performance/uses-long-cache-ttl/
w.Header().Set("Cache-Control", "max-age=31536000")
}
r.URL.Path = strings.TrimPrefix(path, "/select")
r.URL.Path = path
vmuiFileServer.ServeHTTP(w, r)
return true
}

// Limit the number of concurrent queries, which can consume big amounts of CPU time.
// Limit the number of concurrent queries, which can consume big amounts of CPU.
startTime := time.Now()
ctx := r.Context()
d := getMaxQueryDuration(r)
ctxWithTimeout, cancel := context.WithTimeout(ctx, d)
defer cancel()

stopCh := ctxWithTimeout.Done()
stopCh := r.Context().Done()
select {
case concurrencyLimitCh <- struct{}{}:
defer func() { <-concurrencyLimitCh }()
default:
// Sleep for a while until giving up. This should resolve short bursts in requests.
concurrencyLimitReached.Inc()
d := getMaxQueryDuration(r)
if d > *maxQueueDuration {
d = *maxQueueDuration
}
t := timerpool.Get(d)
select {
case concurrencyLimitCh <- struct{}{}:
timerpool.Put(t)
defer func() { <-concurrencyLimitCh }()
case <-stopCh:
switch ctxWithTimeout.Err() {
case context.Canceled:
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Infof("client has canceled the pending request after %.3f seconds: remoteAddr=%s, requestURI: %q",
time.Since(startTime).Seconds(), remoteAddr, requestURI)
case context.DeadlineExceeded:
concurrencyLimitTimeout.Inc()
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("couldn't start executing the request in %.3f seconds, since -search.maxConcurrentRequests=%d concurrent requests "+
"are executed. Possible solutions: to reduce query load; to add more compute resources to the server; "+
"to increase -search.maxQueueDuration=%s; to increase -search.maxQueryDuration=%s; to increase -search.maxConcurrentRequests; "+
"to pass bigger value to 'timeout' query arg",
d.Seconds(), *maxConcurrentRequests, maxQueueDuration, maxQueryDuration),
StatusCode: http.StatusServiceUnavailable,
}
httpserver.Errorf(w, r, "%s", err)
timerpool.Put(t)
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Infof("client has cancelled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
time.Since(startTime).Seconds(), remoteAddr, requestURI)
return true
case <-t.C:
timerpool.Put(t)
concurrencyLimitTimeout.Inc()
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("couldn't start executing the request in %.3f seconds, since -search.maxConcurrentRequests=%d concurrent requests "+
"are executed. Possible solutions: to reduce query load; to add more compute resources to the server; "+
"to increase -search.maxQueueDuration=%s; to increase -search.maxQueryDuration; to increase -search.maxConcurrentRequests",
d.Seconds(), *maxConcurrentRequests, maxQueueDuration),
StatusCode: http.StatusServiceUnavailable,
}
httpserver.Errorf(w, r, "%s", err)
return true
}
}
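Both variants of the hunk above queue an incoming query on a buffered channel that doubles as a concurrency semaphore, waiting a bounded amount of time before giving up. A self-contained sketch of that pattern in isolation (only the standard library; the channel capacity stands in for -search.maxConcurrentRequests):

// Sketch of the bounded-wait semaphore pattern used by RequestHandler.
var concurrencySem = make(chan struct{}, 8)

func withConcurrencyLimit(maxWait time.Duration, stop <-chan struct{}, f func()) error {
	select {
	case concurrencySem <- struct{}{}: // fast path: a slot is free
	default:
		t := time.NewTimer(maxWait)
		defer t.Stop()
		select {
		case concurrencySem <- struct{}{}: // a slot freed up while waiting
		case <-stop:
			return errors.New("canceled while waiting for a free execution slot")
		case <-t.C:
			return errors.New("no free execution slots within maxWait")
		}
	}
	defer func() { <-concurrencySem }()
	f()
	return nil
}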
if path == "/select/logsql/tail" {
logsqlTailRequests.Inc()
// Process live tailing request without timeout (e.g. use ctx instead of ctxWithTimeout),
// since it is OK to run live tailing requests for very long time.
logsql.ProcessLiveTailRequest(ctx, w, r)
return true
}

ok := processSelectRequest(ctxWithTimeout, w, r, path)
if !ok {
return false
}

err := ctxWithTimeout.Err()
switch err {
case nil:
// nothing to do
case context.Canceled:
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Infof("client has canceled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
time.Since(startTime).Seconds(), remoteAddr, requestURI)
case context.DeadlineExceeded:
err = &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("the request couldn't be executed in %.3f seconds; possible solutions: "+
"to increase -search.maxQueryDuration=%s; to pass bigger value to 'timeout' query arg", d.Seconds(), maxQueryDuration),
StatusCode: http.StatusServiceUnavailable,
}
httpserver.Errorf(w, r, "%s", err)
default:
httpserver.Errorf(w, r, "unexpected error: %s", err)
}

return true
}

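After processSelectRequest returns, the handler inspects the context error to tell an abandoned request apart from one that hit its deadline. A hedged sketch of that post-run check on its own:

// Sketch: map the context error to a client-facing outcome, mirroring the
// switch above. Status codes are illustrative, not the exact values used here.
func finishRequest(ctx context.Context) (status int, logOnly bool) {
	switch ctx.Err() {
	case nil:
		return http.StatusOK, false
	case context.Canceled:
		// The client went away; there is nobody left to reply to, so only log it.
		return 0, true
	case context.DeadlineExceeded:
		// The query ran longer than the per-request deadline.
		return http.StatusServiceUnavailable, false
	default:
		return http.StatusInternalServerError, false
	}
}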
func processSelectRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, path string) bool {
httpserver.EnableCORS(w, r)
switch path {
case "/select/logsql/field_names":
logsqlFieldNamesRequests.Inc()
logsql.ProcessFieldNamesRequest(ctx, w, r)
return true
case "/select/logsql/field_values":
logsqlFieldValuesRequests.Inc()
logsql.ProcessFieldValuesRequest(ctx, w, r)
return true
case "/select/logsql/hits":
logsqlHitsRequests.Inc()
logsql.ProcessHitsRequest(ctx, w, r)
return true
case "/select/logsql/query":
switch {
case path == "/logsql/query":
logsqlQueryRequests.Inc()
logsql.ProcessQueryRequest(ctx, w, r)
return true
case "/select/logsql/stream_field_names":
logsqlStreamFieldNamesRequests.Inc()
logsql.ProcessStreamFieldNamesRequest(ctx, w, r)
return true
case "/select/logsql/stream_field_values":
logsqlStreamFieldValuesRequests.Inc()
logsql.ProcessStreamFieldValuesRequest(ctx, w, r)
return true
case "/select/logsql/stream_ids":
logsqlStreamIDsRequests.Inc()
logsql.ProcessStreamIDsRequest(ctx, w, r)
return true
case "/select/logsql/streams":
logsqlStreamsRequests.Inc()
logsql.ProcessStreamsRequest(ctx, w, r)
httpserver.EnableCORS(w, r)
logsql.ProcessQueryRequest(w, r, stopCh)
return true
default:
return false
@@ -228,13 +164,5 @@ func getMaxQueryDuration(r *http.Request) time.Duration {
}

var (
logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)
logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
logsqlStreamFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_names"}`)
logsqlStreamFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_values"}`)
logsqlStreamIDsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_ids"}`)
logsqlStreamsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/streams"}`)
logsqlTailRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/tail"}`)
logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
)

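Each /select/logsql/* endpoint above pairs one case in the switch with one per-path request counter. A purely hypothetical example of how an additional endpoint would plug into the same pattern (the handler and counter names below do not exist in the repository):

// Hypothetical wiring, for illustration only.
// case "/select/logsql/example_stats":
//	logsqlExampleStatsRequests.Inc()
//	logsql.ProcessExampleStatsRequest(ctx, w, r)
//	return true
//
// with the matching counter declared alongside the others:
// logsqlExampleStatsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/example_stats"}`)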
@@ -1,13 +1,13 @@
{
"files": {
"main.css": "./static/css/main.1041c3d4.css",
"main.js": "./static/js/main.8988988c.js",
"static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.aaabf95f2c9bf356bde4.md",
"main.css": "./static/css/main.9a224445.css",
"main.js": "./static/js/main.02178f4b.js",
"static/js/522.b5ae4365.chunk.js": "./static/js/522.b5ae4365.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.957b90ab4cb4852eec26.md",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.1041c3d4.css",
"static/js/main.8988988c.js"
"static/css/main.9a224445.css",
"static/js/main.02178f4b.js"
]
}
(binary image file changed; size 15 KiB before and after)
@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.8988988c.js"></script><link href="./static/css/main.1041c3d4.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.02178f4b.js"></script><link href="./static/css/main.9a224445.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long
1  app/vlselect/vmui/static/css/main.9a224445.css  Normal file
File diff suppressed because one or more lines are too long
1  app/vlselect/vmui/static/js/522.b5ae4365.chunk.js  Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
2  app/vlselect/vmui/static/js/main.02178f4b.js  Normal file
File diff suppressed because one or more lines are too long
@@ -4,8 +4,10 @@
http://jedwatson.github.io/classnames
*/

/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */

/**
* @remix-run/router v1.15.1
* @remix-run/router v1.7.2
*
* Copyright (c) Remix Software Inc.
*
@@ -16,7 +18,7 @@
*/

/**
* React Router DOM v6.22.1
* React Router DOM v6.14.2
*
* Copyright (c) Remix Software Inc.
*
@@ -27,7 +29,7 @@
*/

/**
* React Router v6.22.1
* React Router v6.14.2
*
* Copyright (c) Remix Software Inc.
*
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@@ -1,11 +1,10 @@
|
||||
package vlstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
@@ -20,21 +19,19 @@ import (
|
||||
var (
|
||||
retentionPeriod = flagutil.NewDuration("retentionPeriod", "7d", "Log entries with timestamps older than now-retentionPeriod are automatically deleted; "+
|
||||
"log entries with timestamps outside the retention are also rejected during data ingestion; the minimum supported retention is 1d (one day); "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/#retention ; see also -retention.maxDiskSpaceUsageBytes")
|
||||
maxDiskSpaceUsageBytes = flagutil.NewBytes("retention.maxDiskSpaceUsageBytes", 0, "The maximum disk space usage at -storageDataPath before older per-day "+
|
||||
"partitions are automatically dropped; see https://docs.victoriametrics.com/victorialogs/#retention-by-disk-space-usage ; see also -retentionPeriod")
|
||||
"see https://docs.victoriametrics.com/VictoriaLogs/#retention")
|
||||
futureRetention = flagutil.NewDuration("futureRetention", "2d", "Log entries with timestamps bigger than now+futureRetention are rejected during data ingestion; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/#retention")
|
||||
storageDataPath = flag.String("storageDataPath", "victoria-logs-data", "Path to directory where to store VictoriaLogs data; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/#storage")
|
||||
"see https://docs.victoriametrics.com/VictoriaLogs/#retention")
|
||||
storageDataPath = flag.String("storageDataPath", "victoria-logs-data", "Path to directory with the VictoriaLogs data; "+
|
||||
"see https://docs.victoriametrics.com/VictoriaLogs/#storage")
|
||||
inmemoryDataFlushInterval = flag.Duration("inmemoryDataFlushInterval", 5*time.Second, "The interval for guaranteed saving of in-memory data to disk. "+
|
||||
"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
|
||||
"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
|
||||
"Smaller intervals increase disk IO load. Minimum supported value is 1s")
|
||||
logNewStreams = flag.Bool("logNewStreams", false, "Whether to log creation of new streams; this can be useful for debugging of high cardinality issues with log streams; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields ; see also -logIngestedRows")
|
||||
"see https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#stream-fields ; see also -logIngestedRows")
|
||||
logIngestedRows = flag.Bool("logIngestedRows", false, "Whether to log all the ingested log entries; this can be useful for debugging of data ingestion; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/data-ingestion/ ; see also -logNewStreams")
|
||||
"see https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/ ; see also -logNewStreams")
|
||||
minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which "+
|
||||
"the storage stops accepting new data")
|
||||
)
|
||||
@@ -51,13 +48,12 @@ func Init() {
|
||||
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
|
||||
}
|
||||
cfg := &logstorage.StorageConfig{
|
||||
Retention: retentionPeriod.Duration(),
|
||||
MaxDiskSpaceUsageBytes: maxDiskSpaceUsageBytes.N,
|
||||
FlushInterval: *inmemoryDataFlushInterval,
|
||||
FutureRetention: futureRetention.Duration(),
|
||||
LogNewStreams: *logNewStreams,
|
||||
LogIngestedRows: *logIngestedRows,
|
||||
MinFreeDiskSpaceBytes: minFreeDiskSpaceBytes.N,
|
||||
Retention: retentionPeriod.Duration(),
|
||||
FlushInterval: *inmemoryDataFlushInterval,
|
||||
FutureRetention: futureRetention.Duration(),
|
||||
LogNewStreams: *logNewStreams,
|
||||
LogIngestedRows: *logIngestedRows,
|
||||
MinFreeDiskSpaceBytes: minFreeDiskSpaceBytes.N,
|
||||
}
|
||||
logger.Infof("opening storage at -storageDataPath=%s", *storageDataPath)
|
||||
startTime := time.Now()
|
||||
@@ -65,16 +61,10 @@ func Init() {
|
||||
|
||||
var ss logstorage.StorageStats
|
||||
strg.UpdateStats(&ss)
|
||||
logger.Infof("successfully opened storage in %.3f seconds; smallParts: %d; bigParts: %d; smallPartBlocks: %d; bigPartBlocks: %d; smallPartRows: %d; bigPartRows: %d; "+
|
||||
"smallPartSize: %d bytes; bigPartSize: %d bytes",
|
||||
time.Since(startTime).Seconds(), ss.SmallParts, ss.BigParts, ss.SmallPartBlocks, ss.BigPartBlocks, ss.SmallPartRowsCount, ss.BigPartRowsCount,
|
||||
ss.CompressedSmallPartSize, ss.CompressedBigPartSize)
|
||||
logger.Infof("successfully opened storage in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
|
||||
time.Since(startTime).Seconds(), ss.FileParts, ss.FileBlocks, ss.FileRowsCount, ss.CompressedFileSize)
|
||||
storageMetrics = initStorageMetrics(strg)
|
||||
|
||||
// register storage metrics
|
||||
storageMetrics = metrics.NewSet()
|
||||
storageMetrics.RegisterMetricsWriter(func(w io.Writer) {
|
||||
writeStorageMetrics(w, strg)
|
||||
})
|
||||
metrics.RegisterSet(storageMetrics)
|
||||
}
|
||||
|
||||
@@ -109,99 +99,117 @@ func MustAddRows(lr *logstorage.LogRows) {
|
||||
strg.MustAddRows(lr)
|
||||
}
|
||||
|
||||
// RunQuery runs the given q and calls writeBlock for the returned data blocks
|
||||
func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteBlockFunc) error {
|
||||
return strg.RunQuery(ctx, tenantIDs, q, writeBlock)
|
||||
// RunQuery runs the given q and calls processBlock for the returned data blocks
|
||||
func RunQuery(tenantIDs []logstorage.TenantID, q *logstorage.Query, stopCh <-chan struct{}, processBlock func(columns []logstorage.BlockColumn)) {
|
||||
strg.RunQuery(tenantIDs, q, stopCh, processBlock)
|
||||
}
|
||||
|
||||
// GetFieldNames executes q and returns field names seen in results.
|
||||
func GetFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
|
||||
return strg.GetFieldNames(ctx, tenantIDs, q)
|
||||
}
|
||||
func initStorageMetrics(strg *logstorage.Storage) *metrics.Set {
|
||||
ssCache := &logstorage.StorageStats{}
|
||||
var ssCacheLock sync.Mutex
|
||||
var lastUpdateTime time.Time
|
||||
|
||||
// GetFieldValues executes q and returns unique values for the fieldName seen in results.
|
||||
//
|
||||
// If limit > 0, then up to limit unique values are returned.
|
||||
func GetFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
|
||||
return strg.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
|
||||
}
|
||||
|
||||
// GetStreamFieldNames executes q and returns stream field names seen in results.
|
||||
func GetStreamFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
|
||||
return strg.GetStreamFieldNames(ctx, tenantIDs, q)
|
||||
}
|
||||
|
||||
// GetStreamFieldValues executes q and returns stream field values for the given fieldName seen in results.
|
||||
//
|
||||
// If limit > 0, then up to limit unique stream field values are returned.
|
||||
func GetStreamFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
|
||||
return strg.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
|
||||
}
|
||||
|
||||
// GetStreams executes q and returns streams seen in query results.
|
||||
//
|
||||
// If limit > 0, then up to limit unique streams are returned.
|
||||
func GetStreams(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
|
||||
return strg.GetStreams(ctx, tenantIDs, q, limit)
|
||||
}
|
||||
|
||||
// GetStreamIDs executes q and returns streamIDs seen in query results.
|
||||
//
|
||||
// If limit > 0, then up to limit unique streamIDs are returned.
|
||||
func GetStreamIDs(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
|
||||
return strg.GetStreamIDs(ctx, tenantIDs, q, limit)
|
||||
}
|
||||
|
||||
func writeStorageMetrics(w io.Writer, strg *logstorage.Storage) {
|
||||
var ss logstorage.StorageStats
|
||||
strg.UpdateStats(&ss)
|
||||
|
||||
metrics.WriteGaugeUint64(w, fmt.Sprintf(`vl_free_disk_space_bytes{path=%q}`, *storageDataPath), fs.MustGetFreeSpace(*storageDataPath))
|
||||
|
||||
isReadOnly := uint64(0)
|
||||
if ss.IsReadOnly {
|
||||
isReadOnly = 1
|
||||
m := func() *logstorage.StorageStats {
|
||||
ssCacheLock.Lock()
|
||||
defer ssCacheLock.Unlock()
|
||||
if time.Since(lastUpdateTime) < time.Second {
|
||||
return ssCache
|
||||
}
|
||||
var ss logstorage.StorageStats
|
||||
strg.UpdateStats(&ss)
|
||||
ssCache = &ss
|
||||
lastUpdateTime = time.Now()
|
||||
return ssCache
|
||||
}
|
||||
metrics.WriteGaugeUint64(w, fmt.Sprintf(`vl_storage_is_read_only{path=%q}`, *storageDataPath), isReadOnly)
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_active_merges{type="storage/inmemory"}`, ss.InmemoryActiveMerges)
|
||||
metrics.WriteGaugeUint64(w, `vl_active_merges{type="storage/small"}`, ss.SmallPartActiveMerges)
|
||||
metrics.WriteGaugeUint64(w, `vl_active_merges{type="storage/big"}`, ss.BigPartActiveMerges)
|
||||
ms := metrics.NewSet()
|
||||
|
||||
metrics.WriteCounterUint64(w, `vl_merges_total{type="storage/inmemory"}`, ss.InmemoryMergesTotal)
|
||||
metrics.WriteCounterUint64(w, `vl_merges_total{type="storage/small"}`, ss.SmallPartMergesTotal)
|
||||
metrics.WriteCounterUint64(w, `vl_merges_total{type="storage/big"}`, ss.BigPartMergesTotal)
|
||||
ms.NewGauge(fmt.Sprintf(`vl_free_disk_space_bytes{path=%q}`, *storageDataPath), func() float64 {
|
||||
return float64(fs.MustGetFreeSpace(*storageDataPath))
|
||||
})
|
||||
ms.NewGauge(fmt.Sprintf(`vl_storage_is_read_only{path=%q}`, *storageDataPath), func() float64 {
|
||||
if m().IsReadOnly {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_rows{type="storage/inmemory"}`, ss.InmemoryRowsCount)
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_rows{type="storage/small"}`, ss.SmallPartRowsCount)
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_rows{type="storage/big"}`, ss.BigPartRowsCount)
|
||||
ms.NewGauge(`vl_active_merges{type="inmemory"}`, func() float64 {
|
||||
return float64(m().InmemoryActiveMerges)
|
||||
})
|
||||
ms.NewGauge(`vl_merges_total{type="inmemory"}`, func() float64 {
|
||||
return float64(m().InmemoryMergesTotal)
|
||||
})
|
||||
ms.NewGauge(`vl_active_merges{type="file"}`, func() float64 {
|
||||
return float64(m().FileActiveMerges)
|
||||
})
|
||||
ms.NewGauge(`vl_merges_total{type="file"}`, func() float64 {
|
||||
return float64(m().FileMergesTotal)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_parts{type="storage/inmemory"}`, ss.InmemoryParts)
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_parts{type="storage/small"}`, ss.SmallParts)
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_parts{type="storage/big"}`, ss.BigParts)
|
||||
ms.NewGauge(`vl_storage_rows{type="inmemory"}`, func() float64 {
|
||||
return float64(m().InmemoryRowsCount)
|
||||
})
|
||||
ms.NewGauge(`vl_storage_rows{type="file"}`, func() float64 {
|
||||
return float64(m().FileRowsCount)
|
||||
})
|
||||
ms.NewGauge(`vl_storage_parts{type="inmemory"}`, func() float64 {
|
||||
return float64(m().InmemoryParts)
|
||||
})
|
||||
ms.NewGauge(`vl_storage_parts{type="file"}`, func() float64 {
|
||||
return float64(m().FileParts)
|
||||
})
|
||||
ms.NewGauge(`vl_storage_blocks{type="inmemory"}`, func() float64 {
|
||||
return float64(m().InmemoryBlocks)
|
||||
})
|
||||
ms.NewGauge(`vl_storage_blocks{type="file"}`, func() float64 {
|
||||
return float64(m().FileBlocks)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_blocks{type="storage/inmemory"}`, ss.InmemoryBlocks)
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_blocks{type="storage/small"}`, ss.SmallPartBlocks)
|
||||
metrics.WriteGaugeUint64(w, `vl_storage_blocks{type="storage/big"}`, ss.BigPartBlocks)
|
||||
ms.NewGauge(`vl_partitions`, func() float64 {
|
||||
return float64(m().PartitionsCount)
|
||||
})
|
||||
ms.NewGauge(`vl_streams_created_total`, func() float64 {
|
||||
return float64(m().StreamsCreatedTotal)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_partitions`, ss.PartitionsCount)
|
||||
metrics.WriteCounterUint64(w, `vl_streams_created_total`, ss.StreamsCreatedTotal)
|
||||
ms.NewGauge(`vl_indexdb_rows`, func() float64 {
|
||||
return float64(m().IndexdbItemsCount)
|
||||
})
|
||||
ms.NewGauge(`vl_indexdb_parts`, func() float64 {
|
||||
return float64(m().IndexdbPartsCount)
|
||||
})
|
||||
ms.NewGauge(`vl_indexdb_blocks`, func() float64 {
|
||||
return float64(m().IndexdbBlocksCount)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_indexdb_rows`, ss.IndexdbItemsCount)
|
||||
metrics.WriteGaugeUint64(w, `vl_indexdb_parts`, ss.IndexdbPartsCount)
|
||||
metrics.WriteGaugeUint64(w, `vl_indexdb_blocks`, ss.IndexdbBlocksCount)
|
||||
ms.NewGauge(`vl_data_size_bytes{type="indexdb"}`, func() float64 {
|
||||
return float64(m().IndexdbSizeBytes)
|
||||
})
|
||||
ms.NewGauge(`vl_data_size_bytes{type="storage"}`, func() float64 {
|
||||
dm := m()
|
||||
return float64(dm.CompressedInmemorySize + dm.CompressedFileSize)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_data_size_bytes{type="indexdb"}`, ss.IndexdbSizeBytes)
|
||||
metrics.WriteGaugeUint64(w, `vl_data_size_bytes{type="storage"}`, ss.CompressedInmemorySize+ss.CompressedSmallPartSize+ss.CompressedBigPartSize)
|
||||
ms.NewGauge(`vl_compressed_data_size_bytes{type="inmemory"}`, func() float64 {
|
||||
return float64(m().CompressedInmemorySize)
|
||||
})
|
||||
ms.NewGauge(`vl_compressed_data_size_bytes{type="file"}`, func() float64 {
|
||||
return float64(m().CompressedFileSize)
|
||||
})
|
||||
ms.NewGauge(`vl_uncompressed_data_size_bytes{type="inmemory"}`, func() float64 {
|
||||
return float64(m().UncompressedInmemorySize)
|
||||
})
|
||||
ms.NewGauge(`vl_uncompressed_data_size_bytes{type="file"}`, func() float64 {
|
||||
return float64(m().UncompressedFileSize)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_compressed_data_size_bytes{type="storage/inmemory"}`, ss.CompressedInmemorySize)
|
||||
metrics.WriteGaugeUint64(w, `vl_compressed_data_size_bytes{type="storage/small"}`, ss.CompressedSmallPartSize)
|
||||
metrics.WriteGaugeUint64(w, `vl_compressed_data_size_bytes{type="storage/big"}`, ss.CompressedBigPartSize)
|
||||
ms.NewGauge(`vl_rows_dropped_total{reason="too_big_timestamp"}`, func() float64 {
|
||||
return float64(m().RowsDroppedTooBigTimestamp)
|
||||
})
|
||||
ms.NewGauge(`vl_rows_dropped_total{reason="too_small_timestamp"}`, func() float64 {
|
||||
return float64(m().RowsDroppedTooSmallTimestamp)
|
||||
})
|
||||
|
||||
metrics.WriteGaugeUint64(w, `vl_uncompressed_data_size_bytes{type="storage/inmemory"}`, ss.UncompressedInmemorySize)
|
||||
metrics.WriteGaugeUint64(w, `vl_uncompressed_data_size_bytes{type="storage/small"}`, ss.UncompressedSmallPartSize)
|
||||
metrics.WriteGaugeUint64(w, `vl_uncompressed_data_size_bytes{type="storage/big"}`, ss.UncompressedBigPartSize)
|
||||
|
||||
metrics.WriteCounterUint64(w, `vl_rows_dropped_total{reason="too_big_timestamp"}`, ss.RowsDroppedTooBigTimestamp)
|
||||
metrics.WriteCounterUint64(w, `vl_rows_dropped_total{reason="too_small_timestamp"}`, ss.RowsDroppedTooSmallTimestamp)
|
||||
return ms
|
||||
}
|
||||
|
||||
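The vlstorage hunk above shows one side registering gauges through closures over a storage-stats snapshot that is refreshed at most once per second, while the other side writes metrics directly on each scrape. A hedged sketch of that caching closure on its own, assuming only the logstorage.StorageStats type and an UpdateStats-style callback:

// Sketch: share one stats snapshot across many gauges so a single scrape
// doesn't trigger dozens of UpdateStats calls.
func newCachedStats(update func(*logstorage.StorageStats)) func() *logstorage.StorageStats {
	var mu sync.Mutex
	var cached *logstorage.StorageStats
	var lastUpdate time.Time
	return func() *logstorage.StorageStats {
		mu.Lock()
		defer mu.Unlock()
		if cached != nil && time.Since(lastUpdate) < time.Second {
			return cached
		}
		var ss logstorage.StorageStats
		update(&ss)
		cached = &ss
		lastUpdate = time.Now()
		return cached
	}
}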
@@ -88,9 +88,6 @@ vmagent-linux-ppc64le:
vmagent-linux-s390x:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmagent-linux-loong64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmagent-linux-386:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

File diff suppressed because it is too large
@@ -3,15 +3,13 @@ package common
import (
"sync"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)

// PushCtx is a context used for populating WriteRequest.
type PushCtx struct {
// WriteRequest contains the WriteRequest, which must be pushed later to remote storage.
//
// The actual labels and samples for the time series are stored in Labels and Samples fields.
WriteRequest prompbmarshal.WriteRequest

// Labels contains flat list of all the labels used in WriteRequest.
@@ -23,7 +21,13 @@ type PushCtx struct {

// Reset resets ctx.
func (ctx *PushCtx) Reset() {
ctx.WriteRequest.Reset()
tss := ctx.WriteRequest.Timeseries
for i := range tss {
ts := &tss[i]
ts.Labels = nil
ts.Samples = nil
}
ctx.WriteRequest.Timeseries = ctx.WriteRequest.Timeseries[:0]

promrelabel.CleanLabels(ctx.Labels)
ctx.Labels = ctx.Labels[:0]
@@ -35,10 +39,15 @@ func (ctx *PushCtx) Reset() {
//
// Call PutPushCtx when the ctx is no longer needed.
func GetPushCtx() *PushCtx {
if v := pushCtxPool.Get(); v != nil {
return v.(*PushCtx)
select {
case ctx := <-pushCtxPoolCh:
return ctx
default:
if v := pushCtxPool.Get(); v != nil {
return v.(*PushCtx)
}
return &PushCtx{}
}
return &PushCtx{}
}

// PutPushCtx returns ctx to the pool.
@@ -46,7 +55,12 @@ func GetPushCtx() *PushCtx {
// ctx mustn't be used after returning to the pool.
func PutPushCtx(ctx *PushCtx) {
ctx.Reset()
pushCtxPool.Put(ctx)
select {
case pushCtxPoolCh <- ctx:
default:
pushCtxPool.Put(ctx)
}
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *PushCtx, cgroup.AvailableCPUs())

|
||||
ctx.WriteRequest.Timeseries = tssDst
|
||||
ctx.Labels = labels
|
||||
ctx.Samples = samples
|
||||
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
|
||||
return remotewrite.ErrQueueFullHTTPRetry
|
||||
}
|
||||
remotewrite.Push(at, &ctx.WriteRequest)
|
||||
rowsInserted.Add(len(rows))
|
||||
if at != nil {
|
||||
rowsTenantInserted.Get(at).Add(len(rows))
|
||||
|
||||
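One side of this hunk switches from an unconditional remotewrite.Push to remotewrite.TryPush, propagating ErrQueueFullHTTPRetry when the in-memory queue is full. A hedged sketch of how an HTTP layer could turn that sentinel error into a retryable response; the mapping below is illustrative, not the exact behavior of vmagent's handlers:

// Sketch: translate a full-queue condition into a retryable HTTP status
// instead of silently dropping the payload.
func writeInsertResult(w http.ResponseWriter, err error) {
	if err == nil {
		w.WriteHeader(http.StatusNoContent)
		return
	}
	if errors.Is(err, remotewrite.ErrQueueFullHTTPRetry) {
		w.Header().Set("Retry-After", "5")
		http.Error(w, err.Error(), http.StatusTooManyRequests)
		return
	}
	http.Error(w, err.Error(), http.StatusBadRequest)
}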
@@ -1,4 +1,4 @@
package datadogv1
package datadog

import (
"net/http"
@@ -8,32 +8,33 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogv1"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogv1/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
"github.com/VictoriaMetrics/metrics"
)

var (
rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="datadogv1"}`)
rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="datadogv1"}`)
rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="datadogv1"}`)
rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="datadog"}`)
rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="datadog"}`)
rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="datadog"}`)
)

// InsertHandlerForHTTP processes remote write for DataDog POST /api/v1/series request.
//
// See https://docs.datadoghq.com/api/latest/metrics/#submit-metrics
func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
extraLabels, err := parserCommon.GetExtraLabels(req)
if err != nil {
return err
}
ce := req.Header.Get("Content-Encoding")
return stream.Parse(req.Body, ce, func(series []datadogv1.Series) error {
return stream.Parse(req.Body, ce, func(series []datadog.Series) error {
return insertRows(at, series, extraLabels)
})
}

func insertRows(at *auth.Token, series []datadogv1.Series, extraLabels []prompbmarshal.Label) error {
func insertRows(at *auth.Token, series []datadog.Series, extraLabels []prompbmarshal.Label) error {
ctx := common.GetPushCtx()
defer common.PutPushCtx(ctx)

@@ -62,7 +63,7 @@ func insertRows(at *auth.Token, series []datadogv1.Series, extraLabels []prompbm
})
}
for _, tag := range ss.Tags {
name, value := datadogutils.SplitTag(tag)
name, value := datadog.SplitTag(tag)
if name == "host" {
name = "exported_host"
}
@@ -87,9 +88,7 @@ func insertRows(at *auth.Token, series []datadogv1.Series, extraLabels []prompbm
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(at, &ctx.WriteRequest)
rowsInserted.Add(rowsTotal)
if at != nil {
rowsTenantInserted.Get(at).Add(rowsTotal)
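The tag loop above splits each DataDog "name:value" tag via SplitTag and renames host so it doesn't collide with the host label set by vmagent itself. A simplified stand-in using only the standard library; its handling of tags without a ':' is an assumption and may differ from the real SplitTag helper:

// Sketch of the tag-to-label conversion used above.
func tagToLabel(tag string) prompbmarshal.Label {
	name, value := tag, ""
	if i := strings.IndexByte(tag, ':'); i >= 0 {
		name, value = tag[:i], tag[i+1:]
	}
	if name == "host" {
		name = "exported_host"
	}
	return prompbmarshal.Label{Name: name, Value: value}
}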
@@ -1,95 +0,0 @@
|
||||
package datadogsketches
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogsketches"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogsketches/stream"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="datadogsketches"}`)
|
||||
rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="datadogsketches"}`)
|
||||
rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="datadogsketches"}`)
|
||||
)
|
||||
|
||||
// InsertHandlerForHTTP processes remote write for DataDog POST /api/beta/sketches request.
|
||||
func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
|
||||
extraLabels, err := parserCommon.GetExtraLabels(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ce := req.Header.Get("Content-Encoding")
|
||||
return stream.Parse(req.Body, ce, func(sketches []*datadogsketches.Sketch) error {
|
||||
return insertRows(at, sketches, extraLabels)
|
||||
})
|
||||
}
|
||||
|
||||
func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels []prompbmarshal.Label) error {
|
||||
ctx := common.GetPushCtx()
|
||||
defer common.PutPushCtx(ctx)
|
||||
|
||||
rowsTotal := 0
|
||||
tssDst := ctx.WriteRequest.Timeseries[:0]
|
||||
labels := ctx.Labels[:0]
|
||||
samples := ctx.Samples[:0]
|
||||
for _, sketch := range sketches {
|
||||
ms := sketch.ToSummary()
|
||||
for _, m := range ms {
|
||||
labelsLen := len(labels)
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: "__name__",
|
||||
Value: m.Name,
|
||||
})
|
||||
for _, label := range m.Labels {
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: label.Name,
|
||||
Value: label.Value,
|
||||
})
|
||||
}
|
||||
for _, tag := range sketch.Tags {
|
||||
name, value := datadogutils.SplitTag(tag)
|
||||
if name == "host" {
|
||||
name = "exported_host"
|
||||
}
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: name,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
labels = append(labels, extraLabels...)
|
||||
samplesLen := len(samples)
|
||||
for _, p := range m.Points {
|
||||
samples = append(samples, prompbmarshal.Sample{
|
||||
Timestamp: p.Timestamp,
|
||||
Value: p.Value,
|
||||
})
|
||||
}
|
||||
rowsTotal += len(m.Points)
|
||||
tssDst = append(tssDst, prompbmarshal.TimeSeries{
|
||||
Labels: labels[labelsLen:],
|
||||
Samples: samples[samplesLen:],
|
||||
})
|
||||
}
|
||||
}
|
||||
ctx.WriteRequest.Timeseries = tssDst
|
||||
ctx.Labels = labels
|
||||
ctx.Samples = samples
|
||||
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
|
||||
return remotewrite.ErrQueueFullHTTPRetry
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
if at != nil {
|
||||
rowsTenantInserted.Get(at).Add(rowsTotal)
|
||||
}
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return nil
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
package datadogv2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogv2"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogv2/stream"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="datadogv2"}`)
|
||||
rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="datadogv2"}`)
|
||||
rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="datadogv2"}`)
|
||||
)
|
||||
|
||||
// InsertHandlerForHTTP processes remote write for DataDog POST /api/v2/series request.
|
||||
//
|
||||
// See https://docs.datadoghq.com/api/latest/metrics/#submit-metrics
|
||||
func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
|
||||
extraLabels, err := parserCommon.GetExtraLabels(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ct := req.Header.Get("Content-Type")
|
||||
ce := req.Header.Get("Content-Encoding")
|
||||
return stream.Parse(req.Body, ce, ct, func(series []datadogv2.Series) error {
|
||||
return insertRows(at, series, extraLabels)
|
||||
})
|
||||
}
|
||||
|
||||
func insertRows(at *auth.Token, series []datadogv2.Series, extraLabels []prompbmarshal.Label) error {
|
||||
ctx := common.GetPushCtx()
|
||||
defer common.PutPushCtx(ctx)
|
||||
|
||||
rowsTotal := 0
|
||||
tssDst := ctx.WriteRequest.Timeseries[:0]
|
||||
labels := ctx.Labels[:0]
|
||||
samples := ctx.Samples[:0]
|
||||
for i := range series {
|
||||
ss := &series[i]
|
||||
rowsTotal += len(ss.Points)
|
||||
labelsLen := len(labels)
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: "__name__",
|
||||
Value: ss.Metric,
|
||||
})
|
||||
for _, rs := range ss.Resources {
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: rs.Type,
|
||||
Value: rs.Name,
|
||||
})
|
||||
}
|
||||
if ss.SourceTypeName != "" {
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: "source_type_name",
|
||||
Value: ss.SourceTypeName,
|
||||
})
|
||||
}
|
||||
for _, tag := range ss.Tags {
|
||||
name, value := datadogutils.SplitTag(tag)
|
||||
if name == "host" {
|
||||
name = "exported_host"
|
||||
}
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: name,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
labels = append(labels, extraLabels...)
|
||||
samplesLen := len(samples)
|
||||
for _, pt := range ss.Points {
|
||||
samples = append(samples, prompbmarshal.Sample{
|
||||
Timestamp: pt.Timestamp * 1000,
|
||||
Value: pt.Value,
|
||||
})
|
||||
}
|
||||
tssDst = append(tssDst, prompbmarshal.TimeSeries{
|
||||
Labels: labels[labelsLen:],
|
||||
Samples: samples[samplesLen:],
|
||||
})
|
||||
}
|
||||
ctx.WriteRequest.Timeseries = tssDst
|
||||
ctx.Labels = labels
|
||||
ctx.Samples = samples
|
||||
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
|
||||
return remotewrite.ErrQueueFullHTTPRetry
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
if at != nil {
|
||||
rowsTenantInserted.Get(at).Add(rowsTotal)
|
||||
}
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return nil
|
||||
}
|
||||
@@ -5,7 +5,6 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite/stream"
@@ -21,12 +20,10 @@ var (
//
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
func InsertHandler(r io.Reader) error {
return stream.Parse(r, false, func(rows []parser.Row) error {
return insertRows(nil, rows)
})
return stream.Parse(r, insertRows)
}

func insertRows(at *auth.Token, rows []parser.Row) error {
func insertRows(rows []parser.Row) error {
ctx := common.GetPushCtx()
defer common.PutPushCtx(ctx)

@@ -59,9 +56,7 @@ func insertRows(at *auth.Token, rows []parser.Row) error {
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(nil, &ctx.WriteRequest)
rowsInserted.Add(len(rows))
rowsPerInsert.Update(float64(len(rows)))
return nil
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
@@ -35,9 +36,9 @@ var (
|
||||
// InsertHandlerForReader processes remote write for influx line protocol.
|
||||
//
|
||||
// See https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener/
|
||||
func InsertHandlerForReader(at *auth.Token, r io.Reader, isGzipped bool) error {
|
||||
func InsertHandlerForReader(r io.Reader, isGzipped bool) error {
|
||||
return stream.Parse(r, isGzipped, "", "", func(db string, rows []parser.Row) error {
|
||||
return insertRows(at, db, rows, nil)
|
||||
return insertRows(nil, db, rows, nil)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -129,9 +130,7 @@ func insertRows(at *auth.Token, db string, rows []parser.Row, extraLabels []prom
|
||||
ctx.ctx.Labels = labels
|
||||
ctx.ctx.Samples = samples
|
||||
ctx.commonLabels = commonLabels
|
||||
if !remotewrite.TryPush(at, &ctx.ctx.WriteRequest) {
|
||||
return remotewrite.ErrQueueFullHTTPRetry
|
||||
}
|
||||
remotewrite.Push(at, &ctx.ctx.WriteRequest)
|
||||
rowsInserted.Add(rowsTotal)
|
||||
if at != nil {
|
||||
rowsTenantInserted.Get(at).Add(rowsTotal)
|
||||
@@ -159,15 +158,25 @@ func (ctx *pushCtx) reset() {
|
||||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
pushCtxPool.Put(ctx)
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())
|
||||
|
||||
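The influx pushCtx pool above follows the same two-tier scheme as the common PushCtx pool: a channel sized to the available CPUs is tried first, with sync.Pool as the fallback. A hedged sketch of how such a change could be measured; getPushCtx/putPushCtx come from the code above, and any numbers depend entirely on the workload:

// Sketch: micro-benchmark for the pooled pushCtx round trip.
func BenchmarkPushCtxPoolRoundTrip(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			ctx := getPushCtx()
			putPushCtx(ctx)
		}
	})
}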
@@ -8,12 +8,13 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/csvimport"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadogsketches"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadogv1"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadogv2"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadog"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/graphite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/influx"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/native"
|
||||
@@ -40,16 +41,15 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
|
||||
)
|
||||
|
||||
var (
|
||||
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. "+
|
||||
httpListenAddr = flag.String("httpListenAddr", ":8429", "TCP address to listen for http connections. "+
|
||||
"Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. "+
|
||||
"Note that /targets and /metrics pages aren't available if -httpListenAddr=''. See also -tls and -httpListenAddr.useProxyProtocol")
|
||||
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
|
||||
"Note that /targets and /metrics pages aren't available if -httpListenAddr=''. See also -httpListenAddr.useProxyProtocol")
|
||||
useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
|
||||
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
|
||||
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
|
||||
influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for InfluxDB line protocol data. Usually :8089 must be set. Doesn't work if empty. "+
|
||||
@@ -70,8 +70,7 @@ var (
|
||||
"See also -opentsdbHTTPListenAddr.useProxyProtocol")
|
||||
opentsdbHTTPUseProxyProtocol = flag.Bool("opentsdbHTTPListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted "+
|
||||
"at -opentsdbHTTPListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
|
||||
configAuthKey = flagutil.NewPassword("configAuthKey", "Authorization key for accessing /config page. It must be passed via authKey query arg. It overrides httpAuth.* settings.")
|
||||
reloadAuthKey = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings.")
|
||||
configAuthKey = flag.String("configAuthKey", "", "Authorization key for accessing /config page. It must be passed via authKey query arg")
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running vmagent. The following files are checked: "+
|
||||
"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig, -remoteWrite.streamAggr.config . "+
|
||||
"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag")
|
||||
@@ -98,6 +97,7 @@ func main() {
remotewrite.InitSecretFlags()
buildinfo.Init()
logger.Init()
pushmetrics.Init()

if promscrape.IsDryRun() {
if err := promscrape.CheckConfig(); err != nil {
@@ -120,18 +120,13 @@ func main() {
return
}

listenAddrs := *httpListenAddrs
if len(listenAddrs) == 0 {
listenAddrs = []string{":8429"}
}
logger.Infof("starting vmagent at %q...", listenAddrs)
logger.Infof("starting vmagent at %q...", *httpListenAddr)
startTime := time.Now()
remotewrite.StartIngestionRateLimiter()
remotewrite.Init()
common.StartUnmarshalWorkers()
if len(*influxListenAddr) > 0 {
influxServer = influxserver.MustStart(*influxListenAddr, *influxUseProxyProtocol, func(r io.Reader) error {
return influx.InsertHandlerForReader(nil, r, false)
return influx.InsertHandlerForReader(r, false)
})
}
if len(*graphiteListenAddr) > 0 {
@@ -146,23 +141,24 @@ func main() {
opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, *opentsdbHTTPUseProxyProtocol, httpInsertHandler)
}

promscrape.Init(remotewrite.PushDropSamplesOnFailure)
promscrape.Init(remotewrite.Push)

go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
if len(*httpListenAddr) > 0 {
go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
}
logger.Infof("started vmagent in %.3f seconds", time.Since(startTime).Seconds())

pushmetrics.Init()
sig := procutil.WaitForSigterm()
logger.Infof("received signal %s", sig)
remotewrite.StopIngestionRateLimiter()
pushmetrics.Stop()

startTime = time.Now()
logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
if err := httpserver.Stop(listenAddrs); err != nil {
logger.Fatalf("cannot stop the webservice: %s", err)
if len(*httpListenAddr) > 0 {
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
if err := httpserver.Stop(*httpListenAddr); err != nil {
logger.Fatalf("cannot stop the webservice: %s", err)
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())

promscrape.Stop()
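In the hunks above, the single *httpListenAddr value gives way to an *httpListenAddrs slice: vmagent starts one listener per configured address (falling back to :8429 when none are given) and stops them all on shutdown. A stdlib-only sketch of that pattern, with hypothetical names and none of the project's httpserver/procutil helpers, might look like this:

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// Hypothetical stand-in for the *httpListenAddrs list.
	listenAddrs := []string{":8429", "127.0.0.1:8430"}
	if len(listenAddrs) == 0 {
		listenAddrs = []string{":8429"} // default, as in the diff
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("OK"))
	})

	// Start one server per address, mirroring httpserver.Serve(listenAddrs, ...).
	servers := make([]*http.Server, 0, len(listenAddrs))
	for _, addr := range listenAddrs {
		srv := &http.Server{Addr: addr, Handler: mux}
		servers = append(servers, srv)
		go func(s *http.Server) {
			if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
				log.Fatalf("cannot serve %q: %s", s.Addr, err)
			}
		}(srv)
	}

	// Wait for SIGTERM/SIGINT, then shut every listener down gracefully,
	// similar to procutil.WaitForSigterm followed by httpserver.Stop(listenAddrs).
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
	<-sigCh

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	for _, srv := range servers {
		if err := srv.Shutdown(ctx); err != nil {
			log.Printf("cannot stop %q: %s", srv.Addr, err)
		}
	}
}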
@@ -225,7 +221,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
w.Header().Add("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<h2>vmagent</h2>")
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/vmagent/'>https://docs.victoriametrics.com/vmagent/</a></br>")
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/vmagent.html'>https://docs.victoriametrics.com/vmagent.html</a></br>")
fmt.Fprintf(w, "Useful endpoints:</br>")
httpserver.WriteAPIHelp(w, [][2]string{
{"targets", "status for discovered active targets"},
@@ -233,6 +229,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
{"metric-relabel-debug", "debug metric relabeling"},
{"api/v1/targets", "advanced information about discovered targets in JSON format"},
{"config", "-promscrape.config contents"},
{"stream-agg", "streaming aggregation status"},
{"metrics", "available service metrics"},
{"flags", "command-line flags"},
{"-/reload", "reload configuration"},
@@ -264,7 +261,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
path = strings.TrimSuffix(path, "/")
}
switch path {
case "/prometheus/api/v1/write", "/api/v1/write", "/api/v1/push", "/prometheus/api/v1/push":
case "/prometheus/api/v1/write", "/api/v1/write":
if common.HandleVMProtoServerHandshake(w, r) {
return true
}
@@ -316,14 +313,14 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
influxQueryRequests.Inc()
influxutils.WriteDatabaseNames(w)
return true
case "/opentelemetry/api/v1/push", "/opentelemetry/v1/metrics":
case "/opentelemetry/api/v1/push":
opentelemetryPushRequests.Inc()
if err := opentelemetry.InsertHandler(nil, r); err != nil {
opentelemetryPushErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
firehose.WriteSuccessResponse(w, r)
w.WriteHeader(http.StatusOK)
return true
case "/newrelic":
newrelicCheckRequest.Inc()
@@ -349,20 +346,9 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "/datadog/api/v1/series":
datadogv1WriteRequests.Inc()
if err := datadogv1.InsertHandlerForHTTP(nil, r); err != nil {
datadogv1WriteErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(202)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "/datadog/api/v2/series":
datadogv2WriteRequests.Inc()
if err := datadogv2.InsertHandlerForHTTP(nil, r); err != nil {
datadogv2WriteErrors.Inc()
datadogWriteRequests.Inc()
if err := datadog.InsertHandlerForHTTP(nil, r); err != nil {
datadogWriteErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
@@ -371,15 +357,6 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
w.WriteHeader(202)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "/datadog/api/beta/sketches":
datadogsketchesWriteRequests.Inc()
if err := datadogsketches.InsertHandlerForHTTP(nil, r); err != nil {
datadogsketchesWriteErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
w.WriteHeader(202)
return true
case "/datadog/api/v1/validate":
datadogValidateRequests.Inc()
// See https://docs.datadoghq.com/api/latest/authentication/#validate-api-key
@@ -434,7 +411,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
return true
case "/prometheus/config", "/config":
if !httpserver.CheckAuthFlag(w, r, configAuthKey.Get(), "configAuthKey") {
if !httpserver.CheckAuthFlag(w, r, *configAuthKey, "configAuthKey") {
return true
}
promscrapeConfigRequests.Inc()
@@ -443,7 +420,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
case "/prometheus/api/v1/status/config", "/api/v1/status/config":
// See https://prometheus.io/docs/prometheus/latest/querying/api/#config
if !httpserver.CheckAuthFlag(w, r, configAuthKey.Get(), "configAuthKey") {
if !httpserver.CheckAuthFlag(w, r, *configAuthKey, "configAuthKey") {
return true
}
promscrapeStatusConfigRequests.Inc()
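Both the /config and /api/v1/status/config branches gate access on an authKey query argument via httpserver.CheckAuthFlag, now fed from configAuthKey.Get(). A standalone sketch of such a check, using a hypothetical helper rather than the project's httpserver package, could look like:

package main

import (
	"fmt"
	"net/http"
)

// checkAuthFlag is a hypothetical equivalent of httpserver.CheckAuthFlag:
// it returns true when the request may proceed. An empty expected key
// disables the check; otherwise the authKey query arg must match.
func checkAuthFlag(w http.ResponseWriter, r *http.Request, expectedKey, flagName string) bool {
	if expectedKey == "" {
		return true
	}
	if r.FormValue("authKey") == expectedKey {
		return true
	}
	http.Error(w, fmt.Sprintf("the provided authKey doesn't match -%s", flagName), http.StatusUnauthorized)
	return false
}

func main() {
	configAuthKey := "my-secret" // would normally come from the -configAuthKey flag
	http.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
		if !checkAuthFlag(w, r, configAuthKey, "configAuthKey") {
			return
		}
		w.Write([]byte("# scrape config contents would go here\n"))
	})
	http.ListenAndServe(":8429", nil)
}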
@@ -453,15 +430,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
fmt.Fprintf(w, `{"status":"success","data":{"yaml":%q}}`, bb.B)
return true
case "/prometheus/-/reload", "/-/reload":
if !httpserver.CheckAuthFlag(w, r, reloadAuthKey.Get(), "reloadAuthKey") {
return true
}
promscrapeConfigReloadRequests.Inc()
procutil.SelfSIGHUP()
w.WriteHeader(http.StatusOK)
return true
case "/stream-agg":
streamaggr.WriteHumanReadableState(w, r, remotewrite.GetAggregators())
return true
case "/ready":
if rdy := promscrape.PendingScrapeConfigs.Load(); rdy > 0 {
if rdy := atomic.LoadInt32(&promscrape.PendingScrapeConfigs); rdy > 0 {
errMsg := fmt.Sprintf("waiting for scrapes to init, left: %d", rdy)
http.Error(w, errMsg, http.StatusTooEarly)
} else {
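The /ready branch switches from reading a plain int32 with atomic.LoadInt32 to calling Load() on promscrape.PendingScrapeConfigs, i.e. the sync/atomic.Int32 type added in Go 1.19, which cannot be read without going through its atomic methods. A small hypothetical readiness gauge in the same style:

package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
)

// pendingScrapeConfigs mimics promscrape.PendingScrapeConfigs: the number of
// scrape configs that still have to be initialized before /ready succeeds.
var pendingScrapeConfigs atomic.Int32

func readyHandler(w http.ResponseWriter, r *http.Request) {
	if rdy := pendingScrapeConfigs.Load(); rdy > 0 {
		errMsg := fmt.Sprintf("waiting for scrapes to init, left: %d", rdy)
		http.Error(w, errMsg, http.StatusTooEarly)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}

func main() {
	pendingScrapeConfigs.Store(3)                 // pretend three scrape configs are still pending
	go func() { pendingScrapeConfigs.Add(-3) }()  // and that they finish initializing later
	http.HandleFunc("/ready", readyHandler)
	http.ListenAndServe(":8429", nil)
}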
@@ -513,7 +490,7 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
p.Suffix = strings.TrimSuffix(p.Suffix, "/")
}
switch p.Suffix {
case "prometheus/", "prometheus", "prometheus/api/v1/write", "prometheus/api/v1/push":
case "prometheus/", "prometheus", "prometheus/api/v1/write":
prometheusWriteRequests.Inc()
if err := promremotewrite.InsertHandler(at, r); err != nil {
prometheusWriteErrors.Inc()
@@ -562,14 +539,14 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
influxQueryRequests.Inc()
influxutils.WriteDatabaseNames(w)
return true
case "opentelemetry/api/v1/push", "opentelemetry/v1/metrics":
case "opentelemetry/api/v1/push":
opentelemetryPushRequests.Inc()
if err := opentelemetry.InsertHandler(at, r); err != nil {
opentelemetryPushErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
firehose.WriteSuccessResponse(w, r)
w.WriteHeader(http.StatusOK)
return true
case "newrelic":
newrelicCheckRequest.Inc()
@@ -595,19 +572,9 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "datadog/api/v1/series":
datadogv1WriteRequests.Inc()
if err := datadogv1.InsertHandlerForHTTP(at, r); err != nil {
datadogv1WriteErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
w.WriteHeader(202)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "datadog/api/v2/series":
datadogv2WriteRequests.Inc()
if err := datadogv2.InsertHandlerForHTTP(at, r); err != nil {
datadogv2WriteErrors.Inc()
datadogWriteRequests.Inc()
if err := datadog.InsertHandlerForHTTP(at, r); err != nil {
datadogWriteErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
@@ -615,15 +582,6 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
w.WriteHeader(202)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "datadog/api/beta/sketches":
datadogsketchesWriteRequests.Inc()
if err := datadogsketches.InsertHandlerForHTTP(at, r); err != nil {
datadogsketchesWriteErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
w.WriteHeader(202)
return true
case "datadog/api/v1/validate":
datadogValidateRequests.Inc()
// See https://docs.datadoghq.com/api/latest/authentication/#validate-api-key
@@ -674,22 +632,16 @@ var (

influxQueryRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/influx/query", protocol="influx"}`)

datadogv1WriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/series", protocol="datadog"}`)
datadogv1WriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/datadog/api/v1/series", protocol="datadog"}`)

datadogv2WriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v2/series", protocol="datadog"}`)
datadogv2WriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/datadog/api/v2/series", protocol="datadog"}`)

datadogsketchesWriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/beta/sketches", protocol="datadog"}`)
datadogsketchesWriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/datadog/api/beta/sketches", protocol="datadog"}`)
datadogWriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/series", protocol="datadog"}`)
datadogWriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/datadog/api/v1/series", protocol="datadog"}`)

datadogValidateRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/validate", protocol="datadog"}`)
datadogCheckRunRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/check_run", protocol="datadog"}`)
datadogIntakeRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
datadogMetadataRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)

opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)

newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
newrelicWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
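The counters above come from the github.com/VictoriaMetrics/metrics package, which registers each named counter in a default set and can expose it in Prometheus text format. A minimal, self-contained sketch of per-path request counting in the same style (hypothetical handler and metric names):

package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

// One counter per (path, protocol) pair, following the naming used in the diff.
var (
	writeRequests = metrics.NewCounter(`demo_http_requests_total{path="/api/v1/write", protocol="promremotewrite"}`)
	writeErrors   = metrics.NewCounter(`demo_http_request_errors_total{path="/api/v1/write", protocol="promremotewrite"}`)
)

func main() {
	http.HandleFunc("/api/v1/write", func(w http.ResponseWriter, r *http.Request) {
		writeRequests.Inc()
		if r.Method != http.MethodPost {
			writeErrors.Inc()
			http.Error(w, "only POST is supported", http.StatusBadRequest)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	})
	// Expose all registered counters in Prometheus text exposition format.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, true)
	})
	http.ListenAndServe(":8429", nil)
}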
@@ -718,7 +670,7 @@ func usage() {
const s = `
vmagent collects metrics data via popular data ingestion protocols and routes it to VictoriaMetrics.

See the docs at https://docs.victoriametrics.com/vmagent/ .
See the docs at https://docs.victoriametrics.com/vmagent.html .
`
flagutil.Usage(s)
}

@@ -1,7 +1,7 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image AS certs
FROM $certs_image as certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

FROM $root_image
@@ -84,8 +84,6 @@ func insertRows(at *auth.Token, block *stream.Block, extraLabels []prompbmarshal
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(at, &ctx.WriteRequest)
return nil
}
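This hunk, and the similar ones further down, replaces the unconditional remotewrite.Push with remotewrite.TryPush, which can refuse data when the in-memory queue is full and hand a retryable error back to the HTTP client. A simplified sketch of that back-pressure idea using a bounded channel (hypothetical types; not the actual remotewrite package):

package main

import (
	"errors"
	"fmt"
)

// errQueueFull plays the role of remotewrite.ErrQueueFullHTTPRetry: callers
// translate it into an HTTP status that makes the sender retry later.
var errQueueFull = errors.New("remote write queue is full; retry later")

// writeRequest is a hypothetical stand-in for prompbmarshal.WriteRequest.
type writeRequest struct {
	samples int
}

// pusher holds a bounded queue of pending write requests.
type pusher struct {
	queue chan writeRequest
}

// tryPush enqueues wr without blocking and reports whether it was accepted,
// mirroring the bool result of remotewrite.TryPush.
func (p *pusher) tryPush(wr writeRequest) bool {
	select {
	case p.queue <- wr:
		return true
	default:
		return false
	}
}

func insertRows(p *pusher, wr writeRequest) error {
	if !p.tryPush(wr) {
		return errQueueFull
	}
	return nil
}

func main() {
	p := &pusher{queue: make(chan writeRequest, 1)}
	fmt.Println(insertRows(p, writeRequest{samples: 10})) // <nil>
	fmt.Println(insertRows(p, writeRequest{samples: 20})) // queue full error
}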
@@ -76,9 +76,7 @@ func insertRows(at *auth.Token, rows []newrelic.Row, extraLabels []prompbmarshal
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(at, &ctx.WriteRequest)
rowsInserted.Add(len(rows))
if at != nil {
rowsTenantInserted.Get(at).Add(samplesCount)

@@ -9,7 +9,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
"github.com/VictoriaMetrics/metrics"
@@ -28,15 +27,10 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
return err
}
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
var processBody func([]byte) ([]byte, error)
if req.Header.Get("Content-Type") == "application/json" {
if req.Header.Get("X-Amz-Firehose-Protocol-Version") != "" {
processBody = firehose.ProcessRequestBody
} else {
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
}
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
}
return stream.ParseStream(req.Body, isGzipped, processBody, func(tss []prompbmarshal.TimeSeries) error {
return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
return insertRows(at, tss, extraLabels)
})
}
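This hunk teaches the OpenTelemetry InsertHandler to accept AWS Firehose requests: a JSON Content-Type is allowed only when the X-Amz-Firehose-Protocol-Version header is present, in which case the body is preprocessed with firehose.ProcessRequestBody before stream.ParseStream runs. A simplified sketch of picking an optional body preprocessor from request headers, with the Firehose unwrapping itself only stubbed out:

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

// chooseProcessBody picks an optional body preprocessor based on request
// headers, in the spirit of the InsertHandler change above.
func chooseProcessBody(req *http.Request) (func([]byte) ([]byte, error), error) {
	if req.Header.Get("Content-Type") != "application/json" {
		return nil, nil // protobuf body; no preprocessing needed
	}
	if req.Header.Get("X-Amz-Firehose-Protocol-Version") != "" {
		// Hypothetical stand-in for firehose.ProcessRequestBody: it would
		// unwrap the Firehose JSON envelope and return the inner payload.
		return func(b []byte) ([]byte, error) { return b, nil }, nil
	}
	return nil, fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
}

func handler(w http.ResponseWriter, req *http.Request) {
	processBody, err := chooseProcessBody(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var body io.Reader = req.Body
	if req.Header.Get("Content-Encoding") == "gzip" {
		zr, err := gzip.NewReader(req.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		defer zr.Close()
		body = zr
	}
	data, err := io.ReadAll(body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if processBody != nil {
		if data, err = processBody(data); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}
	fmt.Fprintf(w, "received %d bytes\n", len(data))
}

func main() {
	http.HandleFunc("/opentelemetry/v1/metrics", handler)
	http.ListenAndServe(":8429", nil)
}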
@@ -65,9 +59,7 @@ func insertRows(at *auth.Token, tss []prompbmarshal.TimeSeries, extraLabels []pr
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(at, &ctx.WriteRequest)
rowsInserted.Add(rowsTotal)
if at != nil {
rowsTenantInserted.Get(at).Add(rowsTotal)

@@ -56,9 +56,7 @@ func insertRows(rows []parser.Row) error {
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(nil, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(nil, &ctx.WriteRequest)
rowsInserted.Add(len(rows))
rowsPerInsert.Update(float64(len(rows)))
return nil

@@ -64,9 +64,7 @@ func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.L
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(at, &ctx.WriteRequest)
rowsInserted.Add(len(rows))
rowsPerInsert.Update(float64(len(rows)))
return nil

@@ -73,9 +73,7 @@ func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.L
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
remotewrite.Push(at, &ctx.WriteRequest)
rowsInserted.Add(len(rows))
if at != nil {
rowsTenantInserted.Get(at).Add(len(rows))