Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Synced 2026-05-17 16:59:40 +03:00

Comparing master...debug/vmag (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 27103ea656 |  |
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 12 lines changed

@@ -5,10 +5,10 @@ body:
   - type: markdown
     attributes:
       value: |
-        Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade)
+        Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
         to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
         and verify whether the bug is reproducible there.
-        It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/victoriametrics/troubleshooting/) first.
+        It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/troubleshooting/) first.
   - type: textarea
     id: describe-the-bug
     attributes:
@@ -60,12 +60,12 @@ body:

         For VictoriaMetrics health-state issues please provide full-length screenshots
         of Grafana dashboards if possible:
-        * [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229)
-        * [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176)
+        * [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229/)
+        * [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176/)

         See how to setup monitoring here:
-        * [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#monitoring)
-        * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#monitoring)
+        * [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)
+        * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#monitoring)
     validations:
       required: false
   - type: textarea
.github/ISSUE_TEMPLATE/question.yml (vendored): 8 lines changed

@@ -5,7 +5,7 @@ body:
   - type: textarea
     id: describe-the-component
     attributes:
-      label: Is your question related to a specific component?
+      label: Is your question request related to a specific component?
       placeholder: |
         VictoriaMetrics, vmagent, vmalert, vmui, etc...
     validations:
@@ -24,9 +24,9 @@ body:
       label: Troubleshooting docs
      description: I am familiar with the following troubleshooting docs
       options:
-        - label: General - https://docs.victoriametrics.com/victoriametrics/troubleshooting/
+        - label: General - https://docs.victoriametrics.com/troubleshooting/
          required: false
-        - label: vmagent - https://docs.victoriametrics.com/victoriametrics/vmagent/#troubleshooting
+        - label: vmagent - https://docs.victoriametrics.com/vmagent/#troubleshooting
          required: false
-        - label: vmalert - https://docs.victoriametrics.com/victoriametrics/vmalert/#troubleshooting
+        - label: vmalert - https://docs.victoriametrics.com/vmalert/#troubleshooting
          required: false
.github/dependabot.yml (vendored): 4 lines changed

@@ -4,8 +4,6 @@ updates:
     directory: "/"
     schedule:
       interval: "daily"
-    cooldown:
-      default-days: 21
   - package-ecosystem: "gomod"
     directory: "/"
     schedule:
@@ -25,8 +23,6 @@ updates:
     directory: "/"
     schedule:
       interval: "daily"
-    cooldown:
-      default-days: 21
  - package-ecosystem: "npm"
     directory: "/app/vmui/packages/vmui"
     schedule:
.github/pull_request_template.md (vendored): 10 lines changed

@@ -1,3 +1,9 @@
-**PLEASE REMOVE LINE BELOW BEFORE SUBMITTING**
-Before creating the PR, make sure you have read and followed the [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
-
+### Describe Your Changes
+
+Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
+
+### Checklist
+
+The following checks are **mandatory**:
+
+- [ ] My change adheres [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
.github/scripts/lint-changelog-tip.sh (vendored): 48 lines changed (file deleted)

@@ -1,48 +0,0 @@
-#!/usr/bin/env sh
-
-set -e
-
-CHANGELOG_FILE="docs/victoriametrics/changelog/CHANGELOG.md"
-
-GITHUB_BASE_REF=${GITHUB_BASE_REF:-"master"}
-GIT_REMOTE=${GIT_REMOTE:-"origin"}
-
-git diff "${GIT_REMOTE}/${GITHUB_BASE_REF}"...HEAD -- $CHANGELOG_FILE > diff.txt
-if ! grep -q "^+" diff.txt; then
-  echo "No additions in CHANGELOG.md"
-  exit 0
-fi
-
-ADDED_LINES=$(grep "^+\S" diff.txt | sed 's/^+//')
-
-START_TIP=$(grep -n "^## tip" "$CHANGELOG_FILE" | head -1 | cut -d: -f1)
-if [ -z "$START_TIP" ]; then
-  echo "ERROR: ${CHANGELOG_FILE} does not contain a ## tip section"
-  exit 1
-fi
-
-END_TIP=$(awk "NR>$START_TIP && /^## / {print NR; exit}" "${CHANGELOG_FILE}")
-if [ -z "$END_TIP" ]; then
-  END_TIP=$(wc -l < "$CHANGELOG_FILE")
-fi
-
-BAD=0
-while IFS= read -r line; do
-  # Grep exact line inside the file and get line numbers
-  MATCHES=$(grep -n -F "$line" "$CHANGELOG_FILE" | cut -d: -f1)
-  for m in $MATCHES; do
-    if [ "$m" -lt "$START_TIP" ] || [ "$m" -gt "$END_TIP" ]; then
-      echo "'$line' on line ${m} is outside ## tip section (lines ${START_TIP}-${END_TIP})"
-      BAD=1
-    fi
-  done
-done << EOF
-$ADDED_LINES
-EOF
-
-if [ "$BAD" -ne 0 ]; then
-  echo "CHANGELOG modifications must be placed inside the ## tip section."
-  exit 1
-fi
-
-echo "CHANGELOG modifications are valid."
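The deleted linter can still be run by hand from a checkout that contains it; a minimal sketch, assuming an `origin` remote and the defaults the script falls back to:

```sh
# Sketch: invoke the tip-section linter the same way the (also removed)
# changelog-linter workflow does. GIT_REMOTE and GITHUB_BASE_REF default
# to "origin" and "master" inside the script when unset.
git fetch origin master
GITHUB_BASE_REF=master ./.github/scripts/lint-changelog-tip.sh
```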
.github/workflows/build.yml (vendored): 65 lines changed

@@ -7,20 +7,16 @@ on:
       - master
     paths:
       - '**.go'
-      - '**/Dockerfile'
+      - '**/Dockerfile*' # The trailing * is for app/vmui/Dockerfile-*.
       - '**/Makefile'
-      - '!app/vmui/**'
-      - '.github/workflows/build.yml'
   pull_request:
     branches:
       - cluster
       - master
     paths:
       - '**.go'
-      - '**/Dockerfile'
+      - '**/Dockerfile*' # The trailing * is for app/vmui/Dockerfile-*.
       - '**/Makefile'
-      - '!app/vmui/**'
-      - '.github/workflows/build.yml'

 permissions:
   contents: read
@@ -31,53 +27,28 @@ concurrency:

 jobs:
   build:
-    name: ${{ matrix.os }}-${{ matrix.arch }}
+    name: Build
     runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - os: linux
-            arch: 386
-          - os: linux
-            arch: amd64
-          - os: linux
-            arch: arm64
-          - os: linux
-            arch: arm
-          - os: linux
-            arch: ppc64le
-          - os: linux
-            arch: s390x
-          - os: darwin
-            arch: amd64
-          - os: darwin
-            arch: arm64
-          - os: freebsd
-            arch: amd64
-          - os: openbsd
-            arch: amd64
-          - os: netbsd
-            arch: amd64
-          - os: windows
-            arch: amd64
     steps:
       - name: Code checkout
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

       - name: Setup Go
         id: go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
         with:
-          cache-dependency-path: |
-            go.sum
-            Makefile
-            app/**/Makefile
-          go-version-file: 'go.mod'
-      - run: go version
+          go-version: stable
+          cache: false

-      - name: Build victoria-metrics for ${{ matrix.os }}-${{ matrix.arch }}
-        run: make victoria-metrics-${{ matrix.os }}-${{ matrix.arch }}
+      - name: Cache Go artifacts
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/bin
+            ~/go/pkg/mod
+          key: go-artifacts-${{ runner.os }}-crossbuild-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
+          restore-keys: go-artifacts-${{ runner.os }}-crossbuild-

-      - name: Build vmutils for ${{ matrix.os }}-${{ matrix.arch }}
-        run: make vmutils-${{ matrix.os }}-${{ matrix.arch }}
+      - name: Run crossbuild
+        run: make crossbuild
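Both branches drive the build through make targets, so a single matrix cell of the master workflow can be reproduced locally; a sketch, with linux/arm64 chosen as an illustrative target:

```sh
# Sketch: one matrix cell of the build workflow, run locally.
# Target names follow the victoria-metrics-<os>-<arch> / vmutils-<os>-<arch>
# pattern invoked by the workflow steps above.
make victoria-metrics-linux-arm64
make vmutils-linux-arm64
```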
.github/workflows/changelog-linter.yml (vendored): 19 lines changed (file deleted)

@@ -1,19 +0,0 @@
-name: 'changelog-linter'
-
-on:
-  pull_request:
-    paths:
-      - "docs/victoriametrics/changelog/CHANGELOG.md"
-
-jobs:
-  tip-lint:
-    runs-on: 'ubuntu-latest'
-    steps:
-      - uses: 'actions/checkout@v6'
-        with:
-          # needed for proper diff
-          fetch-depth: 0
-
-      - name: 'Validate that changelog changes are under ## tip'
-        run: |
-          GITHUB_BASE_REF=${{ github.base_ref }} ./.github/scripts/lint-changelog-tip.sh
.github/workflows/check-commit-signed.yml (vendored): 47 lines changed (file deleted)

@@ -1,47 +0,0 @@
-name: check-commit-signed
-
-on:
-  pull_request:
-
-jobs:
-  check-commit-signed:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v6
-        with:
-          fetch-depth: 0 # we need full history for commit verification
-
-      - name: Check commit signatures
-        run: |
-          if [ "${{ github.event_name }}" != "pull_request" ]; then
-            echo "Not a PR event, skipping signature check"
-            exit 0
-          fi
-
-          RANGE="${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}"
-          echo "Checking commits in PR range: $RANGE"
-
-          if [ -z "$(git rev-list $RANGE)" ]; then
-            echo "No new commits in this PR, skipping signature check"
-            exit 0
-          fi
-
-          # Check raw commit objects for a "gpgsig" header as a fast early signal for
-          # contributors. Both GPG and SSH signatures use this header.
-          # This avoids relying on %G? which returns N for SSH commits.
-          # This check is not a security enforcement — unsigned commits cannot be merged
-          # anyway due to the GitHub repository merge policy.
-          unsigned=""
-          for sha in $(git rev-list $RANGE); do
-            if ! git cat-file commit "$sha" | grep -q "^gpgsig"; then
-              unsigned="$unsigned $sha"
-            fi
-          done
-          if [ -n "$unsigned" ]; then
-            echo "Found unsigned commits:"
-            echo "$unsigned"
-            exit 1
-          fi
-
-          echo "All commits in PR are signed (GPG or SSH)"
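The signature probe is plain git and works outside CI as well; a sketch, assuming `origin/master` stands in for the PR base commit:

```sh
# Sketch: the same gpgsig-header check the removed workflow performs,
# pointed at local commits. origin/master is an assumed stand-in for
# github.event.pull_request.base.sha.
for sha in $(git rev-list origin/master..HEAD); do
  git cat-file commit "$sha" | grep -q "^gpgsig" || echo "unsigned: $sha"
done
```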
.github/workflows/check-licenses.yml (vendored): 10 lines changed

@@ -19,22 +19,20 @@ jobs:

       - name: Setup Go
         id: go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
         with:
-          go-version-file: 'go.mod'
+          go-version: stable
+          cache: false

-      - run: go version
-
       - name: Cache Go artifacts
-        uses: actions/cache@v5
+        uses: actions/cache@v4
         with:
           path: |
             ~/.cache/go-build
             ~/go/pkg/mod
             ~/go/bin
           key: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
-          restore-keys: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-
+          restore-keys: go-artifacts-${{ runner.os }}-check-licenses-

       - name: Check License
         run: make check-licenses
.github/workflows/codeql-analysis-go.yml (vendored): 17 lines changed

@@ -29,35 +29,34 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4

       - name: Set up Go
         id: go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
         with:
           cache: false
-          go-version-file: 'go.mod'
-      - run: go version
+          go-version: stable

       - name: Cache Go artifacts
-        uses: actions/cache@v5
+        uses: actions/cache@v4
         with:
           path: |
             ~/.cache/go-build
             ~/go/bin
             ~/go/pkg/mod
           key: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
-          restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-
+          restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v4.35.2
+        uses: github/codeql-action/init@v3
         with:
           languages: go

       - name: Autobuild
-        uses: github/codeql-action/autobuild@v4.35.2
+        uses: github/codeql-action/autobuild@v3

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v4.35.2
+        uses: github/codeql-action/analyze@v3
         with:
           category: 'language:go'
.github/workflows/codeql-analysis-js-ts.yml (vendored): 46 lines changed (new file)

@@ -0,0 +1,46 @@
+name: 'CodeQL JS/TS'
+
+on:
+  push:
+    branches:
+      - cluster
+      - master
+    paths:
+      - '**.js'
+      - '**.ts'
+      - '**.tsx'
+  pull_request:
+    branches:
+      - cluster
+      - master
+    paths:
+      - '**.js'
+      - '**.ts'
+      - '**.tsx'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v3
+        with:
+          languages: javascript-typescript
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v3
+        with:
+          category: 'language:js/ts'
.github/workflows/docs.yaml (vendored): 57 lines changed (file deleted)

@@ -1,57 +0,0 @@
-name: publish-docs
-on:
-  push:
-    branches:
-      - 'master'
-    paths:
-      - 'docs/**'
-      - '.github/workflows/docs.yaml'
-  workflow_dispatch: {}
-permissions:
-  contents: read # This is required for actions/checkout and to commit back image update
-  deployments: write
-jobs:
-  build:
-    name: Build
-    runs-on: ubuntu-latest
-    steps:
-      - name: Code checkout
-        uses: actions/checkout@v6
-        with:
-          path: __vm
-
-      - name: Checkout private code
-        uses: actions/checkout@v6
-        with:
-          repository: VictoriaMetrics/vmdocs
-          token: ${{ secrets.VM_BOT_GH_TOKEN }}
-          path: __vm-docs
-
-      - name: Import GPG key
-        uses: crazy-max/ghaction-import-gpg@v7
-        id: import-gpg
-        with:
-          gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
-          passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
-          git_user_signingkey: true
-          git_commit_gpgsign: true
-          git_config_global: true
-
-      - name: Copy docs
-        id: update
-        run: |
-          find docs -type d -maxdepth 1 -mindepth 1 -exec \
-            sh -c 'rsync -zarvh --delete {}/ ../__vm-docs/content/$(basename {})/' \;
-          echo "SHORT_SHA=$(git rev-parse --short $GITHUB_SHA)" >> $GITHUB_OUTPUT
-        working-directory: __vm
-
-      - name: Push to vmdocs
-        run: |
-          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
-          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
-          if [[ -n $(git status --porcelain) ]]; then
-            git add .
-            git commit -S -m "sync docs with VictoriaMetrics/VictoriaMetrics commit: ${{ steps.update.outputs.SHORT_SHA }}"
-            git push
-          fi
-        working-directory: __vm-docs
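The `find ... -exec rsync` line in the removed workflow copies every top-level directory under docs/ into the vmdocs content tree; expanded for a single directory it is equivalent to the following sketch (the directory name is illustrative):

```sh
# Sketch: the per-directory expansion of the "Copy docs" step, assuming
# docs/victoriametrics exists and ../__vm-docs is the vmdocs checkout.
rsync -zarvh --delete docs/victoriametrics/ ../__vm-docs/content/victoriametrics/
```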
.github/workflows/main.yml (vendored): 93 lines changed (new file)

@@ -0,0 +1,93 @@
+name: main
+
+on:
+  push:
+    branches:
+      - cluster
+      - master
+    paths:
+      - '**.go'
+  pull_request:
+    branches:
+      - cluster
+      - master
+    paths:
+      - '**.go'
+
+permissions:
+  contents: read
+
+concurrency:
+  cancel-in-progress: true
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+
+jobs:
+  lint:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Code checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        id: go
+        uses: actions/setup-go@v5
+        with:
+          cache: false
+          go-version: stable
+
+      - name: Cache Go artifacts
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/bin
+            ~/go/pkg/mod
+          key: go-artifacts-${{ runner.os }}-check-all-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
+          restore-keys: go-artifacts-${{ runner.os }}-check-all-
+
+      - name: Run check-all
+        run: |
+          make check-all
+          git diff --exit-code
+
+  test:
+    name: test
+    needs: lint
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        scenario:
+          - 'test-full'
+          - 'test-full-386'
+          - 'test-pure'
+
+    steps:
+      - name: Code checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        id: go
+        uses: actions/setup-go@v5
+        with:
+          cache: false
+          go-version: stable
+
+      - name: Cache Go artifacts
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/bin
+            ~/go/pkg/mod
+          key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
+          restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
+
+      - name: Run tests
+        run: make ${{ matrix.scenario}}
+
+      - name: Publish coverage
+        uses: codecov/codecov-action@v4
+        with:
+          file: ./coverage.txt
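The lint job's trick is the trailing `git diff --exit-code`: if `make check-all` rewrites any tracked file (for example via gofmt), the diff is non-empty and the job fails. The same check runs locally:

```sh
# Sketch: the lint job's core check from the workflow above.
# Exits non-zero when check-all left uncommitted modifications behind.
make check-all
git diff --exit-code
```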
.github/workflows/sync-docs.yml (vendored): 51 lines changed (new file)

@@ -0,0 +1,51 @@
+name: publish-docs
+on:
+  push:
+    branches:
+      - 'master'
+    paths:
+      - 'docs/**'
+  workflow_dispatch: {}
+permissions:
+  contents: read # This is required for actions/checkout and to commit back image update
+  deployments: write
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+      - name: Code checkout
+        uses: actions/checkout@v4
+        with:
+          path: main
+      - name: Checkout private code
+        uses: actions/checkout@v4
+        with:
+          repository: VictoriaMetrics/vmdocs
+          token: ${{ secrets.VM_BOT_GH_TOKEN }}
+          path: docs
+      - name: Import GPG key
+        uses: crazy-max/ghaction-import-gpg@v6
+        with:
+          gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
+          passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
+          git_user_signingkey: true
+          git_commit_gpgsign: true
+          workdir: docs
+      - name: Set short git commit SHA
+        id: vars
+        run: |
+          calculatedSha=$(git rev-parse --short ${{ github.sha }})
+          echo "short_sha=$calculatedSha" >> $GITHUB_OUTPUT
+        working-directory: main
+      - name: update code and commit
+        run: |
+          rm -rf content
+          cp -r ../main/docs content
+          make clean-after-copy
+          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
+          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
+          git add .
+          git commit -S -m "sync docs with VictoriaMetrics/VictoriaMetrics commit: ${{ steps.vars.outputs.short_sha }}"
+          git push
+        working-directory: docs
.github/workflows/test.yml (vendored): 111 lines changed (file deleted)

@@ -1,111 +0,0 @@
-name: test
-
-on:
-  push:
-    branches:
-      - cluster
-      - master
-    paths:
-      - '**.go'
-      - 'go.*'
-      - '.github/workflows/main.yml'
-  pull_request:
-    branches:
-      - cluster
-      - master
-    paths:
-      - '**.go'
-      - 'go.*'
-      - '.github/workflows/main.yml'
-
-permissions:
-  contents: read
-
-concurrency:
-  cancel-in-progress: true
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-
-
-jobs:
-  lint:
-    name: lint
-    runs-on: ubuntu-latest
-    steps:
-      - name: Code checkout
-        uses: actions/checkout@v6
-
-      - name: Setup Go
-        id: go
-        uses: actions/setup-go@v6
-        with:
-          cache-dependency-path: |
-            go.sum
-            Makefile
-            app/**/Makefile
-          go-version-file: 'go.mod'
-
-      - run: go version
-
-      - name: Cache golangci-lint
-        uses: actions/cache@v5
-        with:
-          path: |
-            ~/.cache/golangci-lint
-            ~/go/bin
-          key: golangci-lint-${{ runner.os }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('.golangci.yml') }}
-
-      - name: Run check-all
-        run: |
-          make check-all
-          git diff --exit-code
-
-  unit:
-    name: unit
-    runs-on: ubuntu-latest
-
-    strategy:
-      matrix:
-        scenario:
-          - 'test'
-          - 'test-386'
-          - 'test-pure'
-
-    steps:
-      - name: Code checkout
-        uses: actions/checkout@v6
-
-      - name: Setup Go
-        id: go
-        uses: actions/setup-go@v6
-        with:
-          cache-dependency-path: |
-            go.sum
-            Makefile
-            app/**/Makefile
-          go-version-file: 'go.mod'
-      - run: go version
-
-      - name: Run tests
-        run: make ${{ matrix.scenario}}
-
-  apptest:
-    name: apptest
-    runs-on: apptest
-
-    steps:
-      - name: Code checkout
-        uses: actions/checkout@v6
-
-      - name: Setup Go
-        id: go
-        uses: actions/setup-go@v6
-        with:
-          cache-dependency-path: |
-            go.sum
-            Makefile
-            app/**/Makefile
-          go-version-file: 'go.mod'
-      - run: go version
-
-      - name: Run app tests
-        run: make apptest
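The removed workflow's unit matrix maps one-to-one onto make targets, so it can be replayed sequentially on one machine; a sketch:

```sh
# Sketch: the unit matrix of the removed test workflow, run sequentially.
# Scenario names come straight from the matrix above.
make test
make test-386
make test-pure
```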
.github/workflows/vmui.yml (vendored): 88 lines changed (file deleted)

@@ -1,88 +0,0 @@
-name: vmui
-
-on:
-  push:
-    branches:
-      - cluster
-      - master
-    paths:
-      - 'app/vmui/packages/vmui/**'
-      - '.github/workflows/vmui.yml'
-  pull_request:
-    branches:
-      - cluster
-      - master
-    paths:
-      - 'app/vmui/packages/vmui/**'
-      - '.github/workflows/vmui.yml'
-
-permissions:
-  contents: read
-  packages: read
-  pull-requests: read
-  checks: write
-
-concurrency:
-  cancel-in-progress: true
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-
-jobs:
-  vmui-checks:
-    name: VMUI Checks (lint, test, typecheck)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Code checkout
-        uses: actions/checkout@v6
-
-      - name: Cache node_modules
-        id: cache
-        uses: actions/cache@v5
-        with:
-          path: app/vmui/packages/vmui/node_modules
-          key: vmui-deps-${{ runner.os }}-${{ hashFiles('app/vmui/packages/vmui/package-lock.json', 'app/vmui/Dockerfile-build') }}
-          restore-keys: |
-            vmui-deps-${{ runner.os }}-
-
-      - name: Install dependencies
-        if: steps.cache.outputs.cache-hit != 'true'
-        run: make vmui-install
-
-      - name: Run lint
-        id: lint
-        run: make vmui-lint
-        continue-on-error: true
-        env:
-          VMUI_SKIP_INSTALL: true
-
-      - name: Run tests
-        id: test
-        run: make vmui-test
-        continue-on-error: true
-        env:
-          VMUI_SKIP_INSTALL: true
-
-      - name: Run typecheck
-        id: typecheck
-        run: make vmui-typecheck
-        continue-on-error: true
-        env:
-          VMUI_SKIP_INSTALL: true
-
-      - name: Annotate Code Linting Results
-        uses: ataylorme/eslint-annotate-action@v3
-        with:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          report-json: app/vmui/packages/vmui/vmui-lint-report.json
-
-      - name: Check overall status
-        run: |
-          echo "Lint status: ${{ steps.lint.outcome }}"
-          echo "Test status: ${{ steps.test.outcome }}"
-          echo "Typecheck status: ${{ steps.typecheck.outcome }}"
-
-          if [[ "${{ steps.lint.outcome }}" == "failure" || "${{ steps.test.outcome }}" == "failure" || "${{ steps.typecheck.outcome }}" == "failure" ]]; then
-            echo "One or more checks failed"
-            exit 1
-          else
-            echo "All checks passed"
-          fi
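The three checks are decoupled from dependency installation through VMUI_SKIP_INSTALL, which is how the workflow reuses the cached node_modules; locally the equivalent is:

```sh
# Sketch: the VMUI checks as chained in the removed workflow.
# VMUI_SKIP_INSTALL=true skips re-running the install step inside each target.
make vmui-install
VMUI_SKIP_INSTALL=true make vmui-lint
VMUI_SKIP_INSTALL=true make vmui-test
VMUI_SKIP_INSTALL=true make vmui-typecheck
```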
.gitignore (vendored): 3 lines changed

@@ -12,7 +12,6 @@
 /victoria-logs-data
 /victoria-metrics-data
 /vmagent-remotewrite-data
-/vlagent-remotewrite-data
 /vmstorage-data
 /vmselect-cache
 /package/temp-deb-*
@@ -27,5 +26,3 @@ _site
 /docs/.jekyll-metadata
 coverage.txt
-cspell.json
 *~
-deployment/docker/provisioning/plugins/
.golangci.yml

@@ -1,29 +1,22 @@
-version: "2"
 run:
   timeout: 2m

 linters:
-  settings:
-    errcheck:
-      exclude-functions:
-        - fmt.Fprintf
-        - fmt.Fprint
-        - (net/http.ResponseWriter).Write
-  exclusions:
-    generated: lax
-    presets:
-      - common-false-positives
-      - legacy
-      - std-error-handling
-    rules:
-      - linters:
-          - staticcheck
-        text: 'SA(4003|1019|5011):'
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
-formatters:
-  exclusions:
-    generated: lax
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
+  enable:
+    - revive
+
+issues:
+  exclude-rules:
+    - linters:
+        - staticcheck
+      text: "SA(4003|1019|5011):"
+  include:
+    - EXC0012
+    - EXC0014
+
+linters-settings:
+  errcheck:
+    exclude-functions:
+      - "fmt.Fprintf"
+      - "fmt.Fprint"
+      - "(net/http.ResponseWriter).Write"
@@ -4,4 +4,3 @@ allowlist:
   - BSD-3-Clause
   - BSD-2-Clause
   - ISC
-  - MPL-2.0
@@ -1 +1 @@
-The document has been moved [here](https://docs.victoriametrics.com/victoriametrics/contributing/).
+The document has been moved [here](https://docs.victoriametrics.com/contributing/).
LICENSE: 2 lines changed

@@ -175,7 +175,7 @@

    END OF TERMS AND CONDITIONS

-   Copyright 2019-2026 VictoriaMetrics, Inc.
+   Copyright 2019-2024 VictoriaMetrics, Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
Makefile: 206 lines changed

@@ -5,32 +5,28 @@ MAKE_PARALLEL := $(MAKE) -j $(MAKE_CONCURRENCY)
 DATEINFO_TAG ?= $(shell date -u +'%Y%m%d-%H%M%S')
 BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
 	git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | openssl sha1 | cut -d' ' -f2 | cut -c 1-8)))
 LATEST_TAG ?= latest

 PKG_TAG ?= $(shell git tag -l --points-at HEAD)
 ifeq ($(PKG_TAG),)
 PKG_TAG := $(BUILDINFO_TAG)
 endif

 EXTRA_DOCKER_TAG_SUFFIX ?=
-EXTRA_GO_BUILD_TAGS ?=

 GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
 TAR_OWNERSHIP ?= --owner=1000 --group=1000
-
-GOLANGCI_LINT_VERSION := 2.9.0

 .PHONY: $(MAKECMDGOALS)

 include app/*/Makefile
 include codespell/Makefile
-include cspell/Makefile
 include docs/Makefile
 include deployment/*/Makefile
 include dashboards/Makefile
 include package/release/Makefile
-include benchmarks/Makefile

 all: \
 	victoria-metrics-prod \
+	victoria-logs-prod \
 	vmagent-prod \
 	vmalert-prod \
 	vmalert-tool-prod \
@@ -54,6 +50,7 @@ publish: \

 package: \
 	package-victoria-metrics \
+	package-victoria-logs \
 	package-vmagent \
 	package-vmalert \
 	package-vmalert-tool \
@@ -125,15 +122,6 @@ vmutils-linux-ppc64le: \
 	vmrestore-linux-ppc64le \
 	vmctl-linux-ppc64le

-vmutils-linux-s390x: \
-	vmagent-linux-s390x \
-	vmalert-linux-s390x \
-	vmalert-tool-linux-s390x \
-	vmauth-linux-s390x \
-	vmbackup-linux-s390x \
-	vmrestore-linux-s390x \
-	vmctl-linux-s390x
-
 vmutils-darwin-amd64: \
 	vmagent-darwin-amd64 \
 	vmalert-darwin-amd64 \
@@ -179,11 +167,9 @@ vmutils-windows-amd64: \
 	vmrestore-windows-amd64 \
 	vmctl-windows-amd64

-# When adding a new crossbuild target, please also add it to the .github/workflows/build.yml
 crossbuild:
 	$(MAKE_PARALLEL) victoria-metrics-crossbuild vmutils-crossbuild

-# When adding a new crossbuild target, please also add it to the .github/workflows/build.yml
 victoria-metrics-crossbuild: \
 	victoria-metrics-linux-386 \
 	victoria-metrics-linux-amd64 \
@@ -196,7 +182,6 @@ victoria-metrics-crossbuild: \
 	victoria-metrics-openbsd-amd64 \
 	victoria-metrics-windows-amd64

-# When adding a new crossbuild target, please also add it to the .github/workflows/build.yml
 vmutils-crossbuild: \
 	vmutils-linux-386 \
 	vmutils-linux-amd64 \
@@ -209,52 +194,12 @@ vmutils-crossbuild: \
 	vmutils-openbsd-amd64 \
 	vmutils-windows-amd64

-publish-final-images:
-	PKG_TAG=$(TAG) APP_NAME=victoria-metrics $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmagent $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmalert $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmalert-tool $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmauth $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmbackup $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmrestore $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) APP_NAME=vmctl $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=victoria-metrics $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmagent $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmalert $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmauth $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackup $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmrestore $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmgateway $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackupmanager $(MAKE) publish-via-docker-from-rc && \
-	PKG_TAG=$(TAG) $(MAKE) publish-latest
-
-publish-latest:
-	PKG_TAG=$(TAG) APP_NAME=victoria-metrics $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmagent $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmalert $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmalert-tool $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmauth $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmbackup $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmrestore $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmctl $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmgateway $(MAKE) publish-via-docker-latest
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackupmanager $(MAKE) publish-via-docker-latest
-
 publish-release:
 	rm -rf bin/*
-	git checkout $(TAG) && $(MAKE) release && $(MAKE) publish && \
-	git checkout $(TAG)-cluster && $(MAKE) release && $(MAKE) publish && \
-	git checkout $(TAG)-enterprise && $(MAKE) release && $(MAKE) publish && \
-	git checkout $(TAG)-enterprise-cluster && $(MAKE) release && $(MAKE) publish
+	git checkout $(TAG) && $(MAKE) release && LATEST_TAG=stable $(MAKE) publish && \
+	git checkout $(TAG)-cluster && $(MAKE) release && LATEST_TAG=cluster-stable $(MAKE) publish && \
+	git checkout $(TAG)-enterprise && $(MAKE) release && LATEST_TAG=enterprise-stable $(MAKE) publish && \
+	git checkout $(TAG)-enterprise-cluster && $(MAKE) release && LATEST_TAG=enterprise-cluster-stable $(MAKE) publish

 release:
 	$(MAKE_PARALLEL) \
@@ -266,7 +211,6 @@ release-victoria-metrics: \
 	release-victoria-metrics-linux-amd64 \
 	release-victoria-metrics-linux-arm \
 	release-victoria-metrics-linux-arm64 \
-	release-victoria-metrics-linux-s390x \
 	release-victoria-metrics-darwin-amd64 \
 	release-victoria-metrics-darwin-arm64 \
 	release-victoria-metrics-freebsd-amd64 \
@@ -285,9 +229,6 @@ release-victoria-metrics-linux-arm:
 release-victoria-metrics-linux-arm64:
 	GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-metrics-goos-goarch

-release-victoria-metrics-linux-s390x:
-	GOOS=linux GOARCH=s390x $(MAKE) release-victoria-metrics-goos-goarch
-
 release-victoria-metrics-darwin-amd64:
 	GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch
@@ -322,12 +263,68 @@ release-victoria-metrics-windows-goarch: victoria-metrics-windows-$(GOARCH)-prod
 	cd bin && rm -rf \
 		victoria-metrics-windows-$(GOARCH)-prod.exe

+release-victoria-logs:
+	$(MAKE_PARALLEL) release-victoria-logs-linux-386 \
+		release-victoria-logs-linux-amd64 \
+		release-victoria-logs-linux-arm \
+		release-victoria-logs-linux-arm64 \
+		release-victoria-logs-darwin-amd64 \
+		release-victoria-logs-darwin-arm64 \
+		release-victoria-logs-freebsd-amd64 \
+		release-victoria-logs-openbsd-amd64 \
+		release-victoria-logs-windows-amd64
+
+release-victoria-logs-linux-386:
+	GOOS=linux GOARCH=386 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-linux-amd64:
+	GOOS=linux GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-linux-arm:
+	GOOS=linux GOARCH=arm $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-linux-arm64:
+	GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-darwin-amd64:
+	GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-darwin-arm64:
+	GOOS=darwin GOARCH=arm64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-freebsd-amd64:
+	GOOS=freebsd GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-openbsd-amd64:
+	GOOS=openbsd GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-windows-amd64:
+	GOARCH=amd64 $(MAKE) release-victoria-logs-windows-goarch
+
+release-victoria-logs-goos-goarch: victoria-logs-$(GOOS)-$(GOARCH)-prod
+	cd bin && \
+		tar $(TAR_OWNERSHIP) --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
+			victoria-logs-$(GOOS)-$(GOARCH)-prod \
+		&& sha256sum victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
+			victoria-logs-$(GOOS)-$(GOARCH)-prod \
+			| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
+	cd bin && rm -rf victoria-logs-$(GOOS)-$(GOARCH)-prod
+
+release-victoria-logs-windows-goarch: victoria-logs-windows-$(GOARCH)-prod
+	cd bin && \
+		zip victoria-logs-windows-$(GOARCH)-$(PKG_TAG).zip \
+			victoria-logs-windows-$(GOARCH)-prod.exe \
+		&& sha256sum victoria-logs-windows-$(GOARCH)-$(PKG_TAG).zip \
+			victoria-logs-windows-$(GOARCH)-prod.exe \
+			> victoria-logs-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
+	cd bin && rm -rf \
+		victoria-logs-windows-$(GOARCH)-prod.exe
+
 release-vmutils: \
 	release-vmutils-linux-386 \
 	release-vmutils-linux-amd64 \
 	release-vmutils-linux-arm64 \
 	release-vmutils-linux-arm \
-	release-vmutils-linux-s390x \
 	release-vmutils-darwin-amd64 \
 	release-vmutils-darwin-arm64 \
 	release-vmutils-freebsd-amd64 \
@@ -336,7 +333,7 @@ release-vmutils: \

 release-vmutils-linux-386:
 	GOOS=linux GOARCH=386 $(MAKE) release-vmutils-goos-goarch

 release-vmutils-linux-amd64:
 	GOOS=linux GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch

@@ -346,9 +343,6 @@ release-vmutils-linux-arm64:
 release-vmutils-linux-arm:
 	GOOS=linux GOARCH=arm $(MAKE) release-vmutils-goos-goarch

-release-vmutils-linux-s390x:
-	GOOS=linux GOARCH=s390x $(MAKE) release-vmutils-goos-goarch
-
 release-vmutils-darwin-amd64:
 	GOOS=darwin GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch
@@ -435,99 +429,73 @@ release-vmutils-windows-goarch: \
 		vmctl-windows-$(GOARCH)-prod.exe

 pprof-cpu:
-	go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics $(PPROF_FILE)
+	go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)

 fmt:
 	gofmt -l -w -s ./lib
 	gofmt -l -w -s ./app
 	gofmt -l -w -s ./apptest

 vet:
-	go vet -tags 'synctest' ./lib/...
+	go vet ./lib/...
 	go vet ./app/...
 	go vet ./apptest/...

 check-all: fmt vet golangci-lint govulncheck

 clean-checkers: remove-golangci-lint remove-govulncheck

 test:
-	go test -tags 'synctest' ./lib/... ./app/...
+	go test ./lib/... ./app/...

 test-race:
-	go test -tags 'synctest' -race ./lib/... ./app/...
-
-test-386:
-	GOARCH=386 go test -tags 'synctest' ./lib/... ./app/...
+	go test -race ./lib/... ./app/...

 test-pure:
-	CGO_ENABLED=0 go test -tags 'synctest' ./lib/... ./app/...
+	CGO_ENABLED=0 go test ./lib/... ./app/...

 test-full:
-	go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

 test-full-386:
-	GOARCH=386 go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
-
-apptest:
-	$(MAKE) victoria-metrics-race vmagent-race vmalert-race vmauth-race vmctl-race vmbackup-race vmrestore-race
-	go test ./apptest/... -skip="^Test(Cluster|Legacy).*"
-
-apptest-legacy: victoria-metrics-race vmbackup-race vmrestore-race
-	OS=$$(uname | tr '[:upper:]' '[:lower:]'); \
-	ARCH=$$(uname -m | tr '[:upper:]' '[:lower:]' | sed 's/x86_64/amd64/'); \
-	VERSION=v1.132.0; \
-	VMSINGLE=victoria-metrics-$${OS}-$${ARCH}-$${VERSION}.tar.gz; \
-	VMCLUSTER=victoria-metrics-$${OS}-$${ARCH}-$${VERSION}-cluster.tar.gz; \
-	URL=https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/$${VERSION}; \
-	DIR=/tmp/$${VERSION}; \
-	test -d $${DIR} || (mkdir $${DIR} && \
-		curl --output-dir /tmp -LO $${URL}/$${VMSINGLE} && tar xzf /tmp/$${VMSINGLE} -C $${DIR} && \
-		curl --output-dir /tmp -LO $${URL}/$${VMCLUSTER} && tar xzf /tmp/$${VMCLUSTER} -C $${DIR} \
-	); \
-	VM_LEGACY_VMSINGLE_PATH=$${DIR}/victoria-metrics-prod \
-	VM_LEGACY_VMSTORAGE_PATH=$${DIR}/vmstorage-prod \
-	go test ./apptest/tests -run="^TestLegacySingle.*"
+	GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

 benchmark:
-	go test -run=NO_TESTS -bench=. ./lib/...
-	go test -run=NO_TESTS -bench=. ./app/...
+	go test -bench=. ./lib/...
+	go test -bench=. ./app/...

 benchmark-pure:
-	CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./lib/...
-	CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./app/...
+	CGO_ENABLED=0 go test -bench=. ./lib/...
+	CGO_ENABLED=0 go test -bench=. ./app/...

 vendor-update:
-	go get -u ./lib/...
-	go get -u ./app/...
-	go mod tidy -compat=1.26
+	go get -u -d ./lib/...
+	go get -u -d ./app/...
+	go mod tidy -compat=1.22
 	go mod vendor

 app-local:
-	CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

 app-local-pure:
-	CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

 app-local-goos-goarch:
-	CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

 app-local-windows-goarch:
-	CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)

 quicktemplate-gen: install-qtc
-	qtc -dir=lib
-	qtc -dir=app
+	qtc

 install-qtc:
 	which qtc || go install github.com/valyala/quicktemplate/qtc@latest

 golangci-lint: install-golangci-lint
-	golangci-lint run --build-tags 'synctest'
+	golangci-lint run

 install-golangci-lint:
-	which golangci-lint && (golangci-lint --version | grep -q $(GOLANGCI_LINT_VERSION)) || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v$(GOLANGCI_LINT_VERSION)
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.60.1

 remove-golangci-lint:
 	rm -rf `which golangci-lint`
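The BUILDINFO_TAG definition near the top of the Makefile is easier to read outside make, where the doubled `$$` collapses to `$`; a sketch of the equivalent shell:

```sh
# Sketch: BUILDINFO_TAG evaluated outside make. Yields the current ref from
# git describe, plus a -dirty-<hash8> suffix when the tree has local changes.
echo "$(git describe --long --all | tr '/' '-')$( \
  git diff-index --quiet HEAD -- || \
  echo "-dirty-$(git diff-index -u HEAD | openssl sha1 | cut -d' ' -f2 | cut -c 1-8)")"
```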
README.md: 111 lines changed

@@ -1,35 +1,29 @@
 # VictoriaMetrics

-[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
-[](https://hub.docker.com/u/victoriametrics)
-[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
-[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml)
-[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
-[](https://slack.victoriametrics.com)
-[](https://x.com/VictoriaMetrics/)
-[](https://www.reddit.com/r/VictoriaMetrics/)
+[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
+[](https://hub.docker.com/r/victoriametrics/victoria-metrics)
+[](https://slack.victoriametrics.com/)
+[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
+[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
+[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
+[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)

 <picture>
-  <source srcset="docs/victoriametrics/logo_white.webp" media="(prefers-color-scheme: dark)">
-  <source srcset="docs/victoriametrics/logo.webp" media="(prefers-color-scheme: light)">
-  <img src="docs/victoriametrics/logo.webp" width="300" alt="VictoriaMetrics logo">
+  <source srcset="docs/logo_white.webp" media="(prefers-color-scheme: dark)">
+  <source srcset="docs/logo.webp" media="(prefers-color-scheme: light)">
+  <img src="docs/logo.webp" width="300" alt="VictoriaMetrics logo">
 </picture>

-VictoriaMetrics is a fast, cost-effective, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
+VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.

 Here are some resources and information about VictoriaMetrics:

-- **Case studies**: [Grammarly, Roblox, Wix, Spotify,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
-- **Available**: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), Docker images on [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
-- **Deployment types**: [Single-node version](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) under [Apache License 2.0](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE).
-- **Getting started:** Read [key concepts](https://docs.victoriametrics.com/victoriametrics/keyconcepts/) and follow the
-  [quick start guide](https://docs.victoriametrics.com/victoriametrics/quick-start/).
-- **Community**: [Slack](https://slack.victoriametrics.com/) (join via [Slack Inviter](https://slack.victoriametrics.com/)), [X (Twitter)](https://x.com/VictoriaMetrics), [YouTube](https://www.youtube.com/@VictoriaMetrics). See full list [here](https://docs.victoriametrics.com/victoriametrics/#community-and-contributions).
-- **Changelog**: Project evolves fast - check the [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics).
-- **Enterprise support:** [Contact us](mailto:info@victoriametrics.com) for commercial support with additional [enterprise features](https://docs.victoriametrics.com/victoriametrics/enterprise/).
-- **Enterprise releases:** Enterprise and [long-term support releases (LTS)](https://docs.victoriametrics.com/victoriametrics/lts-releases/) are publicly available and can be evaluated for free
-  using a [free trial license](https://victoriametrics.com/products/enterprise/trial/).
-- **Security:** we achieved [security certifications](https://victoriametrics.com/security/) for Database Software Development and Software-Based Monitoring Services.
+- Documentation: [docs.victoriametrics.com](https://docs.victoriametrics.com)
+- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/casestudies/).
+- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/enterprise/)
+- Changelog: [CHANGELOG](https://docs.victoriametrics.com/changelog/), and [How to upgrade](#how-to-upgrade-victoriametrics)
+- Community: [Slack](https://slack.victoriametrics.com/), [Twitter](https://twitter.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)

 Yes, we open-source both the single-node VictoriaMetrics and the cluster version.
@@ -39,22 +33,22 @@ VictoriaMetrics is optimized for timeseries data, even when old time series are
|
||||
|
||||
* **Long-term storage for Prometheus** or as a drop-in replacement for Prometheus and Graphite in Grafana.
|
||||
* **Powerful stream aggregation**: Can be used as a StatsD alternative.
|
||||
* **Ideal for big data**: Works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://docs.victoriametrics.com/victoriametrics/enterprise/).
|
||||
* **Ideal for big data**: Works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise/).
|
||||
* **Query language**: Supports both PromQL and the more performant MetricsQL.
|
||||
* **Easy to setup**: No dependencies, single [small binary](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d), configuration through command-line flags, but the default is also fine-tuned; backup and restore with [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* **Global query view**: Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics and queried via a single query.
|
||||
* **Various Protocols**: Support metric scraping, ingestion and backfilling in various protocol.
|
||||
* [Prometheus exporters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/victoriametrics/integrations/prometheus/), [Prometheus exposition format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format).
|
||||
* [InfluxDB line protocol](https://docs.victoriametrics.com/victoriametrics/integrations/influxdb/) over HTTP, TCP and UDP.
|
||||
* [Graphite plaintext protocol](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
|
||||
* [OpenTSDB put message](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-telnet).
|
||||
* [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-http).
|
||||
* [JSON line format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-json-line-format).
|
||||
* [Arbitrary CSV data](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-csv-data).
|
||||
* [Native binary format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-native-format).
|
||||
* [DataDog agent or DogStatsD](https://docs.victoriametrics.com/victoriametrics/integrations/datadog/).
|
||||
* [NewRelic infrastructure agent](https://docs.victoriametrics.com/victoriametrics/integrations/newrelic/#sending-data-from-agent).
|
||||
* [OpenTelemetry metrics format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#sending-data-via-opentelemetry).
|
||||
* [Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](#prometheus-setup), [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
|
||||
* [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
|
||||
* [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
|
||||
* [OpenTSDB put message](#sending-data-via-telnet-put-protocol).
|
||||
* [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests).
|
||||
* [JSON line format](#how-to-import-data-in-json-line-format).
|
||||
* [Arbitrary CSV data](#how-to-import-csv-data).
|
||||
* [Native binary format](#how-to-import-data-in-native-format).
|
||||
* [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
|
||||
* [NewRelic infrastructure agent](#how-to-send-data-from-newrelic-agent).
|
||||
* [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
|
||||
* **NFS-based storages**: Supports storing data on NFS-based storages such as Amazon EFS, Google Filestore.
|
||||
* And many other features such as metrics relabeling, cardinality limiter, etc.
|
||||
|
||||
@@ -66,9 +60,9 @@ In addition, the Enterprise version includes extra features:
|
||||
- **Backup automation**: Automates regular backup procedures.
|
||||
- **Multiple retentions**: Reducing storage costs by specifying different retentions for different datasets.
|
||||
- **Downsampling**: Reducing storage costs and increasing performance for queries over historical data.
|
||||
- **Stable releases** with long-term support lines ([LTS](https://docs.victoriametrics.com/victoriametrics/lts-releases/)).
|
||||
- **Stable releases** with long-term support lines ([LTS](https://docs.victoriametrics.com/lts-releases/)).
|
||||
- **Comprehensive support**: First-class consulting, feature requests and technical support provided by the core VictoriaMetrics dev team.
|
||||
- Many other features, which you can read about on [the Enterprise page](https://docs.victoriametrics.com/victoriametrics/enterprise/).
|
||||
- Many other features, which you can read about on [the Enterprise page](https://docs.victoriametrics.com/enterprise/).
|
||||
|
||||
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. Or you can request a free trial license [here](https://victoriametrics.com/products/enterprise/trial/), downloaded Enterprise binaries are available at [Github Releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).

@@ -81,7 +75,7 @@ Some good benchmarks VictoriaMetrics achieved:

* **Minimal memory footprint**: handles millions of unique time series with [10x less RAM](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) than InfluxDB and up to [7x less RAM](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) than Prometheus, Thanos or Cortex.
* **Highly scalable and performant** at [data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4): it [outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893).
* **High data compression**: [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) fit into the same storage space compared to TimescaleDB, and [7x less storage space](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) is required than for Prometheus, Thanos or Cortex.
* **Reducing storage costs**: [10x more effective](https://docs.victoriametrics.com/victoriametrics/casestudies/#grammarly) than Graphite according to the Grammarly case study.
* **Reducing storage costs**: [10x more effective](https://docs.victoriametrics.com/casestudies/#grammarly) than Graphite according to the Grammarly case study.
* **A single-node VictoriaMetrics** can replace medium-sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB. See [VictoriaMetrics vs Thanos](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683), [Measuring vertical scalability](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae), [Remote write storage wars - PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* **Optimized for storage**: [works well with high-latency IO](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc.).

@@ -90,42 +84,41 @@ Some good benchmarks VictoriaMetrics achieved:

Feel free to ask any questions about VictoriaMetrics:

* [Slack Inviter](https://slack.victoriametrics.com/) and [Slack channel](https://victoriametrics.slack.com/)
* [X (Twitter)](https://x.com/VictoriaMetrics/)
* [Twitter](https://twitter.com/VictoriaMetrics/)
* [LinkedIn](https://www.linkedin.com/company/victoriametrics/)
* [Reddit](https://www.reddit.com/r/VictoriaMetrics/)
* [Telegram-en](https://t.me/VictoriaMetrics_en)
* [Telegram-ru](https://t.me/VictoriaMetrics_ru1)
* [Mastodon](https://mastodon.social/@victoriametrics/)

If you like VictoriaMetrics and want to contribute, then please [read these docs](https://docs.victoriametrics.com/victoriametrics/contributing/).
If you like VictoriaMetrics and want to contribute, then please [read these docs](https://docs.victoriametrics.com/contributing/).

## VictoriaMetrics Logo

The provided [ZIP file](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different logo orientations. Each folder includes the following file types:
[Zip](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different image orientations (main color and inverted version).

* JPEG: Preview files
* PNG: Preview files with transparent background
* AI: Adobe Illustrator files
Files included in each folder:

### VictoriaMetrics Logo Usage Guidelines
* 2 JPEG Preview files
* 2 PNG Preview files with transparent background
* 2 EPS Adobe Illustrator EPS10 files

#### Font
### Logo Usage Guidelines

* Font Used: Lato Black
* Download here: [Lato Font](https://fonts.google.com/specimen/Lato)
#### Font used

* Lato Black
* Lato Regular

#### Color Palette

* Black [#000000](https://www.color-hex.com/color/000000)
* Purple [#4d0e82](https://www.color-hex.com/color/4d0e82)
* Orange [#ff2e00](https://www.color-hex.com/color/ff2e00)
* White [#ffffff](https://www.color-hex.com/color/ffffff)
* HEX [#110f0f](https://www.color-hex.com/color/110f0f)
* HEX [#ffffff](https://www.color-hex.com/color/ffffff)

### Logo Usage Rules
### We kindly ask

* Only use the Lato Black font as specified.
* Maintain sufficient clear space around the logo for visibility.
* Do not modify the spacing, alignment, or positioning of design elements.
* You may resize the logo as needed, but ensure all proportions remain intact.

Thank you for your cooperation!
* Please don't use any font other than the suggested one.
* Keep enough clear space around the logo.
* Do not change the spacing, alignment, or relative locations of the design elements.
* Do not change the proportions of any of the design elements or of the design itself.
You may resize as needed but must retain all proportions.

16 SECURITY.md
@@ -1,4 +1,18 @@

# Security Policy

You can find out about our security policy and VictoriaMetrics version support on the [security page](https://docs.victoriametrics.com/victoriametrics/#security) in the documentation.
## Supported Versions

The following versions of VictoriaMetrics receive regular security fixes:

| Version | Supported |
|---------|--------------------|
| [latest release](https://docs.victoriametrics.com/changelog/) | :white_check_mark: |
| v1.102.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| other releases | :x: |

See [this page](https://victoriametrics.com/security/) for more details.

## Reporting a Vulnerability

Please report any security issues to <security@victoriametrics.com>

BIN VM_logo.zip
Binary file not shown.

113 app/victoria-logs/Makefile Normal file
@@ -0,0 +1,113 @@

# All these commands must run from repository root.

victoria-logs:
	APP_NAME=victoria-logs $(MAKE) app-local

victoria-logs-race:
	APP_NAME=victoria-logs RACE=-race $(MAKE) app-local

victoria-logs-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker

victoria-logs-pure-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-pure

victoria-logs-linux-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-amd64

victoria-logs-linux-arm-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-arm

victoria-logs-linux-arm64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-arm64

victoria-logs-linux-ppc64le-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-ppc64le

victoria-logs-linux-386-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-386

victoria-logs-darwin-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-darwin-amd64

victoria-logs-darwin-arm64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-darwin-arm64

victoria-logs-freebsd-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-freebsd-amd64

victoria-logs-openbsd-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-openbsd-amd64

victoria-logs-windows-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-windows-amd64

package-victoria-logs:
	APP_NAME=victoria-logs $(MAKE) package-via-docker

package-victoria-logs-pure:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-pure

package-victoria-logs-amd64:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-amd64

package-victoria-logs-arm:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-arm

package-victoria-logs-arm64:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-arm64

package-victoria-logs-ppc64le:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-ppc64le

package-victoria-logs-386:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-386

publish-victoria-logs:
	APP_NAME=victoria-logs $(MAKE) publish-via-docker

victoria-logs-linux-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-linux-arm:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

victoria-logs-linux-arm64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

victoria-logs-linux-ppc64le:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

victoria-logs-linux-s390x:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

victoria-logs-linux-loong64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

victoria-logs-linux-386:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

victoria-logs-darwin-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-darwin-arm64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

victoria-logs-freebsd-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-openbsd-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-windows-amd64:
	GOARCH=amd64 APP_NAME=victoria-logs $(MAKE) app-local-windows-goarch

victoria-logs-pure:
	APP_NAME=victoria-logs $(MAKE) app-local-pure

run-victoria-logs:
	mkdir -p victoria-logs-data
	DOCKER_OPTS='-v $(shell pwd)/victoria-logs-data:/victoria-logs-data' \
		APP_NAME=victoria-logs \
		ARGS='' \
		$(MAKE) run-via-docker

@@ -1 +0,0 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).

8 app/victoria-logs/deployment/Dockerfile Normal file
@@ -0,0 +1,8 @@

ARG base_image
FROM $base_image

EXPOSE 9428

ENTRYPOINT ["/victoria-logs-prod"]
ARG src_binary
COPY $src_binary ./victoria-logs-prod

105 app/victoria-logs/main.go Normal file
@@ -0,0 +1,105 @@

package main

import (
	"flag"
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
)

var (
	httpListenAddrs  = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. See also -httpListenAddr.useProxyProtocol")
	useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the given -httpListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
)

func main() {
	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)
	flag.Usage = usage
	envflag.Parse()
	buildinfo.Init()
	logger.Init()

	listenAddrs := *httpListenAddrs
	if len(listenAddrs) == 0 {
		listenAddrs = []string{":9428"}
	}
	logger.Infof("starting VictoriaLogs at %q...", listenAddrs)
	startTime := time.Now()

	vlstorage.Init()
	vlselect.Init()
	vlinsert.Init()

	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
	logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/victorialogs/", time.Since(startTime).Seconds())

	pushmetrics.Init()
	sig := procutil.WaitForSigterm()
	logger.Infof("received signal %s", sig)
	pushmetrics.Stop()

	logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
	startTime = time.Now()
	if err := httpserver.Stop(listenAddrs); err != nil {
		logger.Fatalf("cannot stop the webservice: %s", err)
	}
	logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())

	vlinsert.Stop()
	vlselect.Stop()
	vlstorage.Stop()

	fs.MustStopDirRemover()

	logger.Infof("the VictoriaLogs has been stopped in %.3f seconds", time.Since(startTime).Seconds())
}

func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	if r.URL.Path == "/" {
		if r.Method != http.MethodGet {
			return false
		}
		w.Header().Add("Content-Type", "text/html; charset=utf-8")
		fmt.Fprintf(w, "<h2>Single-node VictoriaLogs</h2></br>")
		fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/victorialogs/'>https://docs.victoriametrics.com/victorialogs/</a></br>")
		fmt.Fprintf(w, "Useful endpoints:</br>")
		httpserver.WriteAPIHelp(w, [][2]string{
			{"select/vmui", "Web UI for VictoriaLogs"},
			{"metrics", "available service metrics"},
			{"flags", "command-line flags"},
		})
		return true
	}
	if vlinsert.RequestHandler(w, r) {
		return true
	}
	if vlselect.RequestHandler(w, r) {
		return true
	}
	return false
}

func usage() {
	const s = `
victoria-logs is a log management and analytics service.

See the docs at https://docs.victoriametrics.com/victorialogs/
`
	flagutil.Usage(s)
}
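The requestHandler above follows a convention used across the VictoriaMetrics components: a handler returns true when it has served the request, so several handlers can be tried in order (vlinsert first, then vlselect) before the caller falls back to 404. A small, hypothetical sketch of that contract (the `chain` helper below is illustrative and not part of the codebase):

```go
package main

import (
	"fmt"
	"net/http"
)

// handlerFunc reports whether it handled the request, mirroring the
// bool-returning handlers above (vlinsert.RequestHandler, vlselect.RequestHandler).
type handlerFunc func(w http.ResponseWriter, r *http.Request) bool

// chain tries each handler in order; the first one to return true wins.
func chain(handlers ...handlerFunc) handlerFunc {
	return func(w http.ResponseWriter, r *http.Request) bool {
		for _, h := range handlers {
			if h(w, r) {
				return true
			}
		}
		return false
	}
}

func main() {
	root := chain(
		func(w http.ResponseWriter, r *http.Request) bool { return r.URL.Path == "/insert" },
		func(w http.ResponseWriter, r *http.Request) bool { return r.URL.Path == "/select" },
	)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if !root(w, r) {
			http.NotFound(w, r) // nothing claimed the request
		}
	})
	fmt.Println("listening on :8080")
	_ = http.ListenAndServe(":8080", nil)
}
```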

12 app/victoria-logs/multiarch/Dockerfile Normal file
@@ -0,0 +1,12 @@

# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image AS certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 9428
ENTRYPOINT ["/victoria-logs-prod"]
ARG TARGETARCH
COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod

@@ -27,9 +27,6 @@ victoria-metrics-linux-ppc64le-prod:
victoria-metrics-linux-386-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-386

victoria-metrics-linux-s390x-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-s390x

victoria-metrics-darwin-amd64-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-amd64

@@ -1,8 +1,8 @@
ARG base_image=non-existing
ARG base_image
FROM $base_image

EXPOSE 8428

ENTRYPOINT ["/victoria-metrics-prod"]
ARG src_binary=non-existing
ARG src_binary
COPY $src_binary ./victoria-metrics-prod

@@ -14,9 +14,9 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"

@@ -31,7 +31,7 @@ var (
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
		"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication")
		"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication")
	dryRun = flag.Bool("dryRun", false, "Whether to check config files without running VictoriaMetrics. The following config files are checked: "+
		"-promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. "+
		"This can be changed with -promscrape.config.strictParse=false command-line flag")

@@ -39,24 +39,9 @@ var (
		"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
		"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
		"Smaller intervals increase disk IO load. Minimum supported value is 1s")
	maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmsingle can receive per second. Data ingestion is paused when the limit is exceeded. "+
		"By default there are no limits on samples ingestion rate.")
	finalDedupScheduleInterval = flag.Duration("storage.finalDedupScheduleCheckInterval", time.Hour, "The interval for checking when the final deduplication process should be started. "+
		"Storage unconditionally adds 25% jitter to the interval value on each check evaluation. "+
		"Changing the interval to bigger values may delay downsampling and deduplication for historical data. "+
		"See also https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication")
)

func main() {
	// VictoriaMetrics is optimized for reduced memory allocations,
	// so it can run with the reduced GOGC in order to reduce the used memory,
	// while keeping CPU usage spent in GC at low levels.
	//
	// Some workloads may need increased GOGC values. Then such values can be set via GOGC environment variable.
	// It is recommended increasing GOGC if go_memstats_gc_cpu_fraction metric exposed at /metrics page
	// exceeds 0.05 for extended periods of time.
	cgroup.SetGOGC(30)

	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)
	flag.Usage = usage

@@ -89,20 +74,13 @@ func main() {
	startTime := time.Now()
	storage.SetDedupInterval(*minScrapeInterval)
	storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
	if *finalDedupScheduleInterval < time.Hour {
		logger.Fatalf("-storage.finalDedupScheduleCheckInterval cannot be smaller than 1 hour; got %s", *finalDedupScheduleInterval)
	}
	storage.SetFinalDedupScheduleInterval(*finalDedupScheduleInterval)
	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
	vmselect.Init()
	vminsertcommon.StartIngestionRateLimiter(*maxIngestionRate)
	vminsert.Init()

	startSelfScraper()

	go httpserver.Serve(listenAddrs, requestHandler, httpserver.ServeOptions{
		UseProxyProtocol: useProxyProtocol,
	})
	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
	logger.Infof("started VictoriaMetrics in %.3f seconds", time.Since(startTime).Seconds())

	pushmetrics.Init()

@@ -118,12 +96,13 @@ func main() {
		logger.Fatalf("cannot stop the webservice: %s", err)
	}
	logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
	vminsertcommon.StopIngestionRateLimiter()
	vminsert.Stop()

	vmstorage.Stop()
	vmselect.Stop()

	fs.MustStopDirRemover()

	logger.Infof("the VictoriaMetrics has been stopped in %.3f seconds", time.Since(startTime).Seconds())
}

@@ -134,7 +113,6 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	}
	w.Header().Add("Content-Type", "text/html; charset=utf-8")
	fmt.Fprintf(w, "<h2>Single-node VictoriaMetrics</h2></br>")
	fmt.Fprintf(w, "Version %s<br>", buildinfo.Version)
	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/'>https://docs.victoriametrics.com/</a></br>")
	fmt.Fprintf(w, "Useful endpoints:</br>")
	httpserver.WriteAPIHelp(w, [][2]string{

@@ -170,7 +148,7 @@ func usage() {
	const s = `
victoria-metrics is a time series database and monitoring solution.

See the docs at https://docs.victoriametrics.com/victoriametrics/
See the docs at https://docs.victoriametrics.com/
`
	flagutil.Usage(s)
}

619 app/victoria-metrics/main_test.go Normal file
@@ -0,0 +1,619 @@

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

const (
	testFixturesDir            = "testdata"
	testStorageSuffix          = "vm-test-storage"
	testHTTPListenAddr         = ":7654"
	testStatsDListenAddr       = ":2003"
	testOpenTSDBListenAddr     = ":4242"
	testOpenTSDBHTTPListenAddr = ":4243"
	testLogLevel               = "INFO"
)

const (
	testReadHTTPPath           = "http://127.0.0.1" + testHTTPListenAddr
	testWriteHTTPPath          = "http://127.0.0.1" + testHTTPListenAddr + "/write"
	testOpenTSDBWriteHTTPPath  = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
	testPromWriteHTTPPath      = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
	testImportCSVWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/import/csv"

	testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
)

const (
	testStorageInitTimeout = 10 * time.Second
)

var (
	storagePath   string
	insertionTime = time.Now().UTC()
)

type test struct {
	Name                     string   `json:"name"`
	Data                     []string `json:"data"`
	InsertQuery              string   `json:"insert_query"`
	Query                    []string `json:"query"`
	ResultMetrics            []Metric `json:"result_metrics"`
	ResultSeries             Series   `json:"result_series"`
	ResultQuery              Query    `json:"result_query"`
	Issue                    string   `json:"issue"`
	ExpectedResultLinesCount int      `json:"expected_result_lines_count"`
}

type Metric struct {
	Metric     map[string]string `json:"metric"`
	Values     []float64         `json:"values"`
	Timestamps []int64           `json:"timestamps"`
}

func (r *Metric) UnmarshalJSON(b []byte) error {
	type plain Metric
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
}

type Series struct {
	Status string              `json:"status"`
	Data   []map[string]string `json:"data"`
}

type Query struct {
	Status string `json:"status"`
	Data   struct {
		ResultType string          `json:"resultType"`
		Result     json.RawMessage `json:"result"`
	} `json:"data"`
}

const rtVector, rtMatrix = "vector", "matrix"

func (q *Query) metrics() ([]Metric, error) {
	switch q.Data.ResultType {
	case rtVector:
		var r QueryInstant
		if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
			return nil, err
		}
		return r.metrics()
	case rtMatrix:
		var r QueryRange
		if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
			return nil, err
		}
		return r.metrics()
	default:
		return nil, fmt.Errorf("unknown result type %q", q.Data.ResultType)
	}
}

type QueryInstant struct {
	Result []struct {
		Labels map[string]string `json:"metric"`
		TV     [2]any            `json:"value"`
	} `json:"result"`
}

func (q QueryInstant) metrics() ([]Metric, error) {
	result := make([]Metric, len(q.Result))
	for i, res := range q.Result {
		f, err := strconv.ParseFloat(res.TV[1].(string), 64)
		if err != nil {
			return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
		}
		var m Metric
		m.Metric = res.Labels
		m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64)))
		m.Values = append(m.Values, f)
		result[i] = m
	}
	return result, nil
}

type QueryRange struct {
	Result []struct {
		Metric map[string]string `json:"metric"`
		Values [][]any           `json:"values"`
	} `json:"result"`
}

func (q QueryRange) metrics() ([]Metric, error) {
	var result []Metric
	for i, res := range q.Result {
		var m Metric
		for _, tv := range res.Values {
			f, err := strconv.ParseFloat(tv[1].(string), 64)
			if err != nil {
				return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, tv[1], err)
			}
			m.Values = append(m.Values, f)
			m.Timestamps = append(m.Timestamps, int64(tv[0].(float64)))
		}
		if len(m.Values) < 1 || len(m.Timestamps) < 1 {
			return nil, fmt.Errorf("metric %v contains no values", res)
		}
		m.Metric = q.Result[i].Metric
		result = append(result, m)
	}
	return result, nil
}

func (q *Query) UnmarshalJSON(b []byte) error {
	type plain Query
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(q))
}

func TestMain(m *testing.M) {
	setUp()
	code := m.Run()
	tearDown()
	os.Exit(code)
}

func setUp() {
	storagePath = filepath.Join(os.TempDir(), testStorageSuffix)
	processFlags()
	logger.Init()
	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
	vmselect.Init()
	vminsert.Init()
	go httpserver.Serve(*httpListenAddrs, useProxyProtocol, requestHandler)
	readyStorageCheckFunc := func() bool {
		resp, err := http.Get(testHealthHTTPPath)
		if err != nil {
			return false
		}
		_ = resp.Body.Close()
		return resp.StatusCode == 200
	}
	if err := waitFor(testStorageInitTimeout, readyStorageCheckFunc); err != nil {
		log.Fatalf("http server can't start for %s, err %s", testStorageInitTimeout, err)
	}
}

func processFlags() {
	flag.Parse()
	for _, fv := range []struct {
		flag  string
		value string
	}{
		{flag: "storageDataPath", value: storagePath},
		{flag: "httpListenAddr", value: testHTTPListenAddr},
		{flag: "graphiteListenAddr", value: testStatsDListenAddr},
		{flag: "opentsdbListenAddr", value: testOpenTSDBListenAddr},
		{flag: "loggerLevel", value: testLogLevel},
		{flag: "opentsdbHTTPListenAddr", value: testOpenTSDBHTTPListenAddr},
	} {
		// panics if the flag doesn't exist
		if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
			log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
		}
	}
}

func waitFor(timeout time.Duration, f func() bool) error {
	fraction := timeout / 10
	for i := fraction; i < timeout; i += fraction {
		if f() {
			return nil
		}
		time.Sleep(fraction)
	}
	return fmt.Errorf("timeout")
}

func tearDown() {
	if err := httpserver.Stop(*httpListenAddrs); err != nil {
		log.Printf("cannot stop the webservice: %s", err)
	}
	vminsert.Stop()
	vmstorage.Stop()
	vmselect.Stop()
	fs.MustRemoveAll(storagePath)
}

func TestWriteRead(t *testing.T) {
	t.Run("write", testWrite)
	time.Sleep(500 * time.Millisecond)
	vmstorage.Storage.DebugFlush()
	time.Sleep(1500 * time.Millisecond)
	t.Run("read", testRead)
}

func testWrite(t *testing.T) {
	t.Run("prometheus", func(t *testing.T) {
		for _, test := range readIn("prometheus", insertionTime) {
			if test.Data == nil {
				continue
			}
			r := testutil.WriteRequest{}
			testData := strings.Join(test.Data, "\n")
			if err := json.Unmarshal([]byte(testData), &r.Timeseries); err != nil {
				panic(fmt.Errorf("BUG: cannot unmarshal TimeSeries: %s\ntest data\n%s", err, testData))
			}
			if n := len(r.Timeseries); n <= 0 {
				panic(fmt.Errorf("BUG: expecting non-empty Timeseries in test data:\n%s", testData))
			}
			data := testutil.Compress(r)
			httpWrite(t, testPromWriteHTTPPath, test.InsertQuery, bytes.NewBuffer(data))
		}
	})
	t.Run("csv", func(t *testing.T) {
		for _, test := range readIn("csv", insertionTime) {
			if test.Data == nil {
				continue
			}
			httpWrite(t, testImportCSVWriteHTTPPath, test.InsertQuery, bytes.NewBuffer([]byte(strings.Join(test.Data, "\n"))))
		}
	})

	t.Run("influxdb", func(t *testing.T) {
		for _, x := range readIn("influxdb", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				httpWrite(t, testWriteHTTPPath, test.InsertQuery, bytes.NewBufferString(strings.Join(test.Data, "\n")))
			})
		}
	})
	t.Run("graphite", func(t *testing.T) {
		for _, x := range readIn("graphite", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				tcpWrite(t, "127.0.0.1"+testStatsDListenAddr, strings.Join(test.Data, "\n"))
			})
		}
	})
	t.Run("opentsdb", func(t *testing.T) {
		for _, x := range readIn("opentsdb", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				tcpWrite(t, "127.0.0.1"+testOpenTSDBListenAddr, strings.Join(test.Data, "\n"))
			})
		}
	})
	t.Run("opentsdbhttp", func(t *testing.T) {
		for _, x := range readIn("opentsdbhttp", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				logger.Infof("writing %s", test.Data)
				httpWrite(t, testOpenTSDBWriteHTTPPath, test.InsertQuery, bytes.NewBufferString(strings.Join(test.Data, "\n")))
			})
		}
	})
}

func testRead(t *testing.T) {
	for _, engine := range []string{"csv", "prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
		t.Run(engine, func(t *testing.T) {
			for _, x := range readIn(engine, insertionTime) {
				test := x
				t.Run(test.Name, func(t *testing.T) {
					t.Parallel()
					for _, q := range test.Query {
						q = testutil.PopulateTimeTplString(q, insertionTime)
						if test.Issue != "" {
							test.Issue = "\nRegression in " + test.Issue
						}
						switch {
						case strings.HasPrefix(q, "/api/v1/export/csv"):
							data := strings.Split(string(httpReadData(t, testReadHTTPPath, q)), "\n")
							// Fail when the line count differs from the expected one.
							if len(data) != test.ExpectedResultLinesCount {
								t.Fatalf("unexpected number of csv lines; want=%d\ngot=%d test=%s.%s\nresponse=%q", test.ExpectedResultLinesCount, len(data), q, test.Issue, strings.Join(data, "\n"))
							}
						case strings.HasPrefix(q, "/api/v1/export"):
							if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
								t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
							}
						case strings.HasPrefix(q, "/api/v1/series"):
							s := Series{}
							httpReadStruct(t, testReadHTTPPath, q, &s)
							if err := checkSeriesResult(s, test.ResultSeries); err != nil {
								t.Fatalf("Series. %s fails with error %s.%s", q, err, test.Issue)
							}
						case strings.HasPrefix(q, "/api/v1/query"):
							queryResult := Query{}
							httpReadStruct(t, testReadHTTPPath, q, &queryResult)
							gotMetrics, err := queryResult.metrics()
							if err != nil {
								t.Fatalf("failed to parse query response: %s", err)
							}
							expMetrics, err := test.ResultQuery.metrics()
							if err != nil {
								t.Fatalf("failed to parse expected response: %s", err)
							}
							if err := checkMetricsResult(gotMetrics, expMetrics); err != nil {
								t.Fatalf("%q fails with error %s.%s", q, err, test.Issue)
							}
						default:
							t.Fatalf("unsupported read query %s", q)
						}
					}
				})
			}
		})
	}
}

func readIn(readFor string, insertTime time.Time) []test {
	testDir := filepath.Join(testFixturesDir, readFor)
	var tt []test
	err := filepath.Walk(testDir, func(path string, _ os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if filepath.Ext(path) != ".json" {
			return nil
		}
		b, err := os.ReadFile(path)
		if err != nil {
			panic(fmt.Errorf("BUG: cannot read %s: %s", path, err))
		}
		item := test{}
		if err := json.Unmarshal(b, &item); err != nil {
			panic(fmt.Errorf("cannot parse %T from %s: %s; data:\n%s", &item, path, err, b))
		}
		for i := range item.Data {
			item.Data[i] = testutil.PopulateTimeTplString(item.Data[i], insertTime)
		}
		tt = append(tt, item)
		return nil
	})
	if err != nil {
		panic(fmt.Errorf("BUG: cannot read test data at %s: %w", testDir, err))
	}
	if len(tt) == 0 {
		panic(fmt.Errorf("BUG: no tests found in %s", testDir))
	}
	return tt
}

func httpWrite(t *testing.T, address, query string, r io.Reader) {
	t.Helper()

	requestURL := address + query
	resp, err := http.Post(requestURL, "", r)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	_ = resp.Body.Close()
	if resp.StatusCode != 204 {
		t.Fatalf("unexpected status code received from %s; got %d; want 204", requestURL, resp.StatusCode)
	}
}

func tcpWrite(t *testing.T, address, data string) {
	t.Helper()

	conn, err := net.Dial("tcp", address)
	if err != nil {
		t.Fatalf("cannot dial %s: %s", address, err)
	}
	defer func() {
		_ = conn.Close()
	}()
	n, err := conn.Write([]byte(data))
	if err != nil {
		t.Fatalf("cannot write %d bytes to %s: %s", len(data), address, err)
	}
	if n != len(data) {
		panic(fmt.Errorf("BUG: conn.Write() returned unexpected number of written bytes to %s; got %d; want %d", address, n, len(data)))
	}
}

func httpReadMetrics(t *testing.T, address, query string) []Metric {
	t.Helper()

	requestURL := address + query
	resp, err := http.Get(requestURL)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != 200 {
		t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
	}

	var rows []Metric
	dec := json.NewDecoder(resp.Body)
	for {
		var row Metric
		err := dec.Decode(&row)
		if err != nil {
			if errors.Is(err, io.EOF) {
				return rows
			}
			t.Fatalf("cannot decode %T from response received from %s: %s", &row, requestURL, err)
		}
		rows = append(rows, row)
	}
}

func httpReadStruct(t *testing.T, address, query string, dst any) {
	t.Helper()

	requestURL := address + query
	resp, err := http.Get(requestURL)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != 200 {
		t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
	}
	err = json.NewDecoder(resp.Body).Decode(dst)
	if err != nil {
		t.Fatalf("cannot decode %T from response received from %s: %s", dst, requestURL, err)
	}
}

func httpReadData(t *testing.T, address, query string) []byte {
	t.Helper()

	requestURL := address + query
	resp, err := http.Get(requestURL)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != 200 {
		t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
	}
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("cannot read response from %s: %s", requestURL, err)
	}
	return data
}

func checkMetricsResult(got, want []Metric) error {
	for _, r := range append([]Metric(nil), got...) {
		want = removeIfFoundMetrics(r, want)
	}
	if len(want) > 0 {
		return fmt.Errorf("expected metrics %+v not found in %+v", want, got)
	}
	return nil
}

func removeIfFoundMetrics(r Metric, contains []Metric) []Metric {
	for i, item := range contains {
		if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) &&
			reflect.DeepEqual(r.Timestamps, item.Timestamps) {
			contains[i] = contains[len(contains)-1]
			return contains[:len(contains)-1]
		}
	}
	return contains
}

func checkSeriesResult(got, want Series) error {
	if got.Status != want.Status {
		return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
	}
	wantData := append([]map[string]string(nil), want.Data...)
	for _, r := range got.Data {
		wantData = removeIfFoundSeries(r, wantData)
	}
	if len(wantData) > 0 {
		return fmt.Errorf("expected series %+v not found in %+v", wantData, got.Data)
	}
	return nil
}

func removeIfFoundSeries(r map[string]string, contains []map[string]string) []map[string]string {
	for i, item := range contains {
		if reflect.DeepEqual(r, item) {
			contains[i] = contains[len(contains)-1]
			return contains[:len(contains)-1]
		}
	}
	return contains
}

func TestImportJSONLines(t *testing.T) {
	f := func(labelsCount, labelLen int) {
		t.Helper()

		reqURL := fmt.Sprintf("http://localhost%s/api/v1/import", testHTTPListenAddr)
		line := generateJSONLine(labelsCount, labelLen)
		req, err := http.NewRequest("POST", reqURL, bytes.NewBufferString(line))
		if err != nil {
			t.Fatalf("cannot create request: %s", err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			t.Fatalf("cannot perform request for labelsCount=%d, labelLen=%d: %s", labelsCount, labelLen, err)
		}
		if resp.StatusCode != 204 {
			t.Fatalf("unexpected statusCode for labelsCount=%d, labelLen=%d; got %d; want 204", labelsCount, labelLen, resp.StatusCode)
		}
	}

	// labels with various lengths
	for i := 0; i < 500; i++ {
		f(10, i*5)
	}

	// Too many labels
	f(1000, 100)

	// Too long labels
	f(1, 100_000)
	f(10, 100_000)
	f(10, 10_000)
}

func generateJSONLine(labelsCount, labelLen int) string {
	m := make(map[string]string, labelsCount)
	m["__name__"] = generateSizedRandomString(labelLen)
	for j := 1; j < labelsCount; j++ {
		labelName := generateSizedRandomString(labelLen)
		labelValue := generateSizedRandomString(labelLen)
		m[labelName] = labelValue
	}

	type jsonLine struct {
		Metric     map[string]string `json:"metric"`
		Values     []float64         `json:"values"`
		Timestamps []int64           `json:"timestamps"`
	}
	line := &jsonLine{
		Metric:     m,
		Values:     []float64{1.34},
		Timestamps: []int64{time.Now().UnixNano() / 1e6},
	}
	data, err := json.Marshal(&line)
	if err != nil {
		panic(fmt.Errorf("cannot marshal JSON: %w", err))
	}
	data = append(data, '\n')
	return string(data)
}

const alphabetSample = `qwertyuiopasdfghjklzxcvbnm`

func generateSizedRandomString(size int) string {
	dst := make([]byte, size)
	for i := range dst {
		dst[i] = alphabetSample[rand.Intn(len(alphabetSample))]
	}
	return string(dst)
}

@@ -1,6 +1,6 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image=non-existing
ARG root_image=non-existing
ARG certs_image
ARG root_image
FROM $certs_image AS certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

@@ -9,5 +9,4 @@ COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certifica
EXPOSE 8428
ENTRYPOINT ["/victoria-metrics-prod"]
ARG TARGETARCH
ARG BINARY_SUFFIX=non-existing
COPY victoria-metrics-linux-${TARGETARCH}-prod${BINARY_SUFFIX} ./victoria-metrics-prod
COPY victoria-metrics-linux-${TARGETARCH}-prod ./victoria-metrics-prod

@@ -10,12 +10,9 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
)

var (

@@ -29,9 +26,11 @@ var selfScraperWG sync.WaitGroup

func startSelfScraper() {
	selfScraperStopCh = make(chan struct{})
	selfScraperWG.Go(func() {
	selfScraperWG.Add(1)
	go func() {
		defer selfScraperWG.Done()
		selfScraper(*selfScrapeInterval)
	})
	}()
}
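This hunk appears to swap sync.WaitGroup.Go for the classic Add/Done pattern, presumably because this branch targets an older Go toolchain (WaitGroup.Go was added in Go 1.25, if I recall correctly). The two forms are equivalent; a minimal sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Classic form, works on all Go versions: Add before spawning, Done when finished.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("worker (Add/Done)")
	}()

	// Equivalent helper in newer Go releases (assumed Go 1.25+):
	// it calls Add(1), runs f in a new goroutine and defers Done.
	wg.Go(func() {
		fmt.Println("worker (Go)")
	})

	wg.Wait()
}
```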

func stopSelfScraper() {

@@ -48,7 +47,6 @@ func selfScraper(scrapeInterval time.Duration) {

	var bb bytesutil.ByteBuffer
	var rows prometheus.Rows
	var metadataRows prometheus.MetadataRows
	var mrs []storage.MetricRow
	var labels []prompb.Label
	t := time.NewTicker(scrapeInterval)

@@ -58,12 +56,7 @@ func selfScraper(scrapeInterval time.Duration) {
		appmetrics.WritePrometheusMetrics(&bb)
		s := bytesutil.ToUnsafeString(bb.B)
		rows.Reset()
		// Parse metrics and optionally metadata when enabled
		if prommetadata.IsEnabled() {
			rows, metadataRows = prometheus.UnmarshalWithMetadata(rows, metadataRows, s, nil)
		} else {
			rows.UnmarshalWithErrLogger(s, nil)
		}
		rows.Unmarshal(s)
		mrs = mrs[:0]
		for i := range rows.Rows {
			r := &rows.Rows[i]

@@ -75,10 +68,6 @@ func selfScraper(scrapeInterval time.Duration) {
				t := &r.Tags[j]
				labels = addLabel(labels, t.Key, t.Value)
			}
			if timeserieslimits.IsExceeding(labels) {
				// Skip metric with exceeding labels.
				continue
			}
			if len(mrs) < cap(mrs) {
				mrs = mrs[:len(mrs)+1]
			} else {

@@ -96,19 +85,6 @@ func selfScraper(scrapeInterval time.Duration) {
		if err := vmstorage.AddRows(mrs); err != nil {
			logger.Errorf("cannot store self-scraped metrics: %s", err)
		}
		if len(metadataRows.Rows) > 0 {
			mms := make([]metricsmetadata.Row, 0, len(metadataRows.Rows))
			for _, mm := range metadataRows.Rows {
				mms = append(mms, metricsmetadata.Row{
					MetricFamilyName: bytesutil.ToUnsafeBytes(mm.Metric),
					Help:             bytesutil.ToUnsafeBytes(mm.Help),
					Type:             mm.Type,
				})
			}
			if err := vmstorage.AddMetadataRows(mms); err != nil {
				logger.Errorf("cannot store self-scraped metrics metadata: %s", err)
			}
		}
	}
	for {
		select {

@@ -33,13 +33,13 @@ func PopulateTimeTpl(b []byte, tGlobal time.Time) []byte {
	}
	switch strings.TrimSpace(parts[0]) {
	case `TIME_S`:
		return fmt.Appendf(nil, "%d", t.Unix())
		return []byte(fmt.Sprintf("%d", t.Unix()))
	case `TIME_MSZ`:
		return fmt.Appendf(nil, "%d", t.Unix()*1e3)
		return []byte(fmt.Sprintf("%d", t.Unix()*1e3))
	case `TIME_MS`:
		return fmt.Appendf(nil, "%d", timeToMillis(t))
		return []byte(fmt.Sprintf("%d", timeToMillis(t)))
	case `TIME_NS`:
		return fmt.Appendf(nil, "%d", t.UnixNano())
		return []byte(fmt.Sprintf("%d", t.UnixNano()))
	default:
		log.Fatalf("unknown time pattern %s in %s", parts[0], repl)
	}
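Similarly, this hunk appears to replace fmt.Appendf with fmt.Sprintf, again presumably for compatibility with an older toolchain (fmt.Appendf was added in Go 1.19). The two are equivalent here; Appendf just formats directly into a byte slice and skips the intermediate string allocation:

```go
package main

import "fmt"

func main() {
	// fmt.Sprintf builds a string, which is then converted to []byte:
	a := []byte(fmt.Sprintf("%d", 42))

	// fmt.Appendf (Go 1.19+) appends the formatted output directly to the
	// given slice (nil here), avoiding the intermediate string:
	b := fmt.Appendf(nil, "%d", 42)

	fmt.Println(string(a) == string(b)) // true
}
```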

14 app/victoria-metrics/testdata/csv/basic.json vendored Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "csv export",
|
||||
"data": [
|
||||
"rfc3339,4,{TIME_MS}",
|
||||
"rfc3339milli,6,{TIME_MS}",
|
||||
"ts,8,{TIME_MS}",
|
||||
"tsms,10,{TIME_MS},"
|
||||
],
|
||||
"insert_query": "?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms",
|
||||
"query": [
|
||||
"/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
|
||||
],
|
||||
"expected_result_lines_count": 4
|
||||
}
|
||||
14
app/victoria-metrics/testdata/csv/with_extra_labels.json
vendored
Normal file
14
app/victoria-metrics/testdata/csv/with_extra_labels.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "csv export with extra_labels",
|
||||
"data": [
|
||||
"location-1,4,{TIME_MS}",
|
||||
"location-2,6,{TIME_MS}",
|
||||
"location-3,8,{TIME_MS}",
|
||||
"location-4,10,{TIME_MS},"
|
||||
],
|
||||
"insert_query": "?format=1:label:location,2:metric:test_csv_labels,3:time:unix_ms&extra_label=location=location-1",
|
||||
"query": [
|
||||
"/api/v1/export/csv?format=__name__,location,__value__,__timestamp__:unix_ms&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
|
||||
],
|
||||
"expected_result_lines_count": 4
|
||||
}
|
||||
8
app/victoria-metrics/testdata/graphite/basic.json
vendored
Normal file
8
app/victoria-metrics/testdata/graphite/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["graphite.foo.bar.baz;tag1=value1;tag2=value2 123 {TIME_S}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"graphite.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
16
app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json
vendored
Normal file
16
app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "comparison-not-inf-not-nan",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150",
|
||||
"data": [
|
||||
"not_nan_not_inf;item=x 1 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=x 1 {TIME_S-2m}",
|
||||
"not_nan_not_inf;item=y 3 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=y 1 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query_range?query=1/(not_nan_not_inf-1)!=inf!=nan&start={TIME_S-3m}&end={TIME_S}&step=60"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
{"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"], ["{TIME_S}","0.5"]]}
|
||||
]}}
|
||||
}
|
||||
16
app/victoria-metrics/testdata/graphite/empty-label-match.json
vendored
Normal file
16
app/victoria-metrics/testdata/graphite/empty-label-match.json
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "empty-label-match",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395",
|
||||
"data": [
|
||||
"empty_label_match 1 {TIME_S-1m}",
|
||||
"empty_label_match;foo=bar 2 {TIME_S-1m}",
|
||||
"empty_label_match;foo=baz 3 {TIME_S-1m}"],
|
||||
"query": ["/api/v1/query_range?query=empty_label_match{foo=~'bar|'}&start={TIME_S-1m}&end={TIME_S}&step=60"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
{"metric":{"__name__":"empty_label_match"},"values":[["{TIME_S-1m}","1"],["{TIME_S}","1"]]},
|
||||
{"metric":{"__name__":"empty_label_match","foo":"bar"},"values":[["{TIME_S-1m}","2"],["{TIME_S}","2"]]}
|
||||
]}}
|
||||
}
|
||||
17
app/victoria-metrics/testdata/graphite/graphite-selector.json
vendored
Normal file
17
app/victoria-metrics/testdata/graphite/graphite-selector.json
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"name": "graphite-selector",
|
||||
"issue": "",
|
||||
"data": [
|
||||
"graphite-selector.bar.baz 1 {TIME_S-1m}",
|
||||
"graphite-selector.xxx.yy 2 {TIME_S-1m}",
|
||||
"graphite-selector.bb.cc 3 {TIME_S-1m}",
|
||||
"graphite-selector.a.baz 4 {TIME_S-1m}"],
|
||||
"query": ["/api/v1/query?query=sort({__graphite__='graphite-selector.*.baz'})&time={TIME_S-1m}"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"vector","result":[
|
||||
{"metric":{"__name__":"graphite-selector.bar.baz"},"value":["{TIME_S-1m}","1"]},
|
||||
{"metric":{"__name__":"graphite-selector.a.baz"},"value":["{TIME_S-1m}","4"]}
|
||||
]}
|
||||
}
|
||||
}
|
||||
23
app/victoria-metrics/testdata/graphite/max_lookback_set.json
vendored
Normal file
23
app/victoria-metrics/testdata/graphite/max_lookback_set.json
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"name": "max_lookback_set",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
|
||||
"data": [
|
||||
"max_lookback_set 1 {TIME_S-30s}",
|
||||
"max_lookback_set 2 {TIME_S-60s}",
|
||||
"max_lookback_set 3 {TIME_S-120s}",
|
||||
"max_lookback_set 4 {TIME_S-150s}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=max_lookback_set&start={TIME_S-150s}&end={TIME_S}&step=10s&max_lookback=1s"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"max_lookback_set"},"values":[
|
||||
["{TIME_S-150s}","4"],
|
||||
["{TIME_S-120s}","3"],
|
||||
["{TIME_S-60s}","2"],
|
||||
["{TIME_S-30s}","1"],
|
||||
["{TIME_S-20s}","1"],
|
||||
["{TIME_S-10s}","1"],
|
||||
["{TIME_S-0s}","1"]
|
||||
]}]}}
|
||||
}
|
||||
31
app/victoria-metrics/testdata/graphite/max_lookback_unset.json
vendored
Normal file
31
app/victoria-metrics/testdata/graphite/max_lookback_unset.json
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"name": "max_lookback_unset",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
|
||||
"data": [
|
||||
"max_lookback_unset 1 {TIME_S-30s}",
|
||||
"max_lookback_unset 2 {TIME_S-60s}",
|
||||
"max_lookback_unset 3 {TIME_S-120s}",
|
||||
"max_lookback_unset 4 {TIME_S-150s}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=max_lookback_unset&start={TIME_S-150s}&end={TIME_S}&step=10s"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"max_lookback_unset"},"values":[
|
||||
["{TIME_S-150s}","4"],
|
||||
["{TIME_S-140s}","4"],
|
||||
["{TIME_S-130s}","4"],
|
||||
["{TIME_S-120s}","3"],
|
||||
["{TIME_S-110s}","3"],
|
||||
["{TIME_S-100s}","3"],
|
||||
["{TIME_S-90s}","3"],
|
||||
["{TIME_S-80s}","3"],
|
||||
["{TIME_S-60s}","2"],
|
||||
["{TIME_S-50s}","2"],
|
||||
["{TIME_S-40s}","2"],
|
||||
["{TIME_S-30s}","1"],
|
||||
["{TIME_S-20s}","1"],
|
||||
["{TIME_S-10s}","1"],
|
||||
["{TIME_S-0s}","1"]
|
||||
]}]}}
|
||||
}
|
||||
16
app/victoria-metrics/testdata/graphite/name-plus-negative-filter.json
vendored
Normal file
16
app/victoria-metrics/testdata/graphite/name-plus-negative-filter.json
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "name-plus-negative-filter",
|
||||
"issue": "",
|
||||
"data": [
|
||||
"name-plus-negative-filter;foo=123 1 {TIME_S-1m}",
|
||||
"name-plus-negative-filter;bar=123 2 {TIME_S-1m}",
|
||||
"name-plus-negative-filter;foo=qwe 3 {TIME_S-1m}"
|
||||
],
|
||||
"query": ["/api/v1/query?query={__name__='name-plus-negative-filter',foo!='123'}&time={TIME_S-1m}"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"vector","result":[
|
||||
{"metric":{"__name__":"name-plus-negative-filter","foo":"qwe"},"value":["{TIME_S-1m}","3"]}
|
||||
]}
|
||||
}
|
||||
}
|
||||
18 app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json vendored Normal file
@@ -0,0 +1,18 @@
{
  "name": "not-nan-as-missing-data",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153",
  "data": [
    "not_nan_as_missing_data;item=x 2 {TIME_S-2m}",
    "not_nan_as_missing_data;item=x 1 {TIME_S-1m}",
    "not_nan_as_missing_data;item=y 4 {TIME_S-2m}",
    "not_nan_as_missing_data;item=y 3 {TIME_S-1m}"
  ],
  "query": ["/api/v1/query_range?query=not_nan_as_missing_data>1&start={TIME_S-2m}&end={TIME_S}&step=60"],
  "result_query": {
    "status":"success",
    "data":{"resultType":"matrix",
      "result":[
        {"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]},
        {"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"],["{TIME_S}", "3"]]}
      ]}}
}
14 app/victoria-metrics/testdata/graphite/subquery-aggregation.json vendored Normal file
@@ -0,0 +1,14 @@
{
  "name": "subquery-aggregation",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
  "data": [
    "forms_daily_count;item=x 1 {TIME_S-1m}",
    "forms_daily_count;item=x 2 {TIME_S-2m}",
    "forms_daily_count;item=y 3 {TIME_S-1m}",
    "forms_daily_count;item=y 4 {TIME_S-2m}"],
  "query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}&latency_offset=1ms"],
  "result_query": {
    "status":"success",
    "data":{"resultType":"vector","result":[{"metric":{"item":"x"},"value":["{TIME_S-1m}","2"]},{"metric":{"item":"y"},"value":["{TIME_S-1m}","4"]}]}
  }
}
9 app/victoria-metrics/testdata/influxdb/basic.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "name": "basic_insertion",
  "data": ["measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME_NS}"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MS}"]},
    {"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
  ]
}
10 app/victoria-metrics/testdata/influxdb/with_extra_labels.json vendored Normal file
@@ -0,0 +1,10 @@
{
  "name": "insert_with_extra_labels",
  "data": ["measurement,tag1=value1,tag2=value2 field6=1.23,field5=123 {TIME_NS}"],
  "insert_query": "?extra_label=job=test&extra_label=tag2=value10",
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"measurement_field5","tag1":"value1","job": "test","tag2":"value10"},"values":[123], "timestamps": ["{TIME_MS}"]},
    {"metric":{"__name__":"measurement_field6","tag1":"value1","job": "test","tag2":"value10"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
  ]
}
8 app/victoria-metrics/testdata/opentsdb/basic.json vendored Normal file
@@ -0,0 +1,8 @@
{
  "name": "basic_insertion",
  "data": ["put openstdb.foo.bar.baz {TIME_S} 123 tag1=value1 tag2=value2"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"openstdb.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
  ]
}
8 app/victoria-metrics/testdata/opentsdbhttp/basic.json vendored Normal file
@@ -0,0 +1,8 @@
{
  "name": "basic_insertion",
  "data": ["{\"metric\": \"opentsdbhttp.foo\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"opentsdbhttp.foo","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
  ]
}
9 app/victoria-metrics/testdata/opentsdbhttp/multi_line.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "name": "multiline",
  "data": ["[{\"metric\": \"opentsdbhttp.multiline1\", \"value\": 1001, \"timestamp\": \"{TIME_S}\", \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}, {\"metric\": \"opentsdbhttp.multiline2\", \"value\": 1002, \"timestamp\": {TIME_S}}]"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"opentsdbhttp.multiline1","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]},
    {"metric":{"__name__":"opentsdbhttp.multiline2"},"values":[1002], "timestamps": ["{TIME_MSZ}"]}
  ]
}
9 app/victoria-metrics/testdata/opentsdbhttp/with_extra_labels.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "name": "insert_with_extra_labels",
  "data": ["{\"metric\": \"opentsdbhttp.foobar\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
  "insert_query": "?extra_label=job=open-test&extra_label=x=z",
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"opentsdbhttp.foobar","bar":"baz","x":"z","job": "open-test"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
  ]
}
8 app/victoria-metrics/testdata/prometheus/basic.json vendored Normal file
@@ -0,0 +1,8 @@
{
  "name": "basic_insertion",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.bar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"prometheus.bar","baz":"qux"},"values":[100000], "timestamps": ["{TIME_MS}"]}
  ]
}
10 app/victoria-metrics/testdata/prometheus/case-sensitive-regex.json vendored Normal file
@@ -0,0 +1,10 @@
{
  "name": "case-sensitive-regex",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/161",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"sensitiveRegex\"}],\"samples\":[{\"value\":2,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"SensitiveRegex\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
  "query": ["/api/v1/export?match={label=~'(?i)sensitiveregex'}"],
  "result_metrics": [
    {"metric":{"__name__":"prometheus.sensitiveRegex","label":"sensitiveRegex"},"values":[2], "timestamps": ["{TIME_MS}"]},
    {"metric":{"__name__":"prometheus.sensitiveRegex","label":"SensitiveRegex"},"values":[1], "timestamps": ["{TIME_MS}"]}
  ]
}
9 app/victoria-metrics/testdata/prometheus/duplicate-label.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "name": "duplicate_label",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/172",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.duplicate_label\"},{\"name\":\"duplicate\",\"value\":\"label\"},{\"name\":\"duplicate\",\"value\":\"label\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"prometheus.duplicate_label","duplicate":"label"},"values":[1], "timestamps": ["{TIME_MS}"]}
  ]
}
12 app/victoria-metrics/testdata/prometheus/instant-matrix.json vendored Normal file
@@ -0,0 +1,12 @@
{
  "name": "instant query with look-behind window",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
  "query": ["/api/v1/query?query=foo[5m]"],
  "result_query": {
    "status": "success",
    "data":{
      "resultType":"matrix",
      "result":[{"metric":{"__name__":"foo"},"values":[["{TIME_S-60s}", "1"]]}]
    }
  }
}
11 app/victoria-metrics/testdata/prometheus/instant-scalar.json vendored Normal file
@@ -0,0 +1,11 @@
{
  "name": "instant scalar query",
  "query": ["/api/v1/query?query=42&time={TIME_S}"],
  "result_query": {
    "status": "success",
    "data":{
      "resultType":"vector",
      "result":[{"metric":{},"value":["{TIME_S}", "42"]}]
    }
  }
}
13 app/victoria-metrics/testdata/prometheus/issue-5553-too-big-lookback.json vendored Normal file
@@ -0,0 +1,13 @@
{
  "name": "too big look-behind window",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"},{\"name\":\"issue\",\"value\":\"5553\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
  "query": ["/api/v1/query?query=foo{issue=\"5553\"}[100y]"],
  "result_query": {
    "status": "success",
    "data":{
      "resultType":"matrix",
      "result":[{"metric":{"__name__":"foo", "issue": "5553"},"values":[["{TIME_S-60s}", "1"]]}]
    }
  }
}
15 app/victoria-metrics/testdata/prometheus/match-series.json vendored Normal file
@@ -0,0 +1,15 @@
{
  "name": "match_series",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/155",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"1\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"2\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"3\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"4\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
  "query": ["/api/v1/series?match[]={__name__='MatchSeries'}", "/api/v1/series?match[]={__name__=~'MatchSeries.*'}"],
  "result_series": {
    "status": "success",
    "data": [
      {"__name__":"MatchSeries","db":"TenMinute","Park":"1","TurbineType":"V112"},
      {"__name__":"MatchSeries","db":"TenMinute","Park":"2","TurbineType":"V112"},
      {"__name__":"MatchSeries","db":"TenMinute","Park":"3","TurbineType":"V112"},
      {"__name__":"MatchSeries","db":"TenMinute","Park":"4","TurbineType":"V112"}
    ]
  }
}
18 app/victoria-metrics/testdata/prometheus/query-range.json vendored Normal file
@@ -0,0 +1,18 @@
{
  "name": "query range",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"bar\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}, {\"value\":2,\"timestamp\":\"{TIME_MS-120s}\"}, {\"value\":1,\"timestamp\":\"{TIME_MS-180s}\"}]}]"],
  "query": ["/api/v1/query_range?query=bar&step=30s&start={TIME_MS-180s}"],
  "result_query": {
    "status": "success",
    "data":{
      "resultType":"matrix",
      "result":[
        {
          "metric":{"__name__":"bar"},
          "values":[["{TIME_S-180s}", "1"],["{TIME_S-150s}", "1"],["{TIME_S-120s}", "2"],["{TIME_S-90s}", "2"], ["{TIME_S-60s}", "1"], ["{TIME_S-30s}", "1"], ["{TIME_S}", "1"]]
        }
      ]
    }
  }
}
9 app/victoria-metrics/testdata/prometheus/with_extra_labels.json vendored Normal file
@@ -0,0 +1,9 @@
{
  "name": "basic_insertion_with_extra_labels",
  "insert_query": "?extra_label=job=prom-test&extra_label=baz=bar",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.foobar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"prometheus.foobar","baz":"bar","job": "prom-test"},"values":[100000], "timestamps": ["{TIME_MS}"]}
  ]
}
8 app/victoria-metrics/testdata/prometheus/with_request_extra_filter.json vendored Normal file
@@ -0,0 +1,8 @@
{
  "name": "basic_select_with_extra_labels",
  "data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.tenant.limits\"},{\"name\":\"baz\",\"value\":\"qux\"},{\"name\":\"tenant\",\"value\":\"dev\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.up\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
  "query": ["/api/v1/export?match={__name__!=''}&extra_label=tenant=dev"],
  "result_metrics": [
    {"metric":{"__name__":"prometheus.tenant.limits","baz":"qux","tenant": "dev"},"values":[100000], "timestamps": ["{TIME_MS}"]}
  ]
}
@@ -1 +0,0 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).
@@ -1 +0,0 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).
20 app/vlinsert/elasticsearch/bulk_response.qtpl Normal file
@@ -0,0 +1,20 @@
{% stripspace %}

{% func BulkResponse(n int, tookMs int64) %}
{
	"took":{%dl tookMs %},
	"errors":false,
	"items":[
		{% for i := 0; i < n; i++ %}
		{
			"create":{
				"status":201
			}
		}
		{% if i+1 < n %},{% endif %}
		{% endfor %}
	]
}
{% endfunc %}

{% endstripspace %}
69 app/vlinsert/elasticsearch/bulk_response.qtpl.go Normal file
@@ -0,0 +1,69 @@
// Code generated by qtc from "bulk_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
package elasticsearch

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
func StreamBulkResponse(qw422016 *qt422016.Writer, n int, tookMs int64) {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
	qw422016.N().S(`{"took":`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:5
	qw422016.N().DL(tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:5
	qw422016.N().S(`,"errors":false,"items":[`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:8
	for i := 0; i < n; i++ {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:8
		qw422016.N().S(`{"create":{"status":201}}`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
		if i+1 < n {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
			qw422016.N().S(`,`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
		}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:15
	}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:15
	qw422016.N().S(`]}`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}

//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
func WriteBulkResponse(qq422016 qtio422016.Writer, n int, tookMs int64) {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	StreamBulkResponse(qw422016, n, tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qt422016.ReleaseWriter(qw422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}

//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
func BulkResponse(n int, tookMs int64) string {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	WriteBulkResponse(qb422016, n, tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qs422016 := string(qb422016.B)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	return qs422016
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}
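For a quick sanity check of the generated code, calling the string-returning entry point shows the exact payload Bulk API clients such as Logstash receive; the snippet below is a throwaway harness (not part of the diff) and assumes it runs inside the repository module so the app package is importable:

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
)

func main() {
	// Two acknowledged items, 5ms reported. Since the template is wrapped in
	// {% stripspace %}, the output is the single line:
	// {"took":5,"errors":false,"items":[{"create":{"status":201}},{"create":{"status":201}}]}
	fmt.Println(elasticsearch.BulkResponse(2, 5))
}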
280 app/vlinsert/elasticsearch/elasticsearch.go Normal file
@@ -0,0 +1,280 @@
package elasticsearch

import (
	"bufio"
	"errors"
	"flag"
	"fmt"
	"io"
	"math"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)

var (
	elasticsearchVersion = flag.String("elasticsearch.version", "8.9.0", "Elasticsearch version to report to client")
)

// RequestHandler processes Elasticsearch insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
	w.Header().Add("Content-Type", "application/json")
	// This header is needed for Logstash
	w.Header().Set("X-Elastic-Product", "Elasticsearch")

	if strings.HasPrefix(path, "/_ilm/policy") {
		// Return fake response for Elasticsearch ilm request.
		fmt.Fprintf(w, `{}`)
		return true
	}
	if strings.HasPrefix(path, "/_index_template") {
		// Return fake response for Elasticsearch index template request.
		fmt.Fprintf(w, `{}`)
		return true
	}
	if strings.HasPrefix(path, "/_ingest") {
		// Return fake response for Elasticsearch ingest pipeline request.
		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/put-pipeline-api.html
		fmt.Fprintf(w, `{}`)
		return true
	}
	if strings.HasPrefix(path, "/_nodes") {
		// Return fake response for Elasticsearch nodes discovery request.
		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/cluster.html
		fmt.Fprintf(w, `{}`)
		return true
	}
	if strings.HasPrefix(path, "/logstash") || strings.HasPrefix(path, "/_logstash") {
		// Return fake response for Logstash APIs requests.
		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/logstash-apis.html
		fmt.Fprintf(w, `{}`)
		return true
	}
	switch path {
	case "/":
		switch r.Method {
		case http.MethodGet:
			// Return fake response for Elasticsearch ping request.
			// See the latest available version for Elasticsearch at https://github.com/elastic/elasticsearch/releases
			fmt.Fprintf(w, `{
	"version": {
		"number": %q
	}
}`, *elasticsearchVersion)
		case http.MethodHead:
			// Return empty response for Logstash ping request.
		}

		return true
	case "/_license":
		// Return fake response for Elasticsearch license request.
		fmt.Fprintf(w, `{
	"license": {
		"uid": "cbff45e7-c553-41f7-ae4f-9205eabd80xx",
		"type": "oss",
		"status": "active",
		"expiry_date_in_millis" : 4000000000000
	}
}`)
		return true
	case "/_bulk":
		startTime := time.Now()
		bulkRequestsTotal.Inc()

		cp, err := insertutils.GetCommonParams(r)
		if err != nil {
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		if err := vlstorage.CanWriteData(); err != nil {
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		lmp := cp.NewLogMessageProcessor()
		isGzip := r.Header.Get("Content-Encoding") == "gzip"
		n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgField, lmp)
		lmp.MustClose()
		if err != nil {
			logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
			return true
		}

		tookMs := time.Since(startTime).Milliseconds()
		bw := bufferedwriter.Get(w)
		defer bufferedwriter.Put(bw)
		WriteBulkResponse(bw, n, tookMs)
		_ = bw.Flush()

		// update bulkRequestDuration only for successfully parsed requests
		// There is no need in updating bulkRequestDuration for request errors,
		// since their timings are usually much smaller than the timing for successful request parsing.
		bulkRequestDuration.UpdateDuration(startTime)

		return true
	default:
		return false
	}
}

var (
	bulkRequestsTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
	rowsIngestedTotal   = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)
	bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
)

func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string, lmp insertutils.LogMessageProcessor) (int, error) {
	// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html

	if isGzip {
		zr, err := common.GetGzipReader(r)
		if err != nil {
			return 0, fmt.Errorf("cannot read gzipped _bulk request: %w", err)
		}
		defer common.PutGzipReader(zr)
		r = zr
	}

	wcr := writeconcurrencylimiter.GetReader(r)
	defer writeconcurrencylimiter.PutReader(wcr)

	lb := lineBufferPool.Get()
	defer lineBufferPool.Put(lb)

	lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
	sc := bufio.NewScanner(wcr)
	sc.Buffer(lb.B, len(lb.B))

	n := 0
	nCheckpoint := 0
	for {
		ok, err := readBulkLine(sc, timeField, msgField, lmp)
		wcr.DecConcurrency()
		if err != nil || !ok {
			rowsIngestedTotal.Add(n - nCheckpoint)
			return n, err
		}
		n++
		// Flush the ingested-rows counter every 1000 rows, so the metric
		// keeps moving while a large bulk request is still being read.
		if batchSize := n - nCheckpoint; batchSize >= 1000 {
			rowsIngestedTotal.Add(batchSize)
			nCheckpoint = n
		}
	}
}

var lineBufferPool bytesutil.ByteBufferPool

func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils.LogMessageProcessor) (bool, error) {
	var line []byte

	// Read the command, must be "create" or "index"
	for len(line) == 0 {
		if !sc.Scan() {
			if err := sc.Err(); err != nil {
				if errors.Is(err, bufio.ErrTooLong) {
					return false, fmt.Errorf(`cannot read "create" or "index" command, since its size exceeds -insert.maxLineSizeBytes=%d`,
						insertutils.MaxLineSizeBytes.IntN())
				}
				return false, err
			}
			return false, nil
		}
		line = sc.Bytes()
	}
	lineStr := bytesutil.ToUnsafeString(line)
	if !strings.Contains(lineStr, `"create"`) && !strings.Contains(lineStr, `"index"`) {
		return false, fmt.Errorf(`unexpected command %q; expecting "create" or "index"`, line)
	}

	// Decode log message
	if !sc.Scan() {
		if err := sc.Err(); err != nil {
			if errors.Is(err, bufio.ErrTooLong) {
				return false, fmt.Errorf("cannot read log message, since its size exceeds -insert.maxLineSizeBytes=%d", insertutils.MaxLineSizeBytes.IntN())
			}
			return false, err
		}
		return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
	}
	line = sc.Bytes()
	p := logstorage.GetJSONParser()
	if err := p.ParseLogMessage(line); err != nil {
		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
	}

	ts, err := extractTimestampFromFields(timeField, p.Fields)
	if err != nil {
		return false, fmt.Errorf("cannot parse timestamp: %w", err)
	}
	if ts == 0 {
		ts = time.Now().UnixNano()
	}
	logstorage.RenameField(p.Fields, msgField, "_msg")
	lmp.AddRow(ts, p.Fields)
	logstorage.PutJSONParser(p)

	return true, nil
}

func extractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
	for i := range fields {
		f := &fields[i]
		if f.Name != timeField {
			continue
		}
		timestamp, err := parseElasticsearchTimestamp(f.Value)
		if err != nil {
			return 0, err
		}
		f.Value = ""
		return timestamp, nil
	}
	return 0, nil
}

func parseElasticsearchTimestamp(s string) (int64, error) {
	if s == "0" || s == "" {
		// Special case - zero or empty timestamp must be substituted
		// with the current time by the caller.
		return 0, nil
	}
	if len(s) < len("YYYY-MM-DD") || s[len("YYYY")] != '-' {
		// Try parsing timestamp in milliseconds
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("cannot parse timestamp in milliseconds from %q: %w", s, err)
		}
		if n > int64(math.MaxInt64)/1e6 {
			return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
		}
		if n < int64(math.MinInt64)/1e6 {
			return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
		}
		n *= 1e6
		return n, nil
	}
	if len(s) == len("YYYY-MM-DD") {
		t, err := time.Parse("2006-01-02", s)
		if err != nil {
			return 0, fmt.Errorf("cannot parse date %q: %w", s, err)
		}
		return t.UnixNano(), nil
	}
	nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
	if !ok {
		return 0, fmt.Errorf("cannot parse timestamp %q", s)
	}
	return nsecs, nil
}
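parseElasticsearchTimestamp accepts three shapes: integer milliseconds, a bare YYYY-MM-DD date, and RFC3339. The hypothetical in-package check below (not part of the diff) pins down the expected conversions, which follow directly from the code above:

// checkTimestampShapes illustrates the three accepted timestamp forms.
func checkTimestampShapes() {
	for _, tc := range []struct {
		in   string
		want int64
	}{
		{"1686026891735", 1686026891735000000},            // integer milliseconds, scaled to nanoseconds
		{"2023-06-06", 1686009600000000000},               // bare date, midnight UTC in nanoseconds
		{"2023-06-06T04:48:11.735Z", 1686026891735000000}, // RFC3339Nano in nanoseconds
	} {
		ns, err := parseElasticsearchTimestamp(tc.in)
		if err != nil || ns != tc.want {
			panic(fmt.Sprintf("parseElasticsearchTimestamp(%q) = %d, %v; want %d", tc.in, ns, err, tc.want))
		}
	}
}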
103 app/vlinsert/elasticsearch/elasticsearch_test.go Normal file
@@ -0,0 +1,103 @@
package elasticsearch

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestReadBulkRequest_Failure(t *testing.T) {
	f := func(data string) {
		t.Helper()

		tlp := &insertutils.TestLogMessageProcessor{}
		r := bytes.NewBufferString(data)
		rows, err := readBulkRequest(r, false, "_time", "_msg", tlp)
		if err == nil {
			t.Fatalf("expecting non-empty error")
		}
		if rows != 0 {
			t.Fatalf("unexpected non-zero rows=%d", rows)
		}
	}
	f("foobar")
	f(`{}`)
	f(`{"create":{}}`)
	f(`{"creat":{}}
{}`)
	f(`{"create":{}}
foobar`)
}

func TestReadBulkRequest_Success(t *testing.T) {
	f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		tlp := &insertutils.TestLogMessageProcessor{}

		// Read the request without compression
		r := bytes.NewBufferString(data)
		rows, err := readBulkRequest(r, false, timeField, msgField, tlp)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if rows != rowsExpected {
			t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
		}
		if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
			t.Fatal(err)
		}

		// Read the request with compression
		tlp = &insertutils.TestLogMessageProcessor{}
		compressedData := compressData(data)
		r = bytes.NewBufferString(compressedData)
		rows, err = readBulkRequest(r, true, timeField, msgField, tlp)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if rows != rowsExpected {
			t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
		}
		if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
			t.Fatalf("verification failure after compression: %s", err)
		}
	}

	// Verify an empty data
	f("", "_time", "_msg", 0, nil, "")
	f("\n", "_time", "_msg", 0, nil, "")
	f("\n\n", "_time", "_msg", 0, nil, "")

	// Verify non-empty data
	data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"index":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
	timeField := "@timestamp"
	msgField := "message"
	rowsExpected := 3
	timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
	resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"@timestamp":"","_msg":"baz"}
{"_msg":"xyz","@timestamp":"","x":"y"}`
	f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
}

func compressData(s string) string {
	var bb bytes.Buffer
	zw := gzip.NewWriter(&bb)
	if _, err := zw.Write([]byte(s)); err != nil {
		panic(fmt.Errorf("unexpected error when compressing data: %w", err))
	}
	if err := zw.Close(); err != nil {
		panic(fmt.Errorf("unexpected error when closing gzip writer: %w", err))
	}
	return bb.String()
}
50 app/vlinsert/elasticsearch/elasticsearch_timing_test.go Normal file
@@ -0,0 +1,50 @@
package elasticsearch

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func BenchmarkReadBulkRequest(b *testing.B) {
	b.Run("gzip:off", func(b *testing.B) {
		benchmarkReadBulkRequest(b, false)
	})
	b.Run("gzip:on", func(b *testing.B) {
		benchmarkReadBulkRequest(b, true)
	})
}

func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
	data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
	if isGzip {
		data = compressData(data)
	}
	dataBytes := bytesutil.ToUnsafeBytes(data)

	timeField := "@timestamp"
	msgField := "message"
	blp := &insertutils.BenchmarkLogMessageProcessor{}

	b.ReportAllocs()
	b.SetBytes(int64(len(data)))
	b.RunParallel(func(pb *testing.PB) {
		r := &bytes.Reader{}
		for pb.Next() {
			r.Reset(dataBytes)
			_, err := readBulkRequest(r, isGzip, timeField, msgField, blp)
			if err != nil {
				panic(fmt.Errorf("unexpected error: %w", err))
			}
		}
	})
}
200 app/vlinsert/insertutils/common_params.go Normal file
@@ -0,0 +1,200 @@
package insertutils

import (
	"net/http"
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)

// CommonParams contains common HTTP parameters used by log ingestion APIs.
//
// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
type CommonParams struct {
	TenantID     logstorage.TenantID
	TimeField    string
	MsgField     string
	StreamFields []string
	IgnoreFields []string

	Debug           bool
	DebugRequestURI string
	DebugRemoteAddr string
}

// GetCommonParams returns CommonParams from r.
func GetCommonParams(r *http.Request) (*CommonParams, error) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		return nil, err
	}

	// Extract time field name from _time_field query arg
	var timeField = "_time"
	if tf := r.FormValue("_time_field"); tf != "" {
		timeField = tf
	}

	// Extract message field name from _msg_field query arg
	var msgField = ""
	if msgf := r.FormValue("_msg_field"); msgf != "" {
		msgField = msgf
	}

	streamFields := httputils.GetArray(r, "_stream_fields")
	ignoreFields := httputils.GetArray(r, "ignore_fields")

	debug := httputils.GetBool(r, "debug")
	debugRequestURI := ""
	debugRemoteAddr := ""
	if debug {
		debugRequestURI = httpserver.GetRequestURI(r)
		debugRemoteAddr = httpserver.GetQuotedRemoteAddr(r)
	}

	cp := &CommonParams{
		TenantID:        tenantID,
		TimeField:       timeField,
		MsgField:        msgField,
		StreamFields:    streamFields,
		IgnoreFields:    ignoreFields,
		Debug:           debug,
		DebugRequestURI: debugRequestURI,
		DebugRemoteAddr: debugRemoteAddr,
	}
	return cp, nil
}

// GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
func GetCommonParamsForSyslog(tenantID logstorage.TenantID) *CommonParams {
	// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
	cp := &CommonParams{
		TenantID:  tenantID,
		TimeField: "timestamp",
		MsgField:  "message",
		StreamFields: []string{
			"hostname",
			"app_name",
			"proc_id",
		},
	}

	return cp
}

// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
	// AddRow must add row to the LogMessageProcessor with the given timestamp and the given fields.
	//
	// The LogMessageProcessor implementation cannot hold references to fields, since the caller can re-use them.
	AddRow(timestamp int64, fields []logstorage.Field)

	// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
	MustClose()
}

type logMessageProcessor struct {
	mu            sync.Mutex
	wg            sync.WaitGroup
	stopCh        chan struct{}
	lastFlushTime time.Time

	cp *CommonParams
	lr *logstorage.LogRows
}

func (lmp *logMessageProcessor) initPeriodicFlush() {
	lmp.lastFlushTime = time.Now()

	lmp.wg.Add(1)
	go func() {
		defer lmp.wg.Done()

		d := timeutil.AddJitterToDuration(time.Second)
		ticker := time.NewTicker(d)
		defer ticker.Stop()

		for {
			select {
			case <-lmp.stopCh:
				return
			case <-ticker.C:
				lmp.mu.Lock()
				if time.Since(lmp.lastFlushTime) >= d {
					lmp.flushLocked()
				}
				lmp.mu.Unlock()
			}
		}
	}()
}

// AddRow adds new log message to lmp with the given timestamp and fields.
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
	lmp.mu.Lock()
	defer lmp.mu.Unlock()

	if len(fields) > *MaxFieldsPerLine {
		rf := logstorage.RowFormatter(fields)
		logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
		rowsDroppedTotalTooManyFields.Inc()
		return
	}

	lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields)
	if lmp.cp.Debug {
		s := lmp.lr.GetRowString(0)
		lmp.lr.ResetKeepSettings()
		logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` query arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
		rowsDroppedTotalDebug.Inc()
		return
	}
	if lmp.lr.NeedFlush() {
		lmp.flushLocked()
	}
}

// flushLocked must be called under locked lmp.mu.
func (lmp *logMessageProcessor) flushLocked() {
	lmp.lastFlushTime = time.Now()
	vlstorage.MustAddRows(lmp.lr)
	lmp.lr.ResetKeepSettings()
}

// MustClose flushes the remaining data to the underlying storage and closes lmp.
func (lmp *logMessageProcessor) MustClose() {
	close(lmp.stopCh)
	lmp.wg.Wait()

	lmp.flushLocked()
	logstorage.PutLogRows(lmp.lr)
	lmp.lr = nil
}

// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
func (cp *CommonParams) NewLogMessageProcessor() LogMessageProcessor {
	lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
	lmp := &logMessageProcessor{
		cp: cp,
		lr: lr,

		stopCh: make(chan struct{}),
	}
	lmp.initPeriodicFlush()

	return lmp
}

var rowsDroppedTotalDebug = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
var rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)
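The mapping from ingestion query args to CommonParams is easy to see with a throwaway request. A minimal sketch, assuming the default tenant (no AccountID/ProjectID headers) and the comma-separated _stream_fields form described in the VictoriaLogs data-ingestion docs:

package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func main() {
	r := httptest.NewRequest("POST",
		"/insert/jsonline?_time_field=ts&_msg_field=log&_stream_fields=host,app&debug=1", nil)
	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		panic(err)
	}
	// Expected to print roughly:
	// TimeField=ts MsgField=log StreamFields=[host app] Debug=true
	fmt.Printf("TimeField=%s MsgField=%s StreamFields=%v Debug=%v\n",
		cp.TimeField, cp.MsgField, cp.StreamFields, cp.Debug)
}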
15 app/vlinsert/insertutils/flags.go Normal file
@@ -0,0 +1,15 @@
package insertutils

import (
	"flag"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
	// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
	MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")

	// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
	MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers")
)
53 app/vlinsert/insertutils/testutils.go Normal file
@@ -0,0 +1,53 @@
package insertutils

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// TestLogMessageProcessor implements LogMessageProcessor for testing.
type TestLogMessageProcessor struct {
	timestamps []int64
	rows       []string
}

// AddRow adds row with the given timestamp and fields to tlp
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
	tlp.timestamps = append(tlp.timestamps, timestamp)
	tlp.rows = append(tlp.rows, string(logstorage.MarshalFieldsToJSON(nil, fields)))
}

// MustClose closes tlp.
func (tlp *TestLogMessageProcessor) MustClose() {
}

// Verify verifies the number of rows, timestamps and results after AddRow calls.
func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected []int64, resultExpected string) error {
	result := strings.Join(tlp.rows, "\n")
	if len(tlp.rows) != rowsExpected {
		return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), rowsExpected, result, resultExpected)
	}

	if !reflect.DeepEqual(tlp.timestamps, timestampsExpected) {
		return fmt.Errorf("unexpected timestamps;\ngot\n%d\nwant\n%d", tlp.timestamps, timestampsExpected)
	}
	if result != resultExpected {
		return fmt.Errorf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
	}

	return nil
}

// BenchmarkLogMessageProcessor implements LogMessageProcessor for benchmarks.
type BenchmarkLogMessageProcessor struct{}

// AddRow implements LogMessageProcessor interface.
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _ []logstorage.Field) {
}

// MustClose implements LogMessageProcessor interface.
func (blp *BenchmarkLogMessageProcessor) MustClose() {
}
33 app/vlinsert/insertutils/timestamp.go Normal file
@@ -0,0 +1,33 @@
package insertutils

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// ExtractTimestampRFC3339NanoFromFields extracts RFC3339 timestamp in nanoseconds from the field with the name timeField at fields.
//
// The value for the timeField is set to empty string after returning from the function,
// so it could be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with timeField name or if the timeField value is empty.
func ExtractTimestampRFC3339NanoFromFields(timeField string, fields []logstorage.Field) (int64, error) {
	for i := range fields {
		f := &fields[i]
		if f.Name != timeField {
			continue
		}
		if f.Value == "" || f.Value == "0" {
			return time.Now().UnixNano(), nil
		}
		nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(f.Value)
		if !ok {
			return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp from %s=%q", timeField, f.Value)
		}
		f.Value = ""
		return nsecs, nil
	}
	return time.Now().UnixNano(), nil
}
75 app/vlinsert/insertutils/timestamp_test.go Normal file
@@ -0,0 +1,75 @@
package insertutils

import (
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
	f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
		t.Helper()

		nsecs, err := ExtractTimestampRFC3339NanoFromFields(timeField, fields)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if nsecs != nsecsExpected {
			t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, nsecsExpected)
		}

		for _, f := range fields {
			if f.Name == timeField {
				if f.Value != "" {
					t.Fatalf("unexpected value for field %s; got %q; want %q", timeField, f.Value, "")
				}
			}
		}
	}

	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20Z"},
	}, 1718753840000000000)

	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
	}, 1718725040000000000)

	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20.123-05:30"},
	}, 1718773640123000000)

	f("time", []logstorage.Field{
		{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
		{Name: "foo", Value: "bar"},
	}, 1718773640123456789)
}

func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
	f := func(s string) {
		t.Helper()

		fields := []logstorage.Field{
			{Name: "time", Value: s},
		}
		nsecs, err := ExtractTimestampRFC3339NanoFromFields("time", fields)
		if err == nil {
			t.Fatalf("expecting non-nil error")
		}
		if nsecs != 0 {
			t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, 0)
		}
	}

	f("foobar")

	// no Z at the end
	f("2024-06-18T23:37:20")

	// incomplete time
	f("2024-06-18")
	f("2024-06-18T23:37")
}
135 app/vlinsert/jsonline/jsonline.go Normal file
@@ -0,0 +1,135 @@
package jsonline

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

// RequestHandler processes jsonline insert requests
func RequestHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	w.Header().Add("Content-Type", "application/json")

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	requestsTotal.Inc()

	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if err := vlstorage.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	reader := r.Body
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(reader)
		if err != nil {
			logger.Errorf("cannot read gzipped jsonline request: %s", err)
			return
		}
		defer common.PutGzipReader(zr)
		reader = zr
	}

	lmp := cp.NewLogMessageProcessor()
	err = processStreamInternal(reader, cp.TimeField, cp.MsgField, lmp)
	lmp.MustClose()

	if err != nil {
		logger.Errorf("jsonline: %s", err)
	} else {
		// update requestDuration only for successfully parsed requests.
		// There is no need in updating requestDuration for request errors,
		// since their timings are usually much smaller than the timing for successful request parsing.
		requestDuration.UpdateDuration(startTime)
	}
}

func processStreamInternal(r io.Reader, timeField, msgField string, lmp insertutils.LogMessageProcessor) error {
	wcr := writeconcurrencylimiter.GetReader(r)
	defer writeconcurrencylimiter.PutReader(wcr)

	lb := lineBufferPool.Get()
	defer lineBufferPool.Put(lb)

	lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
	sc := bufio.NewScanner(wcr)
	sc.Buffer(lb.B, len(lb.B))

	n := 0
	for {
		ok, err := readLine(sc, timeField, msgField, lmp)
		wcr.DecConcurrency()
		if err != nil {
			errorsTotal.Inc()
			return fmt.Errorf("cannot read line #%d in /jsonline request: %s", n, err)
		}
		if !ok {
			return nil
		}
		n++
		rowsIngestedTotal.Inc()
	}
}

func readLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils.LogMessageProcessor) (bool, error) {
	var line []byte
	for len(line) == 0 {
		if !sc.Scan() {
			if err := sc.Err(); err != nil {
				if errors.Is(err, bufio.ErrTooLong) {
					return false, fmt.Errorf(`cannot read json line, since its size exceeds -insert.maxLineSizeBytes=%d`, insertutils.MaxLineSizeBytes.IntN())
				}
				return false, err
			}
			return false, nil
		}
		line = sc.Bytes()
	}

	p := logstorage.GetJSONParser()
	if err := p.ParseLogMessage(line); err != nil {
		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
	}
	ts, err := insertutils.ExtractTimestampRFC3339NanoFromFields(timeField, p.Fields)
	if err != nil {
		return false, fmt.Errorf("cannot get timestamp: %w", err)
	}
	logstorage.RenameField(p.Fields, msgField, "_msg")
	lmp.AddRow(ts, p.Fields)
	logstorage.PutJSONParser(p)

	return true, nil
}

var lineBufferPool bytesutil.ByteBufferPool

var (
	rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)

	requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
	errorsTotal   = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)

	requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
)
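End to end, the handler above is exercised by POSTing newline-delimited JSON to the /insert/jsonline path named in the metrics above. A minimal client sketch, assuming a VictoriaLogs instance listening on the default :9428 port; the content type is illustrative, since the handler only checks the HTTP method:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader(
		`{"@timestamp":"2023-06-06T04:48:11.735Z","host":"web-1","message":"foobar"}` + "\n")
	resp, err := http.Post(
		"http://localhost:9428/insert/jsonline?_time_field=@timestamp&_msg_field=message&_stream_fields=host",
		"application/x-ndjson", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// The handler writes no body on success, so a plain 200 OK is expected.
	fmt.Println(resp.Status)
}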
55 app/vlinsert/jsonline/jsonline_test.go Normal file
@@ -0,0 +1,55 @@
package jsonline

import (
	"bytes"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestProcessStreamInternal_Success(t *testing.T) {
	f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		tlp := &insertutils.TestLogMessageProcessor{}
		r := bytes.NewBufferString(data)
		if err := processStreamInternal(r, timeField, msgField, tlp); err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
			t.Fatal(err)
		}
	}

	data := `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
	timeField := "@timestamp"
	msgField := "message"
	rowsExpected := 3
	timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
	resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"@timestamp":"","_msg":"baz"}
{"_msg":"xyz","@timestamp":"","x":"y"}`
	f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
}

func TestProcessStreamInternal_Failure(t *testing.T) {
	f := func(data string) {
		t.Helper()

		tlp := &insertutils.TestLogMessageProcessor{}
		r := bytes.NewBufferString(data)
		if err := processStreamInternal(r, "time", "", tlp); err == nil {
			t.Fatalf("expecting non-nil error")
		}
	}

	// invalid json
	f("foobar")

	// invalid timestamp field
	f(`{"time":"foobar"}`)
}
59 app/vlinsert/loki/loki.go Normal file
@@ -0,0 +1,59 @@
package loki

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// RequestHandler processes Loki insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
	switch path {
	case "/api/v1/push":
		handleInsert(r, w)
		return true
	case "/ready":
		// See https://grafana.com/docs/loki/latest/api/#identify-ready-loki-instance
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ready"))
		return true
	default:
		return false
	}
}

// See https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
func handleInsert(r *http.Request, w http.ResponseWriter) {
	contentType := r.Header.Get("Content-Type")
	switch contentType {
	case "application/json":
		handleJSON(r, w)
	default:
		// Protobuf request body should be handled by default according to https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
		handleProtobuf(r, w)
	}
}

func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		return nil, err
	}

	// If the parsed tenant is (0,0), it is likely the default tenant;
	// try parsing the tenant from the Loki multitenancy header instead.
	if cp.TenantID.AccountID == 0 && cp.TenantID.ProjectID == 0 {
		org := r.Header.Get("X-Scope-OrgID")
		if org != "" {
			tenantID, err := logstorage.ParseTenantID(org)
			if err != nil {
				return nil, err
			}
			cp.TenantID = tenantID
		}
	}

	return cp, nil
}
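Since getCommonParams falls back to the Loki multitenancy header only when the tenant parsed from the request is the default (0,0), the behaviour can be probed from an in-package test. The sketch below is hypothetical (it is not part of the diff) and assumes the accountID:projectID form accepted by logstorage.ParseTenantID:

package loki

import (
	"net/http/httptest"
	"testing"
)

func TestTenantFromOrgIDHeader(t *testing.T) {
	r := httptest.NewRequest("POST", "/insert/loki/api/v1/push", nil)
	r.Header.Set("X-Scope-OrgID", "12:34")
	cp, err := getCommonParams(r)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	// The header should win because no tenant query args were supplied.
	if cp.TenantID.AccountID != 12 || cp.TenantID.ProjectID != 34 {
		t.Fatalf("unexpected tenant: %v", cp.TenantID)
	}
}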
202
app/vlinsert/loki/loki_json.go
Normal file
202
app/vlinsert/loki/loki_json.go
Normal file
@@ -0,0 +1,202 @@
|
||||
package loki

import (
	"fmt"
	"io"
	"math"
	"net/http"
	"strconv"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
	"github.com/valyala/fastjson"
)

var parserPool fastjson.ParserPool

func handleJSON(r *http.Request, w http.ResponseWriter) {
	startTime := time.Now()
	requestsJSONTotal.Inc()
	reader := r.Body
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(reader)
		if err != nil {
			httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
			return
		}
		defer common.PutGzipReader(zr)
		reader = zr
	}

	wcr := writeconcurrencylimiter.GetReader(reader)
	data, err := io.ReadAll(wcr)
	writeconcurrencylimiter.PutReader(wcr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot read request body: %s", err)
		return
	}

	cp, err := getCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}
	if err := vlstorage.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	lmp := cp.NewLogMessageProcessor()
	n, err := parseJSONRequest(data, lmp)
	lmp.MustClose()
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse Loki json request: %s", err)
		return
	}

	rowsIngestedJSONTotal.Add(n)

	// Update requestJSONDuration only for successfully parsed requests.
	// There is no need to update requestJSONDuration for request errors,
	// since their timings are usually much smaller than the timing for successful request parsing.
	requestJSONDuration.UpdateDuration(startTime)
}

var (
	requestsJSONTotal     = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
	rowsIngestedJSONTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="json"}`)
	requestJSONDuration   = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
)

func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
	p := parserPool.Get()
	defer parserPool.Put(p)
	v, err := p.ParseBytes(data)
	if err != nil {
		return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
	}

	streamsV := v.Get("streams")
	if streamsV == nil {
		return 0, fmt.Errorf("missing `streams` item in the parsed JSON: %q", v)
	}
	streams, err := streamsV.Array()
	if err != nil {
		return 0, fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
	}

	currentTimestamp := time.Now().UnixNano()
	var commonFields []logstorage.Field
	rowsIngested := 0
	for _, stream := range streams {
		// populate common labels from `stream` dict
		commonFields = commonFields[:0]
		labelsV := stream.Get("stream")
		var labels *fastjson.Object
		if labelsV != nil {
			o, err := labelsV.Object()
			if err != nil {
				return rowsIngested, fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
			}
			labels = o
		}
		// The callback below sets the outer err on the first invalid label value.
		labels.Visit(func(k []byte, v *fastjson.Value) {
			if err != nil {
				return
			}
			vStr, errLocal := v.StringBytes()
			if errLocal != nil {
				err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
				return
			}
			commonFields = append(commonFields, logstorage.Field{
				Name:  bytesutil.ToUnsafeString(k),
				Value: bytesutil.ToUnsafeString(vStr),
			})
		})
		if err != nil {
			return rowsIngested, fmt.Errorf("error when parsing `stream` object: %w", err)
		}

		// populate messages from `values` array
		linesV := stream.Get("values")
		if linesV == nil {
			return rowsIngested, fmt.Errorf("missing `values` item in the parsed JSON %q", stream)
		}
		lines, err := linesV.Array()
		if err != nil {
			return rowsIngested, fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
		}

		fields := commonFields
		for _, line := range lines {
			lineA, err := line.Array()
			if err != nil {
				return rowsIngested, fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
			}
			if len(lineA) != 2 {
				return rowsIngested, fmt.Errorf("unexpected number of values in `values` item array %q; got %d; want 2", line, len(lineA))
			}

			// parse timestamp
			timestamp, err := lineA[0].StringBytes()
			if err != nil {
				return rowsIngested, fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
			}
			ts, err := parseLokiTimestamp(bytesutil.ToUnsafeString(timestamp))
			if err != nil {
				return rowsIngested, fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
			}
			if ts == 0 {
				ts = currentTimestamp
			}

			// parse log message
			msg, err := lineA[1].StringBytes()
			if err != nil {
				return rowsIngested, fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
			}

			fields = append(fields[:len(commonFields)], logstorage.Field{
				Name:  "_msg",
				Value: bytesutil.ToUnsafeString(msg),
			})
			lmp.AddRow(ts, fields)
		}
		rowsIngested += len(lines)
	}

	return rowsIngested, nil
}

func parseLokiTimestamp(s string) (int64, error) {
	if s == "" {
		// Special case - an empty timestamp must be substituted with the current time by the caller.
		return 0, nil
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		// Fall back to parsing a floating-point value
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return 0, err
		}
		if f > math.MaxInt64 {
			return 0, fmt.Errorf("too big timestamp in nanoseconds: %v; mustn't exceed %v", f, int64(math.MaxInt64))
		}
		if f < math.MinInt64 {
			return 0, fmt.Errorf("too small timestamp in nanoseconds: %v; must be bigger or equal to %v", f, int64(math.MinInt64))
		}
		n = int64(f)
	}
	if n < 0 {
		return 0, fmt.Errorf("too small timestamp in nanoseconds: %d; must be bigger than 0", n)
	}
	return n, nil
}
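
A hypothetical in-package sketch (not part of this commit) of the timestamp forms parseLokiTimestamp accepts, tracing the behavior above:

package loki

import "fmt"

func exampleParseLokiTimestamp() {
	for _, s := range []string{
		"1577836800000000001", // integer nanoseconds
		"147.78369e9",         // floating-point nanoseconds, truncated to 147783690000
		"",                    // empty: returns 0 so the caller substitutes time.Now().UnixNano()
	} {
		n, err := parseLokiTimestamp(s)
		fmt.Println(n, err)
	}
}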

119
app/vlinsert/loki/loki_json_test.go
Normal file
@@ -0,0 +1,119 @@
package loki

import (
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestParseJSONRequest_Failure(t *testing.T) {
	f := func(s string) {
		t.Helper()

		tlp := &insertutils.TestLogMessageProcessor{}
		n, err := parseJSONRequest([]byte(s), tlp)
		if err == nil {
			t.Fatalf("expecting non-nil error")
		}
		if n != 0 {
			t.Fatalf("unexpected number of parsed lines: %d; want 0", n)
		}
	}
	f(``)

	// Invalid json
	f(`{}`)
	f(`[]`)
	f(`"foo"`)
	f(`123`)

	// Invalid type for `streams` item
	f(`{"streams":123}`)

	// Missing `values` item
	f(`{"streams":[{}]}`)

	// Invalid type for `values` item
	f(`{"streams":[{"values":"foobar"}]}`)

	// Invalid type for `stream` item
	f(`{"streams":[{"stream":[],"values":[]}]}`)

	// Invalid type for `values` individual item
	f(`{"streams":[{"values":[123]}]}`)

	// Invalid length of `values` individual item
	f(`{"streams":[{"values":[[]]}]}`)
	f(`{"streams":[{"values":[["123"]]}]}`)
	f(`{"streams":[{"values":[["123","456","789"]]}]}`)

	// Invalid type for timestamp inside `values` individual item
	f(`{"streams":[{"values":[[123,"456"]]}]}`)

	// Invalid type for log message
	f(`{"streams":[{"values":[["123",1234]]}]}`)
}

func TestParseJSONRequest_Success(t *testing.T) {
	f := func(s string, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		tlp := &insertutils.TestLogMessageProcessor{}

		n, err := parseJSONRequest([]byte(s), tlp)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
			t.Fatal(err)
		}
	}

	// Empty streams
	f(`{"streams":[]}`, nil, ``)
	f(`{"streams":[{"values":[]}]}`, nil, ``)
	f(`{"streams":[{"stream":{},"values":[]}]}`, nil, ``)
	f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, nil, ``)

	// Empty stream labels
	f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
	f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)

	// Non-empty stream labels
	f(`{"streams":[{"stream":{
"label1": "value1",
"label2": "value2"
},"values":[
["1577836800000000001", "foo bar"],
["1477836900005000002", "abc"],
["147.78369e9", "foobar"]
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
{"label1":"value1","label2":"value2","_msg":"abc"}
{"label1":"value1","label2":"value2","_msg":"foobar"}`)

	// Multiple streams
	f(`{
  "streams": [
    {
      "stream": {
        "foo": "bar",
        "a": "b"
      },
      "values": [
        ["1577836800000000001", "foo bar"],
        ["1577836900005000002", "abc"]
      ]
    },
    {
      "stream": {
        "x": "y"
      },
      "values": [
        ["1877836900005000002", "yx"]
      ]
    }
  ]
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
{"foo":"bar","a":"b","_msg":"abc"}
{"x":"y","_msg":"yx"}`)
}

79
app/vlinsert/loki/loki_json_timing_test.go
Normal file
@@ -0,0 +1,79 @@
package loki

import (
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func BenchmarkParseJSONRequest(b *testing.B) {
	for _, streams := range []int{5, 10} {
		for _, rows := range []int{100, 1000} {
			for _, labels := range []int{10, 50} {
				b.Run(fmt.Sprintf("streams_%d/rows_%d/labels_%d", streams, rows, labels), func(b *testing.B) {
					benchmarkParseJSONRequest(b, streams, rows, labels)
				})
			}
		}
	}
}

func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
	blp := &insertutils.BenchmarkLogMessageProcessor{}
	b.ReportAllocs()
	b.SetBytes(int64(streams * rows))
	b.RunParallel(func(pb *testing.PB) {
		data := getJSONBody(streams, rows, labels)
		for pb.Next() {
			_, err := parseJSONRequest(data, blp)
			if err != nil {
				panic(fmt.Errorf("unexpected error: %w", err))
			}
		}
	})
}

func getJSONBody(streams, rows, labels int) []byte {
	body := append([]byte{}, `{"streams":[`...)
	now := time.Now().UnixNano()
	valuePrefix := fmt.Sprintf(`["%d","value_`, now)

	for i := 0; i < streams; i++ {
		body = append(body, `{"stream":{`...)

		for j := 0; j < labels; j++ {
			body = append(body, `"label_`...)
			body = strconv.AppendInt(body, int64(j), 10)
			body = append(body, `":"value_`...)
			body = strconv.AppendInt(body, int64(j), 10)
			body = append(body, '"')
			if j < labels-1 {
				body = append(body, ',')
			}
		}
		body = append(body, `}, "values":[`...)

		for j := 0; j < rows; j++ {
			body = append(body, valuePrefix...)
			body = strconv.AppendInt(body, int64(j), 10)
			body = append(body, `"]`...)
			if j < rows-1 {
				body = append(body, ',')
			}
		}

		body = append(body, `]}`...)
		if i < streams-1 {
			body = append(body, ',')
		}
	}

	body = append(body, `]}`...)

	return body
}

218
app/vlinsert/loki/loki_protobuf.go
Normal file
@@ -0,0 +1,218 @@
package loki

import (
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
	"github.com/golang/snappy"
)

var (
	bytesBufPool bytesutil.ByteBufferPool
	pushReqsPool sync.Pool
)

func handleProtobuf(r *http.Request, w http.ResponseWriter) {
	startTime := time.Now()
	requestsProtobufTotal.Inc()
	wcr := writeconcurrencylimiter.GetReader(r.Body)
	data, err := io.ReadAll(wcr)
	writeconcurrencylimiter.PutReader(wcr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot read request body: %s", err)
		return
	}

	cp, err := getCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}
	if err := vlstorage.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	lmp := cp.NewLogMessageProcessor()
	n, err := parseProtobufRequest(data, lmp)
	lmp.MustClose()
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
		return
	}

	rowsIngestedProtobufTotal.Add(n)

	// Update requestProtobufDuration only for successfully parsed requests.
	// There is no need to update requestProtobufDuration for request errors,
	// since their timings are usually much smaller than the timing for successful request parsing.
	requestProtobufDuration.UpdateDuration(startTime)
}

var (
	requestsProtobufTotal     = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
	rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)
	requestProtobufDuration   = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
)

func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
	bb := bytesBufPool.Get()
	defer bytesBufPool.Put(bb)

	buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
	if err != nil {
		return 0, fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
	}
	bb.B = buf

	req := getPushRequest()
	defer putPushRequest(req)

	err = req.UnmarshalProtobuf(bb.B)
	if err != nil {
		return 0, fmt.Errorf("cannot parse request body: %w", err)
	}

	fields := getFields()
	defer putFields(fields)

	rowsIngested := 0
	streams := req.Streams
	currentTimestamp := time.Now().UnixNano()
	for i := range streams {
		stream := &streams[i]
		// stream.Labels contains labels for the stream.
		// Labels are the same for all entries in the stream.
		fields.fields, err = parsePromLabels(fields.fields[:0], stream.Labels)
		if err != nil {
			return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
		}
		commonFieldsLen := len(fields.fields)

		entries := stream.Entries
		for j := range entries {
			e := &entries[j]
			fields.fields = fields.fields[:commonFieldsLen]

			for _, lp := range e.StructuredMetadata {
				fields.fields = append(fields.fields, logstorage.Field{
					Name:  lp.Name,
					Value: lp.Value,
				})
			}

			fields.fields = append(fields.fields, logstorage.Field{
				Name:  "_msg",
				Value: e.Line,
			})

			ts := e.Timestamp.UnixNano()
			if ts == 0 {
				ts = currentTimestamp
			}

			lmp.AddRow(ts, fields.fields)
		}
		rowsIngested += len(stream.Entries)
	}
	return rowsIngested, nil
}
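
A hypothetical in-package sketch (not part of this commit) of the protobuf ingestion path: the request body must be a snappy-compressed PushRequest, mirroring the round-trip used in loki_protobuf_test.go later in this diff. insertutils.TestLogMessageProcessor is the test helper already used by this diff:

package loki

import (
	"fmt"
	"time"

	"github.com/golang/snappy"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func exampleParseProtobufRequest() {
	pr := &PushRequest{
		Streams: []Stream{{
			Labels:  `{job="demo"}`,
			Entries: []Entry{{Timestamp: time.Unix(0, 1577836800000000001), Line: "hello"}},
		}},
	}
	// Loki push bodies are snappy-compressed protobuf.
	body := snappy.Encode(nil, pr.MarshalProtobuf(nil))

	tlp := &insertutils.TestLogMessageProcessor{}
	n, err := parseProtobufRequest(body, tlp)
	fmt.Println(n, err) // 1 <nil>
}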
func getFields() *fields {
	v := fieldsPool.Get()
	if v == nil {
		return &fields{}
	}
	return v.(*fields)
}

func putFields(f *fields) {
	f.fields = f.fields[:0]
	fieldsPool.Put(f)
}

var fieldsPool sync.Pool

type fields struct {
	fields []logstorage.Field
}

// parsePromLabels parses log fields in Prometheus text exposition format from s, appends them to dst and returns the result.
//
// See test data of promtail for examples: https://github.com/grafana/loki/blob/a24ef7b206e0ca63ee74ca6ecb0a09b745cd2258/pkg/push/types_test.go
func parsePromLabels(dst []logstorage.Field, s string) ([]logstorage.Field, error) {
	// Make sure s is wrapped into `{...}`
	s = strings.TrimSpace(s)
	if len(s) < 2 {
		return nil, fmt.Errorf("too short string to parse: %q", s)
	}
	if s[0] != '{' {
		return nil, fmt.Errorf("missing `{` at the beginning of %q", s)
	}
	if s[len(s)-1] != '}' {
		return nil, fmt.Errorf("missing `}` at the end of %q", s)
	}
	s = s[1 : len(s)-1]

	for len(s) > 0 {
		// Parse label name
		n := strings.IndexByte(s, '=')
		if n < 0 {
			return nil, fmt.Errorf("cannot find `=` char for label value at %s", s)
		}
		name := s[:n]
		s = s[n+1:]

		// Parse label value
		qs, err := strconv.QuotedPrefix(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse value for label %q at %s: %w", name, s, err)
		}
		s = s[len(qs):]
		value, err := strconv.Unquote(qs)
		if err != nil {
			return nil, fmt.Errorf("cannot unquote value %q for label %q: %w", qs, name, err)
		}

		// Append the found field to dst.
		dst = append(dst, logstorage.Field{
			Name:  name,
			Value: value,
		})

		// Check whether there are other labels remaining
		if len(s) == 0 {
			break
		}
		if !strings.HasPrefix(s, ",") {
			return nil, fmt.Errorf("missing `,` char at %s", s)
		}
		s = s[1:]
		s = strings.TrimPrefix(s, " ")
	}
	return dst, nil
}
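
A hypothetical in-package sketch (not part of this commit) of the label format parsePromLabels accepts, including escaped quotes inside values:

package loki

import "fmt"

func examplePromLabels() {
	fields, err := parsePromLabels(nil, `{job="varlogs", host="a\"b"}`)
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Printf("%s=%s\n", f.Name, f.Value)
	}
	// Prints:
	// job=varlogs
	// host=a"b
}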
func getPushRequest() *PushRequest {
	v := pushReqsPool.Get()
	if v == nil {
		return &PushRequest{}
	}
	return v.(*PushRequest)
}

func putPushRequest(req *PushRequest) {
	req.reset()
	pushReqsPool.Put(req)
}

168
app/vlinsert/loki/loki_protobuf_test.go
Normal file
@@ -0,0 +1,168 @@
package loki

import (
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/golang/snappy"
)

type testLogMessageProcessor struct {
	pr PushRequest
}

func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
	msg := ""
	for _, f := range fields {
		if f.Name == "_msg" {
			msg = f.Value
		}
	}
	var a []string
	for _, f := range fields {
		if f.Name == "_msg" {
			continue
		}
		item := fmt.Sprintf("%s=%q", f.Name, f.Value)
		a = append(a, item)
	}
	labels := "{" + strings.Join(a, ", ") + "}"
	tlp.pr.Streams = append(tlp.pr.Streams, Stream{
		Labels: labels,
		Entries: []Entry{
			{
				Timestamp: time.Unix(0, timestamp),
				Line:      strings.Clone(msg),
			},
		},
	})
}

func (tlp *testLogMessageProcessor) MustClose() {
}

func TestParseProtobufRequest_Success(t *testing.T) {
	f := func(s string, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		// Build a PushRequest from the JSON test data via testLogMessageProcessor,
		// then round-trip it through the protobuf marshaler and parser.
		tlp := &testLogMessageProcessor{}
		n, err := parseJSONRequest([]byte(s), tlp)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if n != len(tlp.pr.Streams) {
			t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), n)
		}

		data := tlp.pr.MarshalProtobuf(nil)
		encodedData := snappy.Encode(nil, data)

		tlp2 := &insertutils.TestLogMessageProcessor{}
		n, err = parseProtobufRequest(encodedData, tlp2)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if err := tlp2.Verify(n, timestampsExpected, resultExpected); err != nil {
			t.Fatal(err)
		}
	}

	// Empty streams
	f(`{"streams":[]}`, nil, ``)
	f(`{"streams":[{"values":[]}]}`, nil, ``)
	f(`{"streams":[{"stream":{},"values":[]}]}`, nil, ``)
	f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, nil, ``)

	// Empty stream labels
	f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
	f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)

	// Non-empty stream labels
	f(`{"streams":[{"stream":{
"label1": "value1",
"label2": "value2"
},"values":[
["1577836800000000001", "foo bar"],
["1477836900005000002", "abc"],
["147.78369e9", "foobar"]
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
{"label1":"value1","label2":"value2","_msg":"abc"}
{"label1":"value1","label2":"value2","_msg":"foobar"}`)

	// Multiple streams
	f(`{
  "streams": [
    {
      "stream": {
        "foo": "bar",
        "a": "b"
      },
      "values": [
        ["1577836800000000001", "foo bar"],
        ["1577836900005000002", "abc"]
      ]
    },
    {
      "stream": {
        "x": "y"
      },
      "values": [
        ["1877836900005000002", "yx"]
      ]
    }
  ]
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
{"foo":"bar","a":"b","_msg":"abc"}
{"x":"y","_msg":"yx"}`)
}

func TestParsePromLabels_Success(t *testing.T) {
	f := func(s string) {
		t.Helper()
		fields, err := parsePromLabels(nil, s)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		var a []string
		for _, f := range fields {
			a = append(a, fmt.Sprintf("%s=%q", f.Name, f.Value))
		}
		result := "{" + strings.Join(a, ", ") + "}"
		if result != s {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, s)
		}
	}

	f("{}")
	f(`{foo="bar"}`)
	f(`{foo="bar", baz="x", y="z"}`)
	f(`{foo="ba\"r\\z\n", a="", b="\"\\"}`)
}

func TestParsePromLabels_Failure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		fields, err := parsePromLabels(nil, s)
		if err == nil {
			t.Fatalf("expecting non-nil error")
		}
		if len(fields) > 0 {
			t.Fatalf("unexpected non-empty fields: %s", fields)
		}
	}

	f("")
	f("{")
	f(`{foo}`)
	f(`{foo=bar}`)
	f(`{foo="bar}`)
	f(`{foo="ba\",r}`)
	f(`{foo="bar" baz="aa"}`)
	f(`foobar`)
	f(`foo{bar="baz"}`)
}

86
app/vlinsert/loki/loki_protobuf_timing_test.go
Normal file
@@ -0,0 +1,86 @@
package loki

import (
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/golang/snappy"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func BenchmarkParseProtobufRequest(b *testing.B) {
	for _, streams := range []int{5, 10} {
		for _, rows := range []int{100, 1000} {
			for _, labels := range []int{10, 50} {
				b.Run(fmt.Sprintf("streams_%d/rows_%d/labels_%d", streams, rows, labels), func(b *testing.B) {
					benchmarkParseProtobufRequest(b, streams, rows, labels)
				})
			}
		}
	}
}

func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
	blp := &insertutils.BenchmarkLogMessageProcessor{}
	b.ReportAllocs()
	b.SetBytes(int64(streams * rows))
	b.RunParallel(func(pb *testing.PB) {
		body := getProtobufBody(streams, rows, labels)
		for pb.Next() {
			_, err := parseProtobufRequest(body, blp)
			if err != nil {
				panic(fmt.Errorf("unexpected error: %w", err))
			}
		}
	})
}

func getProtobufBody(streamsCount, rowsCount, labelsCount int) []byte {
	var b []byte
	var entries []Entry
	streams := make([]Stream, streamsCount)
	for i := range streams {
		b = b[:0]
		b = append(b, '{')
		for j := 0; j < labelsCount; j++ {
			b = append(b, "label_"...)
			b = strconv.AppendInt(b, int64(j), 10)
			b = append(b, `="value_`...)
			b = strconv.AppendInt(b, int64(j), 10)
			b = append(b, '"')
			if j < labelsCount-1 {
				b = append(b, ',')
			}
		}
		b = append(b, '}')
		labels := string(b)

		var rowsBuf []byte
		entriesLen := len(entries)
		for j := 0; j < rowsCount; j++ {
			rowsBufLen := len(rowsBuf)
			rowsBuf = append(rowsBuf, "value_"...)
			rowsBuf = strconv.AppendInt(rowsBuf, int64(j), 10)
			entries = append(entries, Entry{
				Timestamp: time.Now(),
				Line:      bytesutil.ToUnsafeString(rowsBuf[rowsBufLen:]),
			})
		}

		st := &streams[i]
		st.Labels = labels
		st.Entries = entries[entriesLen:]
	}
	pr := PushRequest{
		Streams: streams,
	}

	body := pr.MarshalProtobuf(nil)
	encodedBody := snappy.Encode(nil, body)

	return encodedBody
}

302
app/vlinsert/loki/pb.go
Normal file
@@ -0,0 +1,302 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: push_request.proto
// source: https://raw.githubusercontent.com/grafana/loki/main/pkg/push/push_request.proto
// Licensed under the Apache License, Version 2.0 (the "License");
// https://github.com/grafana/loki/blob/main/pkg/push/LICENSE

package loki

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/easyproto"
)

var mp easyproto.MarshalerPool

// PushRequest represents Loki PushRequest
//
// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L14C1-L14C20
type PushRequest struct {
	Streams []Stream

	entriesBuf   []Entry
	labelPairBuf []LabelPair
}

func (pr *PushRequest) reset() {
	pr.Streams = pr.Streams[:0]

	pr.entriesBuf = pr.entriesBuf[:0]
	pr.labelPairBuf = pr.labelPairBuf[:0]
}

// UnmarshalProtobuf unmarshals pr from protobuf message at src.
//
// pr remains valid until src is modified.
func (pr *PushRequest) UnmarshalProtobuf(src []byte) error {
	pr.reset()
	var err error
	pr.entriesBuf, pr.labelPairBuf, err = pr.unmarshalProtobuf(pr.entriesBuf, pr.labelPairBuf, src)
	return err
}

// MarshalProtobuf marshals pr to protobuf message, appends it to dst and returns the result.
func (pr *PushRequest) MarshalProtobuf(dst []byte) []byte {
	m := mp.Get()
	pr.marshalProtobuf(m.MessageMarshaler())
	dst = m.Marshal(dst)
	mp.Put(m)
	return dst
}

func (pr *PushRequest) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, s := range pr.Streams {
		s.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (pr *PushRequest) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []LabelPair, src []byte) ([]Entry, []LabelPair, error) {
	// message PushRequest {
	//   repeated Stream streams = 1;
	// }
	var err error
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return entriesBuf, labelPairBuf, fmt.Errorf("cannot read next field in PushRequest: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return entriesBuf, labelPairBuf, fmt.Errorf("cannot read Stream data")
			}
			pr.Streams = append(pr.Streams, Stream{})
			s := &pr.Streams[len(pr.Streams)-1]
			entriesBuf, labelPairBuf, err = s.unmarshalProtobuf(entriesBuf, labelPairBuf, data)
			if err != nil {
				return entriesBuf, labelPairBuf, fmt.Errorf("cannot unmarshal Stream: %w", err)
			}
		}
	}
	return entriesBuf, labelPairBuf, nil
}

// Stream represents Loki stream.
//
// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L23
type Stream struct {
	Labels  string
	Entries []Entry
}

func (s *Stream) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	mm.AppendString(1, s.Labels)
	for _, e := range s.Entries {
		e.marshalProtobuf(mm.AppendMessage(2))
	}
}

func (s *Stream) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []LabelPair, src []byte) ([]Entry, []LabelPair, error) {
	// message Stream {
	//   string labels = 1;
	//   repeated Entry entries = 2;
	// }
	var err error
	var fc easyproto.FieldContext
	entriesBufLen := len(entriesBuf)
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return entriesBuf, labelPairBuf, fmt.Errorf("cannot read next field in Stream: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			labels, ok := fc.String()
			if !ok {
				return entriesBuf, labelPairBuf, fmt.Errorf("cannot read labels")
			}
			s.Labels = labels
		case 2:
			data, ok := fc.MessageData()
			if !ok {
				return entriesBuf, labelPairBuf, fmt.Errorf("cannot read Entry data")
			}
			entriesBuf = append(entriesBuf, Entry{})
			e := &entriesBuf[len(entriesBuf)-1]
			labelPairBuf, err = e.unmarshalProtobuf(labelPairBuf, data)
			if err != nil {
				return entriesBuf, labelPairBuf, fmt.Errorf("cannot unmarshal Entry: %w", err)
			}
		}
	}
	s.Entries = entriesBuf[entriesBufLen:]
	return entriesBuf, labelPairBuf, nil
}

// Entry represents Loki entry.
//
// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L38
type Entry struct {
	Timestamp          time.Time
	Line               string
	StructuredMetadata []LabelPair
}

func (e *Entry) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	marshalTime(mm, 1, e.Timestamp)
	mm.AppendString(2, e.Line)
	for _, lp := range e.StructuredMetadata {
		lp.marshalProtobuf(mm.AppendMessage(3))
	}
}

func (e *Entry) unmarshalProtobuf(labelPairBuf []LabelPair, src []byte) ([]LabelPair, error) {
	// message Entry {
	//   Timestamp timestamp = 1;
	//   string line = 2;
	//   repeated LabelPair structuredMetadata = 3;
	// }
	var err error
	var fc easyproto.FieldContext
	labelPairBufLen := len(labelPairBuf)
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return labelPairBuf, fmt.Errorf("cannot read next field in Entry: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return labelPairBuf, fmt.Errorf("cannot read Timestamp data")
			}
			timestamp, err := unmarshalTime(data)
			if err != nil {
				return labelPairBuf, fmt.Errorf("cannot unmarshal Timestamp: %w", err)
			}
			e.Timestamp = timestamp
		case 2:
			line, ok := fc.String()
			if !ok {
				return labelPairBuf, fmt.Errorf("cannot read Line")
			}
			e.Line = line
		case 3:
			data, ok := fc.MessageData()
			if !ok {
				return labelPairBuf, fmt.Errorf("cannot read StructuredMetadata")
			}
			labelPairBuf = append(labelPairBuf, LabelPair{})
			lp := &labelPairBuf[len(labelPairBuf)-1]
			if err := lp.unmarshalProtobuf(data); err != nil {
				return labelPairBuf, fmt.Errorf("cannot unmarshal StructuredMetadata: %w", err)
			}
		}
	}
	e.StructuredMetadata = labelPairBuf[labelPairBufLen:]
	return labelPairBuf, nil
}

// LabelPair represents Loki label pair.
//
// See https://github.com/grafana/loki/blob/4220737a52da7ab6c9346b12d5a5d7bedbcd641d/pkg/push/push.proto#L33
type LabelPair struct {
	Name  string
	Value string
}

func (lp *LabelPair) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	mm.AppendString(1, lp.Name)
	mm.AppendString(2, lp.Value)
}

func (lp *LabelPair) unmarshalProtobuf(src []byte) (err error) {
	// message LabelPair {
	//   string name = 1;
	//   string value = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in LabelPair: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			name, ok := fc.String()
			if !ok {
				return fmt.Errorf("cannot read name")
			}
			lp.Name = name
		case 2:
			value, ok := fc.String()
			if !ok {
				return fmt.Errorf("cannot unmarshal value")
			}
			lp.Value = value
		}
	}
	return nil
}

func marshalTime(mm *easyproto.MessageMarshaler, fieldNum uint32, timestamp time.Time) {
	nsecs := timestamp.UnixNano()
	ts := Timestamp{
		Seconds: nsecs / 1e9,
		Nanos:   int32(nsecs % 1e9),
	}
	ts.marshalProtobuf(mm.AppendMessage(fieldNum))
}

func unmarshalTime(src []byte) (time.Time, error) {
	var ts Timestamp
	if err := ts.unmarshalProtobuf(src); err != nil {
		return time.Time{}, err
	}
	timestamp := time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	return timestamp, nil
}

// Timestamp is the protobuf well-known timestamp type.
type Timestamp struct {
	Seconds int64
	Nanos   int32
}

func (ts *Timestamp) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	mm.AppendInt64(1, ts.Seconds)
	mm.AppendInt32(2, ts.Nanos)
}

func (ts *Timestamp) unmarshalProtobuf(src []byte) (err error) {
	// message Timestamp {
	//   int64 seconds = 1;
	//   int32 nanos = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Timestamp: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			seconds, ok := fc.Int64()
			if !ok {
				return fmt.Errorf("cannot read Seconds")
			}
			ts.Seconds = seconds
		case 2:
			nanos, ok := fc.Int32()
			if !ok {
				return fmt.Errorf("cannot read Nanos")
			}
			ts.Nanos = nanos
		}
	}
	return nil
}

47
app/vlinsert/main.go
Normal file
@@ -0,0 +1,47 @@
package vlinsert

import (
	"net/http"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/syslog"
)

// Init initializes vlinsert
func Init() {
	syslog.MustInit()
}

// Stop stops vlinsert
func Stop() {
	syslog.MustStop()
}

// RequestHandler handles insert requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	path := r.URL.Path
	if !strings.HasPrefix(path, "/insert/") {
		// Skip requests that do not start with /insert/, since these aren't our requests.
		return false
	}
	path = strings.TrimPrefix(path, "/insert")
	path = strings.ReplaceAll(path, "//", "/")

	if path == "/jsonline" {
		jsonline.RequestHandler(w, r)
		return true
	}
	switch {
	case strings.HasPrefix(path, "/elasticsearch/"):
		path = strings.TrimPrefix(path, "/elasticsearch")
		return elasticsearch.RequestHandler(path, w, r)
	case strings.HasPrefix(path, "/loki/"):
		path = strings.TrimPrefix(path, "/loki")
		return loki.RequestHandler(path, w, r)
	default:
		return false
	}
}
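
For orientation, an illustrative path map for the router above (not part of this commit; the concrete elasticsearch and jsonline endpoints are assumptions based on the handlers imported here, while the loki paths follow from loki.RequestHandler in this diff):

// Hypothetical examples of how RequestHandler dispatches paths:
//
//	/insert/jsonline            -> jsonline.RequestHandler
//	/insert/elasticsearch/_bulk -> elasticsearch.RequestHandler("/_bulk", w, r)
//	/insert/loki/api/v1/push    -> loki.RequestHandler("/api/v1/push", w, r)
//	/insert/loki/ready          -> loki.RequestHandler("/ready", w, r)
//	anything else               -> false, so the caller can serve a 404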

531
app/vlinsert/syslog/syslog.go
Normal file
@@ -0,0 +1,531 @@
package syslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
syslogTimezone = flag.String("syslog.timezone", "Local", "Timezone to use when parsing timestamps in RFC3164 syslog messages. Timezone must be a valid IANA Time Zone. "+
|
||||
"For example: America/New_York, Europe/Berlin, Etc/GMT+3 . See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
|
||||
syslogTenantIDTCP = flagutil.NewArrayString("syslog.tenantID.tcp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
syslogTenantIDUDP = flagutil.NewArrayString("syslog.tenantID.udp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
|
||||
listenAddrTCP = flagutil.NewArrayString("syslog.listenAddr.tcp", "Comma-separated list of TCP addresses to listen to for Syslog messages. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
listenAddrUDP = flagutil.NewArrayString("syslog.listenAddr.udp", "Comma-separated list of UDP address to listen to for Syslog messages. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
|
||||
|
||||
tlsEnable = flagutil.NewArrayBool("syslog.tls", "Whether to enable TLS for receiving syslog messages at the corresponding -syslog.listenAddr.tcp. "+
|
||||
"The corresponding -syslog.tlsCertFile and -syslog.tlsKeyFile must be set if -syslog.tls is set. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsCertFile = flagutil.NewArrayString("syslog.tlsCertFile", "Path to file with TLS certificate for the corresponding -syslog.listenAddr.tcp if the corresponding -syslog.tls is set. "+
|
||||
"Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsKeyFile = flagutil.NewArrayString("syslog.tlsKeyFile", "Path to file with TLS key for the corresponding -syslog.listenAddr.tcp if the corresponding -syslog.tls is set. "+
|
||||
"The provided key file is automatically re-read every second, so it can be dynamically updated. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsCipherSuites = flagutil.NewArrayString("syslog.tlsCipherSuites", "Optional list of TLS cipher suites for -syslog.listenAddr.tcp if -syslog.tls is set. "+
|
||||
"See the list of supported cipher suites at https://pkg.go.dev/crypto/tls#pkg-constants . "+
|
||||
"See also https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
tlsMinVersion = flag.String("syslog.tlsMinVersion", "TLS13", "The minimum TLS version to use for -syslog.listenAddr.tcp if -syslog.tls is set. "+
|
||||
"Supported values: TLS10, TLS11, TLS12, TLS13. "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
|
||||
|
||||
compressMethodTCP = flagutil.NewArrayString("syslog.compressMethod.tcp", "Compression method for syslog messages received at the corresponding -syslog.listenAddr.tcp. "+
|
||||
"Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression")
|
||||
compressMethodUDP = flagutil.NewArrayString("syslog.compressMethod.udp", "Compression method for syslog messages received at the corresponding -syslog.listenAddr.udp. "+
|
||||
"Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression")
|
||||
|
||||
useLocalTimestampTCP = flagutil.NewArrayBool("syslog.useLocalTimestamp.tcp", "Whether to use local timestamp instead of the original timestamp for the ingested syslog messages "+
|
||||
"at the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#log-timestamps")
|
||||
useLocalTimestampUDP = flagutil.NewArrayBool("syslog.useLocalTimestamp.udp", "Whether to use local timestamp instead of the original timestamp for the ingested syslog messages "+
|
||||
"at the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#log-timestamps")
|
||||
)
|
||||
|
||||
// MustInit initializes syslog parser at the given -syslog.listenAddr.tcp and -syslog.listenAddr.udp ports
|
||||
//
|
||||
// This function must be called after flag.Parse().
|
||||
//
|
||||
// MustStop() must be called in order to free up resources occupied by the initialized syslog parser.
|
||||
func MustInit() {
|
||||
if workersStopCh != nil {
|
||||
logger.Panicf("BUG: MustInit() called twice without MustStop() call")
|
||||
}
|
||||
workersStopCh = make(chan struct{})
|
||||
|
||||
for argIdx, addr := range *listenAddrTCP {
|
||||
workersWG.Add(1)
|
||||
go func(addr string, argIdx int) {
|
||||
runTCPListener(addr, argIdx)
|
||||
workersWG.Done()
|
||||
}(addr, argIdx)
|
||||
}
|
||||
|
||||
for argIdx, addr := range *listenAddrUDP {
|
||||
workersWG.Add(1)
|
||||
go func(addr string, argIdx int) {
|
||||
runUDPListener(addr, argIdx)
|
||||
workersWG.Done()
|
||||
}(addr, argIdx)
|
||||
}
|
||||
|
||||
currentYear := time.Now().Year()
|
||||
globalCurrentYear.Store(int64(currentYear))
|
||||
workersWG.Add(1)
|
||||
go func() {
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
for {
|
||||
select {
|
||||
case <-workersStopCh:
|
||||
ticker.Stop()
|
||||
workersWG.Done()
|
||||
return
|
||||
case <-ticker.C:
|
||||
currentYear := time.Now().Year()
|
||||
globalCurrentYear.Store(int64(currentYear))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if *syslogTimezone != "" {
|
||||
tz, err := time.LoadLocation(*syslogTimezone)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -syslog.timezone=%q: %s", *syslogTimezone, err)
|
||||
}
|
||||
globalTimezone = tz
|
||||
} else {
|
||||
globalTimezone = time.Local
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
globalCurrentYear atomic.Int64
|
||||
globalTimezone *time.Location
|
||||
)
|
||||
|
||||
var (
|
||||
workersWG sync.WaitGroup
|
||||
workersStopCh chan struct{}
|
||||
)
|
||||
|
||||
// MustStop stops syslog parser initialized via MustInit()
|
||||
func MustStop() {
|
||||
close(workersStopCh)
|
||||
workersWG.Wait()
|
||||
workersStopCh = nil
|
||||
}
|
||||
|
||||
func runUDPListener(addr string, argIdx int) {
|
||||
ln, err := net.ListenPacket(netutil.GetUDPNetwork(), addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start UDP syslog server at %q: %s", addr, err)
|
||||
}
|
||||
|
||||
tenantIDStr := syslogTenantIDUDP.GetOptionalArg(argIdx)
|
||||
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -syslog.tenantID.udp=%q for -syslog.listenAddr.udp=%q: %s", tenantIDStr, addr, err)
|
||||
}
|
||||
|
||||
compressMethod := compressMethodUDP.GetOptionalArg(argIdx)
|
||||
checkCompressMethod(compressMethod, addr, "udp")
|
||||
|
||||
useLocalTimestamp := useLocalTimestampUDP.GetOptionalArg(argIdx)
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
serveUDP(ln, tenantID, compressMethod, useLocalTimestamp)
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
logger.Infof("started accepting syslog messages at -syslog.listenAddr.udp=%q", addr)
|
||||
<-workersStopCh
|
||||
if err := ln.Close(); err != nil {
|
||||
logger.Fatalf("syslog: cannot close UDP listener at %s: %s", addr, err)
|
||||
}
|
||||
<-doneCh
|
||||
logger.Infof("finished accepting syslog messages at -syslog.listenAddr.udp=%q", addr)
|
||||
}
|
||||
|
||||
func runTCPListener(addr string, argIdx int) {
|
||||
var tlsConfig *tls.Config
|
||||
if tlsEnable.GetOptionalArg(argIdx) {
|
||||
certFile := tlsCertFile.GetOptionalArg(argIdx)
|
||||
keyFile := tlsKeyFile.GetOptionalArg(argIdx)
|
||||
tc, err := netutil.GetServerTLSConfig(certFile, keyFile, *tlsMinVersion, *tlsCipherSuites)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot load TLS cert from -syslog.tlsCertFile=%q, -syslog.tlsKeyFile=%q, -syslog.tlsMinVersion=%q, -syslog.tlsCipherSuites=%q: %s",
|
||||
certFile, keyFile, *tlsMinVersion, *tlsCipherSuites, err)
|
||||
}
|
||||
tlsConfig = tc
|
||||
}
|
||||
ln, err := netutil.NewTCPListener("syslog", addr, false, tlsConfig)
|
||||
if err != nil {
|
||||
logger.Fatalf("syslog: cannot start TCP listener at %s: %s", addr, err)
|
||||
}
|
||||
|
||||
tenantIDStr := syslogTenantIDTCP.GetOptionalArg(argIdx)
|
||||
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -syslog.tenantID.tcp=%q for -syslog.listenAddr.tcp=%q: %s", tenantIDStr, addr, err)
|
||||
}
|
||||
|
||||
compressMethod := compressMethodTCP.GetOptionalArg(argIdx)
|
||||
checkCompressMethod(compressMethod, addr, "tcp")
|
||||
|
||||
useLocalTimestamp := useLocalTimestampTCP.GetOptionalArg(argIdx)
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
serveTCP(ln, tenantID, compressMethod, useLocalTimestamp)
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
logger.Infof("started accepting syslog messages at -syslog.listenAddr.tcp=%q", addr)
|
||||
<-workersStopCh
|
||||
if err := ln.Close(); err != nil {
|
||||
logger.Fatalf("syslog: cannot close TCP listener at %s: %s", addr, err)
|
||||
}
|
||||
<-doneCh
|
||||
logger.Infof("finished accepting syslog messages at -syslog.listenAddr.tcp=%q", addr)
|
||||
}
|
||||
|
||||
func checkCompressMethod(compressMethod, addr, protocol string) {
|
||||
switch compressMethod {
|
||||
case "", "none", "gzip", "deflate":
|
||||
return
|
||||
default:
|
||||
logger.Fatalf("unsupported -syslog.compressMethod.%s=%q for -syslog.listenAddr.%s=%q; supported values: 'none', 'gzip', 'deflate'", protocol, compressMethod, protocol, addr)
|
||||
}
|
||||
}
|
||||
|
||||
func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool) {
|
||||
gomaxprocs := cgroup.AvailableCPUs()
|
||||
var wg sync.WaitGroup
|
||||
localAddr := ln.LocalAddr()
|
||||
for i := 0; i < gomaxprocs; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
cp := insertutils.GetCommonParamsForSyslog(tenantID)
|
||||
var bb bytesutil.ByteBuffer
|
||||
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
|
||||
for {
|
||||
bb.Reset()
|
||||
bb.B = bb.B[:cap(bb.B)]
|
||||
n, remoteAddr, err := ln.ReadFrom(bb.B)
|
||||
if err != nil {
|
||||
udpErrorsTotal.Inc()
|
||||
var ne net.Error
|
||||
if errors.As(err, &ne) {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("syslog: temporary error when listening for UDP at %q: %s", localAddr, err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
}
|
||||
logger.Errorf("syslog: cannot read UDP data from %s at %s: %s", remoteAddr, localAddr, err)
|
||||
continue
|
||||
}
|
||||
bb.B = bb.B[:n]
|
||||
udpRequestsTotal.Inc()
|
||||
if err := processStream(bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
logger.Errorf("syslog: cannot process UDP data from %s at %s: %s", remoteAddr, localAddr, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool) {
|
||||
var cm ingestserver.ConnsMap
|
||||
cm.Init("syslog")
|
||||
|
||||
var wg sync.WaitGroup
|
||||
addr := ln.Addr()
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
var ne net.Error
|
||||
if errors.As(err, &ne) {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("syslog: temporary error when listening for TCP addr %q: %s", addr, err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
logger.Fatalf("syslog: unrecoverable error when accepting TCP connections at %q: %s", addr, err)
|
||||
}
|
||||
logger.Fatalf("syslog: unexpected error when accepting TCP connections at %q: %s", addr, err)
|
||||
}
|
||||
if !cm.Add(c) {
|
||||
_ = c.Close()
|
||||
break
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
cp := insertutils.GetCommonParamsForSyslog(tenantID)
|
||||
if err := processStream(c, compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
|
||||
}
|
||||
|
||||
cm.Delete(c)
|
||||
_ = c.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
cm.CloseAll(0)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
|
||||
func processStream(r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
err := processStreamInternal(r, compressMethod, useLocalTimestamp, lmp)
|
||||
lmp.MustClose()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func processStreamInternal(r io.Reader, compressMethod string, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
|
||||
switch compressMethod {
|
||||
case "", "none":
|
||||
case "gzip":
|
||||
zr, err := common.GetGzipReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read gzipped data: %w", err)
|
||||
}
|
||||
r = zr
|
||||
case "deflate":
|
||||
zr, err := common.GetZlibReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read deflated data: %w", err)
|
||||
}
|
||||
r = zr
|
||||
default:
|
||||
logger.Panicf("BUG: unsupported compressMethod=%q; supported values: none, gzip, deflate", compressMethod)
|
||||
}
|
||||
|
||||
err := processUncompressedStream(r, useLocalTimestamp, lmp)
|
||||
|
||||
switch compressMethod {
|
||||
case "gzip":
|
||||
zr := r.(*gzip.Reader)
|
||||
common.PutGzipReader(zr)
|
||||
case "deflate":
|
||||
zr := r.(io.ReadCloser)
|
||||
common.PutZlibReader(zr)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
slr := getSyslogLineReader(wcr)
|
||||
defer putSyslogLineReader(slr)
|
||||
|
||||
n := 0
|
||||
for {
|
||||
ok := slr.nextLine()
|
||||
wcr.DecConcurrency()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
currentYear := int(globalCurrentYear.Load())
|
||||
err := processLine(slr.line, currentYear, globalTimezone, useLocalTimestamp, lmp)
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
return fmt.Errorf("cannot read line #%d: %s", n, err)
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
return slr.Error()
|
||||
}
|
||||
|
||||
type syslogLineReader struct {
|
||||
line []byte
|
||||
|
||||
br *bufio.Reader
|
||||
err error
|
||||
}
|
||||
|
||||
func (slr *syslogLineReader) reset(r io.Reader) {
|
||||
slr.line = slr.line[:0]
|
||||
slr.br.Reset(r)
|
||||
slr.err = nil
|
||||
}
|
||||
|
||||
// Error returns the last error occurred in slr.
|
||||
func (slr *syslogLineReader) Error() error {
|
||||
if slr.err == nil || slr.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return slr.err
|
||||
}
|
||||
|
||||
// nextLine reads the next syslog line from slr and stores it at slr.line.
//
// false is returned if the next line cannot be read. Error() must be called in this case
// in order to verify whether there is an error or the slr stream has just been finished.
func (slr *syslogLineReader) nextLine() bool {
    if slr.err != nil {
        return false
    }

again:
    prefix, err := slr.br.ReadSlice(' ')
    if err != nil {
        if err != io.EOF {
            slr.err = fmt.Errorf("cannot read message frame prefix: %w", err)
            return false
        }
        if len(prefix) == 0 {
            slr.err = err
            return false
        }
    }
    // skip empty lines
    for len(prefix) > 0 && prefix[0] == '\n' {
        prefix = prefix[1:]
    }
    if len(prefix) == 0 {
        // An empty prefix or a prefix with empty lines - try reading yet another prefix.
        goto again
    }

    if prefix[0] >= '0' && prefix[0] <= '9' {
        // This is the octet-counting method. See https://www.ietf.org/archive/id/draft-gerhards-syslog-plain-tcp-07.html#msgxfer
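        // For example, the frame "7 foo bar" declares a 7-byte payload, so it carries the message "foo bar".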
        msgLenStr := bytesutil.ToUnsafeString(prefix[:len(prefix)-1])
        msgLen, err := strconv.ParseUint(msgLenStr, 10, 64)
        if err != nil {
            slr.err = fmt.Errorf("cannot parse message length from %q: %w", msgLenStr, err)
            return false
        }
        if maxMsgLen := insertutils.MaxLineSizeBytes.IntN(); msgLen > uint64(maxMsgLen) {
            slr.err = fmt.Errorf("cannot read message longer than %d bytes; msgLen=%d", maxMsgLen, msgLen)
            return false
        }
        slr.line = slicesutil.SetLength(slr.line, int(msgLen))
        if _, err := io.ReadFull(slr.br, slr.line); err != nil {
            slr.err = fmt.Errorf("cannot read message with size %d bytes: %w", msgLen, err)
            return false
        }
        return true
    }

    // This is the octet-stuffing method. See https://www.ietf.org/archive/id/draft-gerhards-syslog-plain-tcp-07.html#octet-stuffing-legacy
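    // For example, the input "foo bar\nbaz qux\n" is split into the messages "foo bar" and "baz qux".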
    slr.line = append(slr.line[:0], prefix...)
    for {
        line, err := slr.br.ReadSlice('\n')
        if err == nil {
            slr.line = append(slr.line, line[:len(line)-1]...)
            return true
        }
        if err == io.EOF {
            slr.line = append(slr.line, line...)
            return true
        }
        if err == bufio.ErrBufferFull {
            slr.line = append(slr.line, line...)
            continue
        }
        slr.err = fmt.Errorf("cannot read message in octet-stuffing method: %w", err)
        return false
    }
}

func getSyslogLineReader(r io.Reader) *syslogLineReader {
    v := syslogLineReaderPool.Get()
    if v == nil {
        br := bufio.NewReaderSize(r, 64*1024)
        return &syslogLineReader{
            br: br,
        }
    }
    slr := v.(*syslogLineReader)
    slr.reset(r)
    return slr
}

func putSyslogLineReader(slr *syslogLineReader) {
    syslogLineReaderPool.Put(slr)
}

var syslogLineReaderPool sync.Pool

func processLine(line []byte, currentYear int, timezone *time.Location, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
    p := logstorage.GetSyslogParser(currentYear, timezone)
    lineStr := bytesutil.ToUnsafeString(line)
    p.Parse(lineStr)

    var ts int64
    if useLocalTimestamp {
        ts = time.Now().UnixNano()
    } else {
        nsecs, err := insertutils.ExtractTimestampRFC3339NanoFromFields("timestamp", p.Fields)
        if err != nil {
            return fmt.Errorf("cannot get timestamp from syslog line %q: %w", line, err)
        }
        ts = nsecs
    }
    logstorage.RenameField(p.Fields, "message", "_msg")
    lmp.AddRow(ts, p.Fields)
    logstorage.PutSyslogParser(p)

    return nil
}

var (
    rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="syslog"}`)

    errorsTotal = metrics.NewCounter(`vl_errors_total{type="syslog"}`)

    udpRequestsTotal = metrics.NewCounter(`vl_udp_reqests_total{type="syslog"}`)
    udpErrorsTotal   = metrics.NewCounter(`vl_udp_errors_total{type="syslog"}`)
)

130 app/vlinsert/syslog/syslog_test.go Normal file
@@ -0,0 +1,130 @@
package syslog

import (
    "bytes"
    "reflect"
    "testing"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestSyslogLineReader_Success(t *testing.T) {
    f := func(data string, linesExpected []string) {
        t.Helper()

        r := bytes.NewBufferString(data)
        slr := getSyslogLineReader(r)
        defer putSyslogLineReader(slr)

        var lines []string
        for slr.nextLine() {
            lines = append(lines, string(slr.line))
        }
        if err := slr.Error(); err != nil {
            t.Fatalf("unexpected error: %s", err)
        }
        if !reflect.DeepEqual(lines, linesExpected) {
            t.Fatalf("unexpected lines read;\ngot\n%q\nwant\n%q", lines, linesExpected)
        }
    }

    f("", nil)
    f("\n", nil)
    f("\n\n\n", nil)

    f("foobar", []string{"foobar"})
    f("foobar\n", []string{"foobar\n"})
    f("\n\nfoo\n\nbar\n\n", []string{"foo\n\nbar\n\n"})

    f(`Jun  3 12:08:33 abcd systemd: Starting Update the local ESM caches...`, []string{"Jun  3 12:08:33 abcd systemd: Starting Update the local ESM caches..."})

    f(`Jun  3 12:08:33 abcd systemd: Starting Update the local ESM caches...

48 <165>Jun  4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.

`, []string{
        "Jun  3 12:08:33 abcd systemd: Starting Update the local ESM caches...",
        "<165>Jun  4 12:08:33 abcd systemd[345]: abc defg",
        `<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.`,
    })
}

func TestSyslogLineReader_Failure(t *testing.T) {
    f := func(data string) {
        t.Helper()

        r := bytes.NewBufferString(data)
        slr := getSyslogLineReader(r)
        defer putSyslogLineReader(slr)

        if slr.nextLine() {
            t.Fatalf("expecting failure to read the first line")
        }
        if err := slr.Error(); err == nil {
            t.Fatalf("expecting non-nil error")
        }
    }

    // invalid format for message size
    f("12foo bar")

    // too big message size
    f("123 aa")
    f("1233423432 abc")
}

func TestProcessStreamInternal_Success(t *testing.T) {
    f := func(data string, currentYear, rowsExpected int, timestampsExpected []int64, resultExpected string) {
        t.Helper()

        MustInit()
        defer MustStop()

        globalTimezone = time.UTC
        globalCurrentYear.Store(int64(currentYear))

        tlp := &insertutils.TestLogMessageProcessor{}
        r := bytes.NewBufferString(data)
        if err := processStreamInternal(r, "", false, tlp); err != nil {
            t.Fatalf("unexpected error: %s", err)
        }
        if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
            t.Fatal(err)
        }
    }

    data := `Jun  3 12:08:33 abcd systemd: Starting Update the local ESM caches...

48 <165>Jun  4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
`
    currentYear := 2023
    rowsExpected := 3
    timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
    resultExpected := `{"format":"rfc3164","timestamp":"","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","timestamp":"","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","timestamp":"","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
    f(data, currentYear, rowsExpected, timestampsExpected, resultExpected)
}

func TestProcessStreamInternal_Failure(t *testing.T) {
    f := func(data string) {
        t.Helper()

        MustInit()
        defer MustStop()

        tlp := &insertutils.TestLogMessageProcessor{}
        r := bytes.NewBufferString(data)
        if err := processStreamInternal(r, "", false, tlp); err == nil {
            t.Fatalf("expecting non-nil error")
        }
    }

    // invalid format for message size
    f("12foo bar")

    // too big message size
    f("123 foo")
    f("123456789 bar")
}

@@ -1 +0,0 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).

7 app/vlogsgenerator/Makefile Normal file
@@ -0,0 +1,7 @@
# All these commands must run from repository root.

vlogsgenerator:
	APP_NAME=vlogsgenerator $(MAKE) app-local

vlogsgenerator-race:
	APP_NAME=vlogsgenerator RACE=-race $(MAKE) app-local

@@ -1 +1,158 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).
# vlogsgenerator

Logs generator for [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/).

## How to build vlogsgenerator?

Run `make vlogsgenerator` from the repository root. This builds the `bin/vlogsgenerator` binary.

## How to run vlogsgenerator?

`vlogsgenerator` generates logs in [JSON line format](https://jsonlines.org/) suitable for ingestion
via the [`/insert/jsonline` endpoint at VictoriaLogs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api).

By default it writes the generated logs to `stdout`:

```
bin/vlogsgenerator
```

The generated logs can be redirected to a file. For example, the following command writes them to the `logs.json` file:

```
bin/vlogsgenerator > logs.json
```

The generated logs in the `logs.json` file can be inspected with the following command:

```
head logs.json | jq .
```

Below is an example output:

```json
{
  "_time": "2024-05-08T14:34:00.854Z",
  "_msg": "message for the stream 8 and worker 0; ip=185.69.136.129; uuid=b4fe8f1a-c93c-dea3-ba11-5b9f0509291e; u64=8996587920687045253",
  "host": "host_8",
  "worker_id": "0",
  "run_id": "f9b3deee-e6b6-7f56-5deb-1586e4e81725",
  "const_0": "some value 0 8",
  "const_1": "some value 1 8",
  "const_2": "some value 2 8",
  "var_0": "some value 0 12752539384823438260",
  "dict_0": "warn",
  "dict_1": "info",
  "u8_0": "6",
  "u16_0": "35202",
  "u32_0": "1964973739",
  "u64_0": "4810489083243239145",
  "float_0": "1.868",
  "ip_0": "250.34.75.125",
  "timestamp_0": "1799-03-16T01:34:18.311Z",
  "json_0": "{\"foo\":\"bar_3\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":32}"
}
{
  "_time": "2024-05-08T14:34:00.854Z",
  "_msg": "message for the stream 9 and worker 0; ip=164.244.254.194; uuid=7e8373b1-ce0d-1ce7-8e96-4bcab8955598; u64=13949903463741076522",
  "host": "host_9",
  "worker_id": "0",
  "run_id": "f9b3deee-e6b6-7f56-5deb-1586e4e81725",
  "const_0": "some value 0 9",
  "const_1": "some value 1 9",
  "const_2": "some value 2 9",
  "var_0": "some value 0 5371555382075206134",
  "dict_0": "INFO",
  "dict_1": "FATAL",
  "u8_0": "219",
  "u16_0": "31459",
  "u32_0": "3918836777",
  "u64_0": "6593354256620219850",
  "float_0": "1.085",
  "ip_0": "253.151.88.158",
  "timestamp_0": "2042-10-05T16:42:57.082Z",
  "json_0": "{\"foo\":\"bar_5\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":27}"
}
```

The `run_id` field uniquely identifies every `vlogsgenerator` invocation.

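All the logs produced by a given invocation can then be selected by filtering on this field. For example, the following command queries VictoriaLogs for the logs with the `run_id` from the sample output above (the address is illustrative; see the [LogsQL docs](https://docs.victoriametrics.com/victorialogs/logsql/)):

```
curl http://localhost:9428/select/logsql/query -d 'query=run_id:"f9b3deee-e6b6-7f56-5deb-1586e4e81725"'
```
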
### How to write logs to VictoriaLogs?

The generated logs can be written directly to VictoriaLogs by passing the address of the [`/insert/jsonline` endpoint](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api)
to the `-addr` command-line flag. For example, the following command writes the generated logs to VictoriaLogs running at `localhost`:

```
bin/vlogsgenerator -addr=http://localhost:9428/insert/jsonline
```

### Configuration

`vlogsgenerator` accepts various command-line flags, which can be used for configuring the number and the shape of the generated logs.
These flags can be inspected by running `vlogsgenerator -help`. Below are the most interesting flags:

* `-start` - starting timestamp for generating logs. Logs are evenly generated on the [`-start` ... `-end`] interval.
* `-end` - ending timestamp for generating logs. Logs are evenly generated on the [`-start` ... `-end`] interval.
* `-activeStreams` - the number of active [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to generate.
* `-logsPerStream` - the number of log entries to generate per each log stream. Log entries are evenly distributed on the [`-start` ... `-end`] interval.

The total number of generated logs can be calculated as `-activeStreams` * `-logsPerStream`.

For example, the following command generates `1_000_000` log entries on the time range `[2024-01-01 - 2024-02-01]` across `100`
[log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields), where every log stream contains `10_000` log entries,
and writes them to `http://localhost:9428/insert/jsonline`:

```
bin/vlogsgenerator \
    -start=2024-01-01 -end=2024-02-01 \
    -activeStreams=100 \
    -logsPerStream=10_000 \
    -addr=http://localhost:9428/insert/jsonline
```

### Churn rate

It is possible to generate churn rate for active [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
by setting the `-totalStreams` command-line flag to a value bigger than `-activeStreams`. For example, the following command generates
logs for `1000` total streams, while the number of active streams equals `100`. This means that at any given time there are logs for `100` streams,
but these streams change over the given [`-start` ... `-end`] time range, so the total number of streams on that time range reaches `1000`:

```
bin/vlogsgenerator \
    -start=2024-01-01 -end=2024-02-01 \
    -activeStreams=100 \
    -totalStreams=1_000 \
    -logsPerStream=10_000 \
    -addr=http://localhost:9428/insert/jsonline
```

In this case the total number of generated logs equals `-totalStreams` * `-logsPerStream` = `10_000_000`.

### Benchmark tuning

By default `vlogsgenerator` generates and writes logs with a single worker. This may limit the maximum data ingestion rate during benchmarks.
The number of workers can be changed via the `-workers` command-line flag. For example, the following command generates and writes logs with `16` workers:

```
bin/vlogsgenerator \
    -start=2024-01-01 -end=2024-02-01 \
    -activeStreams=100 \
    -logsPerStream=10_000 \
    -addr=http://localhost:9428/insert/jsonline \
    -workers=16
```

### Output statistics

Every 10 seconds `vlogsgenerator` writes statistics about the generated logs to `stderr`. The frequency of these stats can be adjusted via the `-statInterval` command-line flag.
For example, the following command writes statistics every 2 seconds:

```
bin/vlogsgenerator \
    -start=2024-01-01 -end=2024-02-01 \
    -activeStreams=100 \
    -logsPerStream=10_000 \
    -addr=http://localhost:9428/insert/jsonline \
    -statInterval=2s
```

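Every stats line follows the `logger.Infof` format used in `main.go`; with illustrative numbers it looks like this:

```
generated 250K log entries (1250K total) at 125K entries/sec, 80MB (400MB total) at 40MB/sec
```
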
344 app/vlogsgenerator/main.go Normal file
@@ -0,0 +1,344 @@
package main

import (
    "bufio"
    "flag"
    "fmt"
    "io"
    "math"
    "math/rand"
    "net/http"
    "net/url"
    "os"
    "strconv"
    "sync"
    "sync/atomic"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

var (
    addr    = flag.String("addr", "stdout", "HTTP address to push the generated logs to; if it is set to stdout, then logs are generated to stdout")
    workers = flag.Int("workers", 1, "The number of workers to use to push logs to -addr")

    start         = newTimeFlag("start", "-1d", "Generated logs start from this time; see https://docs.victoriametrics.com/#timestamp-formats")
    end           = newTimeFlag("end", "0s", "Generated logs end at this time; see https://docs.victoriametrics.com/#timestamp-formats")
    activeStreams = flag.Int("activeStreams", 100, "The number of active log streams to generate; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields")
    totalStreams  = flag.Int("totalStreams", 0, "The number of total log streams; if -totalStreams > -activeStreams, then some active streams are substituted with new streams "+
        "during data generation")
    logsPerStream     = flag.Int64("logsPerStream", 1_000, "The number of log entries to generate per each log stream. Log entries are evenly distributed between -start and -end")
    constFieldsPerLog = flag.Int("constFieldsPerLog", 3, "The number of fields with constant values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    varFieldsPerLog = flag.Int("varFieldsPerLog", 1, "The number of fields with variable values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    dictFieldsPerLog = flag.Int("dictFieldsPerLog", 2, "The number of fields with up to 8 different values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    u8FieldsPerLog = flag.Int("u8FieldsPerLog", 1, "The number of fields with uint8 values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    u16FieldsPerLog = flag.Int("u16FieldsPerLog", 1, "The number of fields with uint16 values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    u32FieldsPerLog = flag.Int("u32FieldsPerLog", 1, "The number of fields with uint32 values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    u64FieldsPerLog = flag.Int("u64FieldsPerLog", 1, "The number of fields with uint64 values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    floatFieldsPerLog = flag.Int("floatFieldsPerLog", 1, "The number of fields with float64 values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    ipFieldsPerLog = flag.Int("ipFieldsPerLog", 1, "The number of fields with IPv4 values to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    timestampFieldsPerLog = flag.Int("timestampFieldsPerLog", 1, "The number of fields with ISO8601 timestamps per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
    jsonFieldsPerLog = flag.Int("jsonFieldsPerLog", 1, "The number of JSON fields to generate per each log entry; "+
        "see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")

    statInterval = flag.Duration("statInterval", 10*time.Second, "The interval between publishing the stats")
)

func main() {
    // Write flags and help message to stdout, since it is easier to grep or pipe.
    flag.CommandLine.SetOutput(os.Stdout)
    envflag.Parse()
    buildinfo.Init()
    logger.Init()

    var remoteWriteURL *url.URL
    if *addr != "stdout" {
        urlParsed, err := url.Parse(*addr)
        if err != nil {
            logger.Fatalf("cannot parse -addr=%q: %s", *addr, err)
        }
        qs, err := url.ParseQuery(urlParsed.RawQuery)
        if err != nil {
            logger.Fatalf("cannot parse query string in -addr=%q: %s", *addr, err)
        }
        qs.Set("_stream_fields", "host,worker_id")
        urlParsed.RawQuery = qs.Encode()
        remoteWriteURL = urlParsed
    }

    if start.nsec >= end.nsec {
        logger.Fatalf("-start=%s must be smaller than -end=%s", start, end)
    }
    if *activeStreams <= 0 {
        logger.Fatalf("-activeStreams must be bigger than 0; got %d", *activeStreams)
    }
    if *logsPerStream <= 0 {
        logger.Fatalf("-logsPerStream must be bigger than 0; got %d", *logsPerStream)
    }
    if *totalStreams < *activeStreams {
        *totalStreams = *activeStreams
    }

    cfg := &workerConfig{
        url:           remoteWriteURL,
        activeStreams: *activeStreams,
        totalStreams:  *totalStreams,
    }

    // divide total and active streams among workers
    if *workers <= 0 {
        logger.Fatalf("-workers must be bigger than 0; got %d", *workers)
    }
    if *workers > *activeStreams {
        logger.Fatalf("-workers=%d cannot exceed -activeStreams=%d", *workers, *activeStreams)
    }
    cfg.activeStreams /= *workers
    cfg.totalStreams /= *workers

    logger.Infof("start -workers=%d workers for ingesting -logsPerStream=%d log entries per each -totalStreams=%d (-activeStreams=%d) on a time range -start=%s, -end=%s to -addr=%s",
        *workers, *logsPerStream, *totalStreams, *activeStreams, toRFC3339(start.nsec), toRFC3339(end.nsec), *addr)

    startTime := time.Now()
    var wg sync.WaitGroup
    for i := 0; i < *workers; i++ {
        wg.Add(1)
        go func(workerID int) {
            defer wg.Done()
            generateAndPushLogs(cfg, workerID)
        }(i)
    }

    go func() {
        prevEntries := uint64(0)
        prevBytes := uint64(0)
        ticker := time.NewTicker(*statInterval)
        for range ticker.C {
            currEntries := logEntriesCount.Load()
            deltaEntries := currEntries - prevEntries
            rateEntries := float64(deltaEntries) / statInterval.Seconds()

            currBytes := bytesGenerated.Load()
            deltaBytes := currBytes - prevBytes
            rateBytes := float64(deltaBytes) / statInterval.Seconds()
            logger.Infof("generated %dK log entries (%dK total) at %.0fK entries/sec, %dMB (%dMB total) at %.0fMB/sec",
                deltaEntries/1e3, currEntries/1e3, rateEntries/1e3, deltaBytes/1e6, currBytes/1e6, rateBytes/1e6)

            prevEntries = currEntries
            prevBytes = currBytes
        }
    }()

    wg.Wait()

    dSecs := time.Since(startTime).Seconds()
    currEntries := logEntriesCount.Load()
    currBytes := bytesGenerated.Load()
    rateEntries := float64(currEntries) / dSecs
    rateBytes := float64(currBytes) / dSecs
    logger.Infof("ingested %dK log entries (%dMB) in %.3f seconds; avg ingestion rate: %.0fK entries/sec, %.0fMB/sec", currEntries/1e3, currBytes/1e6, dSecs, rateEntries/1e3, rateBytes/1e6)
}

var logEntriesCount atomic.Uint64

var bytesGenerated atomic.Uint64

type workerConfig struct {
    url           *url.URL
    activeStreams int
    totalStreams  int
}

type statWriter struct {
    w io.Writer
}

func (sw *statWriter) Write(p []byte) (int, error) {
    bytesGenerated.Add(uint64(len(p)))
    return sw.w.Write(p)
}

func generateAndPushLogs(cfg *workerConfig, workerID int) {
    pr, pw := io.Pipe()
    sw := &statWriter{
        w: pw,
    }
    bw := bufio.NewWriter(sw)
    doneCh := make(chan struct{})
    go func() {
        generateLogs(bw, workerID, cfg.activeStreams, cfg.totalStreams)
        _ = bw.Flush()
        _ = pw.Close()
        close(doneCh)
    }()

    if cfg.url == nil {
        _, err := io.Copy(os.Stdout, pr)
        if err != nil {
            logger.Fatalf("unexpected error when writing logs to stdout: %s", err)
        }
        return
    }

    req, err := http.NewRequest("POST", cfg.url.String(), pr)
    if err != nil {
        logger.Fatalf("cannot create request to %q: %s", cfg.url, err)
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        logger.Fatalf("cannot perform request to %q: %s", cfg.url, err)
    }
    if resp.StatusCode/100 != 2 {
        logger.Fatalf("unexpected status code received from %q: %d; want 2xx", cfg.url, resp.StatusCode)
    }

    // Wait until the generateLogs goroutine is finished.
    <-doneCh
}

func generateLogs(bw *bufio.Writer, workerID, activeStreams, totalStreams int) {
    streamLifetime := int64(float64(end.nsec-start.nsec) * (float64(activeStreams) / float64(totalStreams)))
    streamStep := int64(float64(end.nsec-start.nsec) / float64(totalStreams-activeStreams+1))
    step := streamLifetime / (*logsPerStream - 1)

    currNsec := start.nsec
    for currNsec < end.nsec {
        firstStreamID := int((currNsec - start.nsec) / streamStep)
        generateLogsAtTimestamp(bw, workerID, currNsec, firstStreamID, activeStreams)
        currNsec += step
    }
}

var runID = toUUID(rand.Uint64(), rand.Uint64())

func generateLogsAtTimestamp(bw *bufio.Writer, workerID int, ts int64, firstStreamID, activeStreams int) {
    streamID := firstStreamID
    timeStr := toRFC3339(ts)
    for i := 0; i < activeStreams; i++ {
        ip := toIPv4(rand.Uint32())
        uuid := toUUID(rand.Uint64(), rand.Uint64())
        fmt.Fprintf(bw, `{"_time":"%s","_msg":"message for the stream %d and worker %d; ip=%s; uuid=%s; u64=%d","host":"host_%d","worker_id":"%d"`,
            timeStr, streamID, workerID, ip, uuid, rand.Uint64(), streamID, workerID)
        fmt.Fprintf(bw, `,"run_id":"%s"`, runID)
        for j := 0; j < *constFieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"const_%d":"some value %d %d"`, j, j, streamID)
        }
        for j := 0; j < *varFieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"var_%d":"some value %d %d"`, j, j, rand.Uint64())
        }
        for j := 0; j < *dictFieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"dict_%d":"%s"`, j, dictValues[rand.Intn(len(dictValues))])
        }
        for j := 0; j < *u8FieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"u8_%d":"%d"`, j, uint8(rand.Uint32()))
        }
        for j := 0; j < *u16FieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"u16_%d":"%d"`, j, uint16(rand.Uint32()))
        }
        for j := 0; j < *u32FieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"u32_%d":"%d"`, j, rand.Uint32())
        }
        for j := 0; j < *u64FieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"u64_%d":"%d"`, j, rand.Uint64())
        }
        for j := 0; j < *floatFieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"float_%d":"%v"`, j, math.Round(10_000*rand.Float64())/1000)
        }
        for j := 0; j < *ipFieldsPerLog; j++ {
            ip := toIPv4(rand.Uint32())
            fmt.Fprintf(bw, `,"ip_%d":"%s"`, j, ip)
        }
        for j := 0; j < *timestampFieldsPerLog; j++ {
            timestamp := toISO8601(int64(rand.Uint64()))
            fmt.Fprintf(bw, `,"timestamp_%d":"%s"`, j, timestamp)
        }
        for j := 0; j < *jsonFieldsPerLog; j++ {
            fmt.Fprintf(bw, `,"json_%d":"{\"foo\":\"bar_%d\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":%d}"`, j, rand.Intn(10), rand.Intn(100))
        }
        fmt.Fprintf(bw, "}\n")

        logEntriesCount.Add(1)
        streamID++
    }
}

var dictValues = []string{
    "debug",
    "info",
    "warn",
    "error",
    "fatal",
    "ERROR",
    "FATAL",
    "INFO",
}

func newTimeFlag(name, defaultValue, description string) *timeFlag {
    var tf timeFlag
    if err := tf.Set(defaultValue); err != nil {
        logger.Panicf("invalid defaultValue=%q for flag %q: %s", defaultValue, name, err)
    }
    flag.Var(&tf, name, description)
    return &tf
}

type timeFlag struct {
    s    string
    nsec int64
}

func (tf *timeFlag) Set(s string) error {
    msec, err := promutils.ParseTimeMsec(s)
    if err != nil {
        return fmt.Errorf("cannot parse time from %q: %w", s, err)
    }
    tf.s = s
    tf.nsec = msec * 1e6
    return nil
}

func (tf *timeFlag) String() string {
    return tf.s
}

func toRFC3339(nsec int64) string {
    return time.Unix(0, nsec).UTC().Format(time.RFC3339Nano)
}

func toISO8601(nsec int64) string {
    return time.Unix(0, nsec).UTC().Format("2006-01-02T15:04:05.000Z")
}

func toIPv4(n uint32) string {
    dst := make([]byte, 0, len("255.255.255.255"))
    dst = marshalUint64(dst, uint64(n>>24))
    dst = append(dst, '.')
    dst = marshalUint64(dst, uint64((n>>16)&0xff))
    dst = append(dst, '.')
    dst = marshalUint64(dst, uint64((n>>8)&0xff))
    dst = append(dst, '.')
    dst = marshalUint64(dst, uint64(n&0xff))
    return string(dst)
}

func toUUID(a, b uint64) string {
    return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", a&(1<<32-1), (a>>32)&(1<<16-1), a>>48, b&(1<<16-1), b>>16)
}

// marshalUint64 appends the string representation of n to dst and returns the result.
func marshalUint64(dst []byte, n uint64) []byte {
    return strconv.AppendUint(dst, n, 10)
}

@@ -1 +0,0 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).

47 app/vlselect/logsql/buffered_writer.go Normal file
@@ -0,0 +1,47 @@
package logsql

import (
    "bufio"
    "io"
    "sync"
)

func getBufferedWriter(w io.Writer) *bufferedWriter {
    v := bufferedWriterPool.Get()
    if v == nil {
        return &bufferedWriter{
            bw: bufio.NewWriter(w),
        }
    }
    bw := v.(*bufferedWriter)
    bw.bw.Reset(w)
    return bw
}

func putBufferedWriter(bw *bufferedWriter) {
    bw.reset()
    bufferedWriterPool.Put(bw)
}

var bufferedWriterPool sync.Pool

type bufferedWriter struct {
    mu sync.Mutex
    bw *bufio.Writer
}

func (bw *bufferedWriter) reset() {
    // nothing to do
}

func (bw *bufferedWriter) WriteIgnoreErrors(p []byte) {
    bw.mu.Lock()
    _, _ = bw.bw.Write(p)
    bw.mu.Unlock()
}

func (bw *bufferedWriter) FlushIgnoreErrors() {
    bw.mu.Lock()
    _ = bw.bw.Flush()
    bw.mu.Unlock()
}

70 app/vlselect/logsql/hits_response.qtpl Normal file
@@ -0,0 +1,70 @@
{% import (
    "slices"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
) %}

{% stripspace %}

// FieldsForHits formats labels for /select/logsql/hits response
{% func FieldsForHits(columns []logstorage.BlockColumn, rowIdx int) %}
{
    {% if len(columns) > 0 %}
        {%q= columns[0].Name %}:{%q= columns[0].Values[rowIdx] %}
        {% for _, c := range columns[1:] %}
            ,{%q= c.Name %}:{%q= c.Values[rowIdx] %}
        {% endfor %}
    {% endif %}
}
{% endfunc %}

{% func HitsSeries(m map[string]*hitsSeries) %}
{
    {% code
        sortedKeys := make([]string, 0, len(m))
        for k := range m {
            sortedKeys = append(sortedKeys, k)
        }
        slices.Sort(sortedKeys)
    %}
    "hits":[
        {% if len(sortedKeys) > 0 %}
            {%= hitsSeriesLine(m, sortedKeys[0]) %}
            {% for _, k := range sortedKeys[1:] %}
                ,{%= hitsSeriesLine(m, k) %}
            {% endfor %}
        {% endif %}
    ]
}
{% endfunc %}

{% func hitsSeriesLine(m map[string]*hitsSeries, k string) %}
{
    {% code
        hs := m[k]
        hs.sort()
        timestamps := hs.timestamps
        hits := hs.hits
    %}
    "fields":{%s= k %},
    "timestamps":[
        {% if len(timestamps) > 0 %}
            {%q= timestamps[0] %}
            {% for _, ts := range timestamps[1:] %}
                ,{%q= ts %}
            {% endfor %}
        {% endif %}
    ],
    "values":[
        {% if len(hits) > 0 %}
            {%dul= hits[0] %}
            {% for _, v := range hits[1:] %}
                ,{%dul= v %}
            {% endfor %}
        {% endif %}
    ],
    "total":{%dul= hs.hitsTotal %}
}
{% endfunc %}

{% endstripspace %}
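
For reference, the `/select/logsql/hits` JSON shape this template renders looks like the following (field names and numbers are illustrative, not taken from a real response):

```json
{
  "hits": [
    {
      "fields": {"level": "error"},
      "timestamps": ["2024-05-08T00:00:00Z"],
      "values": [10],
      "total": 10
    }
  ]
}
```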

223 app/vlselect/logsql/hits_response.qtpl.go Normal file
@@ -0,0 +1,223 @@
// Code generated by qtc from "hits_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vlselect/logsql/hits_response.qtpl:1
package logsql

//line app/vlselect/logsql/hits_response.qtpl:1
import (
    "slices"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// FieldsForHits formats labels for /select/logsql/hits response

//line app/vlselect/logsql/hits_response.qtpl:10
import (
    qtio422016 "io"

    qt422016 "github.com/valyala/quicktemplate"
)

//line app/vlselect/logsql/hits_response.qtpl:10
var (
    _ = qtio422016.Copy
    _ = qt422016.AcquireByteBuffer
)

//line app/vlselect/logsql/hits_response.qtpl:10
func StreamFieldsForHits(qw422016 *qt422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/hits_response.qtpl:10
    qw422016.N().S(`{`)
//line app/vlselect/logsql/hits_response.qtpl:12
    if len(columns) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:13
        qw422016.N().Q(columns[0].Name)
//line app/vlselect/logsql/hits_response.qtpl:13
        qw422016.N().S(`:`)
//line app/vlselect/logsql/hits_response.qtpl:13
        qw422016.N().Q(columns[0].Values[rowIdx])
//line app/vlselect/logsql/hits_response.qtpl:14
        for _, c := range columns[1:] {
//line app/vlselect/logsql/hits_response.qtpl:14
            qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:15
            qw422016.N().Q(c.Name)
//line app/vlselect/logsql/hits_response.qtpl:15
            qw422016.N().S(`:`)
//line app/vlselect/logsql/hits_response.qtpl:15
            qw422016.N().Q(c.Values[rowIdx])
//line app/vlselect/logsql/hits_response.qtpl:16
        }
//line app/vlselect/logsql/hits_response.qtpl:17
    }
//line app/vlselect/logsql/hits_response.qtpl:17
    qw422016.N().S(`}`)
//line app/vlselect/logsql/hits_response.qtpl:19
}

//line app/vlselect/logsql/hits_response.qtpl:19
func WriteFieldsForHits(qq422016 qtio422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/hits_response.qtpl:19
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/hits_response.qtpl:19
    StreamFieldsForHits(qw422016, columns, rowIdx)
//line app/vlselect/logsql/hits_response.qtpl:19
    qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/hits_response.qtpl:19
}

//line app/vlselect/logsql/hits_response.qtpl:19
func FieldsForHits(columns []logstorage.BlockColumn, rowIdx int) string {
//line app/vlselect/logsql/hits_response.qtpl:19
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/hits_response.qtpl:19
    WriteFieldsForHits(qb422016, columns, rowIdx)
//line app/vlselect/logsql/hits_response.qtpl:19
    qs422016 := string(qb422016.B)
//line app/vlselect/logsql/hits_response.qtpl:19
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/hits_response.qtpl:19
    return qs422016
//line app/vlselect/logsql/hits_response.qtpl:19
}

//line app/vlselect/logsql/hits_response.qtpl:21
func StreamHitsSeries(qw422016 *qt422016.Writer, m map[string]*hitsSeries) {
//line app/vlselect/logsql/hits_response.qtpl:21
    qw422016.N().S(`{`)
//line app/vlselect/logsql/hits_response.qtpl:24
    sortedKeys := make([]string, 0, len(m))
    for k := range m {
        sortedKeys = append(sortedKeys, k)
    }
    slices.Sort(sortedKeys)

//line app/vlselect/logsql/hits_response.qtpl:29
    qw422016.N().S(`"hits":[`)
//line app/vlselect/logsql/hits_response.qtpl:31
    if len(sortedKeys) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:32
        streamhitsSeriesLine(qw422016, m, sortedKeys[0])
//line app/vlselect/logsql/hits_response.qtpl:33
        for _, k := range sortedKeys[1:] {
//line app/vlselect/logsql/hits_response.qtpl:33
            qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:34
            streamhitsSeriesLine(qw422016, m, k)
//line app/vlselect/logsql/hits_response.qtpl:35
        }
//line app/vlselect/logsql/hits_response.qtpl:36
    }
//line app/vlselect/logsql/hits_response.qtpl:36
    qw422016.N().S(`]}`)
//line app/vlselect/logsql/hits_response.qtpl:39
}

//line app/vlselect/logsql/hits_response.qtpl:39
func WriteHitsSeries(qq422016 qtio422016.Writer, m map[string]*hitsSeries) {
//line app/vlselect/logsql/hits_response.qtpl:39
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/hits_response.qtpl:39
    StreamHitsSeries(qw422016, m)
//line app/vlselect/logsql/hits_response.qtpl:39
    qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/hits_response.qtpl:39
}

//line app/vlselect/logsql/hits_response.qtpl:39
func HitsSeries(m map[string]*hitsSeries) string {
//line app/vlselect/logsql/hits_response.qtpl:39
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/hits_response.qtpl:39
    WriteHitsSeries(qb422016, m)
//line app/vlselect/logsql/hits_response.qtpl:39
    qs422016 := string(qb422016.B)
//line app/vlselect/logsql/hits_response.qtpl:39
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/hits_response.qtpl:39
    return qs422016
//line app/vlselect/logsql/hits_response.qtpl:39
}

//line app/vlselect/logsql/hits_response.qtpl:41
func streamhitsSeriesLine(qw422016 *qt422016.Writer, m map[string]*hitsSeries, k string) {
//line app/vlselect/logsql/hits_response.qtpl:41
    qw422016.N().S(`{`)
//line app/vlselect/logsql/hits_response.qtpl:44
    hs := m[k]
    hs.sort()
    timestamps := hs.timestamps
    hits := hs.hits

//line app/vlselect/logsql/hits_response.qtpl:48
    qw422016.N().S(`"fields":`)
//line app/vlselect/logsql/hits_response.qtpl:49
    qw422016.N().S(k)
//line app/vlselect/logsql/hits_response.qtpl:49
    qw422016.N().S(`,"timestamps":[`)
//line app/vlselect/logsql/hits_response.qtpl:51
    if len(timestamps) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:52
        qw422016.N().Q(timestamps[0])
//line app/vlselect/logsql/hits_response.qtpl:53
        for _, ts := range timestamps[1:] {
//line app/vlselect/logsql/hits_response.qtpl:53
            qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:54
            qw422016.N().Q(ts)
//line app/vlselect/logsql/hits_response.qtpl:55
        }
//line app/vlselect/logsql/hits_response.qtpl:56
    }
//line app/vlselect/logsql/hits_response.qtpl:56
    qw422016.N().S(`],"values":[`)
//line app/vlselect/logsql/hits_response.qtpl:59
    if len(hits) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:60
        qw422016.N().DUL(hits[0])
//line app/vlselect/logsql/hits_response.qtpl:61
        for _, v := range hits[1:] {
//line app/vlselect/logsql/hits_response.qtpl:61
            qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:62
            qw422016.N().DUL(v)
//line app/vlselect/logsql/hits_response.qtpl:63
        }
//line app/vlselect/logsql/hits_response.qtpl:64
    }
//line app/vlselect/logsql/hits_response.qtpl:64
    qw422016.N().S(`],"total":`)
//line app/vlselect/logsql/hits_response.qtpl:66
    qw422016.N().DUL(hs.hitsTotal)
//line app/vlselect/logsql/hits_response.qtpl:66
    qw422016.N().S(`}`)
//line app/vlselect/logsql/hits_response.qtpl:68
}

//line app/vlselect/logsql/hits_response.qtpl:68
func writehitsSeriesLine(qq422016 qtio422016.Writer, m map[string]*hitsSeries, k string) {
//line app/vlselect/logsql/hits_response.qtpl:68
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/hits_response.qtpl:68
    streamhitsSeriesLine(qw422016, m, k)
//line app/vlselect/logsql/hits_response.qtpl:68
    qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/hits_response.qtpl:68
}

//line app/vlselect/logsql/hits_response.qtpl:68
func hitsSeriesLine(m map[string]*hitsSeries, k string) string {
//line app/vlselect/logsql/hits_response.qtpl:68
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/hits_response.qtpl:68
    writehitsSeriesLine(qb422016, m, k)
//line app/vlselect/logsql/hits_response.qtpl:68
    qs422016 := string(qb422016.B)
//line app/vlselect/logsql/hits_response.qtpl:68
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/hits_response.qtpl:68
    return qs422016
//line app/vlselect/logsql/hits_response.qtpl:68
}

771 app/vlselect/logsql/logsql.go Normal file
@@ -0,0 +1,771 @@
package logsql

import (
    "context"
    "fmt"
    "math"
    "net/http"
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/VictoriaMetrics/metrics"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

// ProcessHitsRequest handles /select/logsql/hits request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Obtain step
    stepStr := r.FormValue("step")
    if stepStr == "" {
        stepStr = "1d"
    }
    step, err := promutils.ParseDuration(stepStr)
    if err != nil {
        httpserver.Errorf(w, r, "cannot parse 'step' arg: %s", err)
        return
    }
    if step <= 0 {
        httpserver.Errorf(w, r, "'step' must be bigger than zero")
        return
    }

    // Obtain offset
    offsetStr := r.FormValue("offset")
    if offsetStr == "" {
        offsetStr = "0s"
    }
    offset, err := promutils.ParseDuration(offsetStr)
    if err != nil {
        httpserver.Errorf(w, r, "cannot parse 'offset' arg: %s", err)
        return
    }

    // Obtain field entries
    fields := r.Form["field"]

    // Obtain limit on the number of top fields entries.
    fieldsLimit, err := httputils.GetInt(r, "fields_limit")
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }
    if fieldsLimit < 0 {
        fieldsLimit = 0
    }

    // Prepare the query for hits count.
    q.Optimize()
    q.DropAllPipes()
    q.AddCountByTimePipe(int64(step), int64(offset), fields)

    var mLock sync.Mutex
    m := make(map[string]*hitsSeries)
    writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
        if len(columns) == 0 || len(columns[0].Values) == 0 {
            return
        }

        timestampValues := columns[0].Values
        hitsValues := columns[len(columns)-1].Values
        columns = columns[1 : len(columns)-1]

        bb := blockResultPool.Get()
        for i := range timestamps {
            timestampStr := strings.Clone(timestampValues[i])
            hitsStr := strings.Clone(hitsValues[i])
            hits, err := strconv.ParseUint(hitsStr, 10, 64)
            if err != nil {
                logger.Panicf("BUG: cannot parse hitsStr=%q: %s", hitsStr, err)
            }

            bb.Reset()
            WriteFieldsForHits(bb, columns, i)

            mLock.Lock()
            hs, ok := m[string(bb.B)]
            if !ok {
                k := string(bb.B)
                hs = &hitsSeries{}
                m[k] = hs
            }
            hs.timestamps = append(hs.timestamps, timestampStr)
            hs.hits = append(hs.hits, hits)
            hs.hitsTotal += hits
            mLock.Unlock()
        }
        blockResultPool.Put(bb)
    }

    // Execute the query
    if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
        httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
        return
    }

    m = getTopHitsSeries(m, fieldsLimit)

    // Write response
    w.Header().Set("Content-Type", "application/json")
    WriteHitsSeries(w, m)
}

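The parameters parsed above (`query`, `step`, `offset`, `field`, `fields_limit`) arrive as ordinary HTTP form values, so the handler can be exercised with a plain `curl` call (the address and the query below are illustrative):

```
curl http://localhost:9428/select/logsql/hits -d 'query=error' -d 'step=1h' -d 'field=level' -d 'fields_limit=5'
```
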
func getTopHitsSeries(m map[string]*hitsSeries, fieldsLimit int) map[string]*hitsSeries {
    if fieldsLimit <= 0 || fieldsLimit >= len(m) {
        return m
    }

    type fieldsHits struct {
        fieldsStr string
        hs        *hitsSeries
    }
    a := make([]fieldsHits, 0, len(m))
    for fieldsStr, hs := range m {
        a = append(a, fieldsHits{
            fieldsStr: fieldsStr,
            hs:        hs,
        })
    }
    sort.Slice(a, func(i, j int) bool {
        return a[i].hs.hitsTotal > a[j].hs.hitsTotal
    })

    hitsOther := make(map[string]uint64)
    for _, x := range a[fieldsLimit:] {
        for i, timestampStr := range x.hs.timestamps {
            hitsOther[timestampStr] += x.hs.hits[i]
        }
    }
    var hsOther hitsSeries
    for timestampStr, hits := range hitsOther {
        hsOther.timestamps = append(hsOther.timestamps, timestampStr)
        hsOther.hits = append(hsOther.hits, hits)
        hsOther.hitsTotal += hits
    }

    mNew := make(map[string]*hitsSeries, fieldsLimit+1)
    for _, x := range a[:fieldsLimit] {
        mNew[x.fieldsStr] = x.hs
    }
    mNew["{}"] = &hsOther

    return mNew
}

type hitsSeries struct {
    hitsTotal  uint64
    timestamps []string
    hits       []uint64
}

func (hs *hitsSeries) sort() {
    sort.Sort(hs)
}

func (hs *hitsSeries) Len() int {
    return len(hs.timestamps)
}

func (hs *hitsSeries) Swap(i, j int) {
    hs.timestamps[i], hs.timestamps[j] = hs.timestamps[j], hs.timestamps[i]
    hs.hits[i], hs.hits[j] = hs.hits[j], hs.hits[i]
}

func (hs *hitsSeries) Less(i, j int) bool {
    return hs.timestamps[i] < hs.timestamps[j]
}

// ProcessFieldNamesRequest handles /select/logsql/field_names request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-names
func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Obtain field names for the given query
    q.Optimize()
    fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
    if err != nil {
        httpserver.Errorf(w, r, "cannot obtain field names: %s", err)
        return
    }

    // Write results
    w.Header().Set("Content-Type", "application/json")
    WriteValuesWithHitsJSON(w, fieldNames)
}

// ProcessFieldValuesRequest handles /select/logsql/field_values request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-values
func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Parse fieldName query arg
    fieldName := r.FormValue("field")
    if fieldName == "" {
        httpserver.Errorf(w, r, "missing 'field' query arg")
        return
    }

    // Parse limit query arg
    limit, err := httputils.GetInt(r, "limit")
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }
    if limit < 0 {
        limit = 0
    }

    // Obtain unique values for the given field
    q.Optimize()
    values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
    if err != nil {
        httpserver.Errorf(w, r, "cannot obtain values for field %q: %s", fieldName, err)
        return
    }

    // Write results
    w.Header().Set("Content-Type", "application/json")
    WriteValuesWithHitsJSON(w, values)
}

// ProcessStreamFieldNamesRequest processes /select/logsql/stream_field_names request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-field-names
func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Obtain stream field names for the given query
    q.Optimize()
    names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
    if err != nil {
        httpserver.Errorf(w, r, "cannot obtain stream field names: %s", err)
        return
    }

    // Write results
    w.Header().Set("Content-Type", "application/json")
    WriteValuesWithHitsJSON(w, names)
}

// ProcessStreamFieldValuesRequest processes /select/logsql/stream_field_values request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-field-values
func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Parse fieldName query arg
    fieldName := r.FormValue("field")
    if fieldName == "" {
        httpserver.Errorf(w, r, "missing 'field' query arg")
        return
    }

    // Parse limit query arg
    limit, err := httputils.GetInt(r, "limit")
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }
    if limit < 0 {
        limit = 0
    }

    // Obtain stream field values for the given query and the given fieldName
    q.Optimize()
    values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
    if err != nil {
        httpserver.Errorf(w, r, "cannot obtain stream field values: %s", err)
        return
    }

    // Write results
    w.Header().Set("Content-Type", "application/json")
    WriteValuesWithHitsJSON(w, values)
}

// ProcessStreamIDsRequest processes /select/logsql/stream_ids request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream_ids
func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Parse limit query arg
    limit, err := httputils.GetInt(r, "limit")
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }
    if limit < 0 {
        limit = 0
    }

    // Obtain streamIDs for the given query
    q.Optimize()
    streamIDs, err := vlstorage.GetStreamIDs(ctx, tenantIDs, q, uint64(limit))
    if err != nil {
        httpserver.Errorf(w, r, "cannot obtain stream_ids: %s", err)
        return
    }

    // Write results
    w.Header().Set("Content-Type", "application/json")
    WriteValuesWithHitsJSON(w, streamIDs)
}

// ProcessStreamsRequest processes /select/logsql/streams request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-streams
func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
    q, tenantIDs, err := parseCommonArgs(r)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }

    // Parse limit query arg
    limit, err := httputils.GetInt(r, "limit")
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
        return
    }
    if limit < 0 {
        limit = 0
    }

    // Obtain streams for the given query
    q.Optimize()
    streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
    if err != nil {
        httpserver.Errorf(w, r, "cannot obtain streams: %s", err)
        return
    }

    // Write results
    w.Header().Set("Content-Type", "application/json")
    WriteValuesWithHitsJSON(w, streams)
}

// ProcessLiveTailRequest processes live tailing request to /select/logsql/tail
func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	liveTailRequests.Inc()
	defer liveTailRequests.Dec()

	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if !q.CanLiveTail() {
		httpserver.Errorf(w, r, "the query [%s] cannot be used in live tailing; see https://docs.victoriametrics.com/victorialogs/querying/#live-tailing for details", q)
		return
	}
	q.Optimize()

	refreshIntervalMsecs, err := httputils.GetDuration(r, "refresh_interval", 1000)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	refreshInterval := time.Millisecond * time.Duration(refreshIntervalMsecs)

	ctxWithCancel, cancel := context.WithCancel(ctx)
	// Release the derived context when the handler returns.
	defer cancel()
	tp := newTailProcessor(cancel)

	ticker := time.NewTicker(refreshInterval)
	defer ticker.Stop()

	end := time.Now().UnixNano()
	doneCh := ctxWithCancel.Done()
	flusher, ok := w.(http.Flusher)
	if !ok {
		logger.Panicf("BUG: it is expected that http.ResponseWriter (%T) supports http.Flusher interface", w)
	}
	for {
		start := end - tailOffsetNsecs
		end = time.Now().UnixNano()

		qCopy := q.Clone()
		qCopy.AddTimeFilter(start, end)
		if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, qCopy, tp.writeBlock); err != nil {
			httpserver.Errorf(w, r, "cannot execute tail query [%s]: %s", q, err)
			return
		}
		resultRows, err := tp.getTailRows()
		if err != nil {
			httpserver.Errorf(w, r, "cannot get tail results for query [%q]: %s", q, err)
			return
		}
		if len(resultRows) > 0 {
			WriteJSONRows(w, resultRows)
			flusher.Flush()
		}

		select {
		case <-doneCh:
			return
		case <-ticker.C:
		}
	}
}

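Since the handler flushes newline-delimited JSON rows after each polling round, a client can consume the stream line by line. A minimal sketch, assuming a VictoriaLogs instance on localhost:9428 and an arbitrary filter query:

// A hedged sketch of consuming /select/logsql/tail.
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("query", `error`)          // hypothetical LogsQL filter
	params.Set("refresh_interval", "1s")  // poll once per second

	resp, err := http.Get("http://localhost:9428/select/logsql/tail?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each flushed row is a single JSON object on its own line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
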
var liveTailRequests = metrics.NewCounter(`vl_live_tailing_requests`)

// tailOffsetNsecs is how far behind the current time live tailing queries look
// (5 seconds), leaving room for recently ingested logs to become searchable.
const tailOffsetNsecs = 5e9

type logRow struct {
	timestamp int64
	fields    []logstorage.Field
}

func sortLogRows(rows []logRow) {
	sort.SliceStable(rows, func(i, j int) bool {
		return rows[i].timestamp < rows[j].timestamp
	})
}

type tailProcessor struct {
	cancel func()

	mu sync.Mutex

	perStreamRows  map[string][]logRow
	lastTimestamps map[string]int64

	err error
}

func newTailProcessor(cancel func()) *tailProcessor {
	return &tailProcessor{
		cancel: cancel,

		perStreamRows:  make(map[string][]logRow),
		lastTimestamps: make(map[string]int64),
	}
}

func (tp *tailProcessor) writeBlock(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
	if len(timestamps) == 0 {
		return
	}

	tp.mu.Lock()
	defer tp.mu.Unlock()

	if tp.err != nil {
		return
	}

	// Make sure columns contain _time field, since it is needed for proper tail work.
	hasTime := false
	for _, c := range columns {
		if c.Name == "_time" {
			hasTime = true
			break
		}
	}
	if !hasTime {
		tp.err = fmt.Errorf("missing _time field")
		tp.cancel()
		return
	}

	// Copy block rows to tp.perStreamRows
	for i, timestamp := range timestamps {
		streamID := ""
		fields := make([]logstorage.Field, len(columns))
		for j, c := range columns {
			// Clone the name and value, since the underlying block memory
			// may be reused after writeBlock returns.
			name := strings.Clone(c.Name)
			value := strings.Clone(c.Values[i])

			fields[j] = logstorage.Field{
				Name:  name,
				Value: value,
			}

			if name == "_stream_id" {
				streamID = value
			}
		}

		tp.perStreamRows[streamID] = append(tp.perStreamRows[streamID], logRow{
			timestamp: timestamp,
			fields:    fields,
		})
	}
}

// getTailRows must be called only after RunQuery has returned, so no
// concurrent writeBlock calls are in flight and no locking is needed here.
func (tp *tailProcessor) getTailRows() ([][]logstorage.Field, error) {
	if tp.err != nil {
		return nil, tp.err
	}

	var resultRows []logRow
	for streamID, rows := range tp.perStreamRows {
		sortLogRows(rows)

		lastTimestamp, ok := tp.lastTimestamps[streamID]
		if ok {
			// Skip already written rows
			for len(rows) > 0 && rows[0].timestamp <= lastTimestamp {
				rows = rows[1:]
			}
		}
		if len(rows) > 0 {
			resultRows = append(resultRows, rows...)
			tp.lastTimestamps[streamID] = rows[len(rows)-1].timestamp
		}
	}
	clear(tp.perStreamRows)

	sortLogRows(resultRows)

	tailRows := make([][]logstorage.Field, len(resultRows))
	for i, row := range resultRows {
		tailRows[i] = row.fields
	}

	return tailRows, nil
}

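The per-stream lastTimestamps map is what prevents re-emitting rows that fall into the 5-second overlap between consecutive polling windows. A test-style sketch of that behavior, which would live in the same package; the field values are made up and the BlockColumn layout is assumed from its use in writeBlock above:

// exampleTailDedup illustrates that a row delivered twice across overlapping
// polling windows is only emitted once.
func exampleTailDedup() {
	tp := newTailProcessor(func() {})
	cols := []logstorage.BlockColumn{
		{Name: "_time", Values: []string{"2024-01-01T00:00:00Z"}},
		{Name: "_msg", Values: []string{"hello"}},
	}

	tp.writeBlock(0, []int64{1e9}, cols)
	rows, _ := tp.getTailRows()
	fmt.Println(len(rows)) // 1: the row is emitted on the first poll

	tp.writeBlock(0, []int64{1e9}, cols) // the overlapping poll re-delivers it
	rows, _ = tp.getTailRows()
	fmt.Println(len(rows)) // 0: skipped, since 1e9 <= lastTimestamps[""]
}
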
// ProcessQueryRequest handles /select/logsql/query request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#http-api
func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	bw := getBufferedWriter(w)
	defer func() {
		bw.FlushIgnoreErrors()
		putBufferedWriter(bw)
	}()
	w.Header().Set("Content-Type", "application/stream+json")

	if limit > 0 {
		if q.CanReturnLastNResults() {
			rows, err := getLastNQueryResults(ctx, tenantIDs, q, limit)
			if err != nil {
				httpserver.Errorf(w, r, "%s", err)
				return
			}
			bb := blockResultPool.Get()
			b := bb.B
			for i := range rows {
				b = logstorage.MarshalFieldsToJSON(b[:0], rows[i].fields)
				b = append(b, '\n')
				bw.WriteIgnoreErrors(b)
			}
			bb.B = b
			blockResultPool.Put(bb)
			return
		}

		q.AddPipeLimit(uint64(limit))
	}
	q.Optimize()

	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}

		bb := blockResultPool.Get()
		for i := range timestamps {
			WriteJSONRow(bb, columns, i)
		}
		bw.WriteIgnoreErrors(bb.B)
		blockResultPool.Put(bb)
	}

	if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
		httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
		return
	}
}

var blockResultPool bytesutil.ByteBufferPool

type row struct {
	timestamp int64
	fields    []logstorage.Field
}

func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
	limitUpper := 2 * limit
	q.AddPipeLimit(uint64(limitUpper))
	q.Optimize()
	rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
	if err != nil {
		return nil, err
	}
	if len(rows) < limitUpper {
		// Fast path - the requested time range contains up to limitUpper rows.
		rows = getLastNRows(rows, limit)
		return rows, nil
	}

	// Slow path - search for the time range containing up to limitUpper rows.
	//
	// This is a binary search over the start bound: the step d is halved on
	// every iteration, and start moves forward when too many rows match
	// or backward when too few match.
	start, end := q.GetFilterTimeRange()
	d := end/2 - start/2 // (end-start)/2, written so the subtraction cannot overflow int64
	start += d

	qOrig := q
	for {
		q = qOrig.Clone()
		q.AddTimeFilter(start, end)
		rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
		if err != nil {
			return nil, err
		}

		if len(rows) >= limit && len(rows) < limitUpper || d == 0 {
			rows = getLastNRows(rows, limit)
			return rows, nil
		}

		lastBit := d & 1
		d /= 2
		if len(rows) > limit {
			start += d
		} else {
			start -= d + lastBit
		}
	}
}

func getLastNRows(rows []row, limit int) []row {
	sort.Slice(rows, func(i, j int) bool {
		return rows[i].timestamp < rows[j].timestamp
	})
	if len(rows) > limit {
		rows = rows[len(rows)-limit:]
	}
	return rows
}

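A quick same-package illustration of getLastNRows: it sorts by timestamp ascending, then keeps only the newest limit rows. The timestamps below are arbitrary:

// exampleLastN shows that the two newest rows survive, in ascending order.
func exampleLastN() {
	rows := []row{{timestamp: 30}, {timestamp: 10}, {timestamp: 20}}
	rows = getLastNRows(rows, 2)
	fmt.Println(rows[0].timestamp, rows[1].timestamp) // 20 30
}
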
func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
	ctxWithCancel, cancel := context.WithCancel(ctx)
	defer cancel()

	var rows []row
	var rowsLock sync.Mutex
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		rowsLock.Lock()
		defer rowsLock.Unlock()

		for i, timestamp := range timestamps {
			fields := make([]logstorage.Field, len(columns))
			for j := range columns {
				f := &fields[j]
				f.Name = strings.Clone(columns[j].Name)
				f.Value = strings.Clone(columns[j].Values[i])
			}
			rows = append(rows, row{
				timestamp: timestamp,
				fields:    fields,
			})
		}

		// Stop the query early once enough rows are collected. Since blocks
		// arrive concurrently, slightly more than limit rows may accumulate.
		if len(rows) >= limit {
			cancel()
		}
	}
	if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock); err != nil {
		return nil, err
	}

	return rows, nil
}

func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot obtain tenantID: %w", err)
	}
	tenantIDs := []logstorage.TenantID{tenantID}

	// Parse query
	qStr := r.FormValue("query")
	q, err := logstorage.ParseQuery(qStr)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot parse query [%s]: %w", qStr, err)
	}

	// Parse optional start and end args
	start, okStart, err := getTimeNsec(r, "start")
	if err != nil {
		return nil, nil, err
	}
	end, okEnd, err := getTimeNsec(r, "end")
	if err != nil {
		return nil, nil, err
	}
	if okStart || okEnd {
		// If only one of the bounds is given, leave the other side unbounded.
		if !okStart {
			start = math.MinInt64
		}
		if !okEnd {
			end = math.MaxInt64
		}
		q.AddTimeFilter(start, end)
	}

	return q, tenantIDs, nil
}

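Putting parseCommonArgs together: query is required, while start and end are optional and parsed by getTimeNsec below via promutils.ParseTimeAt. A hypothetical request-building fragment, assuming net/url is imported; the address and LogsQL query are illustrative assumptions:

// buildQueryURL builds a hypothetical /select/logsql/query URL exercising
// the arguments parsed above.
func buildQueryURL() string {
	params := url.Values{}
	params.Set("query", `_stream:{app="nginx"} error`) // required
	params.Set("start", "2024-01-01T00:00:00Z")        // optional; omit to leave the lower bound open
	params.Set("end", "2024-01-02T00:00:00Z")          // optional; omit to leave the upper bound open
	return "http://localhost:9428/select/logsql/query?" + params.Encode()
}
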
func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
	s := r.FormValue(argName)
	if s == "" {
		return 0, false, nil
	}
	currentTimestamp := time.Now().UnixNano()
	nsecs, err := promutils.ParseTimeAt(s, currentTimestamp)
	if err != nil {
		return 0, false, fmt.Errorf("cannot parse %s=%s: %w", argName, s, err)
	}
	return nsecs, true, nil
}

32
app/vlselect/logsql/logsql.qtpl
Normal file
@@ -0,0 +1,32 @@
{% import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
) %}

{% stripspace %}

// ValuesWithHitsJSON generates JSON from the given values.
{% func ValuesWithHitsJSON(values []logstorage.ValueWithHits) %}
{
	"values":{%= valuesWithHitsJSONArray(values) %}
}
{% endfunc %}

{% func valuesWithHitsJSONArray(values []logstorage.ValueWithHits) %}
[
	{% if len(values) > 0 %}
		{%= valueWithHitsJSON(values[0]) %}
		{% for _, v := range values[1:] %}
			,{%= valueWithHitsJSON(v) %}
		{% endfor %}
	{% endif %}
]
{% endfunc %}

{% func valueWithHitsJSON(v logstorage.ValueWithHits) %}
{
	"value":{%q= v.Value %},
	"hits":{%dul= v.Hits %}
}
{% endfunc %}

{% endstripspace %}
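Assuming the standard quicktemplate code generation (qtc), which emits a WriteValuesWithHitsJSON(w io.Writer, ...) function for the {% func %} above, and assuming logstorage.ValueWithHits exposes Value and Hits fields as the %q= and %dul= directives suggest, the rendered output would look like this sketch; the sample values are made up:

var buf bytes.Buffer
WriteValuesWithHitsJSON(&buf, []logstorage.ValueWithHits{
	{Value: "GET", Hits: 42},
	{Value: "POST", Hits: 7},
})
fmt.Println(buf.String())
// {"values":[{"value":"GET","hits":42},{"value":"POST","hits":7}]}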