Compare commits


2 Commits

Author          SHA1         Message                                          Date
Haley Wang      7d7d17d192   add changelog                                    2025-02-10 14:08:32 +08:00
Evgeny Kuzin    0a8b4281e5   fix race using the same list from 2 goroutines   2025-02-07 11:55:45 -05:00
6487 changed files with 480823 additions and 933560 deletions


@@ -5,10 +5,10 @@ body:
- type: markdown
attributes:
value: |
Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade)
Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
and verify whether the bug is reproducible there.
It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/victoriametrics/troubleshooting/) first.
It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/troubleshooting/) first.
- type: textarea
id: describe-the-bug
attributes:
@@ -64,8 +64,8 @@ body:
* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176)
See how to setup monitoring here:
* [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#monitoring)
* [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#monitoring)
* [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)
* [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#monitoring)
validations:
required: false
- type: textarea


@@ -5,7 +5,7 @@ body:
- type: textarea
id: describe-the-component
attributes:
label: Is your question related to a specific component?
label: Is your question request related to a specific component?
placeholder: |
VictoriaMetrics, vmagent, vmalert, vmui, etc...
validations:
@@ -24,9 +24,9 @@ body:
label: Troubleshooting docs
description: I am familiar with the following troubleshooting docs
options:
- label: General - https://docs.victoriametrics.com/victoriametrics/troubleshooting/
- label: General - https://docs.victoriametrics.com/troubleshooting/
required: false
- label: vmagent - https://docs.victoriametrics.com/victoriametrics/vmagent/#troubleshooting
- label: vmagent - https://docs.victoriametrics.com/vmagent/#troubleshooting
required: false
- label: vmalert - https://docs.victoriametrics.com/victoriametrics/vmalert/#troubleshooting
- label: vmalert - https://docs.victoriametrics.com/vmalert/#troubleshooting
required: false


@@ -4,8 +4,6 @@ updates:
directory: "/"
schedule:
interval: "daily"
cooldown:
default-days: 21
- package-ecosystem: "gomod"
directory: "/"
schedule:
@@ -25,8 +23,6 @@ updates:
directory: "/"
schedule:
interval: "daily"
cooldown:
default-days: 21
- package-ecosystem: "npm"
directory: "/app/vmui/packages/vmui"
schedule:


@@ -6,5 +6,4 @@ Please provide a brief description of the changes you made. Be as specific as po
The following checks are **mandatory**:
- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development goals](https://docs.victoriametrics.com/victoriametrics/goals/).
- [ ] My change adheres [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).


@@ -1,48 +0,0 @@
#!/usr/bin/env sh
set -e
CHANGELOG_FILE="docs/victoriametrics/changelog/CHANGELOG.md"
GITHUB_BASE_REF=${GITHUB_BASE_REF:-"master"}
GIT_REMOTE=${GIT_REMOTE:-"origin"}
git diff "${GIT_REMOTE}/${GITHUB_BASE_REF}"...HEAD -- $CHANGELOG_FILE > diff.txt
if ! grep -q "^+" diff.txt; then
echo "No additions in CHANGELOG.md"
exit 0
fi
ADDED_LINES=$(grep "^+\S" diff.txt | sed 's/^+//')
START_TIP=$(grep -n "^## tip" "$CHANGELOG_FILE" | head -1 | cut -d: -f1)
if [ -z "$START_TIP" ]; then
echo "ERROR: ${CHANGELOG_FILE} does not contain a ## tip section"
exit 1
fi
END_TIP=$(awk "NR>$START_TIP && /^## / {print NR; exit}" "${CHANGELOG_FILE}")
if [ -z "$END_TIP" ]; then
END_TIP=$(wc -l < "$CHANGELOG_FILE")
fi
BAD=0
while IFS= read -r line; do
# Grep exact line inside the file and get line numbers
MATCHES=$(grep -n -F "$line" "$CHANGELOG_FILE" | cut -d: -f1)
for m in $MATCHES; do
if [ "$m" -lt "$START_TIP" ] || [ "$m" -gt "$END_TIP" ]; then
echo "'$line' on line ${m} is outside ## tip section (lines ${START_TIP}-${END_TIP})"
BAD=1
fi
done
done << EOF
$ADDED_LINES
EOF
if [ "$BAD" -ne 0 ]; then
echo "CHANGELOG modifications must be placed inside the ## tip section."
exit 1
fi
echo "CHANGELOG modifications are valid."


@@ -7,20 +7,16 @@ on:
- master
paths:
- '**.go'
- '**/Dockerfile'
- '**/Dockerfile*' # The trailing * is for app/vmui/Dockerfile-*.
- '**/Makefile'
- '!app/vmui/**'
- '.github/workflows/build.yml'
pull_request:
branches:
- cluster
- master
paths:
- '**.go'
- '**/Dockerfile'
- '**/Dockerfile*' # The trailing * is for app/vmui/Dockerfile-*.
- '**/Makefile'
- '!app/vmui/**'
- '.github/workflows/build.yml'
permissions:
contents: read
@@ -31,51 +27,28 @@ concurrency:
jobs:
build:
name: ${{ matrix.os }}-${{ matrix.arch }}
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- os: linux
arch: 386
- os: linux
arch: amd64
- os: linux
arch: arm64
- os: linux
arch: arm
- os: linux
arch: ppc64le
- os: linux
arch: s390x
- os: darwin
arch: amd64
- os: darwin
arch: arm64
- os: freebsd
arch: amd64
- os: openbsd
arch: amd64
- os: windows
arch: amd64
steps:
- name: Code checkout
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Setup Go
id: go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
cache-dependency-path: |
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
go-version: stable
cache: false
- name: Build victoria-metrics for ${{ matrix.os }}-${{ matrix.arch }}
run: make victoria-metrics-${{ matrix.os }}-${{ matrix.arch }}
- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-crossbuild-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-crossbuild-
- name: Build vmutils for ${{ matrix.os }}-${{ matrix.arch }}
run: make vmutils-${{ matrix.os }}-${{ matrix.arch }}
- name: Run crossbuild
run: make crossbuild
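
The per-platform targets used by the matrix above can be exercised locally as well; a sketch, assuming a working Go toolchain and the repository root as the working directory:

```sh
# Sketch: build one os/arch pair the same way the matrix steps do
# ("make victoria-metrics-${{ matrix.os }}-${{ matrix.arch }}" and
# "make vmutils-${{ matrix.os }}-${{ matrix.arch }}").
make victoria-metrics-linux-arm64 vmutils-linux-arm64
```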


@@ -1,19 +0,0 @@
name: 'changelog-linter'
on:
pull_request:
paths:
- "docs/victoriametrics/changelog/CHANGELOG.md"
jobs:
tip-lint:
runs-on: 'ubuntu-latest'
steps:
- uses: 'actions/checkout@v6'
with:
# needed for proper diff
fetch-depth: 0
- name: 'Validate that changelog changes are under ## tip'
run: |
GITHUB_BASE_REF=${{ github.base_ref }} ./.github/scripts/lint-changelog-tip.sh


@@ -1,47 +0,0 @@
name: check-commit-signed
on:
pull_request:
jobs:
check-commit-signed:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
with:
fetch-depth: 0 # we need full history for commit verification
- name: Check commit signatures
run: |
if [ "${{ github.event_name }}" != "pull_request" ]; then
echo "Not a PR event, skipping signature check"
exit 0
fi
RANGE="${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}"
echo "Checking commits in PR range: $RANGE"
if [ -z "$(git rev-list $RANGE)" ]; then
echo "No new commits in this PR, skipping signature check"
exit 0
fi
# Check raw commit objects for a "gpgsig" header as a fast early signal for
# contributors. Both GPG and SSH signatures use this header.
# This avoids relying on %G? which returns N for SSH commits.
# This check is not a security enforcement — unsigned commits cannot be merged
# anyway due to the GitHub repository merge policy.
unsigned=""
for sha in $(git rev-list $RANGE); do
if ! git cat-file commit "$sha" | grep -q "^gpgsig"; then
unsigned="$unsigned $sha"
fi
done
if [ -n "$unsigned" ]; then
echo "Found unsigned commits:"
echo "$unsigned"
exit 1
fi
echo "All commits in PR are signed (GPG or SSH)"


@@ -19,13 +19,11 @@ jobs:
- name: Setup Go
id: go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
go-version: stable
cache: false
- run: go version
- name: Cache Go artifacts
uses: actions/cache@v4
with:
@@ -34,7 +32,7 @@ jobs:
~/go/pkg/mod
~/go/bin
key: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-
restore-keys: go-artifacts-${{ runner.os }}-check-licenses-
- name: Check License
run: make check-licenses


@@ -29,15 +29,14 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Set up Go
id: go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
cache: false
go-version-file: 'go.mod'
- run: go version
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
@@ -47,17 +46,17 @@ jobs:
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
uses: github/codeql-action/init@v3
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v4
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4
uses: github/codeql-action/analyze@v3
with:
category: 'language:go'


@@ -0,0 +1,46 @@
name: 'CodeQL JS/TS'
on:
push:
branches:
- cluster
- master
paths:
- '**.js'
- '**.ts'
- '**.tsx'
pull_request:
branches:
- cluster
- master
paths:
- '**.js'
- '**.ts'
- '**.tsx'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript-typescript
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: 'language:js/ts'


@@ -1,57 +0,0 @@
name: publish-docs
on:
push:
branches:
- 'master'
paths:
- 'docs/**'
- '.github/workflows/docs.yaml'
workflow_dispatch: {}
permissions:
contents: read # This is required for actions/checkout and to commit back image update
deployments: write
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v6
with:
path: __vm
- name: Checkout private code
uses: actions/checkout@v6
with:
repository: VictoriaMetrics/vmdocs
token: ${{ secrets.VM_BOT_GH_TOKEN }}
path: __vm-docs
- name: Import GPG key
uses: crazy-max/ghaction-import-gpg@v7
id: import-gpg
with:
gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
git_user_signingkey: true
git_commit_gpgsign: true
git_config_global: true
- name: Copy docs
id: update
run: |
find docs -type d -maxdepth 1 -mindepth 1 -exec \
sh -c 'rsync -zarvh --delete {}/ ../__vm-docs/content/$(basename {})/' \;
echo "SHORT_SHA=$(git rev-parse --short $GITHUB_SHA)" >> $GITHUB_OUTPUT
working-directory: __vm
- name: Push to vmdocs
run: |
git config --global user.name "${{ steps.import-gpg.outputs.email }}"
git config --global user.email "${{ steps.import-gpg.outputs.email }}"
if [[ -n $(git status --porcelain) ]]; then
git add .
git commit -S -m "sync docs with VictoriaMetrics/VictoriaMetrics commit: ${{ steps.update.outputs.SHORT_SHA }}"
git push
fi
working-directory: __vm-docs

.github/workflows/main.yml (new file, 122 lines)

@@ -0,0 +1,122 @@
name: main
on:
push:
branches:
- cluster
- master
paths:
- '**.go'
pull_request:
branches:
- cluster
- master
paths:
- '**.go'
permissions:
contents: read
concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
jobs:
lint:
name: lint
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4
- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
cache: false
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-check-all-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-check-all-
- name: Run check-all
run: |
make check-all
git diff --exit-code
test:
name: test
needs: lint
runs-on: ubuntu-latest
strategy:
matrix:
scenario:
- 'test-full'
- 'test-full-386'
- 'test-pure'
steps:
- name: Code checkout
uses: actions/checkout@v4
- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
cache: false
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
- name: Run tests
run: GOGC=10 make ${{ matrix.scenario}}
- name: Publish coverage
uses: codecov/codecov-action@v5
with:
file: ./coverage.txt
integration-test:
name: integration-test
needs: [lint, test]
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4
- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
cache: false
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
- name: Run integration tests
run: make integration-test
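
The matrix scenarios map directly onto Makefile targets, so they can be reproduced locally; a sketch:

```sh
# Sketch: run the same test scenarios the workflow matrix uses.
# GOGC=10 matches the workflow and trades extra GC work for lower
# peak memory on the CI runners.
GOGC=10 make test-full
GOGC=10 make test-pure
```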

.github/workflows/sync-docs.yml (new file, 51 lines)

@@ -0,0 +1,51 @@
name: publish-docs
on:
push:
branches:
- 'master'
paths:
- 'docs/**'
workflow_dispatch: {}
permissions:
contents: read # This is required for actions/checkout and to commit back image update
deployments: write
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4
with:
path: main
- name: Checkout private code
uses: actions/checkout@v4
with:
repository: VictoriaMetrics/vmdocs
token: ${{ secrets.VM_BOT_GH_TOKEN }}
path: docs
- name: Import GPG key
uses: crazy-max/ghaction-import-gpg@v6
with:
gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
git_user_signingkey: true
git_commit_gpgsign: true
workdir: docs
- name: Set short git commit SHA
id: vars
run: |
calculatedSha=$(git rev-parse --short ${{ github.sha }})
echo "short_sha=$calculatedSha" >> $GITHUB_OUTPUT
working-directory: main
- name: update code and commit
run: |
rm -rf content
cp -r ../main/docs content
make clean-after-copy
git config --global user.name "${{ steps.import-gpg.outputs.email }}"
git config --global user.email "${{ steps.import-gpg.outputs.email }}"
git add .
git commit -S -m "sync docs with VictoriaMetrics/VictoriaMetrics commit: ${{ steps.vars.outputs.short_sha }}"
git push
working-directory: docs


@@ -1,111 +0,0 @@
name: test
on:
push:
branches:
- cluster
- master
paths:
- '**.go'
- 'go.*'
- '.github/workflows/main.yml'
pull_request:
branches:
- cluster
- master
paths:
- '**.go'
- 'go.*'
- '.github/workflows/main.yml'
permissions:
contents: read
concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
jobs:
lint:
name: lint
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v6
- name: Setup Go
id: go
uses: actions/setup-go@v6
with:
cache-dependency-path: |
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
- name: Cache golangci-lint
uses: actions/cache@v4
with:
path: |
~/.cache/golangci-lint
~/go/bin
key: golangci-lint-${{ runner.os }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('.golangci.yml') }}
- name: Run check-all
run: |
make check-all
git diff --exit-code
unit:
name: unit
runs-on: ubuntu-latest
strategy:
matrix:
scenario:
- 'test'
- 'test-386'
- 'test-pure'
steps:
- name: Code checkout
uses: actions/checkout@v6
- name: Setup Go
id: go
uses: actions/setup-go@v6
with:
cache-dependency-path: |
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
- name: Run tests
run: make ${{ matrix.scenario}}
apptest:
name: apptest
runs-on: apptest
steps:
- name: Code checkout
uses: actions/checkout@v6
- name: Setup Go
id: go
uses: actions/setup-go@v6
with:
cache-dependency-path: |
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
- name: Run app tests
run: make apptest


@@ -1,88 +0,0 @@
name: vmui
on:
push:
branches:
- cluster
- master
paths:
- 'app/vmui/packages/vmui/**'
- '.github/workflows/vmui.yml'
pull_request:
branches:
- cluster
- master
paths:
- 'app/vmui/packages/vmui/**'
- '.github/workflows/vmui.yml'
permissions:
contents: read
packages: read
pull-requests: read
checks: write
concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
jobs:
vmui-checks:
name: VMUI Checks (lint, test, typecheck)
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v6
- name: Cache node_modules
id: cache
uses: actions/cache@v5
with:
path: app/vmui/packages/vmui/node_modules
key: vmui-deps-${{ runner.os }}-${{ hashFiles('app/vmui/packages/vmui/package-lock.json', 'app/vmui/Dockerfile-build') }}
restore-keys: |
vmui-deps-${{ runner.os }}-
- name: Install dependencies
if: steps.cache.outputs.cache-hit != 'true'
run: make vmui-install
- name: Run lint
id: lint
run: make vmui-lint
continue-on-error: true
env:
VMUI_SKIP_INSTALL: true
- name: Run tests
id: test
run: make vmui-test
continue-on-error: true
env:
VMUI_SKIP_INSTALL: true
- name: Run typecheck
id: typecheck
run: make vmui-typecheck
continue-on-error: true
env:
VMUI_SKIP_INSTALL: true
- name: Annotate Code Linting Results
uses: ataylorme/eslint-annotate-action@v3
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
report-json: app/vmui/packages/vmui/vmui-lint-report.json
- name: Check overall status
run: |
echo "Lint status: ${{ steps.lint.outcome }}"
echo "Test status: ${{ steps.test.outcome }}"
echo "Typecheck status: ${{ steps.typecheck.outcome }}"
if [[ "${{ steps.lint.outcome }}" == "failure" || "${{ steps.test.outcome }}" == "failure" || "${{ steps.typecheck.outcome }}" == "failure" ]]; then
echo "One or more checks failed"
exit 1
else
echo "All checks passed"
fi
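
The three checks can be reproduced locally with the same Makefile targets; a sketch:

```sh
# Sketch: install VMUI dependencies once, then run lint, tests and
# typecheck with VMUI_SKIP_INSTALL=true, as the workflow steps do.
make vmui-install
VMUI_SKIP_INSTALL=true make vmui-lint
VMUI_SKIP_INSTALL=true make vmui-test
VMUI_SKIP_INSTALL=true make vmui-typecheck
```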

.gitignore (2 lines changed)

@@ -12,7 +12,6 @@
/victoria-logs-data
/victoria-metrics-data
/vmagent-remotewrite-data
/vlagent-remotewrite-data
/vmstorage-data
/vmselect-cache
/package/temp-deb-*
@@ -28,4 +27,3 @@ _site
coverage.txt
cspell.json
*~
deployment/docker/provisioning/plugins/


@@ -1,29 +1,22 @@
version: "2"
run:
timeout: 2m
linters:
settings:
errcheck:
exclude-functions:
- fmt.Fprintf
- fmt.Fprint
- (net/http.ResponseWriter).Write
exclusions:
generated: lax
presets:
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- staticcheck
text: 'SA(4003|1019|5011):'
paths:
- third_party$
- builtin$
- examples$
formatters:
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
enable:
- revive
issues:
exclude-rules:
- linters:
- staticcheck
text: "SA(4003|1019|5011):"
include:
- EXC0012
- EXC0014
linters-settings:
errcheck:
exclude-functions:
- "fmt.Fprintf"
- "fmt.Fprint"
- "(net/http.ResponseWriter).Write"


@@ -1 +1 @@
The document has been moved [here](https://docs.victoriametrics.com/victoriametrics/contributing/).
The document has been moved [here](https://docs.victoriametrics.com/contributing/).


@@ -175,7 +175,7 @@
END OF TERMS AND CONDITIONS
Copyright 2019-2026 VictoriaMetrics, Inc.
Copyright 2019-2025 VictoriaMetrics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile (266 lines changed)

@@ -5,32 +5,29 @@ MAKE_PARALLEL := $(MAKE) -j $(MAKE_CONCURRENCY)
DATEINFO_TAG ?= $(shell date -u +'%Y%m%d-%H%M%S')
BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | openssl sha1 | cut -d' ' -f2 | cut -c 1-8)))
LATEST_TAG ?= latest
PKG_TAG ?= $(shell git tag -l --points-at HEAD)
ifeq ($(PKG_TAG),)
PKG_TAG := $(BUILDINFO_TAG)
endif
EXTRA_DOCKER_TAG_SUFFIX ?=
EXTRA_GO_BUILD_TAGS ?=
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
TAR_OWNERSHIP ?= --owner=1000 --group=1000
GOLANGCI_LINT_VERSION := 2.9.0
.PHONY: $(MAKECMDGOALS)
include app/*/Makefile
include codespell/Makefile
include cspell/Makefile
include docs/Makefile
include deployment/*/Makefile
include dashboards/Makefile
include package/release/Makefile
include benchmarks/Makefile
all: \
victoria-metrics-prod \
victoria-logs-prod \
vlogscli-prod \
vmagent-prod \
vmalert-prod \
vmalert-tool-prod \
@@ -54,6 +51,8 @@ publish: \
package: \
package-victoria-metrics \
package-victoria-logs \
package-vlogscli \
package-vmagent \
package-vmalert \
package-vmalert-tool \
@@ -125,15 +124,6 @@ vmutils-linux-ppc64le: \
vmrestore-linux-ppc64le \
vmctl-linux-ppc64le
vmutils-linux-s390x: \
vmagent-linux-s390x \
vmalert-linux-s390x \
vmalert-tool-linux-s390x \
vmauth-linux-s390x \
vmbackup-linux-s390x \
vmrestore-linux-s390x \
vmctl-linux-s390x
vmutils-darwin-amd64: \
vmagent-darwin-amd64 \
vmalert-darwin-amd64 \
@@ -179,11 +169,9 @@ vmutils-windows-amd64: \
vmrestore-windows-amd64 \
vmctl-windows-amd64
# When adding a new crossbuild target, please also add it to the .github/workflows/build.yml
crossbuild:
$(MAKE_PARALLEL) victoria-metrics-crossbuild vmutils-crossbuild
# When adding a new crossbuild target, please also add it to the .github/workflows/build.yml
victoria-metrics-crossbuild: \
victoria-metrics-linux-386 \
victoria-metrics-linux-amd64 \
@@ -196,7 +184,6 @@ victoria-metrics-crossbuild: \
victoria-metrics-openbsd-amd64 \
victoria-metrics-windows-amd64
# When adding a new crossbuild target, please also add it to the .github/workflows/build.yml
vmutils-crossbuild: \
vmutils-linux-386 \
vmutils-linux-amd64 \
@@ -209,52 +196,12 @@ vmutils-crossbuild: \
vmutils-openbsd-amd64 \
vmutils-windows-amd64
publish-final-images:
PKG_TAG=$(TAG) APP_NAME=victoria-metrics $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmagent $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmalert $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmalert-tool $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmauth $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmbackup $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmrestore $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) APP_NAME=vmctl $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=victoria-metrics $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmagent $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmalert $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmauth $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackup $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmrestore $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmgateway $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackupmanager $(MAKE) publish-via-docker-from-rc && \
PKG_TAG=$(TAG) $(MAKE) publish-latest
publish-latest:
PKG_TAG=$(TAG) APP_NAME=victoria-metrics $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmagent $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmalert $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmalert-tool $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmauth $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmbackup $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmrestore $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG) APP_NAME=vmctl $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG)-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG)-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG)-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-latest && \
PKG_TAG=$(TAG)-enterprise APP_NAME=vmgateway $(MAKE) publish-via-docker-latest
PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackupmanager $(MAKE) publish-via-docker-latest
publish-release:
rm -rf bin/*
git checkout $(TAG) && $(MAKE) release && $(MAKE) publish && \
git checkout $(TAG)-cluster && $(MAKE) release && $(MAKE) publish && \
git checkout $(TAG)-enterprise && $(MAKE) release && $(MAKE) publish && \
git checkout $(TAG)-enterprise-cluster && $(MAKE) release && $(MAKE) publish
git checkout $(TAG) && $(MAKE) release && LATEST_TAG=stable $(MAKE) publish && \
git checkout $(TAG)-cluster && $(MAKE) release && LATEST_TAG=cluster-stable $(MAKE) publish && \
git checkout $(TAG)-enterprise && $(MAKE) release && LATEST_TAG=enterprise-stable $(MAKE) publish && \
git checkout $(TAG)-enterprise-cluster && $(MAKE) release && LATEST_TAG=enterprise-cluster-stable $(MAKE) publish
release:
$(MAKE_PARALLEL) \
@@ -266,7 +213,6 @@ release-victoria-metrics: \
release-victoria-metrics-linux-amd64 \
release-victoria-metrics-linux-arm \
release-victoria-metrics-linux-arm64 \
release-victoria-metrics-linux-s390x \
release-victoria-metrics-darwin-amd64 \
release-victoria-metrics-darwin-arm64 \
release-victoria-metrics-freebsd-amd64 \
@@ -285,9 +231,6 @@ release-victoria-metrics-linux-arm:
release-victoria-metrics-linux-arm64:
GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-metrics-goos-goarch
release-victoria-metrics-linux-s390x:
GOOS=linux GOARCH=s390x $(MAKE) release-victoria-metrics-goos-goarch
release-victoria-metrics-darwin-amd64:
GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch
@@ -322,12 +265,133 @@ release-victoria-metrics-windows-goarch: victoria-metrics-windows-$(GOARCH)-prod
cd bin && rm -rf \
victoria-metrics-windows-$(GOARCH)-prod.exe
release-victoria-logs-bundle: \
release-victoria-logs \
release-vlogscli
publish-victoria-logs-bundle: \
publish-victoria-logs \
publish-vlogscli
release-victoria-logs:
$(MAKE_PARALLEL) release-victoria-logs-linux-386 \
release-victoria-logs-linux-amd64 \
release-victoria-logs-linux-arm \
release-victoria-logs-linux-arm64 \
release-victoria-logs-darwin-amd64 \
release-victoria-logs-darwin-arm64 \
release-victoria-logs-freebsd-amd64 \
release-victoria-logs-openbsd-amd64 \
release-victoria-logs-windows-amd64
release-victoria-logs-linux-386:
GOOS=linux GOARCH=386 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-linux-amd64:
GOOS=linux GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-linux-arm:
GOOS=linux GOARCH=arm $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-linux-arm64:
GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-darwin-amd64:
GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-darwin-arm64:
GOOS=darwin GOARCH=arm64 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-freebsd-amd64:
GOOS=freebsd GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-openbsd-amd64:
GOOS=openbsd GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
release-victoria-logs-windows-amd64:
GOARCH=amd64 $(MAKE) release-victoria-logs-windows-goarch
release-victoria-logs-goos-goarch: victoria-logs-$(GOOS)-$(GOARCH)-prod
cd bin && \
tar $(TAR_OWNERSHIP) --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
victoria-logs-$(GOOS)-$(GOARCH)-prod \
&& sha256sum victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
victoria-logs-$(GOOS)-$(GOARCH)-prod \
| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf victoria-logs-$(GOOS)-$(GOARCH)-prod
release-victoria-logs-windows-goarch: victoria-logs-windows-$(GOARCH)-prod
cd bin && \
zip victoria-logs-windows-$(GOARCH)-$(PKG_TAG).zip \
victoria-logs-windows-$(GOARCH)-prod.exe \
&& sha256sum victoria-logs-windows-$(GOARCH)-$(PKG_TAG).zip \
victoria-logs-windows-$(GOARCH)-prod.exe \
> victoria-logs-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf \
victoria-logs-windows-$(GOARCH)-prod.exe
release-vlogscli:
$(MAKE_PARALLEL) release-vlogscli-linux-386 \
release-vlogscli-linux-amd64 \
release-vlogscli-linux-arm \
release-vlogscli-linux-arm64 \
release-vlogscli-darwin-amd64 \
release-vlogscli-darwin-arm64 \
release-vlogscli-freebsd-amd64 \
release-vlogscli-openbsd-amd64 \
release-vlogscli-windows-amd64
release-vlogscli-linux-386:
GOOS=linux GOARCH=386 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-linux-amd64:
GOOS=linux GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-linux-arm:
GOOS=linux GOARCH=arm $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-linux-arm64:
GOOS=linux GOARCH=arm64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-darwin-amd64:
GOOS=darwin GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-darwin-arm64:
GOOS=darwin GOARCH=arm64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-freebsd-amd64:
GOOS=freebsd GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-openbsd-amd64:
GOOS=openbsd GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-windows-amd64:
GOARCH=amd64 $(MAKE) release-vlogscli-windows-goarch
release-vlogscli-goos-goarch: vlogscli-$(GOOS)-$(GOARCH)-prod
cd bin && \
tar $(TAR_OWNERSHIP) --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf vlogscli-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
vlogscli-$(GOOS)-$(GOARCH)-prod \
&& sha256sum vlogscli-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
vlogscli-$(GOOS)-$(GOARCH)-prod \
| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > vlogscli-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf vlogscli-$(GOOS)-$(GOARCH)-prod
release-vlogscli-windows-goarch: vlogscli-windows-$(GOARCH)-prod
cd bin && \
zip vlogscli-windows-$(GOARCH)-$(PKG_TAG).zip \
vlogscli-windows-$(GOARCH)-prod.exe \
&& sha256sum vlogscli-windows-$(GOARCH)-$(PKG_TAG).zip \
vlogscli-windows-$(GOARCH)-prod.exe \
> vlogscli-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf \
vlogscli-windows-$(GOARCH)-prod.exe
release-vmutils: \
release-vmutils-linux-386 \
release-vmutils-linux-amd64 \
release-vmutils-linux-arm64 \
release-vmutils-linux-arm \
release-vmutils-linux-s390x \
release-vmutils-darwin-amd64 \
release-vmutils-darwin-arm64 \
release-vmutils-freebsd-amd64 \
@@ -346,9 +410,6 @@ release-vmutils-linux-arm64:
release-vmutils-linux-arm:
GOOS=linux GOARCH=arm $(MAKE) release-vmutils-goos-goarch
release-vmutils-linux-s390x:
GOOS=linux GOARCH=s390x $(MAKE) release-vmutils-goos-goarch
release-vmutils-darwin-amd64:
GOOS=darwin GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch
@@ -435,7 +496,7 @@ release-vmutils-windows-goarch: \
vmctl-windows-$(GOARCH)-prod.exe
pprof-cpu:
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics $(PPROF_FILE)
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)
fmt:
gofmt -l -w -s ./lib
@@ -443,7 +504,7 @@ fmt:
gofmt -l -w -s ./apptest
vet:
go vet -tags 'synctest' ./lib/...
go vet ./lib/...
go vet ./app/...
go vet ./apptest/...
@@ -452,82 +513,61 @@ check-all: fmt vet golangci-lint govulncheck
clean-checkers: remove-golangci-lint remove-govulncheck
test:
go test -tags 'synctest' ./lib/... ./app/...
go test ./lib/... ./app/...
test-race:
go test -tags 'synctest' -race ./lib/... ./app/...
test-386:
GOARCH=386 go test -tags 'synctest' ./lib/... ./app/...
go test -race ./lib/... ./app/...
test-pure:
CGO_ENABLED=0 go test -tags 'synctest' ./lib/... ./app/...
CGO_ENABLED=0 go test ./lib/... ./app/...
test-full:
go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
test-full-386:
GOARCH=386 go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
apptest:
$(MAKE) victoria-metrics-race vmagent-race vmalert-race vmauth-race vmctl-race vmbackup-race vmrestore-race
go test ./apptest/... -skip="^Test(Cluster|Legacy).*"
apptest-legacy: victoria-metrics-race vmbackup-race vmrestore-race
OS=$$(uname | tr '[:upper:]' '[:lower:]'); \
ARCH=$$(uname -m | tr '[:upper:]' '[:lower:]' | sed 's/x86_64/amd64/'); \
VERSION=v1.132.0; \
VMSINGLE=victoria-metrics-$${OS}-$${ARCH}-$${VERSION}.tar.gz; \
VMCLUSTER=victoria-metrics-$${OS}-$${ARCH}-$${VERSION}-cluster.tar.gz; \
URL=https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/$${VERSION}; \
DIR=/tmp/$${VERSION}; \
test -d $${DIR} || (mkdir $${DIR} && \
curl --output-dir /tmp -LO $${URL}/$${VMSINGLE} && tar xzf /tmp/$${VMSINGLE} -C $${DIR} && \
curl --output-dir /tmp -LO $${URL}/$${VMCLUSTER} && tar xzf /tmp/$${VMCLUSTER} -C $${DIR} \
); \
VM_LEGACY_VMSINGLE_PATH=$${DIR}/victoria-metrics-prod \
VM_LEGACY_VMSTORAGE_PATH=$${DIR}/vmstorage-prod \
go test ./apptest/tests -run="^TestLegacySingle.*"
integration-test: victoria-metrics vmagent vmalert vmauth
go test ./apptest/... -skip="^TestCluster.*"
benchmark:
go test -run=NO_TESTS -bench=. ./lib/...
go test -run=NO_TESTS -bench=. ./app/...
go test -bench=. ./lib/...
go test -bench=. ./app/...
benchmark-pure:
CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./lib/...
CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./app/...
CGO_ENABLED=0 go test -bench=. ./lib/...
CGO_ENABLED=0 go test -bench=. ./app/...
vendor-update:
go get -u ./lib/...
go get -u ./app/...
go mod tidy -compat=1.26
go mod tidy -compat=1.23
go mod vendor
app-local:
CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
app-local-pure:
CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
app-local-goos-goarch:
CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
app-local-windows-goarch:
CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
quicktemplate-gen: install-qtc
qtc -dir=lib
qtc -dir=app
qtc
install-qtc:
which qtc || go install github.com/valyala/quicktemplate/qtc@latest
golangci-lint: install-golangci-lint
golangci-lint run --build-tags 'synctest'
golangci-lint run
install-golangci-lint:
which golangci-lint && (golangci-lint --version | grep -q $(GOLANGCI_LINT_VERSION)) || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v$(GOLANGCI_LINT_VERSION)
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.63.4
remove-golangci-lint:
rm -rf `which golangci-lint`
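
The `release-*-goos-goarch` targets above write a `_checksums.txt` file next to each archive via `sha256sum`, so downloads can be verified against it. A sketch, with the archive name purely illustrative:

```sh
# Sketch: verify a downloaded release archive against the checksums file
# generated by the release targets. --ignore-missing skips the entry for
# the extracted binary when only the tarball is present locally.
sha256sum --ignore-missing -c victoria-logs-linux-amd64-v1.132.0_checksums.txt
```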


@@ -1,35 +1,31 @@
# VictoriaMetrics
[![Latest Release](https://img.shields.io/github/v/release/VictoriaMetrics/VictoriaMetrics?sort=semver&label=&filter=!*-victorialogs&logo=github&labelColor=gray&color=gray&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Freleases%2Flatest)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
[![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics?label=&logo=docker&logoColor=white&labelColor=2496ED&color=2496ED&link=https%3A%2F%2Fhub.docker.com%2Fr%2Fvictoriametrics%2Fvictoria-metrics)](https://hub.docker.com/u/victoriametrics)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics?link=https%3A%2F%2Fgoreportcard.com%2Freport%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
[![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml/badge.svg?branch=master&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Factions)](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml)
[![License](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics?labelColor=green&label=&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Fblob%2Fmaster%2FLICENSE)](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[![Join Slack](https://img.shields.io/badge/Join%20Slack-4A154B?logo=slack)](https://slack.victoriametrics.com)
[![X](https://img.shields.io/twitter/follow/VictoriaMetrics?style=flat&label=Follow&color=black&logo=x&labelColor=black&link=https%3A%2F%2Fx.com%2FVictoriaMetrics)](https://x.com/VictoriaMetrics/)
[![Reddit](https://img.shields.io/reddit/subreddit-subscribers/VictoriaMetrics?style=flat&label=Join&labelColor=red&logoColor=white&logo=reddit&link=https%3A%2F%2Fwww.reddit.com%2Fr%2FVictoriaMetrics)](https://www.reddit.com/r/VictoriaMetrics/)
![Latest Release](https://img.shields.io/github/v/release/VictoriaMetrics/VictoriaMetrics?sort=semver&label=&filter=!*-victorialogs&logo=github&labelColor=gray&color=gray&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Freleases%2Flatest)
![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics?label=&logo=docker&logoColor=white&labelColor=2496ED&color=2496ED&link=https%3A%2F%2Fhub.docker.com%2Fr%2Fvictoriametrics%2Fvictoria-metrics)
![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics?link=https%3A%2F%2Fgoreportcard.com%2Freport%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics)
![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/workflows/main/badge.svg?link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Factions)
![codecov](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics/branch/master/graph/badge.svg?link=https%3A%2F%2Fcodecov.io%2Fgh%2FVictoriaMetrics%2FVictoriaMetrics)
![License](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics?labelColor=green&label=&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Fblob%2Fmaster%2FLICENSE)
![Slack](https://img.shields.io/badge/Join-4A154B?logo=slack&link=https%3A%2F%2Fslack.victoriametrics.com)
![X](https://img.shields.io/twitter/follow/VictoriaMetrics?style=flat&label=Follow&color=black&logo=x&labelColor=black&link=https%3A%2F%2Fx.com%2FVictoriaMetrics)
![Reddit](https://img.shields.io/reddit/subreddit-subscribers/VictoriaMetrics?style=flat&label=Join&labelColor=red&logoColor=white&logo=reddit&link=https%3A%2F%2Fwww.reddit.com%2Fr%2FVictoriaMetrics)
<picture>
<source srcset="docs/victoriametrics/logo_white.webp" media="(prefers-color-scheme: dark)">
<source srcset="docs/victoriametrics/logo.webp" media="(prefers-color-scheme: light)">
<img src="docs/victoriametrics/logo.webp" width="300" alt="VictoriaMetrics logo">
<source srcset="docs/logo_white.webp" media="(prefers-color-scheme: dark)">
<source srcset="docs/logo.webp" media="(prefers-color-scheme: light)">
<img src="docs/logo.webp" width="300" alt="VictoriaMetrics logo">
</picture>
VictoriaMetrics is a fast, cost-effective, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
Here are some resources and information about VictoriaMetrics:
- **Case studies**: [Grammarly, Roblox, Wix, Spotify,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
- **Available**: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), Docker images on [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
- **Deployment types**: [Single-node version](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) under [Apache License 2.0](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE).
- **Getting started:** Read [key concepts](https://docs.victoriametrics.com/victoriametrics/keyconcepts/) and follow the
[quick start guide](https://docs.victoriametrics.com/victoriametrics/quick-start/).
- **Community**: [Slack](https://slack.victoriametrics.com/) (join via [Slack Inviter](https://slack.victoriametrics.com/)), [X (Twitter)](https://x.com/VictoriaMetrics), [YouTube](https://www.youtube.com/@VictoriaMetrics). See full list [here](https://docs.victoriametrics.com/victoriametrics/#community-and-contributions).
- **Changelog**: Project evolves fast - check the [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics).
- **Enterprise support:** [Contact us](mailto:info@victoriametrics.com) for commercial support with additional [enterprise features](https://docs.victoriametrics.com/victoriametrics/enterprise/).
- **Enterprise releases:** Enterprise and [long-term support releases (LTS)](https://docs.victoriametrics.com/victoriametrics/lts-releases/) are publicly available and can be evaluated for free
using a [free trial license](https://victoriametrics.com/products/enterprise/trial/).
- **Security:** we achieved [security certifications](https://victoriametrics.com/security/) for Database Software Development and Software-Based Monitoring Services.
- Documentation: [docs.victoriametrics.com](https://docs.victoriametrics.com)
- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/casestudies/).
- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/enterprise/)
- Changelog: [CHANGELOG](https://docs.victoriametrics.com/changelog/), and [How to upgrade](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics)
- Community: [Slack](https://slack.victoriametrics.com/), [X (Twitter)](https://x.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)
Yes, we open-source both the single-node VictoriaMetrics and the cluster version.
@@ -39,22 +35,22 @@ VictoriaMetrics is optimized for timeseries data, even when old time series are
* **Long-term storage for Prometheus** or as a drop-in replacement for Prometheus and Graphite in Grafana.
* **Powerful stream aggregation**: Can be used as a StatsD alternative.
* **Ideal for big data**: Works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://docs.victoriametrics.com/victoriametrics/enterprise/).
* **Ideal for big data**: Works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise/).
* **Query language**: Supports both PromQL and the more performant MetricsQL.
* **Easy to setup**: No dependencies, single [small binary](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d), configuration through command-line flags, but the default is also fine-tuned; backup and restore with [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* **Global query view**: Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics and queried via a single query.
* **Various Protocols**: Support metric scraping, ingestion and backfilling in various protocol.
* [Prometheus exporters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/victoriametrics/integrations/prometheus/), [Prometheus exposition format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format).
* [InfluxDB line protocol](https://docs.victoriametrics.com/victoriametrics/integrations/influxdb/) over HTTP, TCP and UDP.
* [Graphite plaintext protocol](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
* [OpenTSDB put message](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-telnet).
* [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-http).
* [JSON line format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-json-line-format).
* [Arbitrary CSV data](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-csv-data).
* [Native binary format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-native-format).
* [DataDog agent or DogStatsD](https://docs.victoriametrics.com/victoriametrics/integrations/datadog/).
* [NewRelic infrastructure agent](https://docs.victoriametrics.com/victoriametrics/integrations/newrelic/#sending-data-from-agent).
* [OpenTelemetry metrics format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#sending-data-via-opentelemetry).
* [Prometheus exporters](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/#prometheus-setup), [Prometheus exposition format](https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format).
* [InfluxDB line protocol](https://docs.victoriametrics.com/#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
* [Graphite plaintext protocol](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
* [OpenTSDB put message](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol).
* [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/#sending-opentsdb-data-via-http-apiput-requests).
* [JSON line format](https://docs.victoriametrics.com/#how-to-import-data-in-json-line-format).
* [Arbitrary CSV data](https://docs.victoriametrics.com/#how-to-import-csv-data).
* [Native binary format](https://docs.victoriametrics.com/#how-to-import-data-in-native-format).
* [DataDog agent or DogStatsD](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent).
* [NewRelic infrastructure agent](https://docs.victoriametrics.com/#how-to-send-data-from-newrelic-agent).
* [OpenTelemetry metrics format](https://docs.victoriametrics.com/#sending-data-via-opentelemetry).
* **NFS-based storages**: Supports storing data on NFS-based storages such as Amazon EFS, Google Filestore.
* And many other features such as metrics relabeling, cardinality limiter, etc.
@@ -66,9 +62,9 @@ In addition, the Enterprise version includes extra features:
- **Backup automation**: Automates regular backup procedures.
- **Multiple retentions**: Reducing storage costs by specifying different retentions for different datasets.
- **Downsampling**: Reducing storage costs and increasing performance for queries over historical data.
- **Stable releases** with long-term support lines ([LTS](https://docs.victoriametrics.com/victoriametrics/lts-releases/)).
- **Stable releases** with long-term support lines ([LTS](https://docs.victoriametrics.com/lts-releases/)).
- **Comprehensive support**: First-class consulting, feature requests and technical support provided by the core VictoriaMetrics dev team.
- Many other features, which you can read about on [the Enterprise page](https://docs.victoriametrics.com/victoriametrics/enterprise/).
- Many other features, which you can read about on [the Enterprise page](https://docs.victoriametrics.com/enterprise/).
[Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. Or you can request a free trial license [here](https://victoriametrics.com/products/enterprise/trial/), downloaded Enterprise binaries are available at [Github Releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
@@ -81,7 +77,7 @@ Some good benchmarks VictoriaMetrics achieved:
* **Minimal memory footprint**: handling millions of unique timeseries with [10x less RAM](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) than InfluxDB, up to [7x less RAM](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) than Prometheus, Thanos or Cortex.
* **Highly scalable and performance** for [data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4), [20x outperforms](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) InfluxDB and TimescaleDB.
* **High data compression**: [70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) may be stored into limited storage than TimescaleDB, [7x less storage](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) space is required than Prometheus, Thanos or Cortex.
* **Reducing storage costs**: [10x more effective](https://docs.victoriametrics.com/victoriametrics/casestudies/#grammarly) than Graphite according to the Grammarly case study.
* **Reducing storage costs**: [10x more effective](https://docs.victoriametrics.com/casestudies/#grammarly) than Graphite according to the Grammarly case study.
* **A single-node VictoriaMetrics** can replace medium-sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB. See [VictoriaMetrics vs Thanos](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683), [Measuring vertical scalability](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae), [Remote write storage wars - PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
* **Optimized for storage**: [Works well with high-latency IO](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc.).
@@ -97,7 +93,7 @@ Feel free asking any questions regarding VictoriaMetrics:
* [Telegram-ru](https://t.me/VictoriaMetrics_ru1)
* [Mastodon](https://mastodon.social/@victoriametrics/)
If you like VictoriaMetrics and want to contribute, then please [read these docs](https://docs.victoriametrics.com/victoriametrics/contributing/).
If you like VictoriaMetrics and want to contribute, then please [read these docs](https://docs.victoriametrics.com/contributing/).
## VictoriaMetrics Logo


@@ -4,39 +4,15 @@
The following versions of VictoriaMetrics receive regular security fixes:
| Version | Supported |
|--------------------------------------------------------------------------------|--------------------|
| [Latest release](https://docs.victoriametrics.com/victoriametrics/changelog/) | :white_check_mark: |
| [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-releases/) | :white_check_mark: |
| other releases | :x: |
| Version | Supported |
|---------|--------------------|
| [latest release](https://docs.victoriametrics.com/changelog/) | :white_check_mark: |
| v1.102.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
| other releases | :x: |
See [this page](https://victoriametrics.com/security/) for more details.
## Software Bill of Materials (SBOM)
Every VictoriaMetrics container{{% available_from "#" %}} image published to
[Docker Hub](https://hub.docker.com/u/victoriametrics)
and [Quay.io](https://quay.io/organization/victoriametrics)
includes an [SPDX](https://spdx.dev/) SBOM attestation
generated automatically by BuildKit during
`docker buildx build`.
To inspect the SBOM for an image:
```sh
docker buildx imagetools inspect \
docker.io/victoriametrics/victoria-metrics:latest \
--format "{{ json .SBOM }}"
```
To scan an image using its SBOM attestation with
[Trivy](https://github.com/aquasecurity/trivy):
```sh
trivy image --sbom-sources oci \
docker.io/victoriametrics/victoria-metrics:latest
```
## Reporting a Vulnerability
Please report any security issues to <security@victoriametrics.com>

app/victoria-logs/Makefile (new file, 113 lines)

@@ -0,0 +1,113 @@
# All these commands must run from repository root.
victoria-logs:
APP_NAME=victoria-logs $(MAKE) app-local
victoria-logs-race:
APP_NAME=victoria-logs RACE=-race $(MAKE) app-local
victoria-logs-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker
victoria-logs-pure-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-pure
victoria-logs-linux-amd64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-amd64
victoria-logs-linux-arm-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-arm
victoria-logs-linux-arm64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-arm64
victoria-logs-linux-ppc64le-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-ppc64le
victoria-logs-linux-386-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-386
victoria-logs-darwin-amd64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-darwin-amd64
victoria-logs-darwin-arm64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-darwin-arm64
victoria-logs-freebsd-amd64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-freebsd-amd64
victoria-logs-openbsd-amd64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-openbsd-amd64
victoria-logs-windows-amd64-prod:
APP_NAME=victoria-logs $(MAKE) app-via-docker-windows-amd64
package-victoria-logs:
APP_NAME=victoria-logs $(MAKE) package-via-docker
package-victoria-logs-pure:
APP_NAME=victoria-logs $(MAKE) package-via-docker-pure
package-victoria-logs-amd64:
APP_NAME=victoria-logs $(MAKE) package-via-docker-amd64
package-victoria-logs-arm:
APP_NAME=victoria-logs $(MAKE) package-via-docker-arm
package-victoria-logs-arm64:
APP_NAME=victoria-logs $(MAKE) package-via-docker-arm64
package-victoria-logs-ppc64le:
APP_NAME=victoria-logs $(MAKE) package-via-docker-ppc64le
package-victoria-logs-386:
APP_NAME=victoria-logs $(MAKE) package-via-docker-386
publish-victoria-logs:
APP_NAME=victoria-logs $(MAKE) publish-via-docker
victoria-logs-linux-amd64:
APP_NAME=victoria-logs CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
victoria-logs-linux-arm:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
victoria-logs-linux-arm64:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
victoria-logs-linux-ppc64le:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
victoria-logs-linux-s390x:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
victoria-logs-linux-loong64:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
victoria-logs-linux-386:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
victoria-logs-darwin-amd64:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
victoria-logs-darwin-arm64:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
victoria-logs-freebsd-amd64:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
victoria-logs-openbsd-amd64:
APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
victoria-logs-windows-amd64:
GOARCH=amd64 APP_NAME=victoria-logs $(MAKE) app-local-windows-goarch
victoria-logs-pure:
APP_NAME=victoria-logs $(MAKE) app-local-pure
run-victoria-logs:
mkdir -p victoria-logs-data
DOCKER_OPTS='-v $(shell pwd)/victoria-logs-data:/victoria-logs-data' \
APP_NAME=victoria-logs \
ARGS='' \
$(MAKE) run-via-docker
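
For orientation, a typical local workflow with the targets above might look like this (a sketch: it assumes GNU make, Go, and Docker are installed, and that the shared `app-local`/`run-via-docker` targets from the repository root Makefile behave as usual):

```sh
# Build a victoria-logs binary for the host platform.
make victoria-logs

# Cross-compile a production Linux binary inside the builder container.
make victoria-logs-linux-amd64-prod

# Run VictoriaLogs in Docker, persisting data to ./victoria-logs-data.
make run-victoria-logs
```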


@@ -1 +0,0 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).


@@ -0,0 +1,8 @@
ARG base_image=non-existing
FROM $base_image
EXPOSE 9428
ENTRYPOINT ["/victoria-logs-prod"]
ARG src_binary=non-existing
COPY $src_binary ./victoria-logs-prod
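
As a sketch, the two build args above would be supplied at build time roughly as follows; the base image tag, binary name, and Dockerfile path are placeholders rather than values taken from this repository:

```sh
# The binary named by src_binary must already exist in the build context.
docker build \
  --build-arg base_image=alpine:3.20 \
  --build-arg src_binary=victoria-logs-prod \
  -t victoria-logs:local \
  -f path/to/this/Dockerfile .
```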

app/victoria-metrics/main.go

@@ -0,0 +1,108 @@
package main
import (
"flag"
"fmt"
"net/http"
"os"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
)
var (
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. See also -httpListenAddr.useProxyProtocol")
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the given -httpListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
)
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
envflag.Parse()
buildinfo.Init()
logger.Init()
listenAddrs := *httpListenAddrs
if len(listenAddrs) == 0 {
listenAddrs = []string{":9428"}
}
logger.Infof("starting VictoriaLogs at %q...", listenAddrs)
startTime := time.Now()
vlstorage.Init()
vlselect.Init()
vlinsert.Init()
go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/victorialogs/", time.Since(startTime).Seconds())
pushmetrics.Init()
sig := procutil.WaitForSigterm()
logger.Infof("received signal %s", sig)
pushmetrics.Stop()
logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
startTime = time.Now()
if err := httpserver.Stop(listenAddrs); err != nil {
logger.Fatalf("cannot stop the webservice: %s", err)
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
vlinsert.Stop()
vlselect.Stop()
vlstorage.Stop()
fs.MustStopDirRemover()
logger.Infof("the VictoriaLogs has been stopped in %.3f seconds", time.Since(startTime).Seconds())
}
func requestHandler(w http.ResponseWriter, r *http.Request) bool {
if r.URL.Path == "/" {
if r.Method != http.MethodGet {
return false
}
w.Header().Add("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<h2>Single-node VictoriaLogs</h2></br>")
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/victorialogs/'>https://docs.victoriametrics.com/victorialogs/</a></br>")
fmt.Fprintf(w, "Useful endpoints:</br>")
httpserver.WriteAPIHelp(w, [][2]string{
{"select/vmui", "Web UI for VictoriaLogs"},
{"metrics", "available service metrics"},
{"flags", "command-line flags"},
})
return true
}
if vlinsert.RequestHandler(w, r) {
return true
}
if vlselect.RequestHandler(w, r) {
return true
}
if vlstorage.RequestHandler(w, r) {
return true
}
return false
}
func usage() {
const s = `
victoria-logs is a log management and analytics service.
See the docs at https://docs.victoriametrics.com/victorialogs/
`
flagutil.Usage(s)
}
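
A quick smoke test of the server above (a sketch; it assumes a binary built from this package and the default listen address from the code):

```sh
# Start the server; :9428 is the default used when -httpListenAddr is not set.
./victoria-logs -httpListenAddr=:9428 &

# The root page lists the endpoints wired up in requestHandler.
curl http://localhost:9428/

# Service metrics and effective command-line flags.
curl http://localhost:9428/metrics
curl http://localhost:9428/flags
```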


@@ -0,0 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image=non-existing
ARG root_image=non-existing
FROM $certs_image AS certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 9428
ENTRYPOINT ["/victoria-logs-prod"]
ARG TARGETARCH
COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod
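
A multi-arch build of this Dockerfile could be driven with buildx along these lines (a sketch; the image names are illustrative, and the per-arch `victoria-logs-linux-<arch>-prod` binaries must already exist in the build context, since buildx sets TARGETARCH per platform):

```sh
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg certs_image=alpine:3.20 \
  --build-arg root_image=scratch \
  -t example/victoria-logs:latest .
```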


@@ -27,9 +27,6 @@ victoria-metrics-linux-ppc64le-prod:
victoria-metrics-linux-386-prod:
APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-386
victoria-metrics-linux-s390x-prod:
APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-s390x
victoria-metrics-darwin-amd64-prod:
APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-amd64


@@ -17,6 +17,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
@@ -31,7 +32,7 @@ var (
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication")
"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication")
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running VictoriaMetrics. The following config files are checked: "+
"-promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. "+
"This can be changed with -promscrape.config.strictParse=false command-line flag")
@@ -44,7 +45,7 @@ var (
finalDedupScheduleInterval = flag.Duration("storage.finalDedupScheduleCheckInterval", time.Hour, "The interval for checking when the final deduplication process should be started. "+
"Storage unconditionally adds 25% jitter to the interval value on each check evaluation."+
" Changing the interval to bigger values may delay downsampling and deduplication of historical data."+
" See also https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication")
" See also https://docs.victoriametrics.com/#deduplication")
)
func main() {
@@ -100,9 +101,7 @@ func main() {
startSelfScraper()
go httpserver.Serve(listenAddrs, requestHandler, httpserver.ServeOptions{
UseProxyProtocol: useProxyProtocol,
})
go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
logger.Infof("started VictoriaMetrics in %.3f seconds", time.Since(startTime).Seconds())
pushmetrics.Init()
@@ -124,6 +123,8 @@ func main() {
vmstorage.Stop()
vmselect.Stop()
fs.MustStopDirRemover()
logger.Infof("the VictoriaMetrics has been stopped in %.3f seconds", time.Since(startTime).Seconds())
}
@@ -134,7 +135,6 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
w.Header().Add("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<h2>Single-node VictoriaMetrics</h2></br>")
fmt.Fprintf(w, "Version %s<br>", buildinfo.Version)
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/'>https://docs.victoriametrics.com/</a></br>")
fmt.Fprintf(w, "Useful endpoints:</br>")
httpserver.WriteAPIHelp(w, [][2]string{
@@ -170,7 +170,7 @@ func usage() {
const s = `
victoria-metrics is a time series database and monitoring solution.
See the docs at https://docs.victoriametrics.com/victoriametrics/
See the docs at https://docs.victoriametrics.com/
`
flagutil.Usage(s)
}
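
For illustration, the deduplication-related flags discussed in this hunk would be passed at startup roughly like this (a sketch; the values are examples, not recommendations):

```sh
./victoria-metrics-prod \
  -storageDataPath=/var/lib/victoria-metrics \
  -dedup.minScrapeInterval=30s \
  -storage.finalDedupScheduleCheckInterval=1h
```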


@@ -0,0 +1,619 @@
package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"testing"
"time"
testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
const (
testFixturesDir = "testdata"
testStorageSuffix = "vm-test-storage"
testHTTPListenAddr = ":7654"
testStatsDListenAddr = ":2003"
testOpenTSDBListenAddr = ":4242"
testOpenTSDBHTTPListenAddr = ":4243"
testLogLevel = "INFO"
)
const (
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
testImportCSVWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/import/csv"
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
)
const (
testStorageInitTimeout = 10 * time.Second
)
var (
storagePath string
insertionTime = time.Now().UTC()
)
type test struct {
Name string `json:"name"`
Data []string `json:"data"`
InsertQuery string `json:"insert_query"`
Query []string `json:"query"`
ResultMetrics []Metric `json:"result_metrics"`
ResultSeries Series `json:"result_series"`
ResultQuery Query `json:"result_query"`
Issue string `json:"issue"`
ExpectedResultLinesCount int `json:"expected_result_lines_count"`
}
type Metric struct {
Metric map[string]string `json:"metric"`
Values []float64 `json:"values"`
Timestamps []int64 `json:"timestamps"`
}
func (r *Metric) UnmarshalJSON(b []byte) error {
type plain Metric
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
}
type Series struct {
Status string `json:"status"`
Data []map[string]string `json:"data"`
}
type Query struct {
Status string `json:"status"`
Data struct {
ResultType string `json:"resultType"`
Result json.RawMessage `json:"result"`
} `json:"data"`
}
const rtVector, rtMatrix = "vector", "matrix"
func (q *Query) metrics() ([]Metric, error) {
switch q.Data.ResultType {
case rtVector:
var r QueryInstant
if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
return nil, err
}
return r.metrics()
case rtMatrix:
var r QueryRange
if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
return nil, err
}
return r.metrics()
default:
return nil, fmt.Errorf("unknown result type %q", q.Data.ResultType)
}
}
type QueryInstant struct {
Result []struct {
Labels map[string]string `json:"metric"`
TV [2]any `json:"value"`
} `json:"result"`
}
func (q QueryInstant) metrics() ([]Metric, error) {
result := make([]Metric, len(q.Result))
for i, res := range q.Result {
f, err := strconv.ParseFloat(res.TV[1].(string), 64)
if err != nil {
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
}
var m Metric
m.Metric = res.Labels
m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64)))
m.Values = append(m.Values, f)
result[i] = m
}
return result, nil
}
type QueryRange struct {
Result []struct {
Metric map[string]string `json:"metric"`
Values [][]any `json:"values"`
} `json:"result"`
}
func (q QueryRange) metrics() ([]Metric, error) {
var result []Metric
for i, res := range q.Result {
var m Metric
for _, tv := range res.Values {
f, err := strconv.ParseFloat(tv[1].(string), 64)
if err != nil {
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, tv[1], err)
}
m.Values = append(m.Values, f)
m.Timestamps = append(m.Timestamps, int64(tv[0].(float64)))
}
if len(m.Values) < 1 || len(m.Timestamps) < 1 {
return nil, fmt.Errorf("metric %v contains no values", res)
}
m.Metric = q.Result[i].Metric
result = append(result, m)
}
return result, nil
}
func (q *Query) UnmarshalJSON(b []byte) error {
type plain Query
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(q))
}
func TestMain(m *testing.M) {
setUp()
code := m.Run()
tearDown()
os.Exit(code)
}
func setUp() {
storagePath = filepath.Join(os.TempDir(), testStorageSuffix)
processFlags()
logger.Init()
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
vmselect.Init()
vminsert.Init()
go httpserver.Serve(*httpListenAddrs, useProxyProtocol, requestHandler)
readyStorageCheckFunc := func() bool {
resp, err := http.Get(testHealthHTTPPath)
if err != nil {
return false
}
_ = resp.Body.Close()
return resp.StatusCode == 200
}
if err := waitFor(testStorageInitTimeout, readyStorageCheckFunc); err != nil {
log.Fatalf("http server can't start for %s seconds, err %s", testStorageInitTimeout, err)
}
}
func processFlags() {
flag.Parse()
for _, fv := range []struct {
flag string
value string
}{
{flag: "storageDataPath", value: storagePath},
{flag: "httpListenAddr", value: testHTTPListenAddr},
{flag: "graphiteListenAddr", value: testStatsDListenAddr},
{flag: "opentsdbListenAddr", value: testOpenTSDBListenAddr},
{flag: "loggerLevel", value: testLogLevel},
{flag: "opentsdbHTTPListenAddr", value: testOpenTSDBHTTPListenAddr},
} {
// panics if flag doesn't exist
if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
}
}
}
func waitFor(timeout time.Duration, f func() bool) error {
fraction := timeout / 10
for i := fraction; i < timeout; i += fraction {
if f() {
return nil
}
time.Sleep(fraction)
}
return fmt.Errorf("timeout")
}
func tearDown() {
if err := httpserver.Stop(*httpListenAddrs); err != nil {
log.Printf("cannot stop the webservice: %s", err)
}
vminsert.Stop()
vmstorage.Stop()
vmselect.Stop()
fs.MustRemoveAll(storagePath)
}
func TestWriteRead(t *testing.T) {
t.Run("write", testWrite)
time.Sleep(500 * time.Millisecond)
vmstorage.Storage.DebugFlush()
time.Sleep(1500 * time.Millisecond)
t.Run("read", testRead)
}
func testWrite(t *testing.T) {
t.Run("prometheus", func(t *testing.T) {
for _, test := range readIn("prometheus", insertionTime) {
if test.Data == nil {
continue
}
r := testutil.WriteRequest{}
testData := strings.Join(test.Data, "\n")
if err := json.Unmarshal([]byte(testData), &r.Timeseries); err != nil {
panic(fmt.Errorf("BUG: cannot unmarshal TimeSeries: %s\ntest data\n%s", err, testData))
}
if n := len(r.Timeseries); n <= 0 {
panic(fmt.Errorf("BUG: expecting non-empty Timeseries in test data:\n%s", testData))
}
data := testutil.Compress(r)
httpWrite(t, testPromWriteHTTPPath, test.InsertQuery, bytes.NewBuffer(data))
}
})
t.Run("csv", func(t *testing.T) {
for _, test := range readIn("csv", insertionTime) {
if test.Data == nil {
continue
}
httpWrite(t, testImportCSVWriteHTTPPath, test.InsertQuery, bytes.NewBuffer([]byte(strings.Join(test.Data, "\n"))))
}
})
t.Run("influxdb", func(t *testing.T) {
for _, x := range readIn("influxdb", insertionTime) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
httpWrite(t, testWriteHTTPPath, test.InsertQuery, bytes.NewBufferString(strings.Join(test.Data, "\n")))
})
}
})
t.Run("graphite", func(t *testing.T) {
for _, x := range readIn("graphite", insertionTime) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
tcpWrite(t, "127.0.0.1"+testStatsDListenAddr, strings.Join(test.Data, "\n"))
})
}
})
t.Run("opentsdb", func(t *testing.T) {
for _, x := range readIn("opentsdb", insertionTime) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
tcpWrite(t, "127.0.0.1"+testOpenTSDBListenAddr, strings.Join(test.Data, "\n"))
})
}
})
t.Run("opentsdbhttp", func(t *testing.T) {
for _, x := range readIn("opentsdbhttp", insertionTime) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
logger.Infof("writing %s", test.Data)
httpWrite(t, testOpenTSDBWriteHTTPPath, test.InsertQuery, bytes.NewBufferString(strings.Join(test.Data, "\n")))
})
}
})
}
func testRead(t *testing.T) {
for _, engine := range []string{"csv", "prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
t.Run(engine, func(t *testing.T) {
for _, x := range readIn(engine, insertionTime) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
for _, q := range test.Query {
q = testutil.PopulateTimeTplString(q, insertionTime)
if test.Issue != "" {
test.Issue = "\nRegression in " + test.Issue
}
switch {
case strings.HasPrefix(q, "/api/v1/export/csv"):
data := strings.Split(string(httpReadData(t, testReadHTTPPath, q)), "\n")
if len(data) != test.ExpectedResultLinesCount {
t.Fatalf("unexpected number of csv lines; want=%d, got=%d; test=%s.%s\nresponse=%q", test.ExpectedResultLinesCount, len(data), q, test.Issue, strings.Join(data, "\n"))
}
case strings.HasPrefix(q, "/api/v1/export"):
if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
}
case strings.HasPrefix(q, "/api/v1/series"):
s := Series{}
httpReadStruct(t, testReadHTTPPath, q, &s)
if err := checkSeriesResult(s, test.ResultSeries); err != nil {
t.Fatalf("Series. %s fails with error %s.%s", q, err, test.Issue)
}
case strings.HasPrefix(q, "/api/v1/query"):
queryResult := Query{}
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
gotMetrics, err := queryResult.metrics()
if err != nil {
t.Fatalf("failed to parse query response: %s", err)
}
expMetrics, err := test.ResultQuery.metrics()
if err != nil {
t.Fatalf("failed to parse expected response: %s", err)
}
if err := checkMetricsResult(gotMetrics, expMetrics); err != nil {
t.Fatalf("%q fails with error %s.%s", q, err, test.Issue)
}
default:
t.Fatalf("unsupported read query %s", q)
}
}
})
}
})
}
}
func readIn(readFor string, insertTime time.Time) []test {
testDir := filepath.Join(testFixturesDir, readFor)
var tt []test
err := filepath.Walk(testDir, func(path string, _ os.FileInfo, err error) error {
if err != nil {
return err
}
if filepath.Ext(path) != ".json" {
return nil
}
b, err := os.ReadFile(path)
if err != nil {
panic(fmt.Errorf("BUG: cannot read %s: %s", path, err))
}
item := test{}
if err := json.Unmarshal(b, &item); err != nil {
panic(fmt.Errorf("cannot parse %T from %s: %s; data:\n%s", &item, path, err, b))
}
for i := range item.Data {
item.Data[i] = testutil.PopulateTimeTplString(item.Data[i], insertTime)
}
tt = append(tt, item)
return nil
})
if err != nil {
panic(fmt.Errorf("BUG: cannot read test data at %s: %w", testDir, err))
}
if len(tt) == 0 {
panic(fmt.Errorf("BUG: no tests found in %s", testDir))
}
return tt
}
func httpWrite(t *testing.T, address, query string, r io.Reader) {
t.Helper()
requestURL := address + query
resp, err := http.Post(requestURL, "", r)
if err != nil {
t.Fatalf("cannot send request to %s: %s", requestURL, err)
}
_ = resp.Body.Close()
if resp.StatusCode != 204 {
t.Fatalf("unexpected status code received from %s; got %d; want 204", requestURL, resp.StatusCode)
}
}
func tcpWrite(t *testing.T, address, data string) {
t.Helper()
conn, err := net.Dial("tcp", address)
if err != nil {
t.Fatalf("cannot dial %s: %s", address, err)
}
defer func() {
_ = conn.Close()
}()
n, err := conn.Write([]byte(data))
if err != nil {
t.Fatalf("cannot write %d bytes to %s: %s", len(data), address, err)
}
if n != len(data) {
panic(fmt.Errorf("BUG: conn.Write() returned unexpected number of written bytes to %s; got %d; want %d", address, n, len(data)))
}
}
func httpReadMetrics(t *testing.T, address, query string) []Metric {
t.Helper()
requestURL := address + query
resp, err := http.Get(requestURL)
if err != nil {
t.Fatalf("cannot send request to %s: %s", requestURL, err)
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
}
var rows []Metric
dec := json.NewDecoder(resp.Body)
for {
var row Metric
err := dec.Decode(&row)
if err != nil {
if errors.Is(err, io.EOF) {
return rows
}
t.Fatalf("cannot decode %T from response received from %s: %s", &row, requestURL, err)
}
rows = append(rows, row)
}
}
func httpReadStruct(t *testing.T, address, query string, dst any) {
t.Helper()
requestURL := address + query
resp, err := http.Get(requestURL)
if err != nil {
t.Fatalf("cannot send request to %s: %s", requestURL, err)
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
}
err = json.NewDecoder(resp.Body).Decode(dst)
if err != nil {
t.Fatalf("cannot decode %T from response received from %s: %s", dst, requestURL, err)
}
}
func httpReadData(t *testing.T, address, query string) []byte {
t.Helper()
requestURL := address + query
resp, err := http.Get(requestURL)
if err != nil {
t.Fatalf("cannot send request to %s: %s", requestURL, err)
}
defer func() {
_ = resp.Body.Close()
}()
if resp.StatusCode != 200 {
t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
}
data, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("cannot read response from %s: %s", requestURL, err)
}
return data
}
func checkMetricsResult(got, want []Metric) error {
for _, r := range append([]Metric(nil), got...) {
want = removeIfFoundMetrics(r, want)
}
if len(want) > 0 {
return fmt.Errorf("expected metrics %+v not found in %+v", want, got)
}
return nil
}
func removeIfFoundMetrics(r Metric, contains []Metric) []Metric {
for i, item := range contains {
if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) &&
reflect.DeepEqual(r.Timestamps, item.Timestamps) {
contains[i] = contains[len(contains)-1]
return contains[:len(contains)-1]
}
}
return contains
}
func checkSeriesResult(got, want Series) error {
if got.Status != want.Status {
return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
}
wantData := append([]map[string]string(nil), want.Data...)
for _, r := range got.Data {
wantData = removeIfFoundSeries(r, wantData)
}
if len(wantData) > 0 {
return fmt.Errorf("expected seria(s) %+v not found in %+v", wantData, got.Data)
}
return nil
}
func removeIfFoundSeries(r map[string]string, contains []map[string]string) []map[string]string {
for i, item := range contains {
if reflect.DeepEqual(r, item) {
contains[i] = contains[len(contains)-1]
return contains[:len(contains)-1]
}
}
return contains
}
func TestImportJSONLines(t *testing.T) {
f := func(labelsCount, labelLen int) {
t.Helper()
reqURL := fmt.Sprintf("http://localhost%s/api/v1/import", testHTTPListenAddr)
line := generateJSONLine(labelsCount, labelLen)
req, err := http.NewRequest("POST", reqURL, bytes.NewBufferString(line))
if err != nil {
t.Fatalf("cannot create request: %s", err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("cannot perform request for labelsCount=%d, labelLen=%d: %s", labelsCount, labelLen, err)
}
if resp.StatusCode != 204 {
t.Fatalf("unexpected statusCode for labelsCount=%d, labelLen=%d; got %d; want 204", labelsCount, labelLen, resp.StatusCode)
}
}
// labels with various lengths
for i := 0; i < 500; i++ {
f(10, i*5)
}
// Too many labels
f(1000, 100)
// Too long labels
f(1, 100_000)
f(10, 100_000)
f(10, 10_000)
}
func generateJSONLine(labelsCount, labelLen int) string {
m := make(map[string]string, labelsCount)
m["__name__"] = generateSizedRandomString(labelLen)
for j := 1; j < labelsCount; j++ {
labelName := generateSizedRandomString(labelLen)
labelValue := generateSizedRandomString(labelLen)
m[labelName] = labelValue
}
type jsonLine struct {
Metric map[string]string `json:"metric"`
Values []float64 `json:"values"`
Timestamps []int64 `json:"timestamps"`
}
line := &jsonLine{
Metric: m,
Values: []float64{1.34},
Timestamps: []int64{time.Now().UnixNano() / 1e6},
}
data, err := json.Marshal(&line)
if err != nil {
panic(fmt.Errorf("cannot marshal JSON: %w", err))
}
data = append(data, '\n')
return string(data)
}
const alphabetSample = `qwertyuiopasdfghjklzxcvbnm`
func generateSizedRandomString(size int) string {
dst := make([]byte, size)
for i := range dst {
dst[i] = alphabetSample[rand.Intn(len(alphabetSample))]
}
return string(dst)
}
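
The JSON line format exercised by TestImportJSONLines can also be sent by hand. A minimal sketch against a locally running single-node VictoriaMetrics (the default :8428 port is assumed; the metric name and timestamp are illustrative):

```sh
curl -X POST 'http://localhost:8428/api/v1/import' \
  -d '{"metric":{"__name__":"foo","job":"test"},"values":[1.34],"timestamps":[1707000000000]}'
# A 204 response means the line was accepted, mirroring the assertion in the test above.
```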


@@ -9,5 +9,4 @@ COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certifica
EXPOSE 8428
ENTRYPOINT ["/victoria-metrics-prod"]
ARG TARGETARCH
ARG BINARY_SUFFIX=non-existing
COPY victoria-metrics-linux-${TARGETARCH}-prod${BINARY_SUFFIX} ./victoria-metrics-prod
COPY victoria-metrics-linux-${TARGETARCH}-prod ./victoria-metrics-prod


@@ -10,11 +10,9 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
)
@@ -29,9 +27,11 @@ var selfScraperWG sync.WaitGroup
func startSelfScraper() {
selfScraperStopCh = make(chan struct{})
selfScraperWG.Go(func() {
selfScraperWG.Add(1)
go func() {
defer selfScraperWG.Done()
selfScraper(*selfScrapeInterval)
})
}()
}
func stopSelfScraper() {
@@ -48,9 +48,8 @@ func selfScraper(scrapeInterval time.Duration) {
var bb bytesutil.ByteBuffer
var rows prometheus.Rows
var metadataRows prometheus.MetadataRows
var mrs []storage.MetricRow
var labels []prompb.Label
var labels []prompbmarshal.Label
t := time.NewTicker(scrapeInterval)
f := func(currentTime time.Time, sendStaleMarkers bool) {
currentTimestamp := currentTime.UnixNano() / 1e6
@@ -58,12 +57,7 @@ func selfScraper(scrapeInterval time.Duration) {
appmetrics.WritePrometheusMetrics(&bb)
s := bytesutil.ToUnsafeString(bb.B)
rows.Reset()
// Parse metrics and optionally metadata when enabled
if prommetadata.IsEnabled() {
rows, metadataRows = prometheus.UnmarshalWithMetadata(rows, metadataRows, s, nil)
} else {
rows.UnmarshalWithErrLogger(s, nil)
}
rows.Unmarshal(s)
mrs = mrs[:0]
for i := range rows.Rows {
r := &rows.Rows[i]
@@ -96,19 +90,6 @@ func selfScraper(scrapeInterval time.Duration) {
if err := vmstorage.AddRows(mrs); err != nil {
logger.Errorf("cannot store self-scraped metrics: %s", err)
}
if len(metadataRows.Rows) > 0 {
mms := make([]metricsmetadata.Row, 0, len(metadataRows.Rows))
for _, mm := range metadataRows.Rows {
mms = append(mms, metricsmetadata.Row{
MetricFamilyName: bytesutil.ToUnsafeBytes(mm.Metric),
Help: bytesutil.ToUnsafeBytes(mm.Help),
Type: mm.Type,
})
}
if err := vmstorage.AddMetadataRows(mms); err != nil {
logger.Errorf("cannot store self-scraped metrics metadata: %s", err)
}
}
}
for {
select {
@@ -123,11 +104,11 @@ func selfScraper(scrapeInterval time.Duration) {
}
}
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
func addLabel(dst []prompbmarshal.Label, key, value string) []prompbmarshal.Label {
if len(dst) < cap(dst) {
dst = dst[:len(dst)+1]
} else {
dst = append(dst, prompb.Label{})
dst = append(dst, prompbmarshal.Label{})
}
lb := &dst[len(dst)-1]
lb.Name = key


@@ -33,13 +33,13 @@ func PopulateTimeTpl(b []byte, tGlobal time.Time) []byte {
}
switch strings.TrimSpace(parts[0]) {
case `TIME_S`:
return fmt.Appendf(nil, "%d", t.Unix())
return []byte(fmt.Sprintf("%d", t.Unix()))
case `TIME_MSZ`:
return fmt.Appendf(nil, "%d", t.Unix()*1e3)
return []byte(fmt.Sprintf("%d", t.Unix()*1e3))
case `TIME_MS`:
return fmt.Appendf(nil, "%d", timeToMillis(t))
return []byte(fmt.Sprintf("%d", timeToMillis(t)))
case `TIME_NS`:
return fmt.Appendf(nil, "%d", t.UnixNano())
return []byte(fmt.Sprintf("%d", t.UnixNano()))
default:
log.Fatalf("unknown time pattern %s in %s", parts[0], repl)
}


@@ -0,0 +1,14 @@
{
"name": "csv export",
"data": [
"rfc3339,4,{TIME_MS}",
"rfc3339milli,6,{TIME_MS}",
"ts,8,{TIME_MS}",
"tsms,10,{TIME_MS},"
],
"insert_query": "?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms",
"query": [
"/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
],
"expected_result_lines_count": 4
}
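
For context, the insert_query above maps CSV columns by position: column 1 becomes the tfmt label, column 2 the test_csv metric value, and column 3 a unix_ms timestamp. A hand-run equivalent of this fixture might look like the following sketch (single-node instance on the default port assumed; the timestamp is illustrative):

```sh
# Ingest one CSV row.
curl -X POST 'http://localhost:8428/api/v1/import/csv?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms' \
  -d 'rfc3339,4,1707000000000'

# Export it back with per-column formatting.
curl 'http://localhost:8428/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__="test_csv"}'
```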


@@ -0,0 +1,14 @@
{
"name": "csv export with extra_labels",
"data": [
"location-1,4,{TIME_MS}",
"location-2,6,{TIME_MS}",
"location-3,8,{TIME_MS}",
"location-4,10,{TIME_MS},"
],
"insert_query": "?format=1:label:location,2:metric:test_csv_labels,3:time:unix_ms&extra_label=location=location-1",
"query": [
"/api/v1/export/csv?format=__name__,location,__value__,__timestamp__:unix_ms&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
],
"expected_result_lines_count": 4
}


@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["graphite.foo.bar.baz;tag1=value1;tag2=value2 123 {TIME_S}"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"graphite.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
]
}


@@ -0,0 +1,16 @@
{
"name": "comparison-not-inf-not-nan",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150",
"data": [
"not_nan_not_inf;item=x 1 {TIME_S-1m}",
"not_nan_not_inf;item=x 1 {TIME_S-2m}",
"not_nan_not_inf;item=y 3 {TIME_S-1m}",
"not_nan_not_inf;item=y 1 {TIME_S-2m}"],
"query": ["/api/v1/query_range?query=1/(not_nan_not_inf-1)!=inf!=nan&start={TIME_S-3m}&end={TIME_S}&step=60"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[
{"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"], ["{TIME_S}","0.5"]]}
]}}
}


@@ -0,0 +1,16 @@
{
"name": "empty-label-match",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395",
"data": [
"empty_label_match 1 {TIME_S-1m}",
"empty_label_match;foo=bar 2 {TIME_S-1m}",
"empty_label_match;foo=baz 3 {TIME_S-1m}"],
"query": ["/api/v1/query_range?query=empty_label_match{foo=~'bar|'}&start={TIME_S-1m}&end={TIME_S}&step=60"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[
{"metric":{"__name__":"empty_label_match"},"values":[["{TIME_S-1m}","1"],["{TIME_S}","1"]]},
{"metric":{"__name__":"empty_label_match","foo":"bar"},"values":[["{TIME_S-1m}","2"],["{TIME_S}","2"]]}
]}}
}


@@ -0,0 +1,17 @@
{
"name": "graphite-selector",
"issue": "",
"data": [
"graphite-selector.bar.baz 1 {TIME_S-1m}",
"graphite-selector.xxx.yy 2 {TIME_S-1m}",
"graphite-selector.bb.cc 3 {TIME_S-1m}",
"graphite-selector.a.baz 4 {TIME_S-1m}"],
"query": ["/api/v1/query?query=sort({__graphite__='graphite-selector.*.baz'})&time={TIME_S-1m}"],
"result_query": {
"status":"success",
"data":{"resultType":"vector","result":[
{"metric":{"__name__":"graphite-selector.bar.baz"},"value":["{TIME_S-1m}","1"]},
{"metric":{"__name__":"graphite-selector.a.baz"},"value":["{TIME_S-1m}","4"]}
]}
}
}


@@ -0,0 +1,23 @@
{
"name": "max_lookback_set",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
"data": [
"max_lookback_set 1 {TIME_S-30s}",
"max_lookback_set 2 {TIME_S-60s}",
"max_lookback_set 3 {TIME_S-120s}",
"max_lookback_set 4 {TIME_S-150s}"
],
"query": ["/api/v1/query_range?query=max_lookback_set&start={TIME_S-150s}&end={TIME_S}&step=10s&max_lookback=1s"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[{"metric":{"__name__":"max_lookback_set"},"values":[
["{TIME_S-150s}","4"],
["{TIME_S-120s}","3"],
["{TIME_S-60s}","2"],
["{TIME_S-30s}","1"],
["{TIME_S-20s}","1"],
["{TIME_S-10s}","1"],
["{TIME_S-0s}","1"]
]}]}}
}


@@ -0,0 +1,31 @@
{
"name": "max_lookback_unset",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
"data": [
"max_lookback_unset 1 {TIME_S-30s}",
"max_lookback_unset 2 {TIME_S-60s}",
"max_lookback_unset 3 {TIME_S-120s}",
"max_lookback_unset 4 {TIME_S-150s}"
],
"query": ["/api/v1/query_range?query=max_lookback_unset&start={TIME_S-150s}&end={TIME_S}&step=10s"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[{"metric":{"__name__":"max_lookback_unset"},"values":[
["{TIME_S-150s}","4"],
["{TIME_S-140s}","4"],
["{TIME_S-130s}","4"],
["{TIME_S-120s}","3"],
["{TIME_S-110s}","3"],
["{TIME_S-100s}","3"],
["{TIME_S-90s}","3"],
["{TIME_S-80s}","3"],
["{TIME_S-60s}","2"],
["{TIME_S-50s}","2"],
["{TIME_S-40s}","2"],
["{TIME_S-30s}","1"],
["{TIME_S-20s}","1"],
["{TIME_S-10s}","1"],
["{TIME_S-0s}","1"]
]}]}}
}


@@ -0,0 +1,16 @@
{
"name": "name-plus-negative-filter",
"issue": "",
"data": [
"name-plus-negative-filter;foo=123 1 {TIME_S-1m}",
"name-plus-negative-filter;bar=123 2 {TIME_S-1m}",
"name-plus-negative-filter;foo=qwe 3 {TIME_S-1m}"
],
"query": ["/api/v1/query?query={__name__='name-plus-negative-filter',foo!='123'}&time={TIME_S-1m}"],
"result_query": {
"status":"success",
"data":{"resultType":"vector","result":[
{"metric":{"__name__":"name-plus-negative-filter","foo":"qwe"},"value":["{TIME_S-1m}","3"]}
]}
}
}


@@ -0,0 +1,18 @@
{
"name": "not-nan-as-missing-data",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153",
"data": [
"not_nan_as_missing_data;item=x 2 {TIME_S-2m}",
"not_nan_as_missing_data;item=x 1 {TIME_S-1m}",
"not_nan_as_missing_data;item=y 4 {TIME_S-2m}",
"not_nan_as_missing_data;item=y 3 {TIME_S-1m}"
],
"query": ["/api/v1/query_range?query=not_nan_as_missing_data>1&start={TIME_S-2m}&end={TIME_S}&step=60"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[
{"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]},
{"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"],["{TIME_S}", "3"]]}
]}}
}


@@ -0,0 +1,14 @@
{
"name": "subquery-aggregation",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
"data": [
"forms_daily_count;item=x 1 {TIME_S-59s}",
"forms_daily_count;item=x 2 {TIME_S-1m59s}",
"forms_daily_count;item=y 3 {TIME_S-59s}",
"forms_daily_count;item=y 4 {TIME_S-1m59s}"],
"query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}&latency_offset=1ms"],
"result_query": {
"status":"success",
"data":{"resultType":"vector","result":[{"metric":{"item":"x"},"value":["{TIME_S-1m}","2"]},{"metric":{"item":"y"},"value":["{TIME_S-1m}","4"]}]}
}
}


@@ -0,0 +1,9 @@
{
"name": "basic_insertion",
"data": ["measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME_NS}"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MS}"]},
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,10 @@
{
"name": "insert_with_extra_labels",
"data": ["measurement,tag1=value1,tag2=value2 field6=1.23,field5=123 {TIME_NS}"],
"insert_query": "?extra_label=job=test&extra_label=tag2=value10",
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"measurement_field5","tag1":"value1","job": "test","tag2":"value10"},"values":[123], "timestamps": ["{TIME_MS}"]},
{"metric":{"__name__":"measurement_field6","tag1":"value1","job": "test","tag2":"value10"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["put openstdb.foo.bar.baz {TIME_S} 123 tag1=value1 tag2=value2"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"openstdb.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
]
}


@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["{\"metric\": \"opentsdbhttp.foo\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"opentsdbhttp.foo","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
]
}


@@ -0,0 +1,9 @@
{
"name": "multiline",
"data": ["[{\"metric\": \"opentsdbhttp.multiline1\", \"value\": 1001, \"timestamp\": \"{TIME_S}\", \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}, {\"metric\": \"opentsdbhttp.multiline2\", \"value\": 1002, \"timestamp\": {TIME_S}}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"opentsdbhttp.multiline1","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]},
{"metric":{"__name__":"opentsdbhttp.multiline2"},"values":[1002], "timestamps": ["{TIME_MSZ}"]}
]
}


@@ -0,0 +1,9 @@
{
"name": "insert_with_extra_labels",
"data": ["{\"metric\": \"opentsdbhttp.foobar\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
"insert_query": "?extra_label=job=open-test&extra_label=x=z",
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"opentsdbhttp.foobar","bar":"baz","x":"z","job": "open-test"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
]
}


@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.bar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.bar","baz":"qux"},"values":[100000], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,10 @@
{
"name": "case-sensitive-regex",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/161",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"sensitiveRegex\"}],\"samples\":[{\"value\":2,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"SensitiveRegex\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={label=~'(?i)sensitiveregex'}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"sensitiveRegex"},"values":[2], "timestamps": ["{TIME_MS}"]},
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"SensitiveRegex"},"values":[1], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,9 @@
{
"name": "duplicate_label",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/172",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.duplicate_label\"},{\"name\":\"duplicate\",\"value\":\"label\"},{\"name\":\"duplicate\",\"value\":\"label\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.duplicate_label","duplicate":"label"},"values":[1], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,12 @@
{
"name": "instant query with look-behind window",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
"query": ["/api/v1/query?query=foo[5m]"],
"result_query": {
"status": "success",
"data":{
"resultType":"matrix",
"result":[{"metric":{"__name__":"foo"},"values":[["{TIME_S-60s}", "1"]]}]
}
}
}


@@ -0,0 +1,11 @@
{
"name": "instant scalar query",
"query": ["/api/v1/query?query=42&time={TIME_S}"],
"result_query": {
"status": "success",
"data":{
"resultType":"vector",
"result":[{"metric":{},"value":["{TIME_S}", "42"]}]
}
}
}


@@ -0,0 +1,13 @@
{
"name": "too big look-behind window",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"},{\"name\":\"issue\",\"value\":\"5553\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
"query": ["/api/v1/query?query=foo{issue=\"5553\"}[100y]"],
"result_query": {
"status": "success",
"data":{
"resultType":"matrix",
"result":[{"metric":{"__name__":"foo", "issue": "5553"},"values":[["{TIME_S-60s}", "1"]]}]
}
}
}


@@ -0,0 +1,15 @@
{
"name": "match_series",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/155",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"1\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"2\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"3\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"4\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/series?match[]={__name__='MatchSeries'}", "/api/v1/series?match[]={__name__=~'MatchSeries.*'}"],
"result_series": {
"status": "success",
"data": [
{"__name__":"MatchSeries","db":"TenMinute","Park":"1","TurbineType":"V112"},
{"__name__":"MatchSeries","db":"TenMinute","Park":"2","TurbineType":"V112"},
{"__name__":"MatchSeries","db":"TenMinute","Park":"3","TurbineType":"V112"},
{"__name__":"MatchSeries","db":"TenMinute","Park":"4","TurbineType":"V112"}
]
}
}


@@ -0,0 +1,18 @@
{
"name": "query range",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"bar\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}, {\"value\":2,\"timestamp\":\"{TIME_MS-120s}\"}, {\"value\":1,\"timestamp\":\"{TIME_MS-180s}\"}]}]"],
"query": ["/api/v1/query_range?query=bar&step=30s&start={TIME_MS-180s}"],
"result_query": {
"status": "success",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{"__name__":"bar"},
"values":[["{TIME_S-180s}", "1"],["{TIME_S-150s}", "1"],["{TIME_S-120s}", "2"],["{TIME_S-90s}", "2"], ["{TIME_S-60s}", "1"], ["{TIME_S-30s}", "1"], ["{TIME_S}", "1"]]
}
]
}
}
}


@@ -0,0 +1,9 @@
{
"name": "basic_insertion_with_extra_labels",
"insert_query": "?extra_label=job=prom-test&extra_label=baz=bar",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.foobar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.foobar","baz":"bar","job": "prom-test"},"values":[100000], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,8 @@
{
"name": "basic_select_with_extra_labels",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.tenant.limits\"},{\"name\":\"baz\",\"value\":\"qux\"},{\"name\":\"tenant\",\"value\":\"dev\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.up\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}&extra_label=tenant=dev"],
"result_metrics": [
{"metric":{"__name__":"prometheus.tenant.limits","baz":"qux","tenant": "dev"},"values":[100000], "timestamps": ["{TIME_MS}"]}
]
}


@@ -0,0 +1,274 @@
package datadog
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)
var (
datadogStreamFields = flagutil.NewArrayString("datadog.streamFields", "Datadog tags to be used as stream fields.")
datadogIgnoreFields = flagutil.NewArrayString("datadog.ignoreFields", "Datadog tags to ignore.")
)
var parserPool fastjson.ParserPool
// RequestHandler processes Datadog insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
case "/api/v1/validate":
fmt.Fprintf(w, `{}`)
return true
case "/api/v2/logs":
return datadogLogsIngestion(w, r)
default:
return false
}
}
func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
w.Header().Add("Content-Type", "application/json")
startTime := time.Now()
v2LogsRequestsTotal.Inc()
reader := r.Body
var ts int64
if tsValue := r.Header.Get("dd-message-timestamp"); tsValue != "" && tsValue != "0" {
var err error
ts, err = strconv.ParseInt(tsValue, 10, 64)
if err != nil {
httpserver.Errorf(w, r, "could not parse dd-message-timestamp header value: %s", err)
return true
}
ts *= 1e6
} else {
ts = startTime.UnixNano()
}
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot read gzipped logs request: %s", err)
return true
}
defer common.PutGzipReader(zr)
reader = zr
}
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return true
}
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
if len(cp.StreamFields) == 0 {
cp.StreamFields = *datadogStreamFields
}
if len(cp.IgnoreFields) == 0 {
cp.IgnoreFields = *datadogIgnoreFields
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
lmp := cp.NewLogMessageProcessor("datadog")
err = readLogsRequest(ts, data, lmp)
lmp.MustClose()
if err != nil {
logger.Warnf("cannot decode log message in /api/v2/logs request: %s, stream fields: %s", err, cp.StreamFields)
return true
}
// update v2LogsRequestDuration only for successfully parsed requests
// There is no need to update v2LogsRequestDuration for request errors,
// since their timings are usually much smaller than the timing of successful request parsing.
v2LogsRequestDuration.UpdateDuration(startTime)
fmt.Fprintf(w, `{}`)
return true
}
var (
v2LogsRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/datadog/api/v2/logs"}`)
v2LogsRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
)
// datadog message field has two formats:
// - regular log message with string text
// - nested json format for serverless plugins
// which has the following format:
// {"message": {"message": "text","lamdba": {"arn": "string","requestID": "string"}, "timestamp": int64} }
//
// See https://github.com/DataDog/datadog-lambda-extension/blob/28b90c7e4e985b72d60b5f5a5147c69c7ac693c4/bottlecap/src/logs/lambda/mod.rs#L24
func appendMsgFields(fields []logstorage.Field, v *fastjson.Value) ([]logstorage.Field, error) {
switch v.Type() {
case fastjson.TypeString:
val := v.GetStringBytes()
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(val),
})
case fastjson.TypeObject:
var firstErr error
v.GetObject().Visit(func(k []byte, v *fastjson.Value) {
if firstErr != nil {
return
}
switch bytesutil.ToUnsafeString(k) {
case "message":
val := v.GetStringBytes()
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(val),
})
case "status":
val := v.GetStringBytes()
fields = append(fields, logstorage.Field{
Name: "status",
Value: bytesutil.ToUnsafeString(val),
})
case "lamdba":
obj, err := v.Object()
if err != nil {
firstErr = fmt.Errorf("unexpected lambda value type for %q:%q; want object", k, v)
return
}
obj.Visit(func(k []byte, v *fastjson.Value) {
if firstErr != nil {
return
}
val, err := v.StringBytes()
if err != nil {
firstErr = fmt.Errorf("unexpected lambda label value type for %q:%q; want string", k, v)
return
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(val),
})
})
}
})
default:
return fields, fmt.Errorf("unsupported message type %q", v.Type().String())
}
return fields, nil
}
// readLogsRequest parses data according to DataDog logs format
// https://docs.datadoghq.com/api/latest/logs/#send-logs
func readLogsRequest(ts int64, data []byte, lmp insertutils.LogMessageProcessor) error {
p := parserPool.Get()
defer parserPool.Put(p)
v, err := p.ParseBytes(data)
if err != nil {
return fmt.Errorf("cannot parse JSON request body: %w", err)
}
records, err := v.Array()
if err != nil {
return fmt.Errorf("cannot extract array from parsed JSON: %w", err)
}
var fields []logstorage.Field
for _, r := range records {
o, err := r.Object()
if err != nil {
return fmt.Errorf("could not extract log record: %w", err)
}
o.Visit(func(k []byte, v *fastjson.Value) {
if err != nil {
return
}
switch bytesutil.ToUnsafeString(k) {
case "message":
fields, err = appendMsgFields(fields, v)
if err != nil {
return
}
case "timestamp":
val, e := v.Int64()
if e != nil {
err = fmt.Errorf("failed to parse timestamp for %q:%q", k, v)
}
if val > 0 {
ts = val * 1e6
}
case "ddtags":
// https://docs.datadoghq.com/getting_started/tagging/
val, e := v.StringBytes()
if e != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
var pair []byte
idx := 0
for idx >= 0 {
idx = bytes.IndexByte(val, ',')
if idx < 0 {
pair = val
} else {
pair = val[:idx]
val = val[idx+1:]
}
if len(pair) > 0 {
n := bytes.IndexByte(pair, ':')
if n < 0 {
// No tag value; store the bare tag with a placeholder value.
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(pair),
Value: "no_label_value",
})
} else {
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(pair[:n]),
Value: bytesutil.ToUnsafeString(pair[n+1:]),
})
}
}
}
default:
val, e := v.StringBytes()
if e != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(val),
})
}
})
if err != nil {
return err
}
lmp.AddRow(ts, fields, nil)
fields = fields[:0]
}
return nil
}
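
A request of the shape this parser accepts can be reproduced with curl (a sketch; the /insert/datadog URL prefix is taken from the metric labels above, and the default VictoriaLogs port is assumed):

```sh
curl -X POST 'http://localhost:9428/insert/datadog/api/v2/logs' \
  -H 'Content-Type: application/json' \
  -d '[{"ddsource":"nginx","ddtags":"env:dev,team:core","hostname":"127.0.0.1","message":"hello from curl","service":"test"}]'
# On success the handler replies with {}.
```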


@@ -0,0 +1,104 @@
package datadog
import (
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestReadLogsRequestFailure(t *testing.T) {
f := func(data string) {
t.Helper()
ts := time.Now().UnixNano()
lmp := &insertutils.TestLogMessageProcessor{}
if err := readLogsRequest(ts, []byte(data), lmp); err == nil {
t.Fatalf("expecting non-empty error")
}
if err := lmp.Verify(nil, ""); err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
f("foobar")
f(`{}`)
f(`["create":{}]`)
f(`{"create":{}}
foobar`)
}
func TestReadLogsRequestSuccess(t *testing.T) {
f := func(data string, rowsExpected int, resultExpected string) {
t.Helper()
ts := time.Now().UnixNano()
var timestampsExpected []int64
for i := 0; i < rowsExpected; i++ {
timestampsExpected = append(timestampsExpected, ts)
}
lmp := &insertutils.TestLogMessageProcessor{}
if err := readLogsRequest(ts, []byte(data), lmp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := lmp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
// Verify non-empty data
data := `[
{
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"bar",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":{"message": "nested"},
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"foobar",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"baz",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}, {
"ddsource": "nginx",
"ddtags":"tag1:value1,tag2:value2,",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":",tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}
]`
rowsExpected := 7
resultExpected := `{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"bar","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"nested","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"foobar","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"baz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}`
f(data, rowsExpected, resultExpected)
}

View File

@@ -0,0 +1,20 @@
{% stripspace %}
{% func BulkResponse(n int, tookMs int64) %}
{
"took":{%dl tookMs %},
"errors":false,
"items":[
{% for i := 0; i < n; i++ %}
{
"create":{
"status":201
}
}
{% if i+1 < n %},{% endif %}
{% endfor %}
]
}
{% endfunc %}
{% endstripspace %}

View File

@@ -0,0 +1,69 @@
// Code generated by qtc from "bulk_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
package elasticsearch
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
func StreamBulkResponse(qw422016 *qt422016.Writer, n int, tookMs int64) {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
qw422016.N().S(`{"took":`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:5
qw422016.N().DL(tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:5
qw422016.N().S(`,"errors":false,"items":[`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:8
for i := 0; i < n; i++ {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:8
qw422016.N().S(`{"create":{"status":201}}`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
if i+1 < n {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
qw422016.N().S(`,`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:15
}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:15
qw422016.N().S(`]}`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
func WriteBulkResponse(qq422016 qtio422016.Writer, n int, tookMs int64) {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
StreamBulkResponse(qw422016, n, tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
qt422016.ReleaseWriter(qw422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
func BulkResponse(n int, tookMs int64) string {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
qb422016 := qt422016.AcquireByteBuffer()
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
WriteBulkResponse(qb422016, n, tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
qs422016 := string(qb422016.B)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
qt422016.ReleaseByteBuffer(qb422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
return qs422016
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}

View File

@@ -0,0 +1,248 @@
package elasticsearch
import (
"flag"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)
var (
elasticsearchVersion = flag.String("elasticsearch.version", "8.9.0", "Elasticsearch version to report to client")
)
// RequestHandler processes Elasticsearch insert requests
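//
// Besides /_bulk data ingestion it returns fake successful responses for /, /_license,
// /_ilm/policy, /_index_template, /_ingest, /_nodes and Logstash probe endpoints,
// so agents such as Filebeat and Logstash can talk to VictoriaLogs as if it were
// a real Elasticsearch instance.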
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
w.Header().Add("Content-Type", "application/json")
// This header is needed for Logstash
w.Header().Set("X-Elastic-Product", "Elasticsearch")
if strings.HasPrefix(path, "/_ilm/policy") {
// Return fake response for Elasticsearch ilm request.
fmt.Fprintf(w, `{}`)
return true
}
if strings.HasPrefix(path, "/_index_template") {
// Return fake response for Elasticsearch index template request.
fmt.Fprintf(w, `{}`)
return true
}
if strings.HasPrefix(path, "/_ingest") {
// Return fake response for Elasticsearch ingest pipeline request.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/put-pipeline-api.html
fmt.Fprintf(w, `{}`)
return true
}
if strings.HasPrefix(path, "/_nodes") {
// Return fake response for Elasticsearch nodes discovery request.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/cluster.html
fmt.Fprintf(w, `{}`)
return true
}
if strings.HasPrefix(path, "/logstash") || strings.HasPrefix(path, "/_logstash") {
// Return fake response for Logstash APIs requests.
// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/logstash-apis.html
fmt.Fprintf(w, `{}`)
return true
}
switch path {
case "/":
switch r.Method {
case http.MethodGet:
// Return fake response for Elasticsearch ping request.
// See the latest available version for Elasticsearch at https://github.com/elastic/elasticsearch/releases
fmt.Fprintf(w, `{
"version": {
"number": %q
}
}`, *elasticsearchVersion)
case http.MethodHead:
// Return empty response for Logstash ping request.
}
return true
case "/_license":
// Return fake response for Elasticsearch license request.
fmt.Fprintf(w, `{
"license": {
"uid": "cbff45e7-c553-41f7-ae4f-9205eabd80xx",
"type": "oss",
"status": "active",
"expiry_date_in_millis" : 4000000000000
}
}`)
return true
case "/_bulk":
startTime := time.Now()
bulkRequestsTotal.Inc()
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
lmp := cp.NewLogMessageProcessor("elasticsearch_bulk")
isGzip := r.Header.Get("Content-Encoding") == "gzip"
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
n, err := readBulkRequest(streamName, r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
lmp.MustClose()
if err != nil {
logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
return true
}
tookMs := time.Since(startTime).Milliseconds()
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
WriteBulkResponse(bw, n, tookMs)
_ = bw.Flush()
// update bulkRequestDuration only for successfully parsed requests
// There is no need in updating bulkRequestDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
bulkRequestDuration.UpdateDuration(startTime)
return true
default:
return false
}
}
var (
bulkRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
)
func readBulkRequest(streamName string, r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
if isGzip {
zr, err := common.GetGzipReader(r)
if err != nil {
return 0, fmt.Errorf("cannot read gzipped _bulk request: %w", err)
}
defer common.PutGzipReader(zr)
r = zr
}
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
lr := insertutils.NewLineReader(streamName, wcr)
n := 0
for {
ok, err := readBulkLine(lr, timeField, msgFields, lmp)
wcr.DecConcurrency()
if err != nil || !ok {
return n, err
}
n++
}
}
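// readBulkLine reads a single (command, log message) line pair from lr and passes the parsed log message to lmp.
//
// It returns false when no more lines are left in lr.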
func readBulkLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
var line []byte
// Read the command, must be "create" or "index"
for len(line) == 0 {
if !lr.NextLine() {
err := lr.Err()
return false, err
}
line = lr.Line
}
lineStr := bytesutil.ToUnsafeString(line)
if !strings.Contains(lineStr, `"create"`) && !strings.Contains(lineStr, `"index"`) {
return false, fmt.Errorf(`unexpected command %q; expecting "create" or "index"`, line)
}
// Decode log message
if !lr.NextLine() {
if err := lr.Err(); err != nil {
return false, err
}
return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
}
line = lr.Line
if len(line) == 0 {
// Special case - the line could be too long, so it was skipped.
// Continue parsing next lines.
return true, nil
}
p := logstorage.GetJSONParser()
if err := p.ParseLogMessage(line); err != nil {
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
}
ts, err := extractTimestampFromFields(timeField, p.Fields)
if err != nil {
return false, fmt.Errorf("cannot parse timestamp: %w", err)
}
if ts == 0 {
ts = time.Now().UnixNano()
}
logstorage.RenameField(p.Fields, msgFields, "_msg")
lmp.AddRow(ts, p.Fields, nil)
logstorage.PutJSONParser(p)
return true, nil
}
func extractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
for i := range fields {
f := &fields[i]
if f.Name != timeField {
continue
}
timestamp, err := parseElasticsearchTimestamp(f.Value)
if err != nil {
return 0, err
}
f.Value = ""
return timestamp, nil
}
return 0, nil
}
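// parseElasticsearchTimestamp parses s into Unix nanoseconds. It accepts Unix timestamps
// in seconds, milliseconds, microseconds or nanoseconds, YYYY-MM-DD dates and RFC3339 timestamps.
// It returns 0 for empty or zero s, so the caller can substitute the current time.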
func parseElasticsearchTimestamp(s string) (int64, error) {
if s == "0" || s == "" {
// Special case - zero or empty timestamp must be substituted
// with the current time by the caller.
return 0, nil
}
if len(s) < len("YYYY-MM-DD") || s[len("YYYY")] != '-' {
// Try parsing a Unix timestamp in seconds, milliseconds, microseconds or nanoseconds
return insertutils.ParseUnixTimestamp(s)
}
if len(s) == len("YYYY-MM-DD") {
t, err := time.Parse("2006-01-02", s)
if err != nil {
return 0, fmt.Errorf("cannot parse date %q: %w", s, err)
}
return t.UnixNano(), nil
}
nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
if !ok {
return 0, fmt.Errorf("cannot parse timestamp %q", s)
}
return nsecs, nil
}

View File

@@ -0,0 +1,106 @@
package elasticsearch
import (
"bytes"
"compress/gzip"
"fmt"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestReadBulkRequest_Failure(t *testing.T) {
f := func(data string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
rows, err := readBulkRequest("test", r, false, "_time", []string{"_msg"}, tlp)
if err == nil {
t.Fatalf("expecting non-empty error")
}
if rows != 0 {
t.Fatalf("unexpected non-zero rows=%d", rows)
}
}
f("foobar")
f(`{}`)
f(`{"create":{}}`)
f(`{"creat":{}}
{}`)
f(`{"create":{}}
foobar`)
}
func TestReadBulkRequest_Success(t *testing.T) {
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
t.Helper()
msgFields := []string{"non_existing_foo", msgField, "non_existing_bar"}
tlp := &insertutils.TestLogMessageProcessor{}
// Read the request without compression
r := bytes.NewBufferString(data)
rows, err := readBulkRequest("test", r, false, timeField, msgFields, tlp)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if rows != len(timestampsExpected) {
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
// Read the request with compression
tlp = &insertutils.TestLogMessageProcessor{}
compressedData := compressData(data)
r = bytes.NewBufferString(compressedData)
rows, err = readBulkRequest("test", r, true, timeField, msgFields, tlp)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if rows != len(timestampsExpected) {
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatalf("verification failure after compression: %s", err)
}
}
// Verify empty data
f("", "_time", "_msg", nil, "")
f("\n", "_time", "_msg", nil, "")
f("\n\n", "_time", "_msg", nil, "")
// Verify non-empty data
data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06 04:48:12.735+01:00","message":"baz"}
{"index":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"1686026893735","x":"y"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"qwe rty","@timestamp":"1686026893"}
`
timeField := "@timestamp"
msgField := "message"
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000}
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"_msg":"baz"}
{"_msg":"xyz","x":"y"}
{"_msg":"qwe rty"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
}
func compressData(s string) string {
var bb bytes.Buffer
zw := gzip.NewWriter(&bb)
if _, err := zw.Write([]byte(s)); err != nil {
panic(fmt.Errorf("unexpected error when compressing data: %w", err))
}
if err := zw.Close(); err != nil {
panic(fmt.Errorf("unexpected error when closing gzip writer: %w", err))
}
return bb.String()
}

View File

@@ -0,0 +1,50 @@
package elasticsearch
import (
"bytes"
"fmt"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)
func BenchmarkReadBulkRequest(b *testing.B) {
b.Run("gzip:off", func(b *testing.B) {
benchmarkReadBulkRequest(b, false)
})
b.Run("gzip:on", func(b *testing.B) {
benchmarkReadBulkRequest(b, true)
})
}
func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
if isGzip {
data = compressData(data)
}
dataBytes := bytesutil.ToUnsafeBytes(data)
timeField := "@timestamp"
msgFields := []string{"message"}
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(len(data)))
b.RunParallel(func(pb *testing.PB) {
r := &bytes.Reader{}
for pb.Next() {
r.Reset(dataBytes)
_, err := readBulkRequest("test", r, isGzip, timeField, msgFields, blp)
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
})
}

View File

@@ -0,0 +1,262 @@
package insertutils
import (
"flag"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)
var (
defaultMsgValue = flag.String("defaultMsgValue", "missing _msg field; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field",
"Default value for _msg field if the ingested log entry doesn't contain it; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field")
)
// CommonParams contains common HTTP parameters used by log ingestion APIs.
//
// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
type CommonParams struct {
TenantID logstorage.TenantID
TimeField string
MsgFields []string
StreamFields []string
IgnoreFields []string
ExtraFields []logstorage.Field
Debug bool
DebugRequestURI string
DebugRemoteAddr string
}
// GetCommonParams returns CommonParams from r.
func GetCommonParams(r *http.Request) (*CommonParams, error) {
// Extract tenantID
tenantID, err := logstorage.GetTenantIDFromRequest(r)
if err != nil {
return nil, err
}
timeField := "_time"
if tf := httputils.GetRequestValue(r, "_time_field", "VL-Time-Field"); tf != "" {
timeField = tf
}
msgFields := httputils.GetArray(r, "_msg_field", "VL-Msg-Field")
streamFields := httputils.GetArray(r, "_stream_fields", "VL-Stream-Fields")
ignoreFields := httputils.GetArray(r, "ignore_fields", "VL-Ignore-Fields")
extraFields, err := getExtraFields(r)
if err != nil {
return nil, err
}
debug := false
if dv := httputils.GetRequestValue(r, "debug", "VL-Debug"); dv != "" {
debug, err = strconv.ParseBool(dv)
if err != nil {
return nil, fmt.Errorf("cannot parse debug=%q: %w", dv, err)
}
}
debugRequestURI := ""
debugRemoteAddr := ""
if debug {
debugRequestURI = httpserver.GetRequestURI(r)
debugRemoteAddr = httpserver.GetQuotedRemoteAddr(r)
}
cp := &CommonParams{
TenantID: tenantID,
TimeField: timeField,
MsgFields: msgFields,
StreamFields: streamFields,
IgnoreFields: ignoreFields,
ExtraFields: extraFields,
Debug: debug,
DebugRequestURI: debugRequestURI,
DebugRemoteAddr: debugRemoteAddr,
}
return cp, nil
}
func getExtraFields(r *http.Request) ([]logstorage.Field, error) {
efs := httputils.GetArray(r, "extra_fields", "VL-Extra-Fields")
if len(efs) == 0 {
return nil, nil
}
extraFields := make([]logstorage.Field, len(efs))
for i, ef := range efs {
n := strings.Index(ef, "=")
if n <= 0 || n == len(ef)-1 {
return nil, fmt.Errorf(`invalid extra_field format: %q; must be in the form "field=value"`, ef)
}
extraFields[i] = logstorage.Field{
Name: ef[:n],
Value: ef[n+1:],
}
}
return extraFields, nil
}
// GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignoreFields []string, extraFields []logstorage.Field) *CommonParams {
// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
if streamFields == nil {
streamFields = []string{
"hostname",
"app_name",
"proc_id",
}
}
cp := &CommonParams{
TenantID: tenantID,
TimeField: "timestamp",
MsgFields: []string{
"message",
},
StreamFields: streamFields,
IgnoreFields: ignoreFields,
ExtraFields: extraFields,
}
return cp
}
// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.
//
// If streamFields is non-nil, then the given streamFields must be used as log stream fields instead of pre-configured fields.
//
// The LogMessageProcessor implementation must not hold references to fields, since the caller may re-use them.
AddRow(timestamp int64, fields, streamFields []logstorage.Field)
// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
MustClose()
}
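// A typical LogMessageProcessor lifecycle:
//
//	lmp := cp.NewLogMessageProcessor("protocol_name")
//	lmp.AddRow(timestamp, fields, nil)
//	lmp.MustClose()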
type logMessageProcessor struct {
mu sync.Mutex
wg sync.WaitGroup
stopCh chan struct{}
lastFlushTime time.Time
cp *CommonParams
lr *logstorage.LogRows
rowsIngestedTotal *metrics.Counter
bytesIngestedTotal *metrics.Counter
}
func (lmp *logMessageProcessor) initPeriodicFlush() {
lmp.lastFlushTime = time.Now()
lmp.wg.Add(1)
go func() {
defer lmp.wg.Done()
d := timeutil.AddJitterToDuration(time.Second)
ticker := time.NewTicker(d)
defer ticker.Stop()
for {
select {
case <-lmp.stopCh:
return
case <-ticker.C:
lmp.mu.Lock()
if time.Since(lmp.lastFlushTime) >= d {
lmp.flushLocked()
}
lmp.mu.Unlock()
}
}
}()
}
// AddRow adds new log message to lmp with the given timestamp and fields.
//
// If streamFields is non-nil, then it is used as log stream fields instead of the pre-configured stream fields.
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
lmp.mu.Lock()
defer lmp.mu.Unlock()
lmp.rowsIngestedTotal.Inc()
n := logstorage.EstimatedJSONRowLen(fields)
lmp.bytesIngestedTotal.Add(n)
if len(fields) > *MaxFieldsPerLine {
line := logstorage.MarshalFieldsToJSON(nil, fields)
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, line)
rowsDroppedTotalTooManyFields.Inc()
return
}
lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields, streamFields)
if lmp.cp.Debug {
s := lmp.lr.GetRowString(0)
lmp.lr.ResetKeepSettings()
logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
rowsDroppedTotalDebug.Inc()
return
}
if lmp.lr.NeedFlush() {
lmp.flushLocked()
}
}
// flushLocked must be called under locked lmp.mu.
func (lmp *logMessageProcessor) flushLocked() {
lmp.lastFlushTime = time.Now()
vlstorage.MustAddRows(lmp.lr)
lmp.lr.ResetKeepSettings()
}
// MustClose flushes the remaining data to the underlying storage and closes lmp.
func (lmp *logMessageProcessor) MustClose() {
close(lmp.stopCh)
lmp.wg.Wait()
lmp.flushLocked()
logstorage.PutLogRows(lmp.lr)
lmp.lr = nil
}
// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
func (cp *CommonParams) NewLogMessageProcessor(protocolName string) LogMessageProcessor {
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.ExtraFields, *defaultMsgValue)
rowsIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_rows_ingested_total{type=%q}", protocolName))
bytesIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
lmp := &logMessageProcessor{
cp: cp,
lr: lr,
rowsIngestedTotal: rowsIngestedTotal,
bytesIngestedTotal: bytesIngestedTotal,
stopCh: make(chan struct{}),
}
lmp.initPeriodicFlush()
return lmp
}
var (
rowsDroppedTotalDebug = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)
)

View File

@@ -0,0 +1,17 @@
package insertutils
import (
"flag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)
var (
// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have")
// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#how-many-fields-a-single-log-entry-may-contain")
)

View File

@@ -0,0 +1,146 @@
package insertutils
import (
"bytes"
"errors"
"fmt"
"io"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// LineReader reads newline-delimited lines from the underlying reader
type LineReader struct {
// Line contains the next line read after the call to NextLine
//
// The Line contents remain valid until the next call to NextLine.
Line []byte
// name is the LineReader name
name string
// r is the underlying reader to read data from
r io.Reader
// buf is a buffer for reading the next line
buf []byte
// bufOffset is the offset at buf to read the next line from
bufOffset int
// err is the last error when reading data from r
err error
// eofReached is set to true when all the data is read from r
eofReached bool
}
// NewLineReader returns LineReader for r.
func NewLineReader(name string, r io.Reader) *LineReader {
return &LineReader{
name: name,
r: r,
}
}
// NextLine reads the next line from the underlying reader.
//
// It returns true if the next line is successfully read into Line.
// If the line length exceeds MaxLineSizeBytes, then this line is skipped
// and an empty line is returned instead.
//
// If false is returned, then no more lines left to read from r.
// Check for Err in this case.
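//
// A typical read loop:
//
//	lr := NewLineReader("stream name", r)
//	for lr.NextLine() {
//		// process lr.Line; its contents are valid until the next NextLine() call
//	}
//	if err := lr.Err(); err != nil {
//		// handle the read error
//	}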
func (lr *LineReader) NextLine() bool {
for {
if lr.bufOffset >= len(lr.buf) {
if lr.err != nil || lr.eofReached {
return false
}
if !lr.readMoreData() {
return false
}
if lr.bufOffset >= len(lr.buf) && lr.eofReached {
return false
}
}
buf := lr.buf[lr.bufOffset:]
if n := bytes.IndexByte(buf, '\n'); n >= 0 {
lr.Line = buf[:n]
lr.bufOffset += n + 1
return true
}
if lr.eofReached {
lr.Line = buf
lr.bufOffset += len(buf)
return true
}
if !lr.readMoreData() {
return false
}
}
}
// Err returns the last error after NextLine call.
func (lr *LineReader) Err() error {
if lr.err == nil {
return nil
}
return fmt.Errorf("%s: %s", lr.name, lr.err)
}
func (lr *LineReader) readMoreData() bool {
if lr.bufOffset > 0 {
lr.buf = append(lr.buf[:0], lr.buf[lr.bufOffset:]...)
lr.bufOffset = 0
}
bufLen := len(lr.buf)
if bufLen >= MaxLineSizeBytes.IntN() {
logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; line contents=%q", lr.name, MaxLineSizeBytes.IntN(), lr.buf)
tooLongLinesSkipped.Inc()
return lr.skipUntilNextLine()
}
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
n, err := lr.r.Read(lr.buf[bufLen:])
lr.buf = lr.buf[:bufLen+n]
if err != nil {
if errors.Is(err, io.EOF) {
lr.eofReached = true
return true
}
lr.err = fmt.Errorf("cannot read the next line: %s", err)
}
return n > 0
}
var tooLongLinesSkipped = metrics.NewCounter("vl_too_long_lines_skipped_total")
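// skipUntilNextLine discards data until the next '\n', so the caller observes
// the skipped too-long line as an empty line.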
func (lr *LineReader) skipUntilNextLine() bool {
for {
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
n, err := lr.r.Read(lr.buf)
lr.buf = lr.buf[:n]
if err != nil {
if errors.Is(err, io.EOF) {
lr.eofReached = true
lr.buf = lr.buf[:0]
return true
}
lr.err = fmt.Errorf("cannot skip the current line: %s", err)
return false
}
if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
// Include '\n' in the buf, so the too-long line is replaced with an empty line.
// This is needed for maintaining synchronization between lines
// in protocols such as the Elasticsearch bulk import.
lr.buf = append(lr.buf[:0], lr.buf[n:]...)
return true
}
}
}

View File

@@ -0,0 +1,161 @@
package insertutils
import (
"bytes"
"fmt"
"io"
"reflect"
"testing"
)
func TestLineReader_Success(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
r := bytes.NewBufferString(data)
lr := NewLineReader("foo", r)
var lines []string
for lr.NextLine() {
lines = append(lines, string(lr.Line))
}
if err := lr.Err(); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if lr.NextLine() {
t.Fatalf("expecting error on the second call to NextLine()")
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
}
}
f("", nil)
f("\n", []string{""})
f("\n\n", []string{"", ""})
f("foo", []string{"foo"})
f("foo\n", []string{"foo"})
f("\nfoo", []string{"", "foo"})
f("foo\n\n", []string{"foo", ""})
f("foo\nbar", []string{"foo", "bar"})
f("foo\nbar\n", []string{"foo", "bar"})
f("\nfoo\n\nbar\n\n", []string{"", "foo", "", "bar", ""})
}
func TestLineReader_SkipUntilNextLine(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
r := bytes.NewBufferString(data)
lr := NewLineReader("foo", r)
var lines []string
for lr.NextLine() {
lines = append(lines, string(lr.Line))
}
if err := lr.Err(); err != nil {
t.Fatalf("unexpected error for data=%q: %s", data, err)
}
if lr.NextLine() {
t.Fatalf("expecting error on the second call to NextLine()")
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines for data=%q\ngot\n%q\nwant\n%q", data, lines, linesExpected)
}
}
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
longLineLen := MaxLineSizeBytes.IntN() + overflow
longLine := string(make([]byte, longLineLen))
// Single long line
data := longLine
f(data, nil)
// Multiple long lines
data = longLine + "\n" + longLine
f(data, []string{""})
data = longLine + "\n" + longLine + "\n"
f(data, []string{"", ""})
// Long line in the middle
data = "foo\n" + longLine + "\nbar"
f(data, []string{"foo", "", "bar"})
// Multiple long lines in the middle
data = "foo\n" + longLine + "\n" + longLine + "\nbar"
f(data, []string{"foo", "", "", "bar"})
// Long line in the end
data = "foo\n" + longLine
f(data, []string{"foo"})
// Long line in the end with a trailing newline
data = "foo\n" + longLine + "\n"
f(data, []string{"foo", ""})
}
}
func TestLineReader_Failure(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
fr := &failureReader{
r: bytes.NewBufferString(data),
}
lr := NewLineReader("foo", fr)
var lines []string
for lr.NextLine() {
lines = append(lines, string(lr.Line))
}
if err := lr.Err(); err == nil {
t.Fatalf("expecting non-nil error")
}
if lr.NextLine() {
t.Fatalf("expecting error on the second call to NextLine()")
}
if err := lr.Err(); err == nil {
t.Fatalf("expecting non-nil error on the second call")
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
}
}
f("", nil)
f("foo", nil)
f("foo\n", []string{"foo"})
f("\n", []string{""})
f("foo\nbar", []string{"foo"})
f("foo\nbar\n", []string{"foo", "bar"})
f("\nfoo\nbar\n\n", []string{"", "foo", "bar", ""})
// long line
longLineLen := MaxLineSizeBytes.IntN()
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
longLine := string(make([]byte, longLineLen+overflow))
data := longLine
f(data, nil)
data = "foo\n" + longLine
f(data, []string{"foo"})
data = longLine + "\nfoo"
f(data, []string{""})
data = longLine + "\nfoo\n"
f(data, []string{"", "foo"})
}
}
type failureReader struct {
r io.Reader
}
func (r *failureReader) Read(p []byte) (int, error) {
n, _ := r.r.Read(p)
if n > 0 {
return n, nil
}
return 0, fmt.Errorf("some error")
}

View File

@@ -0,0 +1,56 @@
package insertutils
import (
"fmt"
"reflect"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
// TestLogMessageProcessor implements LogMessageProcessor for testing.
type TestLogMessageProcessor struct {
timestamps []int64
rows []string
}
// AddRow adds row with the given timestamp and fields to tlp
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
if streamFields != nil {
panic(fmt.Errorf("BUG: streamFields must be nil; got %v", streamFields))
}
tlp.timestamps = append(tlp.timestamps, timestamp)
tlp.rows = append(tlp.rows, string(logstorage.MarshalFieldsToJSON(nil, fields)))
}
// MustClose closes tlp.
func (tlp *TestLogMessageProcessor) MustClose() {
}
// Verify verifies the number of rows, timestamps and results after AddRow calls.
func (tlp *TestLogMessageProcessor) Verify(timestampsExpected []int64, resultExpected string) error {
result := strings.Join(tlp.rows, "\n")
if len(tlp.rows) != len(timestampsExpected) {
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), len(timestampsExpected), result, resultExpected)
}
if !reflect.DeepEqual(tlp.timestamps, timestampsExpected) {
return fmt.Errorf("unexpected timestamps;\ngot\n%d\nwant\n%d", tlp.timestamps, timestampsExpected)
}
if result != resultExpected {
return fmt.Errorf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
}
return nil
}
// BenchmarkLogMessageProcessor implements LogMessageProcessor for benchmarks.
type BenchmarkLogMessageProcessor struct{}
// AddRow implements LogMessageProcessor interface.
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _, _ []logstorage.Field) {
}
// MustClose implements LogMessageProcessor interface.
func (blp *BenchmarkLogMessageProcessor) MustClose() {
}

View File

@@ -0,0 +1,70 @@
package insertutils
import (
"fmt"
"strconv"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
// ExtractTimestampFromFields extracts the timestamp in nanoseconds from the field named timeField in fields.
//
// The value for the timeField is set to an empty string before returning from the function,
// so it can be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with the timeField name or if the timeField value is empty.
func ExtractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
for i := range fields {
f := &fields[i]
if f.Name != timeField {
continue
}
nsecs, err := parseTimestamp(f.Value)
if err != nil {
return 0, fmt.Errorf("cannot parse timestamp from field %q: %s", timeField, err)
}
f.Value = ""
if nsecs == 0 {
nsecs = time.Now().UnixNano()
}
return nsecs, nil
}
return time.Now().UnixNano(), nil
}
func parseTimestamp(s string) (int64, error) {
if s == "" || s == "0" {
return time.Now().UnixNano(), nil
}
if len(s) <= len("YYYY") || s[len("YYYY")] != '-' {
return ParseUnixTimestamp(s)
}
nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
if !ok {
return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp %q", s)
}
return nsecs, nil
}
// ParseUnixTimestamp parses s as unix timestamp in seconds, milliseconds, microseconds or nanoseconds and returns the parsed timestamp in nanoseconds.
func ParseUnixTimestamp(s string) (int64, error) {
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
}
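// Guess the timestamp unit from the magnitude of n: values that fit int32
// are treated as seconds (covering the years 1901..2038), while bigger values
// are treated as milliseconds, microseconds and nanoseconds respectively.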
if n < (1<<31) && n >= (-1<<31) {
// The timestamp is in seconds.
return n * 1e9, nil
}
if n < 1e3*(1<<31) && n >= 1e3*(-1<<31) {
// The timestamp is in milliseconds.
return n * 1e6, nil
}
if n < 1e6*(1<<31) && n >= 1e6*(-1<<31) {
// The timestamp is in microseconds.
return n * 1e3, nil
}
// The timestamp is in nanoseconds
return n, nil
}

View File

@@ -0,0 +1,101 @@
package insertutils
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
func TestExtractTimestampFromFields_Success(t *testing.T) {
f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
t.Helper()
nsecs, err := ExtractTimestampFromFields(timeField, fields)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if nsecs != nsecsExpected {
t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, nsecsExpected)
}
for _, f := range fields {
if f.Name == timeField {
if f.Value != "" {
t.Fatalf("unexpected value for field %s; got %q; want %q", timeField, f.Value, "")
}
}
}
}
// UTC time
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "2024-06-18T23:37:20Z"},
}, 1718753840000000000)
// Time with timezone
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
}, 1718725040000000000)
// SQL datetime format
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "2024-06-18 23:37:20.123-05:30"},
}, 1718773640123000000)
// Time with nanosecond precision
f("time", []logstorage.Field{
{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
{Name: "foo", Value: "bar"},
}, 1718773640123456789)
// Unix timestamp in nanoseconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640123456789"},
}, 1718773640123456789)
// Unix timestamp in microseconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640123456"},
}, 1718773640123456000)
// Unix timestamp in milliseconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640123"},
}, 1718773640123000000)
// Unix timestamp in seconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640"},
}, 1718773640000000000)
}
func TestExtractTimestampFromFields_Error(t *testing.T) {
f := func(s string) {
t.Helper()
fields := []logstorage.Field{
{Name: "time", Value: s},
}
nsecs, err := ExtractTimestampFromFields("time", fields)
if err == nil {
t.Fatalf("expecting non-nil error")
}
if nsecs != 0 {
t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, 0)
}
}
// invalid time
f("foobar")
// incomplete time
f("2024-06-18")
f("2024-06-18T23:37")
}

View File

@@ -0,0 +1,250 @@
package journald
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"net/http"
"regexp"
"slices"
"strconv"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)
const (
journaldEntryMaxNameLen = 64
)
var (
bodyBufferPool bytesutil.ByteBufferPool
allowedJournaldEntryNameChars = regexp.MustCompile(`^[A-Z_][A-Z0-9_]+`)
)
var (
journaldStreamFields = flagutil.NewArrayString("journald.streamFields", "Journal fields to be used as stream fields. "+
"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
journaldIgnoreFields = flagutil.NewArrayString("journald.ignoreFields", "Journal fields to ignore. "+
"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
journaldTimeField = flag.String("journald.timeField", "__REALTIME_TIMESTAMP", "Journal field to be used as time field. "+
"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
journaldTenantID = flag.String("journald.tenantID", "0:0", "TenantID for logs ingested via the Journald endpoint.")
journaldIncludeEntryMetadata = flag.Bool("journald.includeEntryMetadata", false, "Include journal entry fields that start with double underscores.")
)
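// getCommonParams returns CommonParams from r with journald-specific defaults
// applied for the tenant ID, time field, stream fields and ignored fields.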
func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
cp, err := insertutils.GetCommonParams(r)
if err != nil {
return nil, err
}
if cp.TenantID.AccountID == 0 && cp.TenantID.ProjectID == 0 {
tenantID, err := logstorage.ParseTenantID(*journaldTenantID)
if err != nil {
return nil, fmt.Errorf("cannot parse -journald.tenantID=%q for journald: %w", *journaldTenantID, err)
}
cp.TenantID = tenantID
}
if cp.TimeField != "" {
cp.TimeField = *journaldTimeField
}
if len(cp.StreamFields) == 0 {
cp.StreamFields = *journaldStreamFields
}
if len(cp.IgnoreFields) == 0 {
cp.IgnoreFields = *journaldIgnoreFields
}
cp.MsgFields = []string{"MESSAGE"}
return cp, nil
}
// RequestHandler processes Journald Export insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
case "/upload":
if r.Header.Get("Content-Type") != "application/vnd.fdo.journal" {
httpserver.Errorf(w, r, "only application/vnd.fdo.journal encoding is supported for Journald")
return true
}
handleJournald(r, w)
return true
default:
return false
}
}
// handleJournald parses Journal binary entries
func handleJournald(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsJournaldTotal.Inc()
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
reader := r.Body
var err error
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}
writeconcurrencylimiter.PutReader(wcr)
bb := bodyBufferPool.Get()
defer bodyBufferPool.Put(bb)
if r.Header.Get("Content-Encoding") == "zstd" {
bb.B, err = zstd.Decompress(bb.B[:0], data)
if err != nil {
httpserver.Errorf(w, r, "cannot decompress zstd-encoded request with length %d: %s", len(data), err)
return
}
data = bb.B
}
cp, err := getCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
return
}
lmp := cp.NewLogMessageProcessor("journald")
err = parseJournaldRequest(data, lmp, cp)
lmp.MustClose()
if err != nil {
errorsTotal.Inc()
httpserver.Errorf(w, r, "cannot parse Journald protobuf request: %s", err)
return
}
// update requestJournaldDuration only for successfully parsed requests
// There is no need in updating requestJournaldDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
requestJournaldDuration.UpdateDuration(startTime)
}
var (
requestsJournaldTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload"}`)
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload"}`)
requestJournaldDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/journald/upload"}`)
)
// See https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
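// An export-format entry is a sequence of FIELD=value\n lines terminated by an empty line.
// A field may also be transmitted in binary form as FIELD\n followed by a little-endian
// uint64 length and that many bytes of data. For example (values are illustrative):
//
//	__REALTIME_TIMESTAMP=1729698775704404
//	MESSAGE=hello world
//	<empty line>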
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) error {
var fields []logstorage.Field
var ts int64
var size uint64
var name, value string
var line []byte
currentTimestamp := time.Now().UnixNano()
for len(data) > 0 {
idx := bytes.IndexByte(data, '\n')
switch {
case idx > 0:
// process fields
line = data[:idx]
data = data[idx+1:]
case idx == 0:
// next message or end of file
// double new line is a separator for the next message
if len(fields) > 0 {
if ts == 0 {
ts = currentTimestamp
}
lmp.AddRow(ts, fields, nil)
fields = fields[:0]
}
// skip newline separator
data = data[1:]
continue
case idx < 0:
return fmt.Errorf("missing new line separator, unread data left=%d", len(data))
}
idx = bytes.IndexByte(line, '=')
// The line could be either a key=value\n pair
// or just key\n
// followed by binary data in the buffer.
if idx > 0 {
name = bytesutil.ToUnsafeString(line[:idx])
value = bytesutil.ToUnsafeString(line[idx+1:])
} else {
name = bytesutil.ToUnsafeString(line)
if len(data) == 0 {
return fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
}
// The size of the binary data is encoded as a little-endian uint64 at the beginning.
idx, err := binary.Decode(data, binary.LittleEndian, &size)
if err != nil {
return fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
}
// skip the binary data size
data = data[idx:]
if size == 0 {
return fmt.Errorf("unexpected zero binary data size decoded %d", size)
}
if int(size) > len(data) {
return fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
}
value = bytesutil.ToUnsafeString(data[:size])
data = data[int(size):]
// The binary data must be followed by a newline separator before the next field or entry.
if len(data) == 0 {
return fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
}
lastB := data[0]
if lastB != '\n' {
return fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
}
data = data[1:]
}
// https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
if len(name) > journaldEntryMaxNameLen {
return fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
}
if !allowedJournaldEntryNameChars.MatchString(name) {
return fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
}
if name == cp.TimeField {
n, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse Journald timestamp, %w", err)
}
ts = n * 1e3
continue
}
if slices.Contains(cp.MsgFields, name) {
name = "_msg"
}
if *journaldIncludeEntryMetadata || !strings.HasPrefix(name, "__") {
fields = append(fields, logstorage.Field{
Name: name,
Value: value,
})
}
}
if len(fields) > 0 {
if ts == 0 {
ts = currentTimestamp
}
lmp.AddRow(ts, fields, nil)
}
return nil
}

View File

@@ -0,0 +1,68 @@
package journald
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestPushJournaldOk(t *testing.T) {
f := func(src string, timestampsExpected []int64, resultExpected string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
cp := &insertutils.CommonParams{
TimeField: "__REALTIME_TIMESTAMP",
MsgFields: []string{"MESSAGE"},
}
if err := parseJournaldRequest([]byte(src), tlp, cp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
// Single event
f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n",
[]int64{91723819283000},
"{\"_msg\":\"Test message\"}",
)
// Multiple events
f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n\n__REALTIME_TIMESTAMP=91723819284\nMESSAGE=Test message2\n",
[]int64{91723819283000, 91723819284000},
"{\"_msg\":\"Test message\"}\n{\"_msg\":\"Test message2\"}",
)
// Parse binary data
f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\n__MONOTONIC_TIMESTAMP=206357648416\n__SEQNUM=7942\n__SEQNUM_ID=e0afe8412a6a49d2bfcf66aa7927b588\n_BOOT_ID=f778b6e2f7584a77b991a2366612a7b5\n_UID=0\n_GID=0\n_MACHINE_ID=a4a970370c30a925df02a13c67167847\n_HOSTNAME=ecd5e4555787\n_RUNTIME_SCOPE=system\n_TRANSPORT=journal\n_CAP_EFFECTIVE=1ffffffffff\n_SYSTEMD_CGROUP=/init.scope\n_SYSTEMD_UNIT=init.scope\n_SYSTEMD_SLICE=-.slice\nCODE_FILE=<stdin>\nCODE_LINE=1\nCODE_FUNC=<module>\nSYSLOG_IDENTIFIER=python3\n_COMM=python3\n_EXE=/usr/bin/python3.12\n_CMDLINE=python3\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasda\nasda\n_PID=2763\n_SOURCE_REALTIME_TIMESTAMP=1729698775704375\n\n",
[]int64{1729698775704404000},
"{\"_BOOT_ID\":\"f778b6e2f7584a77b991a2366612a7b5\",\"_UID\":\"0\",\"_GID\":\"0\",\"_MACHINE_ID\":\"a4a970370c30a925df02a13c67167847\",\"_HOSTNAME\":\"ecd5e4555787\",\"_RUNTIME_SCOPE\":\"system\",\"_TRANSPORT\":\"journal\",\"_CAP_EFFECTIVE\":\"1ffffffffff\",\"_SYSTEMD_CGROUP\":\"/init.scope\",\"_SYSTEMD_UNIT\":\"init.scope\",\"_SYSTEMD_SLICE\":\"-.slice\",\"CODE_FILE\":\"\\u003cstdin>\",\"CODE_LINE\":\"1\",\"CODE_FUNC\":\"\\u003cmodule>\",\"SYSLOG_IDENTIFIER\":\"python3\",\"_COMM\":\"python3\",\"_EXE\":\"/usr/bin/python3.12\",\"_CMDLINE\":\"python3\",\"_msg\":\"foo\\nbar\\n\\n\\nasda\\nasda\",\"_PID\":\"2763\",\"_SOURCE_REALTIME_TIMESTAMP\":\"1729698775704375\"}",
)
}
func TestPushJournald_Failure(t *testing.T) {
f := func(data string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
cp := &insertutils.CommonParams{
TimeField: "__REALTIME_TIMESTAMP",
MsgFields: []string{"MESSAGE"},
}
if err := parseJournaldRequest([]byte(data), tlp, cp); err == nil {
t.Fatalf("expected non nil error")
}
}
// missing new line terminator for binary encoded message
f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasdaasda2")
// missing new line terminator
f("__REALTIME_TIMESTAMP=91723819283\n=Test message")
// empty field name
f("__REALTIME_TIMESTAMP=91723819283\n=Test message\n")
// field name starting with number
f("__REALTIME_TIMESTAMP=91723819283\n1incorrect=Test message\n")
// field name exceeds 64 limit
f("__REALTIME_TIMESTAMP=91723819283\ntoolooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongcorrecooooooooooooong=Test message\n")
// Only allow A-Z0-9 and '_'
f("__REALTIME_TIMESTAMP=91723819283\nbadC!@$!@$as=Test message\n")
}

View File

@@ -0,0 +1,118 @@
package jsonline
import (
"fmt"
"io"
"net/http"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)
// RequestHandler processes jsonline insert requests
func RequestHandler(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
w.Header().Add("Content-Type", "application/json")
if r.Method != "POST" {
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
requestsTotal.Inc()
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
reader := r.Body
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
logger.Errorf("cannot read gzipped jsonline request: %s", err)
return
}
defer common.PutGzipReader(zr)
reader = zr
}
lmp := cp.NewLogMessageProcessor("jsonline")
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
err = processStreamInternal(streamName, reader, cp.TimeField, cp.MsgFields, lmp)
lmp.MustClose()
if err != nil {
logger.Errorf("jsonline: %s", err)
} else {
// update requestDuration only for successfully parsed requests.
// There is no need in updating requestDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
requestDuration.UpdateDuration(startTime)
}
}
func processStreamInternal(streamName string, r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
lr := insertutils.NewLineReader(streamName, wcr)
n := 0
for {
ok, err := readLine(lr, timeField, msgFields, lmp)
wcr.DecConcurrency()
if err != nil {
errorsTotal.Inc()
return fmt.Errorf("cannot read line #%d in /jsonline request: %s", n, err)
}
if !ok {
return nil
}
n++
}
}
func readLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
var line []byte
for len(line) == 0 {
if !lr.NextLine() {
err := lr.Err()
return false, err
}
line = lr.Line
}
p := logstorage.GetJSONParser()
if err := p.ParseLogMessage(line); err != nil {
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
}
ts, err := insertutils.ExtractTimestampFromFields(timeField, p.Fields)
if err != nil {
return false, fmt.Errorf("cannot get timestamp: %w", err)
}
logstorage.RenameField(p.Fields, msgFields, "_msg")
lmp.AddRow(ts, p.Fields, nil)
logstorage.PutJSONParser(p)
return true, nil
}
var (
requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)
requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
)

View File

@@ -0,0 +1,66 @@
package jsonline
import (
"bytes"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestProcessStreamInternal_Success(t *testing.T) {
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
t.Helper()
msgFields := []string{msgField}
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal("test", r, timeField, msgFields, tlp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
data := `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735+01:00","message":"baz"}
{"message":"xyz","@timestamp":"2023-06-06 04:48:13.735Z","x":"y"}
`
timeField := "@timestamp"
msgField := "message"
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000}
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"_msg":"baz"}
{"_msg":"xyz","x":"y"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
// Non-existing msgField
data = `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735+01:00","message":"baz"}
`
timeField = "@timestamp"
msgField = "foobar"
timestampsExpected = []int64{1686026891735000000, 1686023292735000000}
resultExpected = `{"log.offset":"71770","log.file.path":"/var/log/auth.log","message":"foobar"}
{"message":"baz"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
}
func TestProcessStreamInternal_Failure(t *testing.T) {
f := func(data string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal("test", r, "time", nil, tlp); err == nil {
t.Fatalf("expecting non-nil error")
}
}
// invalid json
f("foobar")
// invalid timestamp field
f(`{"time":"foobar"}`)
}

View File

@@ -0,0 +1,59 @@
package loki
import (
"net/http"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
// RequestHandler processes Loki insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
case "/api/v1/push":
handleInsert(r, w)
return true
case "/ready":
// See https://grafana.com/docs/loki/latest/api/#identify-ready-loki-instance
w.WriteHeader(http.StatusOK)
w.Write([]byte("ready"))
return true
default:
return false
}
}
// See https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
func handleInsert(r *http.Request, w http.ResponseWriter) {
contentType := r.Header.Get("Content-Type")
switch contentType {
case "application/json":
handleJSON(r, w)
default:
// Protobuf request body should be handled by default according to https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
handleProtobuf(r, w)
}
}
func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
cp, err := insertutils.GetCommonParams(r)
if err != nil {
return nil, err
}
// If the parsed tenant is (0,0), it is likely the default tenant.
// Try parsing the tenant from the Loki-specific X-Scope-OrgID header.
if cp.TenantID.AccountID == 0 && cp.TenantID.ProjectID == 0 {
org := r.Header.Get("X-Scope-OrgID")
if org != "" {
tenantID, err := logstorage.ParseTenantID(org)
if err != nil {
return nil, err
}
cp.TenantID = tenantID
}
}
return cp, nil
}

View File

@@ -0,0 +1,224 @@
package loki
import (
"fmt"
"io"
"math"
"net/http"
"strconv"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)
var parserPool fastjson.ParserPool
func handleJSON(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsJSONTotal.Inc()
reader := r.Body
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
return
}
defer common.PutGzipReader(zr)
reader = zr
}
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}
cp, err := getCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
return
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
lmp := cp.NewLogMessageProcessor("loki_json")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = parseJSONRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot parse Loki json request: %s; data=%s", err, data)
return
}
// update requestJSONDuration only for successfully parsed requests
// There is no need in updating requestJSONDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
requestJSONDuration.UpdateDuration(startTime)
}
var (
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
)
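// parseJSONRequest parses the Loki JSON push request in data and passes the parsed log rows to lmp.
//
// See https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs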
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
p := parserPool.Get()
defer parserPool.Put(p)
v, err := p.ParseBytes(data)
if err != nil {
return fmt.Errorf("cannot parse JSON request body: %w", err)
}
streamsV := v.Get("streams")
if streamsV == nil {
return fmt.Errorf("missing `streams` item in the parsed JSON")
}
streams, err := streamsV.Array()
if err != nil {
return fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
}
currentTimestamp := time.Now().UnixNano()
var commonFields []logstorage.Field
for _, stream := range streams {
// populate common labels from `stream` dict
commonFields = commonFields[:0]
labelsV := stream.Get("stream")
var labels *fastjson.Object
if labelsV != nil {
o, err := labelsV.Object()
if err != nil {
return fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
}
labels = o
}
labels.Visit(func(k []byte, v *fastjson.Value) {
vStr, errLocal := v.StringBytes()
if errLocal != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
commonFields = append(commonFields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(vStr),
})
})
if err != nil {
return fmt.Errorf("error when parsing `stream` object: %w", err)
}
// populate messages from `values` array
linesV := stream.Get("values")
if linesV == nil {
return fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
}
lines, err := linesV.Array()
if err != nil {
return fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
}
fields := commonFields
for _, line := range lines {
lineA, err := line.Array()
if err != nil {
return fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
}
if len(lineA) < 2 || len(lineA) > 3 {
return fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
}
// parse timestamp
timestamp, err := lineA[0].StringBytes()
if err != nil {
return fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
}
ts, err := parseLokiTimestamp(bytesutil.ToUnsafeString(timestamp))
if err != nil {
return fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
}
if ts == 0 {
ts = currentTimestamp
}
// parse log message
msg, err := lineA[1].StringBytes()
if err != nil {
return fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
}
fields = append(fields[:len(commonFields)], logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(msg),
})
// parse structured metadata - see https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs
if len(lineA) > 2 {
structuredMetadata, err := lineA[2].Object()
if err != nil {
return fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
}
structuredMetadata.Visit(func(k []byte, v *fastjson.Value) {
vStr, errLocal := v.StringBytes()
if errLocal != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(vStr),
})
})
if err != nil {
return fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
}
}
var streamFields []logstorage.Field
if useDefaultStreamFields {
streamFields = commonFields
}
lmp.AddRow(ts, fields, streamFields)
}
}
return nil
}
func parseLokiTimestamp(s string) (int64, error) {
if s == "" {
// Special case - an empty timestamp must be substituted with the current time by the caller.
return 0, nil
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
// Fall back to parsing floating-point value
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return 0, err
}
if f > math.MaxInt64 {
return 0, fmt.Errorf("too big timestamp in nanoseconds: %v; mustn't exceed %v", f, int64(math.MaxInt64))
}
if f < math.MinInt64 {
return 0, fmt.Errorf("too small timestamp in nanoseconds: %v; must be bigger or equal to %v", f, int64(math.MinInt64))
}
n = int64(f)
}
if n < 0 {
return 0, fmt.Errorf("too small timestamp in nanoseconds: %d; must be bigger than 0", n)
}
return n, nil
}
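
For quick reference, the three input shapes accepted by parseLokiTimestamp, matching the tests that follow (a same-package sketch; the values come from those tests):

// Hedged sketch of the supported timestamp shapes.
n1, _ := parseLokiTimestamp("1577836800000000001") // integer nanoseconds -> 1577836800000000001
n2, _ := parseLokiTimestamp("147.78369e9")         // float fallback      -> 147783690000
n3, _ := parseLokiTimestamp("")                    // empty -> 0; the caller substitutes time.Now().UnixNano()
_, _, _ = n1, n2, n3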


@@ -0,0 +1,127 @@
package loki
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestParseJSONRequest_Failure(t *testing.T) {
f := func(s string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err == nil {
t.Fatalf("expecting non-nil error")
}
if err := tlp.Verify(nil, ""); err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
f(``)
// Invalid json
f(`{}`)
f(`[]`)
f(`"foo"`)
f(`123`)
// invalid type for `streams` item
f(`{"streams":123}`)
// Missing `values` item
f(`{"streams":[{}]}`)
// Invalid type for `values` item
f(`{"streams":[{"values":"foobar"}]}`)
// Invalid type for `stream` item
f(`{"streams":[{"stream":[],"values":[]}]}`)
// Invalid type for `values` individual item
f(`{"streams":[{"values":[123]}]}`)
// Invalid length of `values` individual item
f(`{"streams":[{"values":[[]]}]}`)
f(`{"streams":[{"values":[["123"]]}]}`)
f(`{"streams":[{"values":[["123","456","789","8123"]]}]}`)
// Invalid type for timestamp inside `values` individual item
f(`{"streams":[{"values":[[123,"456"]}]}`)
// Invalid type for log message
f(`{"streams":[{"values":[["123",1234]]}]}`)
// invalid structured metadata type
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", ["metadata_1", "md_value"]]]}]}`)
// structured metadata with unexpected value type
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": 1}]] }]}`)
}
func TestParseJSONRequest_Success(t *testing.T) {
f := func(s string, timestampsExpected []int64, resultExpected string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
// Empty streams
f(`{"streams":[]}`, nil, ``)
f(`{"streams":[{"values":[]}]}`, nil, ``)
f(`{"streams":[{"stream":{},"values":[]}]}`, nil, ``)
f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, nil, ``)
// Empty stream labels
f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
// Non-empty stream labels
f(`{"streams":[{"stream":{
"label1": "value1",
"label2": "value2"
},"values":[
["1577836800000000001", "foo bar"],
["1477836900005000002", "abc"],
["147.78369e9", "foobar"]
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
{"label1":"value1","label2":"value2","_msg":"abc"}
{"label1":"value1","label2":"value2","_msg":"foobar"}`)
// Multiple streams
f(`{
"streams": [
{
"stream": {
"foo": "bar",
"a": "b"
},
"values": [
["1577836800000000001", "foo bar"],
["1577836900005000002", "abc"]
]
},
{
"stream": {
"x": "y"
},
"values": [
["1877836900005000002", "yx"]
]
}
]
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
{"foo":"bar","a":"b","_msg":"abc"}
{"x":"y","_msg":"yx"}`)
// values with metadata
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": "md_value"}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar","metadata_1":"md_value"}`)
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
}
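
To exercise the endpoint end to end, a hedged client-side sketch; the localhost:9428 address assumes a default single-node VictoriaLogs setup, and the stream labels, message and structured metadata are illustrative.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// One stream with a single [timestamp, message, structured metadata] triple.
	body := []byte(`{"streams":[{"stream":{"job":"app"},"values":[["1577836800000000001","hello world",{"trace_id":"abc"}]]}]}`)
	resp, err := http.Post("http://localhost:9428/insert/loki/api/v1/push", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect a 2xx status on success
}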


@@ -0,0 +1,78 @@
package loki
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func BenchmarkParseJSONRequest(b *testing.B) {
for _, streams := range []int{5, 10} {
for _, rows := range []int{100, 1000} {
for _, labels := range []int{10, 50} {
b.Run(fmt.Sprintf("streams_%d/rows_%d/labels_%d", streams, rows, labels), func(b *testing.B) {
benchmarkParseJSONRequest(b, streams, rows, labels)
})
}
}
}
}
func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(streams * rows))
b.RunParallel(func(pb *testing.PB) {
data := getJSONBody(streams, rows, labels)
for pb.Next() {
if err := parseJSONRequest(data, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
})
}
func getJSONBody(streams, rows, labels int) []byte {
body := append([]byte{}, `{"streams":[`...)
now := time.Now().UnixNano()
valuePrefix := fmt.Sprintf(`["%d","value_`, now)
for i := 0; i < streams; i++ {
body = append(body, `{"stream":{`...)
for j := 0; j < labels; j++ {
body = append(body, `"label_`...)
body = strconv.AppendInt(body, int64(j), 10)
body = append(body, `":"value_`...)
body = strconv.AppendInt(body, int64(j), 10)
body = append(body, '"')
if j < labels-1 {
body = append(body, ',')
}
}
body = append(body, `}, "values":[`...)
for j := 0; j < rows; j++ {
body = append(body, valuePrefix...)
body = strconv.AppendInt(body, int64(j), 10)
body = append(body, `"]`...)
if j < rows-1 {
body = append(body, ',')
}
}
body = append(body, `]}`...)
if i < streams-1 {
body = append(body, ',')
}
}
body = append(body, `]}`...)
return body
}


@@ -0,0 +1,218 @@
package loki
import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
"github.com/golang/snappy"
)
var (
bytesBufPool bytesutil.ByteBufferPool
pushReqsPool sync.Pool
)
func handleProtobuf(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsProtobufTotal.Inc()
wcr := writeconcurrencylimiter.GetReader(r.Body)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}
cp, err := getCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
return
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
lmp := cp.NewLogMessageProcessor("loki_protobuf")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = parseProtobufRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
return
}
// Update requestProtobufDuration only for successfully parsed requests.
// There is no need to update requestProtobufDuration for request errors,
// since their timings are usually much smaller than the timings for successful request parsing.
requestProtobufDuration.UpdateDuration(startTime)
}
var (
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
)
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
bb := bytesBufPool.Get()
defer bytesBufPool.Put(bb)
buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
if err != nil {
return fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
}
bb.B = buf
req := getPushRequest()
defer putPushRequest(req)
err = req.UnmarshalProtobuf(bb.B)
if err != nil {
return fmt.Errorf("cannot parse request body: %w", err)
}
fields := getFields()
defer putFields(fields)
streams := req.Streams
currentTimestamp := time.Now().UnixNano()
for i := range streams {
stream := &streams[i]
// stream.Labels contains the labels for the stream.
// The labels are the same for all entries in the stream.
fields.fields, err = parsePromLabels(fields.fields[:0], stream.Labels)
if err != nil {
return fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
}
commonFieldsLen := len(fields.fields)
entries := stream.Entries
for j := range entries {
e := &entries[j]
fields.fields = fields.fields[:commonFieldsLen]
for _, lp := range e.StructuredMetadata {
fields.fields = append(fields.fields, logstorage.Field{
Name: lp.Name,
Value: lp.Value,
})
}
fields.fields = append(fields.fields, logstorage.Field{
Name: "_msg",
Value: e.Line,
})
ts := e.Timestamp.UnixNano()
if ts == 0 {
ts = currentTimestamp
}
var streamFields []logstorage.Field
if useDefaultStreamFields {
streamFields = fields.fields[:commonFieldsLen]
}
lmp.AddRow(ts, fields.fields, streamFields)
}
}
return nil
}
func getFields() *fields {
v := fieldsPool.Get()
if v == nil {
return &fields{}
}
return v.(*fields)
}
func putFields(f *fields) {
f.fields = f.fields[:0]
fieldsPool.Put(f)
}
var fieldsPool sync.Pool
type fields struct {
fields []logstorage.Field
}
// parsePromLabels parses log fields in Prometheus text exposition format from s, appends them to dst and returns the result.
//
// See test data of promtail for examples: https://github.com/grafana/loki/blob/a24ef7b206e0ca63ee74ca6ecb0a09b745cd2258/pkg/push/types_test.go
func parsePromLabels(dst []logstorage.Field, s string) ([]logstorage.Field, error) {
// Make sure s is wrapped into `{...}`
s = strings.TrimSpace(s)
if len(s) < 2 {
return nil, fmt.Errorf("too short string to parse: %q", s)
}
if s[0] != '{' {
return nil, fmt.Errorf("missing `{` at the beginning of %q", s)
}
if s[len(s)-1] != '}' {
return nil, fmt.Errorf("missing `}` at the end of %q", s)
}
s = s[1 : len(s)-1]
for len(s) > 0 {
// Parse label name
n := strings.IndexByte(s, '=')
if n < 0 {
return nil, fmt.Errorf("cannot find `=` char for label value at %s", s)
}
name := s[:n]
s = s[n+1:]
// Parse label value
qs, err := strconv.QuotedPrefix(s)
if err != nil {
return nil, fmt.Errorf("cannot parse value for label %q at %s: %w", name, s, err)
}
s = s[len(qs):]
value, err := strconv.Unquote(qs)
if err != nil {
return nil, fmt.Errorf("cannot unquote value %q for label %q: %w", qs, name, err)
}
// Append the found field to dst.
dst = append(dst, logstorage.Field{
Name: name,
Value: value,
})
// Check whether there are other labels remaining
if len(s) == 0 {
break
}
if !strings.HasPrefix(s, ",") {
return nil, fmt.Errorf("missing `,` char at %s", s)
}
s = s[1:]
s = strings.TrimPrefix(s, " ")
}
return dst, nil
}
func getPushRequest() *PushRequest {
v := pushReqsPool.Get()
if v == nil {
return &PushRequest{}
}
return v.(*PushRequest)
}
func putPushRequest(req *PushRequest) {
req.reset()
pushReqsPool.Put(req)
}
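
In the spirit of the tests that follow, a round-trip sketch of this wire format: build a PushRequest, marshal it, snappy-encode the result and hand it to the parser. Same-package placement is assumed so the unexported parseProtobufRequest and the pb types are reachable; the labels and values are illustrative.

// Hedged round-trip sketch for the snappy-compressed protobuf body.
pr := &PushRequest{
	Streams: []Stream{{
		Labels: `{job="app",env="dev"}`,
		Entries: []Entry{{
			Timestamp:          time.Unix(0, 1577836800000000001),
			Line:               "hello world",
			StructuredMetadata: []LabelPair{{Name: "trace_id", Value: "abc"}},
		}},
	}},
}
data := snappy.Encode(nil, pr.MarshalProtobuf(nil))
tlp := &insertutils.TestLogMessageProcessor{}
if err := parseProtobufRequest(data, tlp, false); err != nil {
	panic(err)
}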


@@ -0,0 +1,169 @@
package loki
import (
"fmt"
"strings"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/golang/snappy"
)
type testLogMessageProcessor struct {
pr PushRequest
}
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
if streamFields != nil {
panic(fmt.Errorf("unexpected non-nil streamFields: %v", streamFields))
}
msg := ""
for _, f := range fields {
if f.Name == "_msg" {
msg = f.Value
}
}
var a []string
for _, f := range fields {
if f.Name == "_msg" {
continue
}
item := fmt.Sprintf("%s=%q", f.Name, f.Value)
a = append(a, item)
}
labels := "{" + strings.Join(a, ", ") + "}"
tlp.pr.Streams = append(tlp.pr.Streams, Stream{
Labels: labels,
Entries: []Entry{
{
Timestamp: time.Unix(0, timestamp),
Line: strings.Clone(msg),
},
},
})
}
func (tlp *testLogMessageProcessor) MustClose() {
}
func TestParseProtobufRequest_Success(t *testing.T) {
f := func(s string, timestampsExpected []int64, resultExpected string) {
t.Helper()
tlp := &testLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if len(tlp.pr.Streams) != len(timestampsExpected) {
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), len(timestampsExpected))
}
data := tlp.pr.MarshalProtobuf(nil)
encodedData := snappy.Encode(nil, data)
tlp2 := &insertutils.TestLogMessageProcessor{}
if err := parseProtobufRequest(encodedData, tlp2, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp2.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
// Empty streams
f(`{"streams":[]}`, nil, ``)
f(`{"streams":[{"values":[]}]}`, nil, ``)
f(`{"streams":[{"stream":{},"values":[]}]}`, nil, ``)
f(`{"streams":[{"stream":{"foo":"bar"},"values":[]}]}`, nil, ``)
// Empty stream labels
f(`{"streams":[{"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
f(`{"streams":[{"stream":{},"values":[["1577836800000000001", "foo bar"]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
// Non-empty stream labels
f(`{"streams":[{"stream":{
"label1": "value1",
"label2": "value2"
},"values":[
["1577836800000000001", "foo bar"],
["1477836900005000002", "abc"],
["147.78369e9", "foobar"]
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
{"label1":"value1","label2":"value2","_msg":"abc"}
{"label1":"value1","label2":"value2","_msg":"foobar"}`)
// Multiple streams
f(`{
"streams": [
{
"stream": {
"foo": "bar",
"a": "b"
},
"values": [
["1577836800000000001", "foo bar"],
["1577836900005000002", "abc"]
]
},
{
"stream": {
"x": "y"
},
"values": [
["1877836900005000002", "yx"]
]
}
]
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
{"foo":"bar","a":"b","_msg":"abc"}
{"x":"y","_msg":"yx"}`)
}
func TestParsePromLabels_Success(t *testing.T) {
f := func(s string) {
t.Helper()
fields, err := parsePromLabels(nil, s)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
var a []string
for _, f := range fields {
a = append(a, fmt.Sprintf("%s=%q", f.Name, f.Value))
}
result := "{" + strings.Join(a, ", ") + "}"
if result != s {
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, s)
}
}
f("{}")
f(`{foo="bar"}`)
f(`{foo="bar", baz="x", y="z"}`)
f(`{foo="ba\"r\\z\n", a="", b="\"\\"}`)
}
func TestParsePromLabels_Failure(t *testing.T) {
f := func(s string) {
t.Helper()
fields, err := parsePromLabels(nil, s)
if err == nil {
t.Fatalf("expecting non-nil error")
}
if len(fields) > 0 {
t.Fatalf("unexpected non-empty fields: %s", fields)
}
}
f("")
f("{")
f(`{foo}`)
f(`{foo=bar}`)
f(`{foo="bar}`)
f(`{foo="ba\",r}`)
f(`{foo="bar" baz="aa"}`)
f(`foobar`)
f(`foo{bar="baz"}`)
}


@@ -0,0 +1,85 @@
package loki
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)
func BenchmarkParseProtobufRequest(b *testing.B) {
for _, streams := range []int{5, 10} {
for _, rows := range []int{100, 1000} {
for _, labels := range []int{10, 50} {
b.Run(fmt.Sprintf("streams_%d/rows_%d/labels_%d", streams, rows, labels), func(b *testing.B) {
benchmarkParseProtobufRequest(b, streams, rows, labels)
})
}
}
}
}
func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(streams * rows))
b.RunParallel(func(pb *testing.PB) {
body := getProtobufBody(streams, rows, labels)
for pb.Next() {
if err := parseProtobufRequest(body, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
})
}
func getProtobufBody(streamsCount, rowsCount, labelsCount int) []byte {
var b []byte
var entries []Entry
streams := make([]Stream, streamsCount)
for i := range streams {
b = b[:0]
b = append(b, '{')
for j := 0; j < labelsCount; j++ {
b = append(b, "label_"...)
b = strconv.AppendInt(b, int64(j), 10)
b = append(b, `="value_`...)
b = strconv.AppendInt(b, int64(j), 10)
b = append(b, '"')
if j < labelsCount-1 {
b = append(b, ',')
}
}
b = append(b, '}')
labels := string(b)
var rowsBuf []byte
entriesLen := len(entries)
for j := 0; j < rowsCount; j++ {
rowsBufLen := len(rowsBuf)
rowsBuf = append(rowsBuf, "value_"...)
rowsBuf = strconv.AppendInt(rowsBuf, int64(j), 10)
entries = append(entries, Entry{
Timestamp: time.Now(),
Line: bytesutil.ToUnsafeString(rowsBuf[rowsBufLen:]),
})
}
st := &streams[i]
st.Labels = labels
st.Entries = entries[entriesLen:]
}
pr := PushRequest{
Streams: streams,
}
body := pr.MarshalProtobuf(nil)
encodedBody := snappy.Encode(nil, body)
return encodedBody
}

app/vlinsert/loki/pb.go

@@ -0,0 +1,302 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: push_request.proto
// source: https://raw.githubusercontent.com/grafana/loki/main/pkg/push/push_request.proto
// Licensed under the Apache License, Version 2.0 (the "License");
// https://github.com/grafana/loki/blob/main/pkg/push/LICENSE
package loki
import (
"fmt"
"time"
"github.com/VictoriaMetrics/easyproto"
)
var mp easyproto.MarshalerPool
// PushRequest represents Loki PushRequest
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L14
type PushRequest struct {
Streams []Stream
entriesBuf []Entry
labelPairBuf []LabelPair
}
func (pr *PushRequest) reset() {
pr.Streams = pr.Streams[:0]
pr.entriesBuf = pr.entriesBuf[:0]
pr.labelPairBuf = pr.labelPairBuf[:0]
}
// UnmarshalProtobuf unmarshals pr from protobuf message at src.
//
// pr remains valid until src is modified.
func (pr *PushRequest) UnmarshalProtobuf(src []byte) error {
pr.reset()
var err error
pr.entriesBuf, pr.labelPairBuf, err = pr.unmarshalProtobuf(pr.entriesBuf, pr.labelPairBuf, src)
return err
}
// MarshalProtobuf marshals pr to protobuf message, appends it to dst and returns the result.
func (pr *PushRequest) MarshalProtobuf(dst []byte) []byte {
m := mp.Get()
pr.marshalProtobuf(m.MessageMarshaler())
dst = m.Marshal(dst)
mp.Put(m)
return dst
}
func (pr *PushRequest) marshalProtobuf(mm *easyproto.MessageMarshaler) {
for _, s := range pr.Streams {
s.marshalProtobuf(mm.AppendMessage(1))
}
}
func (pr *PushRequest) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []LabelPair, src []byte) ([]Entry, []LabelPair, error) {
// message PushRequest {
// repeated Stream streams = 1;
// }
var err error
var fc easyproto.FieldContext
for len(src) > 0 {
src, err = fc.NextField(src)
if err != nil {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot read next field in PushRequest: %w", err)
}
switch fc.FieldNum {
case 1:
data, ok := fc.MessageData()
if !ok {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot read Stream data")
}
pr.Streams = append(pr.Streams, Stream{})
s := &pr.Streams[len(pr.Streams)-1]
entriesBuf, labelPairBuf, err = s.unmarshalProtobuf(entriesBuf, labelPairBuf, data)
if err != nil {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot unmarshal Stream: %w", err)
}
}
}
return entriesBuf, labelPairBuf, nil
}
// Stream represents Loki stream.
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L23
type Stream struct {
Labels string
Entries []Entry
}
func (s *Stream) marshalProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(1, s.Labels)
for _, e := range s.Entries {
e.marshalProtobuf(mm.AppendMessage(2))
}
}
func (s *Stream) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []LabelPair, src []byte) ([]Entry, []LabelPair, error) {
// message Stream {
// string labels = 1;
// repeated Entry entries = 2;
// }
var err error
var fc easyproto.FieldContext
entriesBufLen := len(entriesBuf)
for len(src) > 0 {
src, err = fc.NextField(src)
if err != nil {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot read next field in Stream: %w", err)
}
switch fc.FieldNum {
case 1:
labels, ok := fc.String()
if !ok {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot read labels")
}
s.Labels = labels
case 2:
data, ok := fc.MessageData()
if !ok {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot read Entry data")
}
entriesBuf = append(entriesBuf, Entry{})
e := &entriesBuf[len(entriesBuf)-1]
labelPairBuf, err = e.unmarshalProtobuf(labelPairBuf, data)
if err != nil {
return entriesBuf, labelPairBuf, fmt.Errorf("cannot unmarshal Entry: %w", err)
}
}
}
s.Entries = entriesBuf[entriesBufLen:]
return entriesBuf, labelPairBuf, nil
}
// Entry represents Loki entry.
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L38
type Entry struct {
Timestamp time.Time
Line string
StructuredMetadata []LabelPair
}
func (e *Entry) marshalProtobuf(mm *easyproto.MessageMarshaler) {
marshalTime(mm, 1, e.Timestamp)
mm.AppendString(2, e.Line)
for _, lp := range e.StructuredMetadata {
lp.marshalProtobuf(mm.AppendMessage(3))
}
}
func (e *Entry) unmarshalProtobuf(labelPairBuf []LabelPair, src []byte) ([]LabelPair, error) {
// message Entry {
// Timestamp timestamp = 1;
// string line = 2;
// repeated LabelPair structuredMetadata = 3;
// }
var err error
var fc easyproto.FieldContext
labelPairBufLen := len(labelPairBuf)
for len(src) > 0 {
src, err = fc.NextField(src)
if err != nil {
return labelPairBuf, fmt.Errorf("cannot read next field in Entry: %w", err)
}
switch fc.FieldNum {
case 1:
data, ok := fc.MessageData()
if !ok {
return labelPairBuf, fmt.Errorf("cannot read Timestamp data")
}
timestamp, err := unmarshalTime(data)
if err != nil {
return labelPairBuf, fmt.Errorf("cannot unmarshal Timestamp: %w", err)
}
e.Timestamp = timestamp
case 2:
line, ok := fc.String()
if !ok {
return labelPairBuf, fmt.Errorf("cannot read Line")
}
e.Line = line
case 3:
data, ok := fc.MessageData()
if !ok {
return labelPairBuf, fmt.Errorf("cannot read StructuredMetadata")
}
labelPairBuf = append(labelPairBuf, LabelPair{})
lp := &labelPairBuf[len(labelPairBuf)-1]
if err := lp.unmarshalProtobuf(data); err != nil {
return labelPairBuf, fmt.Errorf("cannot unmarshal StructuredMetadata: %w", err)
}
}
}
e.StructuredMetadata = labelPairBuf[labelPairBufLen:]
return labelPairBuf, nil
}
// LabelPair represents Loki label pair.
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L33
type LabelPair struct {
Name string
Value string
}
func (lp *LabelPair) marshalProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(1, lp.Name)
mm.AppendString(2, lp.Value)
}
func (lp *LabelPair) unmarshalProtobuf(src []byte) (err error) {
// message LabelPair {
// string name = 1;
// string value = 2;
// }
var fc easyproto.FieldContext
for len(src) > 0 {
src, err = fc.NextField(src)
if err != nil {
return fmt.Errorf("cannot read next field in LabelPair: %w", err)
}
switch fc.FieldNum {
case 1:
name, ok := fc.String()
if !ok {
return fmt.Errorf("cannot read name")
}
lp.Name = name
case 2:
value, ok := fc.String()
if !ok {
return fmt.Errorf("cannot unmarshal value")
}
lp.Value = value
}
}
return nil
}
func marshalTime(mm *easyproto.MessageMarshaler, fieldNum uint32, timestamp time.Time) {
nsecs := timestamp.UnixNano()
ts := Timestamp{
Seconds: nsecs / 1e9,
Nanos: int32(nsecs % 1e9),
}
ts.marshalProtobuf(mm.AppendMessage(fieldNum))
}
func unmarshalTime(src []byte) (time.Time, error) {
var ts Timestamp
if err := ts.unmarshalProtobuf(src); err != nil {
return time.Time{}, err
}
timestamp := time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
return timestamp, nil
}
// Timestamp is the protobuf well-known timestamp type.
type Timestamp struct {
Seconds int64
Nanos int32
}
func (ts *Timestamp) marshalProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendInt64(1, ts.Seconds)
mm.AppendInt32(2, ts.Nanos)
}
func (ts *Timestamp) unmarshalProtobuf(src []byte) (err error) {
// message Timestamp {
// int64 seconds = 1;
// int32 nanos = 2;
// }
var fc easyproto.FieldContext
for len(src) > 0 {
src, err = fc.NextField(src)
if err != nil {
return fmt.Errorf("cannot read next field in Timestamp: %w", err)
}
switch fc.FieldNum {
case 1:
seconds, ok := fc.Int64()
if !ok {
return fmt.Errorf("cannot read Seconds")
}
ts.Seconds = seconds
case 2:
nanos, ok := fc.Int32()
if !ok {
return fmt.Errorf("cannot read Nanos")
}
ts.Nanos = nanos
}
}
return nil
}
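
The Seconds/Nanos split above follows the protobuf well-known Timestamp encoding; a small arithmetic sketch of the round trip (the nanosecond value is illustrative):

nsecs := int64(1577836800000000001)
ts := Timestamp{Seconds: nsecs / 1e9, Nanos: int32(nsecs % 1e9)} // Seconds=1577836800, Nanos=1
back := time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
fmt.Println(back.UnixNano() == nsecs) // true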

app/vlinsert/main.go

@@ -0,0 +1,67 @@
package vlinsert
import (
"fmt"
"net/http"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/datadog"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/journald"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/opentelemetry"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/syslog"
)
// Init initializes vlinsert
func Init() {
syslog.MustInit()
}
// Stop stops vlinsert
func Stop() {
syslog.MustStop()
}
// RequestHandler handles insert requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
path := r.URL.Path
if !strings.HasPrefix(path, "/insert/") {
// Skip requests that do not start with /insert/, since these aren't our requests.
return false
}
path = strings.TrimPrefix(path, "/insert")
path = strings.ReplaceAll(path, "//", "/")
switch path {
case "/jsonline":
jsonline.RequestHandler(w, r)
return true
case "/ready":
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
}
switch {
case strings.HasPrefix(path, "/elasticsearch/"):
path = strings.TrimPrefix(path, "/elasticsearch")
return elasticsearch.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/loki/"):
path = strings.TrimPrefix(path, "/loki")
return loki.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/opentelemetry/"):
path = strings.TrimPrefix(path, "/opentelemetry")
return opentelemetry.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/journald/"):
path = strings.TrimPrefix(path, "/journald")
return journald.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/datadog/"):
path = strings.TrimPrefix(path, "/datadog")
return datadog.RequestHandler(path, w, r)
default:
return false
}
}
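
Note the double-slash normalization above: both /insert/loki/api/v1/push and /insert//loki/api/v1/push reach the Loki handler. A tiny sketch of what the router computes:

for _, p := range []string{"/insert/loki/api/v1/push", "/insert//loki/api/v1/push"} {
	path := strings.TrimPrefix(p, "/insert")
	path = strings.ReplaceAll(path, "//", "/")
	fmt.Println(path) // "/loki/api/v1/push" both times
}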


@@ -0,0 +1,141 @@
package opentelemetry
import (
"fmt"
"io"
"net/http"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)
// RequestHandler processes OpenTelemetry insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
// use the same path as opentelemetry collector
// https://opentelemetry.io/docs/specs/otlp/#otlphttp-request
case "/v1/logs":
if r.Header.Get("Content-Type") == "application/json" {
httpserver.Errorf(w, r, "json encoding isn't supported for opentelemetry format. Use protobuf encoding")
return true
}
handleProtobuf(r, w)
return true
default:
return false
}
}
func handleProtobuf(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsProtobufTotal.Inc()
reader := r.Body
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
return
}
defer common.PutGzipReader(zr)
reader = zr
}
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
return
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
lmp := cp.NewLogMessageProcessor("opentelelemtry_protobuf")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = pushProtobufRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot parse OpenTelemetry protobuf request: %s", err)
return
}
// Update requestProtobufDuration only for successfully parsed requests.
// There is no need to update requestProtobufDuration for request errors,
// since their timings are usually much smaller than the timings for successful request parsing.
requestProtobufDuration.UpdateDuration(startTime)
}
var (
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
)
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
var req pb.ExportLogsServiceRequest
if err := req.UnmarshalProtobuf(data); err != nil {
errorsTotal.Inc()
return fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
}
var commonFields []logstorage.Field
for _, rl := range req.ResourceLogs {
attributes := rl.Resource.Attributes
commonFields = slicesutil.SetLength(commonFields, len(attributes))
for i, attr := range attributes {
commonFields[i].Name = attr.Key
commonFields[i].Value = attr.Value.FormatString()
}
commonFieldsLen := len(commonFields)
for _, sc := range rl.ScopeLogs {
commonFields = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp, useDefaultStreamFields)
}
}
return nil
}
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) []logstorage.Field {
fields := commonFields
for _, lr := range sc.LogRecords {
fields = fields[:len(commonFields)]
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: lr.Body.FormatString(),
})
for _, attr := range lr.Attributes {
fields = append(fields, logstorage.Field{
Name: attr.Key,
Value: attr.Value.FormatString(),
})
}
fields = append(fields, logstorage.Field{
Name: "severity",
Value: lr.FormatSeverity(),
})
var streamFields []logstorage.Field
if useDefaultStreamFields {
streamFields = commonFields
}
lmp.AddRow(lr.ExtractTimestampNano(), fields, streamFields)
}
return fields
}


@@ -0,0 +1,127 @@
package opentelemetry
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)
func TestPushProtoOk(t *testing.T) {
f := func(src []pb.ResourceLogs, timestampsExpected []int64, resultExpected string) {
t.Helper()
lr := pb.ExportLogsServiceRequest{
ResourceLogs: src,
}
pData := lr.MarshalProtobuf(nil)
tlp := &insertutils.TestLogMessageProcessor{}
if err := pushProtobufRequest(pData, tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
// single line without resource attributes
f([]pb.ResourceLogs{
{
ScopeLogs: []pb.ScopeLogs{
{
LogRecords: []pb.LogRecord{
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
},
},
},
},
},
[]int64{1234},
`{"_msg":"log-line-message","severity":"Trace"}`,
)
// multi-line with resource attributes
f([]pb.ResourceLogs{
{
Resource: pb.Resource{
Attributes: []*pb.KeyValue{
{Key: "logger", Value: &pb.AnyValue{StringValue: ptrTo("context")}},
{Key: "instance_id", Value: &pb.AnyValue{IntValue: ptrTo[int64](10)}},
{Key: "node_taints", Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{
Values: []*pb.KeyValue{
{Key: "role", Value: &pb.AnyValue{StringValue: ptrTo("dev")}},
{Key: "cluster_load_percent", Value: &pb.AnyValue{DoubleValue: ptrTo(0.55)}},
},
}}},
},
},
ScopeLogs: []pb.ScopeLogs{
{
LogRecords: []pb.LogRecord{
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1235, SeverityNumber: 21, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1236, SeverityNumber: -1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
},
},
},
},
},
[]int64{1234, 1235, 1236},
`{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Unspecified"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Unspecified"}`,
)
// multi-scope with resource attributes and multi-line
f([]pb.ResourceLogs{
{
Resource: pb.Resource{
Attributes: []*pb.KeyValue{
{Key: "logger", Value: &pb.AnyValue{StringValue: ptrTo("context")}},
{Key: "instance_id", Value: &pb.AnyValue{IntValue: ptrTo[int64](10)}},
{Key: "node_taints", Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{
Values: []*pb.KeyValue{
{Key: "role", Value: &pb.AnyValue{StringValue: ptrTo("dev")}},
{Key: "cluster_load_percent", Value: &pb.AnyValue{DoubleValue: ptrTo(0.55)}},
},
}}},
},
},
ScopeLogs: []pb.ScopeLogs{
{
LogRecords: []pb.LogRecord{
{TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
{TimeUnixNano: 1235, SeverityNumber: 5, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
},
},
},
},
{
ScopeLogs: []pb.ScopeLogs{
{
LogRecords: []pb.LogRecord{
{TimeUnixNano: 2345, SeverityNumber: 10, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-0-0")}},
{TimeUnixNano: 2346, SeverityNumber: 10, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-0-1")}},
},
},
{
LogRecords: []pb.LogRecord{
{TimeUnixNano: 2347, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-0")}},
{ObservedTimeUnixNano: 2348, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-1")}},
},
},
},
},
},
[]int64{1234, 1235, 2345, 2346, 2347, 2348},
`{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Debug"}
{"_msg":"log-line-resource-scope-1-0-0","severity":"Info2"}
{"_msg":"log-line-resource-scope-1-0-1","severity":"Info2"}
{"_msg":"log-line-resource-scope-1-1-0","severity":"Info4"}
{"_msg":"log-line-resource-scope-1-1-1","severity":"Info4"}`,
)
}
func ptrTo[T any](s T) *T {
return &s
}


@@ -0,0 +1,78 @@
package opentelemetry
import (
"fmt"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)
func BenchmarkParseProtobufRequest(b *testing.B) {
for _, scopes := range []int{1, 2} {
for _, rows := range []int{100, 1000} {
for _, attributes := range []int{5, 10} {
b.Run(fmt.Sprintf("scopes_%d/rows_%d/attributes_%d", scopes, rows, attributes), func(b *testing.B) {
benchmarkParseProtobufRequest(b, scopes, rows, attributes)
})
}
}
}
}
func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(streams * rows))
b.RunParallel(func(pb *testing.PB) {
body := getProtobufBody(streams, rows, labels)
for pb.Next() {
if err := pushProtobufRequest(body, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
})
}
func getProtobufBody(scopesCount, rowsCount, attributesCount int) []byte {
msg := "12345678910"
attrValues := []*pb.AnyValue{
{StringValue: ptrTo("string-attribute")},
{IntValue: ptrTo[int64](12345)},
{DoubleValue: ptrTo(3.14)},
}
attrs := make([]*pb.KeyValue, attributesCount)
for j := 0; j < attributesCount; j++ {
attrs[j] = &pb.KeyValue{
Key: fmt.Sprintf("key-%d", j),
Value: attrValues[j%3],
}
}
entries := make([]pb.LogRecord, rowsCount)
for j := 0; j < rowsCount; j++ {
entries[j] = pb.LogRecord{
TimeUnixNano: 12345678910, ObservedTimeUnixNano: 12345678910, Body: pb.AnyValue{StringValue: &msg},
}
}
scopes := make([]pb.ScopeLogs, scopesCount)
for j := 0; j < scopesCount; j++ {
scopes[j] = pb.ScopeLogs{
LogRecords: entries,
}
}
pr := pb.ExportLogsServiceRequest{
ResourceLogs: []pb.ResourceLogs{
{
Resource: pb.Resource{
Attributes: attrs,
},
ScopeLogs: scopes,
},
},
}
return pr.MarshalProtobuf(nil)
}


@@ -0,0 +1,615 @@
package syslog
import (
"bufio"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"net"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/klauspost/compress/gzip"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)
var (
syslogTimezone = flag.String("syslog.timezone", "Local", "Timezone to use when parsing timestamps in RFC3164 syslog messages. Timezone must be a valid IANA Time Zone. "+
"For example: America/New_York, Europe/Berlin, Etc/GMT+3 . See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
streamFieldsTCP = flagutil.NewArrayString("syslog.streamFields.tcp", "Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields`)
streamFieldsUDP = flagutil.NewArrayString("syslog.streamFields.udp", "Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields`)
ignoreFieldsTCP = flagutil.NewArrayString("syslog.ignoreFields.tcp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)
ignoreFieldsUDP = flagutil.NewArrayString("syslog.ignoreFields.udp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)
extraFieldsTCP = flagutil.NewArrayString("syslog.extraFields.tcp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
extraFieldsUDP = flagutil.NewArrayString("syslog.extraFields.udp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
tenantIDTCP = flagutil.NewArrayString("syslog.tenantID.tcp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy")
tenantIDUDP = flagutil.NewArrayString("syslog.tenantID.udp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy")
listenAddrTCP = flagutil.NewArrayString("syslog.listenAddr.tcp", "Comma-separated list of TCP addresses to listen to for Syslog messages. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
listenAddrUDP = flagutil.NewArrayString("syslog.listenAddr.udp", "Comma-separated list of UDP addresses to listen to for Syslog messages. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
tlsEnable = flagutil.NewArrayBool("syslog.tls", "Whether to enable TLS for receiving syslog messages at the corresponding -syslog.listenAddr.tcp. "+
"The corresponding -syslog.tlsCertFile and -syslog.tlsKeyFile must be set if -syslog.tls is set. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
tlsCertFile = flagutil.NewArrayString("syslog.tlsCertFile", "Path to file with TLS certificate for the corresponding -syslog.listenAddr.tcp if the corresponding -syslog.tls is set. "+
"Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
tlsKeyFile = flagutil.NewArrayString("syslog.tlsKeyFile", "Path to file with TLS key for the corresponding -syslog.listenAddr.tcp if the corresponding -syslog.tls is set. "+
"The provided key file is automatically re-read every second, so it can be dynamically updated. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
tlsCipherSuites = flagutil.NewArrayString("syslog.tlsCipherSuites", "Optional list of TLS cipher suites for -syslog.listenAddr.tcp if -syslog.tls is set. "+
"See the list of supported cipher suites at https://pkg.go.dev/crypto/tls#pkg-constants . "+
"See also https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
tlsMinVersion = flag.String("syslog.tlsMinVersion", "TLS13", "The minimum TLS version to use for -syslog.listenAddr.tcp if -syslog.tls is set. "+
"Supported values: TLS10, TLS11, TLS12, TLS13. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#security")
compressMethodTCP = flagutil.NewArrayString("syslog.compressMethod.tcp", "Compression method for syslog messages received at the corresponding -syslog.listenAddr.tcp. "+
"Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression")
compressMethodUDP = flagutil.NewArrayString("syslog.compressMethod.udp", "Compression method for syslog messages received at the corresponding -syslog.listenAddr.udp. "+
"Supported values: none, gzip, deflate. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#compression")
useLocalTimestampTCP = flagutil.NewArrayBool("syslog.useLocalTimestamp.tcp", "Whether to use local timestamp instead of the original timestamp for the ingested syslog messages "+
"at the corresponding -syslog.listenAddr.tcp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#log-timestamps")
useLocalTimestampUDP = flagutil.NewArrayBool("syslog.useLocalTimestamp.udp", "Whether to use local timestamp instead of the original timestamp for the ingested syslog messages "+
"at the corresponding -syslog.listenAddr.udp. See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#log-timestamps")
)
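
These are indexed array flags: the Nth value of each per-protocol flag applies to the Nth listen address of the same protocol. A hedged sketch of the lookup semantics used by the listeners below (the flag values are illustrative, and the single-value fallback is assumed from flagutil's GetOptionalArg):

// With -syslog.listenAddr.tcp=:514,:515 and -syslog.tenantID.tcp=1:0,2:0:
//   tenantIDTCP.GetOptionalArg(0) == "1:0" // first listener
//   tenantIDTCP.GetOptionalArg(1) == "2:0" // second listener
// A single -syslog.compressMethod.tcp=gzip would apply to both listeners,
// assuming GetOptionalArg falls back to the sole configured value.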
// MustInit initializes syslog parser at the given -syslog.listenAddr.tcp and -syslog.listenAddr.udp ports
//
// This function must be called after flag.Parse().
//
// MustStop() must be called in order to free up resources occupied by the initialized syslog parser.
func MustInit() {
if workersStopCh != nil {
logger.Panicf("BUG: MustInit() called twice without MustStop() call")
}
workersStopCh = make(chan struct{})
for argIdx, addr := range *listenAddrTCP {
workersWG.Add(1)
go func(addr string, argIdx int) {
runTCPListener(addr, argIdx)
workersWG.Done()
}(addr, argIdx)
}
for argIdx, addr := range *listenAddrUDP {
workersWG.Add(1)
go func(addr string, argIdx int) {
runUDPListener(addr, argIdx)
workersWG.Done()
}(addr, argIdx)
}
currentYear := time.Now().Year()
globalCurrentYear.Store(int64(currentYear))
workersWG.Add(1)
go func() {
ticker := time.NewTicker(time.Minute)
for {
select {
case <-workersStopCh:
ticker.Stop()
workersWG.Done()
return
case <-ticker.C:
currentYear := time.Now().Year()
globalCurrentYear.Store(int64(currentYear))
}
}
}()
if *syslogTimezone != "" {
tz, err := time.LoadLocation(*syslogTimezone)
if err != nil {
logger.Fatalf("cannot parse -syslog.timezone=%q: %s", *syslogTimezone, err)
}
globalTimezone = tz
} else {
globalTimezone = time.Local
}
}
var (
globalCurrentYear atomic.Int64
globalTimezone *time.Location
)
var (
workersWG sync.WaitGroup
workersStopCh chan struct{}
)
// MustStop stops syslog parser initialized via MustInit()
func MustStop() {
close(workersStopCh)
workersWG.Wait()
workersStopCh = nil
}
func runUDPListener(addr string, argIdx int) {
ln, err := net.ListenPacket(netutil.GetUDPNetwork(), addr)
if err != nil {
logger.Fatalf("cannot start UDP syslog server at %q: %s", addr, err)
}
tenantIDStr := tenantIDUDP.GetOptionalArg(argIdx)
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.tenantID.udp=%q for -syslog.listenAddr.udp=%q: %s", tenantIDStr, addr, err)
}
compressMethod := compressMethodUDP.GetOptionalArg(argIdx)
checkCompressMethod(compressMethod, addr, "udp")
useLocalTimestamp := useLocalTimestampUDP.GetOptionalArg(argIdx)
streamFieldsStr := streamFieldsUDP.GetOptionalArg(argIdx)
streamFields, err := parseFieldsList(streamFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.streamFields.udp=%q for -syslog.listenAddr.udp=%q: %s", streamFieldsStr, addr, err)
}
ignoreFieldsStr := ignoreFieldsUDP.GetOptionalArg(argIdx)
ignoreFields, err := parseFieldsList(ignoreFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.ignoreFields.udp=%q for -syslog.listenAddr.udp=%q: %s", ignoreFieldsStr, addr, err)
}
extraFieldsStr := extraFieldsUDP.GetOptionalArg(argIdx)
extraFields, err := parseExtraFields(extraFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.extraFields.udp=%q for -syslog.listenAddr.udp=%q: %s", extraFieldsStr, addr, err)
}
doneCh := make(chan struct{})
go func() {
serveUDP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
close(doneCh)
}()
logger.Infof("started accepting syslog messages at -syslog.listenAddr.udp=%q", addr)
<-workersStopCh
if err := ln.Close(); err != nil {
logger.Fatalf("syslog: cannot close UDP listener at %s: %s", addr, err)
}
<-doneCh
logger.Infof("finished accepting syslog messages at -syslog.listenAddr.udp=%q", addr)
}
func runTCPListener(addr string, argIdx int) {
var tlsConfig *tls.Config
if tlsEnable.GetOptionalArg(argIdx) {
certFile := tlsCertFile.GetOptionalArg(argIdx)
keyFile := tlsKeyFile.GetOptionalArg(argIdx)
tc, err := netutil.GetServerTLSConfig(certFile, keyFile, *tlsMinVersion, *tlsCipherSuites)
if err != nil {
logger.Fatalf("cannot load TLS cert from -syslog.tlsCertFile=%q, -syslog.tlsKeyFile=%q, -syslog.tlsMinVersion=%q, -syslog.tlsCipherSuites=%q: %s",
certFile, keyFile, *tlsMinVersion, *tlsCipherSuites, err)
}
tlsConfig = tc
}
ln, err := netutil.NewTCPListener("syslog", addr, false, tlsConfig)
if err != nil {
logger.Fatalf("syslog: cannot start TCP listener at %s: %s", addr, err)
}
tenantIDStr := tenantIDTCP.GetOptionalArg(argIdx)
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.tenantID.tcp=%q for -syslog.listenAddr.tcp=%q: %s", tenantIDStr, addr, err)
}
compressMethod := compressMethodTCP.GetOptionalArg(argIdx)
checkCompressMethod(compressMethod, addr, "tcp")
useLocalTimestamp := useLocalTimestampTCP.GetOptionalArg(argIdx)
streamFieldsStr := streamFieldsTCP.GetOptionalArg(argIdx)
streamFields, err := parseFieldsList(streamFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.streamFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", streamFieldsStr, addr, err)
}
ignoreFieldsStr := ignoreFieldsTCP.GetOptionalArg(argIdx)
ignoreFields, err := parseFieldsList(ignoreFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.ignoreFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", ignoreFieldsStr, addr, err)
}
extraFieldsStr := extraFieldsTCP.GetOptionalArg(argIdx)
extraFields, err := parseExtraFields(extraFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.extraFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", extraFieldsStr, addr, err)
}
doneCh := make(chan struct{})
go func() {
serveTCP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
close(doneCh)
}()
logger.Infof("started accepting syslog messages at -syslog.listenAddr.tcp=%q", addr)
<-workersStopCh
if err := ln.Close(); err != nil {
logger.Fatalf("syslog: cannot close TCP listener at %s: %s", addr, err)
}
<-doneCh
logger.Infof("finished accepting syslog messages at -syslog.listenAddr.tcp=%q", addr)
}
func checkCompressMethod(compressMethod, addr, protocol string) {
switch compressMethod {
case "", "none", "gzip", "deflate":
return
default:
logger.Fatalf("unsupported -syslog.compressMethod.%s=%q for -syslog.listenAddr.%s=%q; supported values: 'none', 'gzip', 'deflate'", protocol, compressMethod, protocol, addr)
}
}
func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
gomaxprocs := cgroup.AvailableCPUs()
var wg sync.WaitGroup
localAddr := ln.LocalAddr()
for i := 0; i < gomaxprocs; i++ {
wg.Add(1)
go func() {
defer wg.Done()
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
var bb bytesutil.ByteBuffer
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
for {
bb.Reset()
bb.B = bb.B[:cap(bb.B)]
n, remoteAddr, err := ln.ReadFrom(bb.B)
if err != nil {
udpErrorsTotal.Inc()
var ne net.Error
if errors.As(err, &ne) {
if ne.Temporary() {
logger.Errorf("syslog: temporary error when listening for UDP at %q: %s", localAddr, err)
time.Sleep(time.Second)
continue
}
if strings.Contains(err.Error(), "use of closed network connection") {
break
}
}
logger.Errorf("syslog: cannot read UDP data from %s at %s: %s", remoteAddr, localAddr, err)
continue
}
bb.B = bb.B[:n]
udpRequestsTotal.Inc()
if err := processStream("udp", bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
logger.Errorf("syslog: cannot process UDP data from %s at %s: %s", remoteAddr, localAddr, err)
}
}
}()
}
wg.Wait()
}
func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
var cm ingestserver.ConnsMap
cm.Init("syslog")
var wg sync.WaitGroup
addr := ln.Addr()
for {
c, err := ln.Accept()
if err != nil {
var ne net.Error
if errors.As(err, &ne) {
if ne.Temporary() {
logger.Errorf("syslog: temporary error when listening for TCP addr %q: %s", addr, err)
time.Sleep(time.Second)
continue
}
if strings.Contains(err.Error(), "use of closed network connection") {
break
}
logger.Fatalf("syslog: unrecoverable error when accepting TCP connections at %q: %s", addr, err)
}
logger.Fatalf("syslog: unexpected error when accepting TCP connections at %q: %s", addr, err)
}
if !cm.Add(c) {
_ = c.Close()
break
}
wg.Add(1)
go func() {
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
if err := processStream("tcp", c, compressMethod, useLocalTimestamp, cp); err != nil {
logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
}
cm.Delete(c)
_ = c.Close()
wg.Done()
}()
}
cm.CloseAll(0)
wg.Wait()
}
// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
func processStream(protocol string, r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
if err := vlstorage.CanWriteData(); err != nil {
return err
}
lmp := cp.NewLogMessageProcessor("syslog_" + protocol)
err := processStreamInternal(r, compressMethod, useLocalTimestamp, lmp)
lmp.MustClose()
return err
}
func processStreamInternal(r io.Reader, compressMethod string, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
switch compressMethod {
case "", "none":
case "gzip":
zr, err := common.GetGzipReader(r)
if err != nil {
return fmt.Errorf("cannot read gzipped data: %w", err)
}
r = zr
case "deflate":
zr, err := common.GetZlibReader(r)
if err != nil {
return fmt.Errorf("cannot read deflated data: %w", err)
}
r = zr
default:
logger.Panicf("BUG: unsupported compressMethod=%q; supported values: none, gzip, deflate", compressMethod)
}
err := processUncompressedStream(r, useLocalTimestamp, lmp)
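// Return the pooled decompression readers obtained above to their pools.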
switch compressMethod {
case "gzip":
zr := r.(*gzip.Reader)
common.PutGzipReader(zr)
case "deflate":
zr := r.(io.ReadCloser)
common.PutZlibReader(zr)
}
return err
}
func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
slr := getSyslogLineReader(wcr)
defer putSyslogLineReader(slr)
n := 0
for {
ok := slr.nextLine()
wcr.DecConcurrency()
if !ok {
break
}
currentYear := int(globalCurrentYear.Load())
err := processLine(slr.line, currentYear, globalTimezone, useLocalTimestamp, lmp)
if err != nil {
errorsTotal.Inc()
return fmt.Errorf("cannot read line #%d: %s", n, err)
}
n++
}
return slr.Error()
}
type syslogLineReader struct {
line []byte
br *bufio.Reader
err error
}
func (slr *syslogLineReader) reset(r io.Reader) {
slr.line = slr.line[:0]
slr.br.Reset(r)
slr.err = nil
}
// Error returns the last error that occurred in slr.
func (slr *syslogLineReader) Error() error {
if slr.err == nil || slr.err == io.EOF {
return nil
}
return slr.err
}
// nextLine reads the next syslog line from slr and stores it at slr.line.
//
// false is returned if the next line cannot be read. In this case Error() must be called
// in order to distinguish a read error from the normal end of the stream.
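//
// Illustrative frames for the two framing methods handled below (assumed inputs):
//
//	octet-counting:  "11 hello world"  -> line "hello world"
//	octet-stuffing:  "hello world\n"   -> line "hello world"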
func (slr *syslogLineReader) nextLine() bool {
if slr.err != nil {
return false
}
again:
prefix, err := slr.br.ReadSlice(' ')
if err != nil {
if err != io.EOF {
slr.err = fmt.Errorf("cannot read message frame prefix: %w", err)
return false
}
if len(prefix) == 0 {
slr.err = err
return false
}
}
// skip empty lines
for len(prefix) > 0 && prefix[0] == '\n' {
prefix = prefix[1:]
}
if len(prefix) == 0 {
// The prefix consists of empty lines only - try reading the next prefix.
goto again
}
if prefix[0] >= '0' && prefix[0] <= '9' {
// This is octet-counting method. See https://www.ietf.org/archive/id/draft-gerhards-syslog-plain-tcp-07.html#msgxfer
msgLenStr := bytesutil.ToUnsafeString(prefix[:len(prefix)-1])
msgLen, err := strconv.ParseUint(msgLenStr, 10, 64)
if err != nil {
slr.err = fmt.Errorf("cannot parse message length from %q: %w", msgLenStr, err)
return false
}
if maxMsgLen := insertutils.MaxLineSizeBytes.IntN(); msgLen > uint64(maxMsgLen) {
slr.err = fmt.Errorf("cannot read message longer than %d bytes; msgLen=%d", maxMsgLen, msgLen)
return false
}
slr.line = slicesutil.SetLength(slr.line, int(msgLen))
if _, err := io.ReadFull(slr.br, slr.line); err != nil {
slr.err = fmt.Errorf("cannot read message with size %d bytes: %w", msgLen, err)
return false
}
return true
}
// This is octet-stuffing method. See https://www.ietf.org/archive/id/draft-gerhards-syslog-plain-tcp-07.html#octet-stuffing-legacy
slr.line = append(slr.line[:0], prefix...)
for {
line, err := slr.br.ReadSlice('\n')
if err == nil {
slr.line = append(slr.line, line[:len(line)-1]...)
return true
}
if err == io.EOF {
slr.line = append(slr.line, line...)
return true
}
if err == bufio.ErrBufferFull {
slr.line = append(slr.line, line...)
continue
}
slr.err = fmt.Errorf("cannot read message in octet-stuffing method: %w", err)
return false
}
}
func getSyslogLineReader(r io.Reader) *syslogLineReader {
v := syslogLineReaderPool.Get()
if v == nil {
br := bufio.NewReaderSize(r, 64*1024)
return &syslogLineReader{
br: br,
}
}
slr := v.(*syslogLineReader)
slr.reset(r)
return slr
}
func putSyslogLineReader(slr *syslogLineReader) {
syslogLineReaderPool.Put(slr)
}
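// syslogLineReaderPool reuses syslogLineReader objects (and their 64KB read buffers)
// across connections in order to reduce allocations.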
var syslogLineReaderPool sync.Pool
func processLine(line []byte, currentYear int, timezone *time.Location, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
p := logstorage.GetSyslogParser(currentYear, timezone)
lineStr := bytesutil.ToUnsafeString(line)
p.Parse(lineStr)
var ts int64
if useLocalTimestamp {
ts = time.Now().UnixNano()
} else {
nsecs, err := insertutils.ExtractTimestampFromFields("timestamp", p.Fields)
if err != nil {
return fmt.Errorf("cannot get timestamp from syslog line %q: %w", line, err)
}
ts = nsecs
}
logstorage.RenameField(p.Fields, msgFields, "_msg")
lmp.AddRow(ts, p.Fields, nil)
logstorage.PutSyslogParser(p)
return nil
}
var msgFields = []string{"message"}
var (
errorsTotal = metrics.NewCounter(`vl_errors_total{type="syslog"}`)
udpRequestsTotal = metrics.NewCounter(`vl_udp_requests_total{type="syslog"}`)
udpErrorsTotal = metrics.NewCounter(`vl_udp_errors_total{type="syslog"}`)
)
func parseFieldsList(s string) ([]string, error) {
if s == "" {
return nil, nil
}
var a []string
err := json.Unmarshal([]byte(s), &a)
return a, err
}
func parseExtraFields(s string) ([]logstorage.Field, error) {
if s == "" {
return nil, nil
}
var m map[string]string
if err := json.Unmarshal([]byte(s), &m); err != nil {
return nil, err
}
fields := make([]logstorage.Field, 0, len(m))
for k, v := range m {
fields = append(fields, logstorage.Field{
Name: k,
Value: v,
})
}
sort.Slice(fields, func(i, j int) bool {
return fields[i].Name < fields[j].Name
})
return fields, nil
}
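// A minimal sketch of how the JSON-encoded flag values are parsed
// (hypothetical values; parseFieldsList and parseExtraFields are defined above):
//
//	fields, _ := parseFieldsList(`["hostname","app_name"]`)
//	// fields = []string{"hostname", "app_name"}
//
//	extra, _ := parseExtraFields(`{"env":"prod","dc":"eu-west"}`)
//	// extra = [{dc eu-west} {env prod}] - entries are sorted by field name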

View File

@@ -0,0 +1,129 @@
package syslog
import (
"bytes"
"reflect"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestSyslogLineReader_Success(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
r := bytes.NewBufferString(data)
slr := getSyslogLineReader(r)
defer putSyslogLineReader(slr)
var lines []string
for slr.nextLine() {
lines = append(lines, string(slr.line))
}
if err := slr.Error(); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines read;\ngot\n%q\nwant\n%q", lines, linesExpected)
}
}
f("", nil)
f("\n", nil)
f("\n\n\n", nil)
f("foobar", []string{"foobar"})
f("foobar\n", []string{"foobar\n"})
f("\n\nfoo\n\nbar\n\n", []string{"foo\n\nbar\n\n"})
f(`Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...`, []string{"Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches..."})
f(`Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
`, []string{
"Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...",
"<165>Jun 4 12:08:33 abcd systemd[345]: abc defg",
`<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.`,
})
}
func TestSyslogLineReader_Failure(t *testing.T) {
f := func(data string) {
t.Helper()
r := bytes.NewBufferString(data)
slr := getSyslogLineReader(r)
defer putSyslogLineReader(slr)
if slr.nextLine() {
t.Fatalf("expecting failure to read the first line")
}
if err := slr.Error(); err == nil {
t.Fatalf("expecting non-nil error")
}
}
// invalid format for message size
f("12foo bar")
// too big message size
f("123 aa")
f("1233423432 abc")
}
func TestProcessStreamInternal_Success(t *testing.T) {
f := func(data string, currentYear int, timestampsExpected []int64, resultExpected string) {
t.Helper()
MustInit()
defer MustStop()
globalTimezone = time.UTC
globalCurrentYear.Store(int64(currentYear))
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal(r, "", false, tlp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
data := `Jun 3 12:08:33 abcd systemd: Starting Update the local ESM caches...
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
`
currentYear := 2023
timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
resultExpected := `{"format":"rfc3164","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
f(data, currentYear, timestampsExpected, resultExpected)
}
func TestProcessStreamInternal_Failure(t *testing.T) {
f := func(data string) {
t.Helper()
MustInit()
defer MustStop()
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal(r, "", false, tlp); err == nil {
t.Fatalf("expecting non-nil error")
}
}
// invalid format for message size
f("12foo bar")
// too big message size
f("123 foo")
f("123456789 bar")
}

app/vlogscli/Makefile
View File

@@ -0,0 +1,109 @@
# All these commands must be run from the repository root.
vlogscli:
APP_NAME=vlogscli $(MAKE) app-local
vlogscli-race:
APP_NAME=vlogscli RACE=-race $(MAKE) app-local
vlogscli-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker
vlogscli-pure-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-pure
vlogscli-linux-amd64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-linux-amd64
vlogscli-linux-arm-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-linux-arm
vlogscli-linux-arm64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-linux-arm64
vlogscli-linux-ppc64le-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-linux-ppc64le
vlogscli-linux-386-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-linux-386
vlogscli-darwin-amd64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-darwin-amd64
vlogscli-darwin-arm64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-darwin-arm64
vlogscli-freebsd-amd64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-freebsd-amd64
vlogscli-openbsd-amd64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-openbsd-amd64
vlogscli-windows-amd64-prod:
APP_NAME=vlogscli $(MAKE) app-via-docker-windows-amd64
package-vlogscli:
APP_NAME=vlogscli $(MAKE) package-via-docker
package-vlogscli-pure:
APP_NAME=vlogscli $(MAKE) package-via-docker-pure
package-vlogscli-amd64:
APP_NAME=vlogscli $(MAKE) package-via-docker-amd64
package-vlogscli-arm:
APP_NAME=vlogscli $(MAKE) package-via-docker-arm
package-vlogscli-arm64:
APP_NAME=vlogscli $(MAKE) package-via-docker-arm64
package-vlogscli-ppc64le:
APP_NAME=vlogscli $(MAKE) package-via-docker-ppc64le
package-vlogscli-386:
APP_NAME=vlogscli $(MAKE) package-via-docker-386
publish-vlogscli:
APP_NAME=vlogscli $(MAKE) publish-via-docker
vlogscli-linux-amd64:
APP_NAME=vlogscli CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-linux-arm:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
vlogscli-linux-arm64:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
vlogscli-linux-ppc64le:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
vlogscli-linux-s390x:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
vlogscli-linux-loong64:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
vlogscli-linux-386:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
vlogscli-darwin-amd64:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-darwin-arm64:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
vlogscli-freebsd-amd64:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-openbsd-amd64:
APP_NAME=vlogscli CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-windows-amd64:
GOARCH=amd64 APP_NAME=vlogscli $(MAKE) app-local-windows-goarch
vlogscli-pure:
APP_NAME=vlogscli $(MAKE) app-local-pure
run-vlogscli:
APP_NAME=vlogscli $(MAKE) run-via-docker

View File

@@ -1 +1,5 @@
VictoriaLogs source code has been moved to [github.com/VictoriaMetrics/VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/).
# vlogscli
Command-line utility for querying [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/).
See [these docs](https://docs.victoriametrics.com/victorialogs/querying/vlogscli/).

View File

@@ -0,0 +1,6 @@
ARG base_image=non-existing
FROM $base_image
ENTRYPOINT ["/vlogscli-prod"]
ARG src_binary=non-existing
COPY $src_binary ./vlogscli-prod

View File

@@ -0,0 +1,245 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"io"
"sort"
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
type outputMode int
const (
outputModeJSONMultiline = outputMode(0)
outputModeJSONSingleline = outputMode(1)
outputModeLogfmt = outputMode(2)
outputModeCompact = outputMode(3)
)
func getOutputFormatter(outputMode outputMode) func(w io.Writer, fields []logstorage.Field) error {
switch outputMode {
case outputModeJSONMultiline:
return func(w io.Writer, fields []logstorage.Field) error {
return writeJSONObject(w, fields, true)
}
case outputModeJSONSingleline:
return func(w io.Writer, fields []logstorage.Field) error {
return writeJSONObject(w, fields, false)
}
case outputModeLogfmt:
return writeLogfmtObject
case outputModeCompact:
return writeCompactObject
default:
panic(fmt.Errorf("BUG: unexpected outputMode=%d", outputMode))
}
}
type jsonPrettifier struct {
r io.ReadCloser
formatter func(w io.Writer, fields []logstorage.Field) error
d *json.Decoder
pr *io.PipeReader
pw *io.PipeWriter
bw *bufio.Writer
wg sync.WaitGroup
}
func newJSONPrettifier(r io.ReadCloser, outputMode outputMode) *jsonPrettifier {
d := json.NewDecoder(r)
pr, pw := io.Pipe()
bw := bufio.NewWriter(pw)
formatter := getOutputFormatter(outputMode)
jp := &jsonPrettifier{
r: r,
formatter: formatter,
d: d,
pr: pr,
pw: pw,
bw: bw,
}
jp.wg.Add(1)
go func() {
defer jp.wg.Done()
err := jp.prettifyJSONLines()
jp.closePipesWithError(err)
}()
return jp
}
func (jp *jsonPrettifier) closePipesWithError(err error) {
_ = jp.pr.CloseWithError(err)
_ = jp.pw.CloseWithError(err)
}
func (jp *jsonPrettifier) prettifyJSONLines() error {
for jp.d.More() {
fields, err := readNextJSONObject(jp.d)
if err != nil {
return err
}
sort.Slice(fields, func(i, j int) bool {
return fields[i].Name < fields[j].Name
})
if err := jp.formatter(jp.bw, fields); err != nil {
return err
}
// Flush bw after every output line in order to show results as soon as they appear.
if err := jp.bw.Flush(); err != nil {
return err
}
}
return nil
}
func (jp *jsonPrettifier) Close() error {
jp.closePipesWithError(io.ErrUnexpectedEOF)
err := jp.r.Close()
jp.wg.Wait()
return err
}
func (jp *jsonPrettifier) Read(p []byte) (int, error) {
return jp.pr.Read(p)
}
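// A minimal usage sketch (hypothetical response body; jsonPrettifier is unexported,
// so this mirrors how code in this package is expected to wire it up):
//
//	jp := newJSONPrettifier(resp.Body, outputModeLogfmt)
//	defer func() { _ = jp.Close() }()
//	_, _ = io.Copy(os.Stdout, jp)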
func readNextJSONObject(d *json.Decoder) ([]logstorage.Field, error) {
t, err := d.Token()
if err != nil {
return nil, fmt.Errorf("cannot read '{': %w", err)
}
delim, ok := t.(json.Delim)
if !ok || delim.String() != "{" {
return nil, fmt.Errorf("unexpected token read; got %q; want '{'", delim)
}
var fields []logstorage.Field
for {
// Read object key
t, err := d.Token()
if err != nil {
return nil, fmt.Errorf("cannot read JSON object key or closing brace: %w", err)
}
delim, ok := t.(json.Delim)
if ok {
if delim.String() == "}" {
return fields, nil
}
return nil, fmt.Errorf("unexpected delimiter read; got %q; want '}'", delim)
}
key, ok := t.(string)
if !ok {
return nil, fmt.Errorf("unexpected token read for object key: %v; want string or '}'", t)
}
// read object value
t, err = d.Token()
if err != nil {
return nil, fmt.Errorf("cannot read JSON object value: %w", err)
}
value, ok := t.(string)
if !ok {
return nil, fmt.Errorf("unexpected token read for oject value: %v; want string", t)
}
fields = append(fields, logstorage.Field{
Name: key,
Value: value,
})
}
}
func writeLogfmtObject(w io.Writer, fields []logstorage.Field) error {
data := logstorage.MarshalFieldsToLogfmt(nil, fields)
_, err := fmt.Fprintf(w, "%s\n", data)
return err
}
func writeCompactObject(w io.Writer, fields []logstorage.Field) error {
if len(fields) == 1 {
// Just write field value as is without name
_, err := fmt.Fprintf(w, "%s\n", fields[0].Value)
return err
}
if len(fields) == 2 && (fields[0].Name == "_time" || fields[1].Name == "_time") {
// Write _time\tfieldValue as is
if fields[0].Name == "_time" {
_, err := fmt.Fprintf(w, "%s\t%s\n", fields[0].Value, fields[1].Value)
return err
}
_, err := fmt.Fprintf(w, "%s\t%s\n", fields[1].Value, fields[0].Value)
return err
}
// Fall back to logfmt
return writeLogfmtObject(w, fields)
}
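// Illustrative compact output (assumed field values):
//
//	[{_msg:"hello"}]                                -> "hello\n"
//	[{_time:"2023-06-03T17:42:12Z"} {_msg:"hello"}] -> "2023-06-03T17:42:12Z\thello\n"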
func writeJSONObject(w io.Writer, fields []logstorage.Field, isMultiline bool) error {
if len(fields) == 0 {
fmt.Fprintf(w, "{}\n")
return nil
}
fmt.Fprintf(w, "{")
writeNewlineIfNeeded(w, isMultiline)
if err := writeJSONObjectKeyValue(w, fields[0], isMultiline); err != nil {
return err
}
for _, f := range fields[1:] {
fmt.Fprintf(w, ",")
writeNewlineIfNeeded(w, isMultiline)
if err := writeJSONObjectKeyValue(w, f, isMultiline); err != nil {
return err
}
}
writeNewlineIfNeeded(w, isMultiline)
fmt.Fprintf(w, "}\n")
return nil
}
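// Illustrative output shapes for fields {a:"b", c:"d"} (assumed values):
//
//	single-line: {"a":"b","c":"d"}
//	multiline:   the same object with "{", each key-value pair and "}" on separate lines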
func writeNewlineIfNeeded(w io.Writer, isMultiline bool) {
if isMultiline {
fmt.Fprintf(w, "\n")
}
}
func writeJSONObjectKeyValue(w io.Writer, f logstorage.Field, isMultiline bool) error {
key := getJSONString(f.Name)
value := getJSONString(f.Value)
if isMultiline {
_, err := fmt.Fprintf(w, " %s: %s", key, value)
return err
}
_, err := fmt.Fprintf(w, "%s:%s", key, value)
return err
}
func getJSONString(s string) string {
data, err := json.Marshal(s)
if err != nil {
panic(fmt.Errorf("unexpected error when marshaling string to JSON: %w", err))
}
return jsonHTMLReplacer.Replace(string(data))
}
var jsonHTMLReplacer = strings.NewReplacer(
`\u003c`, "\u003c",
`\u003e`, "\u003e",
`\u0026`, "\u0026",
)

View File

@@ -0,0 +1,120 @@
package main
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
"sync"
"syscall"
"github.com/mattn/go-isatty"
)
func isTerminal() bool {
return isatty.IsTerminal(os.Stdout.Fd()) && isatty.IsTerminal(os.Stderr.Fd())
}
func readWithLess(r io.Reader, wrapLongLines bool) error {
if !isTerminal() {
// Just write everything to stdout if no terminal is available.
_, err := io.Copy(os.Stdout, r)
if err != nil && !isErrPipe(err) {
return fmt.Errorf("error when forwarding data to stdout: %w", err)
}
if err := os.Stdout.Sync(); err != nil {
return fmt.Errorf("cannot sync data to stdout: %w", err)
}
return nil
}
pr, pw, err := os.Pipe()
if err != nil {
return fmt.Errorf("cannot create pipe: %w", err)
}
defer func() {
_ = pr.Close()
_ = pw.Close()
}()
// Ignore Ctrl+C in the current process, so that 'less' can handle it properly
cancel := ignoreSignals(os.Interrupt)
defer cancel()
// Start 'less' process
path, err := exec.LookPath("less")
if err != nil {
return fmt.Errorf("cannot find 'less' command: %w", err)
}
opts := []string{"less", "-F", "-X"}
if !wrapLongLines {
opts = append(opts, "-S")
}
p, err := os.StartProcess(path, opts, &os.ProcAttr{
Env: append(os.Environ(), "LESSCHARSET=utf-8"),
Files: []*os.File{pr, os.Stdout, os.Stderr},
})
if err != nil {
return fmt.Errorf("cannot start 'less' process: %w", err)
}
// Close pr in a parallel goroutine after 'less' finishes,
// so that the data forwarding below is unblocked if 'less' exits early.
waitch := make(chan *os.ProcessState)
go func() {
// Wait for 'less' process to finish.
ps, err := p.Wait()
if err != nil {
fatalf("unexpected error when waiting for 'less' process: %w", err)
}
_ = pr.Close()
waitch <- ps
}()
// Forward data from r to 'less'
_, err = io.Copy(pw, r)
_ = pw.Sync()
_ = pw.Close()
// Wait until 'less' finishes
ps := <-waitch
// Verify 'less' status.
if !ps.Success() {
return fmt.Errorf("'less' finished with unexpected code %d", ps.ExitCode())
}
if err != nil && !isErrPipe(err) {
return fmt.Errorf("error when forwarding data to 'less': %w", err)
}
return nil
}
func isErrPipe(err error) bool {
return errors.Is(err, syscall.EPIPE) || errors.Is(err, io.ErrClosedPipe)
}
func ignoreSignals(sigs ...os.Signal) func() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, sigs...)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for {
_, ok := <-ch
if !ok {
return
}
}
}()
return func() {
signal.Stop(ch)
close(ch)
wg.Wait()
}
}
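// A minimal usage sketch (hypothetical input; readWithLess falls back to plain stdout
// when no terminal is attached):
//
//	if err := readWithLess(strings.NewReader(bigOutput), false); err != nil {
//	    fatalf("cannot page the output: %s", err)
//	}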

Some files were not shown because too many files have changed in this diff