Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Synced 2026-05-17 08:36:55 +03:00

Compare commits: fs-paralle...issue-1056 (312 commits)
Commits in this range (SHA1):

782693b6be, 186db07c5d, 13ab8cfb78, f8a101e45e, a1a35fd870, 0d5df2722d, db3353c6e1, cfbc5ae31d,
fdb3c96fc1, 486d923351, f8552bdc96, 893c981c57, 3d7ff783b6, 78543b7f87, f54d22562a, b672e05dce,
847871b916, d93a62afd0, 2aecca1163, d1efb2dd37, 6882c72075, 60eb543dba, 7db42b0659, 8d924f0631,
791679253d, a745bb797a, 3607c53b7c, 7969647553, 5f887b66c5, d3e2946791, 603dc03c7d, 1cc471a6c1,
d40adb1e58, 8056806d5f, 3d67942a65, 23bdd14cee, 18a2955553, 570a9ef627, 40e27fc2c8, befbf9afca,
65d0a8e129, c2841ca36c, cd2026e430, 216821aa1c, ef507d372b, e383b62f59, 8f34284dd2, 8f4eca39f7,
d467faf739, 673b2ca7db, 40ccf0c333, fe341a4204, 83ebf00659, 5e602726f5, a6200cc83d, a5811d3c3b,
5962b47c31, 9a4edc738a, 30d01e9cae, 6b46f3920c, 97b11146ee, 2ef74bd6ea, 845161e377, f176a6624a,
4d06e34b66, 6d8ddcb9ed, dd4167709a, 71e253e1f0, 9e155ffd9e, 2e9e40dc75, 10d4294f9b, 5e77771668,
dda5545078, 087efbc451, 68e64536b1, 6e3ce4d55c, 8d1b88f985, 3d3c057d52, 94622fef29, 804d77ffc5,
79b18e9742, 3404a47a6d, 0b8205ef46, 53514febdc, 8531d86da0, a47d32e129, df96f4d3ab, 84dc5453ad,
8093d98c0e, 809f9471df, f9d6d2e428, 32eac31416, 4d4c1ff72e, 645ce2b6b3, 89600bd229, 9b3a60efee,
a8c5934d1b, 43544fdb63, 7a4df5755a, 83bcbc43d1, 79921cf434, 40402fdac3, 05943abc11, e66e71c87e,
7f682c4c76, 4947cd7f14, 5ea7314912, 655f0e9c1d, 2ffd25a120, 175fcf6676, c05516afbe, 6b12684e56,
8f7c94f512, 4a6259a9b2, d5b9d3e641, 6863de2c0e, 51a3e4e27a, d7046d6e19, 7e6c03e9c6, 5267f35104,
172ff84299, a3f955dd84, 19e7d986fe, db2ad6f900, db1f3f4ab8, 7386a35942, 6be2d89008, e5c8581bad,
14bc51554b, 7db81d062c, ad62fe88ed, 40b85eb211, 88b2464fe8, e4221f97a7, d40696a2f2, b2a74ec494,
9774fe8df1, efd3b66609, 785daff65d, e3a57a3d80, 161633158c, 7b708a8947, 16d5f281fe, 6846ca09cb,
71997bc754, 2ec6fafed0, a8ac5dfae5, 6292d5fefa, 2a09f25f78, 6824ade224, 3a3c2084d3, 3d6f353430,
b1f333093b, 19403b9cd1, 4edff7eae2, ce4b131816, cf69c56bb7, 42ec981fe9, 35e287d740, 9df9a77169,
17c514d2fa, c12512bdd7, a108da8215, 4e7606f669, 060d7f6ed1, b3c1b00e4d, a65f693649, 6285bc4179,
e89f131e34, 493c1d410f, b0029ee933, 97e1308386, a279517034, f7ba76a59d, 60dbd5a97e, 32ddfa973b,
d9554a3a22, fbab6403dc, 07dd79608b, 5915c57b46, f36e1857c0, 04f4a28cf4, 7f3d370244, c89b7f7ad5,
d9dabea303, 09d2ce36e8, 08755c838b, d2e438ef41, e508fa5fe2, 9a7deca207, 60cadfbad1, b36c8b1110,
90f0405b11, eac0a7ed86, a8a99105b1, c7f52992e7, 5fe14e5479, c7ef079eba, 424d007a39, ad4562cd56,
f9895d7e5e, 6bc809813b, 9b40fd00e0, 9d59a31290, 8391be18be, 8feb8c17aa, 634b4d035d, 1db7597e45,
23fe7db35c, 817f2dc9e7, 731ba17962, bb163692ba, 952ef51cd1, 1fc548b63a, aa5236877c, c705da74f6,
2e9bda2bff, e1413536fc, 1a438a04ba, 4ad47d6fe3, 879443f915, bd6788cb8f, 22696f378c, 7205f479aa,
5c7031c000, a227128467, 777c8913b3, e35a9a366c, 6bbc03ecf8, ca34ae48b4, f18fd37433, f191a052dc,
0fdd5cb435, 76dd8f4adb, e31abfc25c, ac6d9d632f, e43de2a2b3, efe4a3b2dd, 3bf5f0297b, 5632ccc64a,
446452857c, 4ff409eb27, 1b7f0172d2, 1c77ee9527, 2a0e382a99, 02c8ea5a48, 34f242a6b8, bc8f6c5688,
c0fe67c2db, ede1c2cde9, ad34a5eb53, eaf7a68c92, c5e43e1c91, b343f541f0, a23a902953, 54c60706ca,
cd2e11b7cf, 5423d5e93a, 48819b6781, c4bff27f46, 432b313a48, 7bd5d19f62, 8d18bc288f, ff6e5c2983,
23af0086d8, 8657470068, 3f16bc7cb2, 655a0eb0c3, 7cbd2a8600, 5f67f04f6b, 2056e5b46d, 4d1f262ec4,
afca599a46, d667f694bc, fe2c60c79b, 36460f6297, d107dee9c7, b33d7c3ef9, d3848f6802, 415ff27c74,
90f59383b2, 8fec7005d0, 4d42b291e5, 50f4fbf28e, a5da6afb88, 71f9e7f2c4, eb7c5df65e, 5af493297a,
2f61fa867e, 729b1099d8, 945ca569b9, 7fb8a8a0b2, 89f95f74ed, 46e13fe0ca, 50d8ad6733, 3b8550adb1,
1708b73312, 57defe7ab4, d58cfb7f36, a244750bc6, f06e7f9a6e, 7a5003212e, 846392405e, 37c3d8c26b,
8bc0475ee7, 89414062bf, 67c51b009d, e8160fc8fb, e3a4ceaef3, e9cedca8c8, b720e55c13, ab1429c896
**.github/copilot-instructions.md** (vendored, 23 lines removed)

```diff
@@ -1,23 +0,0 @@
-# Project Overview
-
-VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
-
-## Folder Structure
-
-- `/app`: Contains the compilable binaries.
-- `/lib`: Contains the golang reusable libraries
-- `/docs/victoriametrics`: Contains documentation for the project.
-- `/apptest/tests`: Contains integration tests.
-
-## Libraries and Frameworks
-
-- Backend: Golang, no framework. Use third-party libraries sparingly.
-- Frontend: React.
-
-## Code review guidelines
-
-Ensure the feature or bugfix includes a changelog entry in /docs/victoriametrics/changelog/CHANGELOG.md.
-Verify the entry is under the ## tip section and matches the structure and style of existing entries.
-Chore-only changes may be omitted from the changelog.
```
**.github/workflows/build.yml** (vendored, 3 changed lines)

```diff
@@ -71,7 +71,8 @@ jobs:
            go.sum
            Makefile
            app/**/Makefile
-          go-version: stable
+          go-version-file: 'go.mod'
+      - run: go version

       - name: Build victoria-metrics for ${{ matrix.os }}-${{ matrix.arch }}
         run: make victoria-metrics-${{ matrix.os }}-${{ matrix.arch }}
```
**.github/workflows/check-licenses.yml** (vendored, 6 changed lines)

```diff
@@ -21,9 +21,11 @@ jobs:
         id: go
         uses: actions/setup-go@v6
         with:
-          go-version: stable
+          go-version-file: 'go.mod'
           cache: false

+      - run: go version
+
       - name: Cache Go artifacts
         uses: actions/cache@v4
         with:
@@ -32,7 +34,7 @@ jobs:
             ~/go/pkg/mod
             ~/go/bin
           key: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
-          restore-keys: go-artifacts-${{ runner.os }}-check-licenses-
+          restore-keys: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-

       - name: Check License
         run: make check-licenses
```
**.github/workflows/codeql-analysis-go.yml** (vendored, 5 changed lines)

```diff
@@ -36,7 +36,8 @@ jobs:
         uses: actions/setup-go@v6
         with:
           cache: false
-          go-version: stable
+          go-version-file: 'go.mod'
+      - run: go version

       - name: Cache Go artifacts
         uses: actions/cache@v4
@@ -46,7 +47,7 @@ jobs:
             ~/go/bin
             ~/go/pkg/mod
           key: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
-          restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-
+          restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-

       - name: Initialize CodeQL
         uses: github/codeql-action/init@v4
```
**.github/workflows/test.yml** (vendored, 23 changed lines)

```diff
@@ -42,8 +42,9 @@ jobs:
            go.sum
            Makefile
            app/**/Makefile
-          go-version: stable
+          go-version-file: 'go.mod'
+      - run: go version

       - name: Cache golangci-lint
         uses: actions/cache@v4
@@ -51,7 +52,7 @@ jobs:
           path: |
             ~/.cache/golangci-lint
             ~/go/bin
-          key: golangci-lint-${{ runner.os }}-${{ hashFiles('.golangci.yml') }}
+          key: golangci-lint-${{ runner.os }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('.golangci.yml') }}

       - name: Run check-all
         run: |
@@ -81,19 +82,20 @@ jobs:
            go.sum
            Makefile
            app/**/Makefile
-          go-version: stable
+          go-version-file: 'go.mod'
+      - run: go version

       - name: Run tests
-        run: GOGC=10 make ${{ matrix.scenario}}
+        run: make ${{ matrix.scenario}}

       - name: Publish coverage
         uses: codecov/codecov-action@v5
         with:
           files: ./coverage.txt

-  integration:
-    name: integration
-    runs-on: ubuntu-latest
+  apptest:
+    name: apptest
+    runs-on: apptest

     steps:
       - name: Code checkout
@@ -107,7 +109,8 @@ jobs:
            go.sum
            Makefile
            app/**/Makefile
-          go-version: stable
+          go-version-file: 'go.mod'
+      - run: go version

-      - name: Run integration tests
-        run: make integration-test
+      - name: Run app tests
+        run: make apptest
```
**.github/workflows/vmui.yml** (vendored, 26 changed lines)

```diff
@@ -34,33 +34,39 @@ jobs:
       - name: Code checkout
         uses: actions/checkout@v6

-      - name: Setup Node
-        uses: actions/setup-node@v6
-        with:
-          node-version: '24.x'
-
-      - name: Cache node-modules
-        uses: actions/cache@v4
+      - name: Cache node_modules
+        id: cache
+        uses: actions/cache@v5
         with:
-          path: |
-            app/vmui/packages/vmui/node_modules
-          key: vmui-artifacts-${{ runner.os }}-${{ hashFiles('package-lock.json') }}
-          restore-keys: vmui-artifacts-${{ runner.os }}-
+          path: app/vmui/packages/vmui/node_modules
+          key: vmui-deps-${{ runner.os }}-${{ hashFiles('app/vmui/packages/vmui/package-lock.json', 'app/vmui/Dockerfile-build') }}
+          restore-keys: |
+            vmui-deps-${{ runner.os }}-
+
+      - name: Install dependencies
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: make vmui-install

       - name: Run lint
         id: lint
         run: make vmui-lint
         continue-on-error: true
+        env:
+          VMUI_SKIP_INSTALL: true

       - name: Run tests
         id: test
         run: make vmui-test
         continue-on-error: true
+        env:
+          VMUI_SKIP_INSTALL: true

       - name: Run typecheck
         id: typecheck
         run: make vmui-typecheck
         continue-on-error: true
+        env:
+          VMUI_SKIP_INSTALL: true

       - name: Annotate Code Linting Results
         uses: ataylorme/eslint-annotate-action@v3
```
**LICENSE** (2 changed lines)

```diff
@@ -175,7 +175,7 @@

    END OF TERMS AND CONDITIONS

-   Copyright 2019-2025 VictoriaMetrics, Inc.
+   Copyright 2019-2026 VictoriaMetrics, Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
```
**Makefile** (31 changed lines)

```diff
@@ -17,7 +17,7 @@ EXTRA_GO_BUILD_TAGS ?=
 GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
 TAR_OWNERSHIP ?= --owner=1000 --group=1000

-GOLANGCI_LINT_VERSION := 2.7.2
+GOLANGCI_LINT_VERSION := 2.9.0

 .PHONY: $(MAKECMDGOALS)

@@ -443,7 +443,7 @@ fmt:
 	gofmt -l -w -s ./apptest

 vet:
-	GOEXPERIMENT=synctest go vet ./lib/...
+	go vet -tags 'synctest' ./lib/...
 	go vet ./app/...
 	go vet ./apptest/...

@@ -452,28 +452,25 @@ check-all: fmt vet golangci-lint govulncheck
 clean-checkers: remove-golangci-lint remove-govulncheck

 test:
-	GOEXPERIMENT=synctest go test ./lib/... ./app/...
+	go test -tags 'synctest' ./lib/... ./app/...

 test-race:
-	GOEXPERIMENT=synctest go test -race ./lib/... ./app/...
+	go test -tags 'synctest' -race ./lib/... ./app/...

 test-pure:
-	GOEXPERIMENT=synctest CGO_ENABLED=0 go test ./lib/... ./app/...
+	CGO_ENABLED=0 go test -tags 'synctest' ./lib/... ./app/...

 test-full:
-	GOEXPERIMENT=synctest go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

 test-full-386:
-	GOEXPERIMENT=synctest GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
-
-integration-test:
-	$(MAKE) apptest
+	GOARCH=386 go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

 apptest:
 	$(MAKE) victoria-metrics vmagent vmalert vmauth vmctl vmbackup vmrestore
 	go test ./apptest/... -skip="^Test(Cluster|Legacy).*"

-integration-test-legacy: victoria-metrics vmbackup vmrestore
+apptest-legacy: victoria-metrics vmbackup vmrestore
 	OS=$$(uname | tr '[:upper:]' '[:lower:]'); \
 	ARCH=$$(uname -m | tr '[:upper:]' '[:lower:]' | sed 's/x86_64/amd64/'); \
 	VERSION=v1.132.0; \

@@ -490,17 +487,17 @@ integration-test-legacy: victoria-metrics vmbackup vmrestore
 	go test ./apptest/tests -run="^TestLegacySingle.*"

 benchmark:
-	GOEXPERIMENT=synctest go test -bench=. ./lib/...
-	go test -bench=. ./app/...
+	go test -run=NO_TESTS -bench=. ./lib/...
+	go test -run=NO_TESTS -bench=. ./app/...

 benchmark-pure:
-	GOEXPERIMENT=synctest CGO_ENABLED=0 go test -bench=. ./lib/...
-	CGO_ENABLED=0 go test -bench=. ./app/...
+	CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./lib/...
+	CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./app/...

 vendor-update:
 	go get -u ./lib/...
 	go get -u ./app/...
-	go mod tidy -compat=1.24
+	go mod tidy -compat=1.26
 	go mod vendor

 app-local:

@@ -524,7 +521,7 @@ install-qtc:

 golangci-lint: install-golangci-lint
-	GOEXPERIMENT=synctest golangci-lint run
+	golangci-lint run --build-tags 'synctest'

 install-golangci-lint:
 	which golangci-lint && (golangci-lint --version | grep -q $(GOLANGCI_LINT_VERSION)) || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v$(GOLANGCI_LINT_VERSION)
```
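The Makefile hunks above replace the `GOEXPERIMENT=synctest` environment variable with an ordinary `synctest` build tag on `go test`, `go vet`, and `golangci-lint`. A minimal sketch of a test file guarded by that tag, assuming the stable `testing/synctest` API from Go 1.25 (the file and test names are illustrative, not from this repo):

```go
//go:build synctest

package example

import (
	"testing"
	"testing/synctest"
	"time"
)

// TestFakeClock runs inside a synctest "bubble": time is virtualized,
// so the one-hour Sleep below returns immediately in real time while
// the bubble's clock still advances by exactly one hour.
func TestFakeClock(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now()
		time.Sleep(time.Hour)
		if elapsed := time.Since(start); elapsed != time.Hour {
			t.Fatalf("unexpected elapsed time: %s", elapsed)
		}
	})
}
```

With the tag approach, `go vet -tags 'synctest'` and `golangci-lint run --build-tags 'synctest'` analyze the same files the tests compile, without relying on a toolchain experiment variable.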
**README.md** (19 changed lines)

```diff
@@ -16,16 +16,21 @@
 <img src="docs/victoriametrics/logo.webp" width="300" alt="VictoriaMetrics logo">
 </picture>

-VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
+VictoriaMetrics is a fast, cost-effective, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.

 Here are some resources and information about VictoriaMetrics:

 - Documentation: [docs.victoriametrics.com](https://docs.victoriametrics.com)
-- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
-- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), docker images [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
-- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/victoriametrics/enterprise/)
-- Changelog: [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics)
-- Community: [Slack](https://slack.victoriametrics.com/), [X (Twitter)](https://x.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)
+- **Case studies**: [Grammarly, Roblox, Wix, Spotify,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
+- **Available**: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), Docker images on [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
+- **Deployment types**: [Single-node version](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) under [Apache License 2.0](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE).
+- **Getting started:** Read [key concepts](https://docs.victoriametrics.com/victoriametrics/keyconcepts/) and follow the
+  [quick start guide](https://docs.victoriametrics.com/victoriametrics/quick-start/).
+- **Community**: [Slack](https://slack.victoriametrics.com/) (join via [Slack Inviter](https://slack.victoriametrics.com/)), [X (Twitter)](https://x.com/VictoriaMetrics), [YouTube](https://www.youtube.com/@VictoriaMetrics). See full list [here](https://docs.victoriametrics.com/victoriametrics/#community-and-contributions).
+- **Changelog**: Project evolves fast - check the [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics).
+- **Enterprise support:** [Contact us](mailto:info@victoriametrics.com) for commercial support with additional [enterprise features](https://docs.victoriametrics.com/victoriametrics/enterprise/).
+- **Enterprise releases:** Enterprise and [long-term support releases (LTS)](https://docs.victoriametrics.com/victoriametrics/lts-releases/) are publicly available and can be evaluated for free
+  using a [free trial license](https://victoriametrics.com/products/enterprise/trial/).
+- **Security:** we achieved [security certifications](https://victoriametrics.com/security/) for Database Software Development and Software-Based Monitoring Services.

 Yes, we open-source both the single-node VictoriaMetrics and the cluster version.
```
**SECURITY.md** (25 changed lines)

````diff
@@ -12,6 +12,31 @@ The following versions of VictoriaMetrics receive regular security fixes:

 See [this page](https://victoriametrics.com/security/) for more details.

+## Software Bill of Materials (SBOM)
+
+Every VictoriaMetrics container{{% available_from "#" %}} image published to
+[Docker Hub](https://hub.docker.com/u/victoriametrics)
+and [Quay.io](https://quay.io/organization/victoriametrics)
+includes an [SPDX](https://spdx.dev/) SBOM attestation
+generated automatically by BuildKit during
+`docker buildx build`.
+
+To inspect the SBOM for an image:
+
+```sh
+docker buildx imagetools inspect \
+  docker.io/victoriametrics/victoria-metrics:latest \
+  --format "{{ json .SBOM }}"
+```
+
+To scan an image using its SBOM attestation with
+[Trivy](https://github.com/aquasecurity/trivy):
+
+```sh
+trivy image --sbom-sources oci \
+  docker.io/victoriametrics/victoria-metrics:latest
+```
+
 ## Reporting a Vulnerability

 Please report any security issues to <security@victoriametrics.com>
````
```diff
@@ -134,6 +134,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 	}
 	w.Header().Add("Content-Type", "text/html; charset=utf-8")
 	fmt.Fprintf(w, "<h2>Single-node VictoriaMetrics</h2></br>")
+	fmt.Fprintf(w, "Version %s<br>", buildinfo.Version)
 	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/'>https://docs.victoriametrics.com/</a></br>")
 	fmt.Fprintf(w, "Useful endpoints:</br>")
 	httpserver.WriteAPIHelp(w, [][2]string{
```

```diff
@@ -29,11 +29,9 @@ var selfScraperWG sync.WaitGroup

 func startSelfScraper() {
 	selfScraperStopCh = make(chan struct{})
-	selfScraperWG.Add(1)
-	go func() {
-		defer selfScraperWG.Done()
+	selfScraperWG.Go(func() {
 		selfScraper(*selfScrapeInterval)
-	}()
+	})
 }

 func stopSelfScraper() {
```

```diff
@@ -33,13 +33,13 @@ func PopulateTimeTpl(b []byte, tGlobal time.Time) []byte {
 	}
 	switch strings.TrimSpace(parts[0]) {
 	case `TIME_S`:
-		return []byte(fmt.Sprintf("%d", t.Unix()))
+		return fmt.Appendf(nil, "%d", t.Unix())
 	case `TIME_MSZ`:
-		return []byte(fmt.Sprintf("%d", t.Unix()*1e3))
+		return fmt.Appendf(nil, "%d", t.Unix()*1e3)
 	case `TIME_MS`:
-		return []byte(fmt.Sprintf("%d", timeToMillis(t)))
+		return fmt.Appendf(nil, "%d", timeToMillis(t))
 	case `TIME_NS`:
-		return []byte(fmt.Sprintf("%d", t.UnixNano()))
+		return fmt.Appendf(nil, "%d", t.UnixNano())
 	default:
 		log.Fatalf("unknown time pattern %s in %s", parts[0], repl)
 	}
```
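For context on the `fmt.Appendf` swap above: `fmt.Appendf` (Go 1.19+) writes formatted output straight into a byte slice, skipping the intermediate string that `[]byte(fmt.Sprintf(...))` allocates. A small self-contained sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1700000000, 0)

	// Old shape: formats into a string, then copies it into a fresh []byte.
	b1 := []byte(fmt.Sprintf("%d", t.Unix()))

	// New shape: appends the formatted bytes to the given slice (nil here),
	// saving one allocation and one copy.
	b2 := fmt.Appendf(nil, "%d", t.Unix())

	fmt.Println(string(b1) == string(b2)) // true
}
```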
```diff
@@ -49,6 +49,11 @@ func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels
 			Name:  "__name__",
 			Value: m.Name,
 		})
+		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10557
+		labels = append(labels, prompb.Label{
+			Name:  "host",
+			Value: sketch.Host,
+		})
 		for _, label := range m.Labels {
 			labels = append(labels, prompb.Label{
 				Name:  label.Name,
@@ -57,9 +62,6 @@ func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels
 		}
 		for _, tag := range sketch.Tags {
 			name, value := datadogutil.SplitTag(tag)
-			if name == "host" {
-				name = "exported_host"
-			}
 			labels = append(labels, prompb.Label{
 				Name:  name,
 				Value: value,
```

```diff
@@ -245,6 +245,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 	}
 	w.Header().Add("Content-Type", "text/html; charset=utf-8")
 	fmt.Fprintf(w, "<h2>vmagent</h2>")
+	fmt.Fprintf(w, "Version %s<br>", buildinfo.Version)
 	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/victoriametrics/vmagent/'>https://docs.victoriametrics.com/victoriametrics/vmagent/</a></br>")
 	fmt.Fprintf(w, "Useful endpoints:</br>")
 	httpserver.WriteAPIHelp(w, [][2]string{
```

```diff
@@ -202,14 +202,10 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
 	c.retriesCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.sanitizedURL))
 	c.sendDuration = metrics.GetOrCreateFloatCounter(fmt.Sprintf(`vmagent_remotewrite_send_duration_seconds_total{url=%q}`, c.sanitizedURL))
 	metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_queues{url=%q}`, c.sanitizedURL), func() float64 {
-		return float64(*queues)
+		return float64(concurrency)
 	})
-	for i := 0; i < concurrency; i++ {
-		c.wg.Add(1)
-		go func() {
-			defer c.wg.Done()
-			c.runWorker()
-		}()
+	for range concurrency {
+		c.wg.Go(c.runWorker)
 	}
 	logger.Infof("initialized client for -remoteWrite.url=%q", c.sanitizedURL)
 }
```
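The worker-spawning rewrite above relies on `sync.WaitGroup.Go`, added in Go 1.25, which bundles the `Add(1)` / `go` / `defer Done()` steps into one call. A minimal runnable equivalent of both shapes (the `runWorker` helper here is illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

func runWorker(id int) { fmt.Println("worker", id) }

func main() {
	var wg sync.WaitGroup
	for i := range 4 { // range over int, Go 1.22+
		// Before Go 1.25:
		//   wg.Add(1)
		//   go func() { defer wg.Done(); runWorker(i) }()
		wg.Go(func() { runWorker(i) }) // Go 1.25+: Add/Done handled internally
	}
	wg.Wait()
}
```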
```diff
@@ -18,7 +18,7 @@ func TestCalculateRetryDuration(t *testing.T) {
 	f := func(retryAfterDuration, retryDuration time.Duration, n int, expectMinDuration time.Duration) {
 		t.Helper()

-		for i := 0; i < n; i++ {
+		for range n {
 			retryDuration = getRetryDuration(retryAfterDuration, retryDuration, time.Minute)
 		}
```
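Many hunks here and below modernize counted loops with Go 1.22's range-over-int. The two forms are equivalent; the index-less `for range n` is used whenever the counter itself is unused:

```go
package main

import "fmt"

func main() {
	// for i := 0; i < 3; i++ { ... }  becomes:
	for i := range 3 {
		fmt.Println("index", i) // 0, 1, 2
	}

	// When the index is not needed at all:
	for range 3 {
		fmt.Println("tick")
	}
}
```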
```diff
@@ -48,11 +48,7 @@ func newPendingSeries(fq *persistentqueue.FastQueue, isVMRemoteWrite *atomic.Boo
 	ps.wr.significantFigures = significantFigures
 	ps.wr.roundDigits = roundDigits
 	ps.stopCh = make(chan struct{})
-	ps.periodicFlusherWG.Add(1)
-	go func() {
-		defer ps.periodicFlusherWG.Done()
-		ps.periodicFlusher()
-	}()
+	ps.periodicFlusherWG.Go(ps.periodicFlusher)
 	return &ps
 }
```

```diff
@@ -51,9 +51,9 @@ func testPushWriteRequest(t *testing.T, rowsCount, expectedBlockLenProm, expecte

 func newTestWriteRequest(seriesCount, labelsCount int) *prompb.WriteRequest {
 	var wr prompb.WriteRequest
-	for i := 0; i < seriesCount; i++ {
+	for i := range seriesCount {
 		var labels []prompb.Label
-		for j := 0; j < labelsCount; j++ {
+		for j := range labelsCount {
 			labels = append(labels, prompb.Label{
 				Name:  fmt.Sprintf("label_%d_%d", i, j),
 				Value: fmt.Sprintf("value_%d_%d", i, j),
```
```diff
@@ -9,19 +9,18 @@ import (
 	"sync"
 	"sync/atomic"

-	"github.com/VictoriaMetrics/metrics"
-	"gopkg.in/yaml.v2"
-
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
+	"go.yaml.in/yaml/v3"
+
+	"github.com/VictoriaMetrics/metrics"
 )

 var (
-	unparsedLabelsGlobal = flagutil.NewArrayString("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
-		"Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage")
+	unparsedLabelsGlobal    = flagutil.NewArrayString("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to all -remoteWrite.url.")
 	relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabeling configs, which are applied "+
 		"to all the metrics before sending them to -remoteWrite.url. See also -remoteWrite.urlRelabelConfig. "+
 		"The path can point either to local file or to http url. "+
@@ -39,7 +38,7 @@ var (
 	labelsGlobal []prompb.Label

 	remoteWriteRelabelConfigData    atomic.Pointer[[]byte]
-	remoteWriteURLRelabelConfigData atomic.Pointer[[]interface{}]
+	remoteWriteURLRelabelConfigData atomic.Pointer[[]any]

 	relabelConfigReloads      *metrics.Counter
 	relabelConfigReloadErrors *metrics.Counter
@@ -91,8 +90,8 @@ func WriteURLRelabelConfigData(w io.Writer) {
 		return
 	}
 	type urlRelabelCfg struct {
-		Url           string      `yaml:"url"`
-		RelabelConfig interface{} `yaml:"relabel_config"`
+		Url           string `yaml:"url"`
+		RelabelConfig any    `yaml:"relabel_config"`
 	}
 	var cs []urlRelabelCfg
 	for i, url := range *remoteWriteURLs {
@@ -139,12 +138,13 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
 		remoteWriteRelabelConfigData.Store(&rawCfg)
 		rcs.global = global
 	}

 	if len(*relabelConfigPaths) > len(*remoteWriteURLs) {
 		return nil, fmt.Errorf("too many -remoteWrite.urlRelabelConfig args: %d; it mustn't exceed the number of -remoteWrite.url args: %d",
 			len(*relabelConfigPaths), (len(*remoteWriteURLs)))
 	}
-	var urlRelabelCfgs []interface{}
+
+	var urlRelabelCfgs []any
 	rcs.perURL = make([]*promrelabel.ParsedConfigs, len(*remoteWriteURLs))
 	for i, path := range *relabelConfigPaths {
 		if len(path) == 0 {
@@ -157,7 +157,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
 		}
 		rcs.perURL[i] = prc

-		var parsedCfg interface{}
+		var parsedCfg any
 		_ = yaml.Unmarshal(rawCfg, &parsedCfg)
 		urlRelabelCfgs = append(urlRelabelCfgs, parsedCfg)
 	}
@@ -176,19 +176,9 @@ type relabelConfigs struct {
 	perURL []*promrelabel.ParsedConfigs
 }

 // isSet indicates whether (global or per-URL) command-line flags is set
 func (rcs *relabelConfigs) isSet() bool {
-	if rcs == nil {
-		return false
-	}
-	if rcs.global.Len() > 0 {
-		return true
-	}
-	for _, pc := range rcs.perURL {
-		if pc.Len() > 0 {
-			return true
-		}
-	}
-	return false
+	return *relabelConfigPathGlobal != "" || len(*relabelConfigPaths) > 0
 }

 // initLabelsGlobal must be called after parsing command-line flags.
```
```diff
@@ -59,7 +59,7 @@ var (
 		"See also -remoteWrite.maxDiskUsagePerURL and -remoteWrite.disableOnDiskQueue")
 	keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
 		"Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.")
-	queues = flag.Int("remoteWrite.queues", cgroup.AvailableCPUs()*2, "The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues "+
+	queues = flagutil.NewArrayInt("remoteWrite.queues", cgroup.AvailableCPUs()*2, "The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues "+
 		"isn't enough for sending high volume of collected data to remote storage. "+
 		"Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage")
 	showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
```
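Turning `-remoteWrite.queues` into a `flagutil.NewArrayInt` lets every `-remoteWrite.url` receive its own queue count, resolved per index via `GetOptionalArg` in the later hunks. Since `flagutil` is VictoriaMetrics' internal flag library, the helper below only imitates the documented per-index behavior (the last supplied value applies to any remaining indexes) and is a hypothetical sketch, not the library code:

```go
package main

import "fmt"

// getOptionalArg mimics the per-index lookup used for array flags:
// if fewer values were supplied than there are -remoteWrite.url args,
// the last value applies to the remaining indexes.
func getOptionalArg(values []int, argIdx, defaultValue int) int {
	if len(values) == 0 {
		return defaultValue
	}
	if argIdx < len(values) {
		return values[argIdx]
	}
	return values[len(values)-1]
}

func main() {
	queues := []int{16, 8} // e.g. -remoteWrite.queues=16 -remoteWrite.queues=8
	for i := range 3 {     // three -remoteWrite.url args
		fmt.Printf("url #%d -> %d queues\n", i+1, getOptionalArg(queues, i, 4))
	}
}
```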
```diff
@@ -176,13 +176,6 @@ func Init() {
 		})
 	}

-	if *queues > maxQueues {
-		*queues = maxQueues
-	}
-	if *queues <= 0 {
-		*queues = 1
-	}
-
 	if len(*shardByURLLabels) > 0 && len(*shardByURLIgnoreLabels) > 0 {
 		logger.Fatalf("-remoteWrite.shardByURL.labels and -remoteWrite.shardByURL.ignoreLabels cannot be set simultaneously; " +
 			"see https://docs.victoriametrics.com/victoriametrics/vmagent/#sharding-among-remote-storages")
@@ -215,9 +208,7 @@ func Init() {
 	dropDanglingQueues()

 	// Start config reloader.
-	configReloaderWG.Add(1)
-	go func() {
-		defer configReloaderWG.Done()
+	configReloaderWG.Go(func() {
 		for {
 			select {
 			case <-configReloaderStopCh:
@@ -227,7 +218,7 @@ func Init() {
 			reloadRelabelConfigs()
 			reloadStreamAggrConfigs()
 		}
-	}()
+	})
 }

 func dropDanglingQueues() {
@@ -267,17 +258,6 @@ func initRemoteWriteCtxs(urls []string) {
 	if len(urls) == 0 {
 		logger.Panicf("BUG: urls must be non-empty")
 	}
-
-	maxInmemoryBlocks := memory.Allowed() / len(urls) / *maxRowsPerBlock / 100
-	if maxInmemoryBlocks / *queues > 100 {
-		// There is no much sense in keeping higher number of blocks in memory,
-		// since this means that the producer outperforms consumer and the queue
-		// will continue growing. It is better storing the queue to file.
-		maxInmemoryBlocks = 100 * *queues
-	}
-	if maxInmemoryBlocks < 2 {
-		maxInmemoryBlocks = 2
-	}
 	rwctxs := make([]*remoteWriteCtx, len(urls))
 	rwctxIdx := make([]int, len(urls))
 	if retryMaxTime.String() != "" {
@@ -292,9 +272,12 @@ func initRemoteWriteCtxs(urls []string) {
 		if *showRemoteWriteURL {
 			sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
 		}
-		rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, maxInmemoryBlocks, sanitizedURL)
+		rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, sanitizedURL)
 		rwctxIdx[i] = i
 	}
+	_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_fs_type{path=%q, type=%q}`, *tmpDataPath, fs.GetFsName(*tmpDataPath)), func() float64 {
+		return 1
+	})

 	if *shardByURL {
 		consistentHashNodes := make([]string, 0, len(urls))
@@ -558,11 +541,9 @@ func tryPushMetadataToRemoteStorages(rwctxs []*remoteWriteCtx, mms []prompb.Metr
 	// Push metadata to remote storage systems in parallel to reduce
 	// the time needed for sending the data to multiple remote storage systems.
 	var wg sync.WaitGroup
-	wg.Add(len(rwctxs))
 	var anyPushFailed atomic.Bool
 	for _, rwctx := range rwctxs {
-		go func(rwctx *remoteWriteCtx) {
-			defer wg.Done()
+		wg.Go(func() {
 			if !rwctx.tryPushMetadataInternal(mms) {
 				rwctx.pushFailures.Inc()
 				if forceDropSamplesOnFailure {
@@ -571,7 +552,7 @@ func tryPushMetadataToRemoteStorages(rwctxs []*remoteWriteCtx, mms []prompb.Metr
 				}
 				anyPushFailed.Store(true)
 			}
-		}(rwctx)
+		})
 	}
 	wg.Wait()
 	return !anyPushFailed.Load()
@@ -603,15 +584,13 @@ func tryPushTimeSeriesToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prom
 	// Push tssBlock to remote storage systems in parallel to reduce
 	// the time needed for sending the data to multiple remote storage systems.
 	var wg sync.WaitGroup
-	wg.Add(len(rwctxs))
 	var anyPushFailed atomic.Bool
 	for _, rwctx := range rwctxs {
-		go func(rwctx *remoteWriteCtx) {
-			defer wg.Done()
+		wg.Go(func() {
 			if !rwctx.TryPushTimeSeries(tssBlock, forceDropSamplesOnFailure) {
 				anyPushFailed.Store(true)
 			}
-		}(rwctx)
+		})
 	}
 	wg.Wait()
 	return !anyPushFailed.Load()
@@ -633,13 +612,11 @@ func tryShardingTimeSeriesAmongRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock
 		if len(shard) == 0 {
 			continue
 		}
-		wg.Add(1)
-		go func(rwctx *remoteWriteCtx, tss []prompb.TimeSeries) {
-			defer wg.Done()
-			if !rwctx.TryPushTimeSeries(tss, forceDropSamplesOnFailure) {
+		wg.Go(func() {
+			if !rwctx.TryPushTimeSeries(shard, forceDropSamplesOnFailure) {
 				anyPushFailed.Store(true)
 			}
-		}(rwctx, shard)
+		})
 	}
 	wg.Wait()
 	return !anyPushFailed.Load()
```
```diff
@@ -848,7 +825,7 @@ type remoteWriteCtx struct {
 	rowsDroppedOnPushFailure *metrics.Counter
 }

-func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks int, sanitizedURL string) *remoteWriteCtx {
+func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, sanitizedURL string) *remoteWriteCtx {
 	// strip query params, otherwise changing params resets pq
 	pqURL := *remoteWriteURL
 	pqURL.RawQuery = ""
@@ -863,6 +840,23 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
 	}

 	isPQDisabled := disableOnDiskQueue.GetOptionalArg(argIdx)
+	queuesSize := queues.GetOptionalArg(argIdx)
+	if queuesSize > maxQueues {
+		queuesSize = maxQueues
+	} else if queuesSize <= 0 {
+		queuesSize = 1
+	}
+
+	maxInmemoryBlocks := memory.Allowed() / len(*remoteWriteURLs) / *maxRowsPerBlock / 100
+	if maxInmemoryBlocks/queuesSize > 100 {
+		// There is no much sense in keeping higher number of blocks in memory,
+		// since this means that the producer outperforms consumer and the queue
+		// will continue growing. It is better storing the queue to file.
+		maxInmemoryBlocks = 100 * queuesSize
+	}
+	if maxInmemoryBlocks < 2 {
+		maxInmemoryBlocks = 2
+	}
 	fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes, isPQDisabled)
 	_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{path=%q, url=%q}`, queuePath, sanitizedURL), func() float64 {
 		return float64(fq.GetPendingBytes())
@@ -880,16 +874,16 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
 	var c *client
 	switch remoteWriteURL.Scheme {
 	case "http", "https":
-		c = newHTTPClient(argIdx, remoteWriteURL.String(), sanitizedURL, fq, *queues)
+		c = newHTTPClient(argIdx, remoteWriteURL.String(), sanitizedURL, fq, queuesSize)
 	default:
 		logger.Fatalf("unsupported scheme: %s for remoteWriteURL: %s, want `http`, `https`", remoteWriteURL.Scheme, sanitizedURL)
 	}
-	c.init(argIdx, *queues, sanitizedURL)
+	c.init(argIdx, queuesSize, sanitizedURL)

 	// Initialize pss
 	sf := significantFigures.GetOptionalArg(argIdx)
 	rd := roundDigits.GetOptionalArg(argIdx)
-	pssLen := *queues
+	pssLen := queuesSize
 	if n := cgroup.AvailableCPUs(); pssLen > n {
 		// There is no sense in running more than availableCPUs concurrent pendingSeries,
 		// since every pendingSeries can saturate up to a single CPU.
@@ -1089,7 +1083,7 @@ func (rwctx *remoteWriteCtx) tryPushTimeSeriesInternal(tss []prompb.TimeSeries)
 	}()

 	if len(labelsGlobal) > 0 {
-		// Make a copy of tss before adding extra labels in order to prevent
+		// Make a copy of tss before adding extra labels to prevent
 		// from affecting time series for other remoteWrite.url configs.
 		rctx = getRelabelCtx()
 		v = tssPool.Get().(*[]prompb.TimeSeries)
```
```diff
@@ -28,12 +28,12 @@ func TestGetLabelsHash_Distribution(t *testing.T) {
 	itemsCount := 1_000 * bucketsCount
 	m := make([]int, bucketsCount)
 	var labels []prompb.Label
-	for i := 0; i < itemsCount; i++ {
+	for i := range itemsCount {
 		labels = append(labels[:0], prompb.Label{
 			Name:  "__name__",
 			Value: fmt.Sprintf("some_name_%d", i),
 		})
-		for j := 0; j < 10; j++ {
+		for j := range 10 {
 			labels = append(labels, prompb.Label{
 				Name:  fmt.Sprintf("label_%d", j),
 				Value: fmt.Sprintf("value_%d_%d", i, j),
```

```diff
@@ -248,7 +248,7 @@ func TestShardAmountRemoteWriteCtx(t *testing.T) {
 	seriesCount := 100000
 	// build 1000000 series
 	tssBlock := make([]prompb.TimeSeries, 0, seriesCount)
-	for i := 0; i < seriesCount; i++ {
+	for i := range seriesCount {
 		tssBlock = append(tssBlock, prompb.TimeSeries{
 			Labels: []prompb.Label{
 				{
@@ -269,7 +269,7 @@ func TestShardAmountRemoteWriteCtx(t *testing.T) {
 	// build active time series set
 	nodes := make([]string, 0, remoteWriteCount)
 	activeTimeSeriesByNodes := make([]map[string]struct{}, remoteWriteCount)
-	for i := 0; i < remoteWriteCount; i++ {
+	for i := range remoteWriteCount {
 		nodes = append(nodes, fmt.Sprintf("node%d", i))
 		activeTimeSeriesByNodes[i] = make(map[string]struct{})
 	}
```

```diff
@@ -41,7 +41,7 @@ func TestParseInputValue_Success(t *testing.T) {
 	if len(outputExpected) != len(output) {
 		t.Fatalf("unexpected output length; got %d; want %d", len(outputExpected), len(output))
 	}
-	for i := 0; i < len(outputExpected); i++ {
+	for i := range outputExpected {
 		if outputExpected[i].Omitted != output[i].Omitted {
 			t.Fatalf("unexpected Omitted field in the output\ngot\n%v\nwant\n%v", output, outputExpected)
 		}
```
```diff
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"maps"
 	"net"
 	"net/http"
 	"net/http/httptest"
@@ -12,6 +13,7 @@ import (
 	"os/signal"
 	"path/filepath"
 	"reflect"
+	"slices"
 	"sort"
 	"strings"
 	"syscall"
@@ -348,9 +350,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 	for k := range alertEvalTimesMap {
 		alertEvalTimes = append(alertEvalTimes, k)
 	}
-	sort.Slice(alertEvalTimes, func(i, j int) bool {
-		return alertEvalTimes[i] < alertEvalTimes[j]
-	})
+	slices.Sort(alertEvalTimes)

 	// sort group eval order according to the given "group_eval_order".
 	sort.Slice(testGroups, func(i, j int) bool {
@@ -361,12 +361,8 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 	var groups []*rule.Group
 	for _, group := range testGroups {
 		mergedExternalLabels := make(map[string]string)
-		for k, v := range tg.ExternalLabels {
-			mergedExternalLabels[k] = v
-		}
-		for k, v := range externalLabels {
-			mergedExternalLabels[k] = v
-		}
+		maps.Copy(mergedExternalLabels, tg.ExternalLabels)
+		maps.Copy(mergedExternalLabels, externalLabels)
 		ng := rule.NewGroup(group, q, time.Minute, mergedExternalLabels)
 		ng.Init()
 		groups = append(groups, ng)
```
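The hunks above swap hand-rolled loops for stdlib helpers added in Go 1.21: `slices.Sort` for ordering comparable elements and `maps.Copy` for merging maps, where later copies win on key collisions, which is exactly what the external-labels merge depends on:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	ts := []int64{30, 10, 20}
	slices.Sort(ts) // replaces sort.Slice with a manual less func
	fmt.Println(ts) // [10 20 30]

	merged := make(map[string]string)
	maps.Copy(merged, map[string]string{"env": "test", "dc": "eu"})
	maps.Copy(merged, map[string]string{"env": "prod"}) // later copy overrides "env"
	fmt.Println(merged["env"], merged["dc"])            // prod eu
}
```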
```diff
@@ -81,12 +81,9 @@ func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool)
 	if g.Interval.Duration() < 0 {
 		return fmt.Errorf("interval shouldn't be lower than 0")
 	}
-	if g.EvalOffset.Duration() < 0 {
-		return fmt.Errorf("eval_offset shouldn't be lower than 0")
-	}
-	// if `eval_offset` is set, interval won't use global evaluationInterval flag and must bigger than offset.
-	if g.EvalOffset.Duration() > g.Interval.Duration() {
-		return fmt.Errorf("eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
+	// if `eval_offset` is set, the group interval must be specified explicitly(instead of inherited from global evaluationInterval flag) and must bigger than offset.
+	if g.EvalOffset.Duration().Abs() > g.Interval.Duration() {
+		return fmt.Errorf("the abs value of eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
 	}
 	if g.EvalOffset != nil && g.EvalDelay != nil {
 		return fmt.Errorf("eval_offset cannot be used with eval_delay")
```

```diff
@@ -176,11 +176,17 @@ func TestGroupValidate_Failure(t *testing.T) {
 	}, false, "interval shouldn't be lower than 0")

 	f(&Group{
-		Name:       "wrong eval_offset",
+		Name:       "too big eval_offset",
 		Interval:   promutil.NewDuration(time.Minute),
 		EvalOffset: promutil.NewDuration(2 * time.Minute),
 	}, false, "eval_offset should be smaller than interval")

+	f(&Group{
+		Name:       "too big negative eval_offset",
+		Interval:   promutil.NewDuration(time.Minute),
+		EvalOffset: promutil.NewDuration(-2 * time.Minute),
+	}, false, "eval_offset should be smaller than interval")
+
 	limit := -1
 	f(&Group{
 		Name:  "wrong limit",
```
```diff
@@ -2,6 +2,7 @@ package config

 import (
 	"fmt"
+	"slices"
 	"strings"

 	"github.com/VictoriaMetrics/VictoriaLogs/lib/logstorage"
@@ -76,13 +77,12 @@ func (t *Type) ValidateExpr(expr string) error {
 		if err != nil {
 			return fmt.Errorf("bad LogsQL expr: %q, err: %w", expr, err)
 		}
-		fields, _ := q.GetStatsByFields()
-		for i := range fields {
-			// VictoriaLogs inserts `_time` field as a label in result when query with `stats by (_time:step)`,
-			// making the result meaningless and may lead to cardinality issues.
-			if fields[i] == "_time" {
-				return fmt.Errorf("bad LogsQL expr: %q, err: cannot contain time buckets stats pipe `stats by (_time:step)`", expr)
-			}
+		labels, err := q.GetStatsLabels()
+		if err != nil {
+			return fmt.Errorf("cannot obtain labels from LogsQL expr: %q, err: %w", expr, err)
+		}
+		if slices.Contains(labels, "_time") {
+			return fmt.Errorf("bad LogsQL expr: %q, err: cannot contain time buckets stats pipe `stats by (_time:step)`", expr)
 		}
 	default:
 		return fmt.Errorf("unknown datasource type=%q", t.Name)
```
```diff
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"net/url"
 	"strings"
@@ -91,9 +92,7 @@ func (c *Client) Clone() *Client {
 		ns.extraHeaders = make([]keyValue, len(c.extraHeaders))
 		copy(ns.extraHeaders, c.extraHeaders)
 	}
-	for k, v := range c.extraParams {
-		ns.extraParams[k] = v
-	}
+	maps.Copy(ns.extraParams, c.extraParams)

 	return ns
 }
```
```diff
@@ -34,7 +34,7 @@ type promResponse struct {
 	// Stats supported by VictoriaMetrics since v1.90
 	Stats struct {
 		SeriesFetched *string `json:"seriesFetched,omitempty"`
-	} `json:"stats,omitempty"`
+	} `json:"stats"`
 	// IsPartial supported by VictoriaMetrics
 	IsPartial *bool `json:"isPartial,omitempty"`
 }
```

```diff
@@ -134,7 +134,7 @@ func (ls Labels) String() string {
 func LabelCompare(a, b Labels) int {
 	l := min(len(b), len(a))

-	for i := 0; i < l; i++ {
+	for i := range l {
 		if a[i].Name != b[i].Name {
 			if a[i].Name < b[i].Name {
 				return -1
```
```diff
@@ -13,7 +13,7 @@ func BenchmarkPromInstantUnmarshal(b *testing.B) {

 	// BenchmarkParsePrometheusResponse/Instant_std+fastjson-10  1760  668959 ns/op  280147 B/op  5781 allocs/op
 	b.Run("Instant std+fastjson", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			var pi promInstant
 			err = pi.Unmarshal(data)
 			if err != nil {
```

```diff
@@ -81,9 +81,7 @@ absolute path to all .tpl files in root.
 	dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.")
 )

-var (
-	extURL *url.URL
-)
+var extURL *url.URL

 func main() {
 	// Write flags and help message to stdout, since it is easier to grep or pipe.
@@ -161,7 +159,7 @@ func main() {
 	ctx, cancel := context.WithCancel(context.Background())
 	manager, err := newManager(ctx)
 	if err != nil {
-		logger.Fatalf("failed to init: %s", err)
+		logger.Fatalf("failed to create manager: %s", err)
 	}
 	logger.Infof("reading rules configuration file from %q", strings.Join(*rulePath, ";"))
 	groupsCfg, err := config.Parse(*rulePath, validateTplFn, *validateExpressions)
```
```diff
@@ -65,13 +65,11 @@ func TestManagerUpdateConcurrent(t *testing.T) {
 	const workers = 500
 	const iterations = 10
-	wg := sync.WaitGroup{}
-	wg.Add(workers)
-	for i := 0; i < workers; i++ {
-		go func(n int) {
-			defer wg.Done()
+	var wg sync.WaitGroup
+	for n := range workers {
+		wg.Go(func() {
 			r := rand.New(rand.NewSource(int64(n)))
-			for i := 0; i < iterations; i++ {
+			for range iterations {
 				rnd := r.Intn(len(paths))
 				cfg, err := config.Parse([]string{paths[rnd]}, notifier.ValidateTemplates, true)
 				if err != nil { // update can fail and this is expected
@@ -79,7 +77,7 @@ func TestManagerUpdateConcurrent(t *testing.T) {
 				}
 				_ = m.update(context.Background(), cfg, false)
 			}
-		}(i)
+		})
 	}
 	wg.Wait()
 }
```

```diff
@@ -261,7 +259,7 @@ func compareGroups(t *testing.T, a, b *rule.Group) {
 	for i, r := range a.Rules {
 		got, want := r, b.Rules[i]
 		if a.CreateID() != b.CreateID() {
-			t.Fatalf("expected to have rule %q; got %q", want.ID(), got.ID())
+			t.Fatalf("expected to have rule %d; got %d", want.ID(), got.ID())
 		}
 		if err := rule.CompareRules(t, want, got); err != nil {
 			t.Fatalf("comparison error: %s", err)
```
```diff
@@ -80,14 +80,15 @@ func (as AlertState) String() string {

 // AlertTplData is used to execute templating
 type AlertTplData struct {
-	Type     string
-	Labels   map[string]string
-	Value    float64
-	Expr     string
-	AlertID  uint64
-	GroupID  uint64
-	ActiveAt time.Time
-	For      time.Duration
+	Type      string
+	Labels    map[string]string
+	Value     float64
+	Expr      string
+	AlertID   uint64
+	GroupID   uint64
+	ActiveAt  time.Time
+	For       time.Duration
+	IsPartial bool
 }

 var tplHeaders = []string{
@@ -101,6 +102,7 @@ var tplHeaders = []string{
 	"{{ $groupID := .GroupID }}",
 	"{{ $activeAt := .ActiveAt }}",
 	"{{ $for := .For }}",
+	"{{ $isPartial := .IsPartial }}",
 }

 // ExecTemplate executes the Alert template for given
```
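The new `IsPartial` field and `$isPartial` template header expose the partial-response flag to alert templates. A hypothetical sketch of a template body branching on it; the struct below only mirrors the relevant `AlertTplData` fields and is not the vmalert type itself:

```go
package main

import (
	"os"
	"text/template"
)

// tplData mimics the fields an alert template sees (assumption for illustration).
type tplData struct {
	Value     float64
	IsPartial bool
}

func main() {
	tpl := template.Must(template.New("annotation").Parse(
		`value={{ .Value }}{{ if .IsPartial }} (partial response: some storage nodes were unavailable){{ end }}`))
	// Prints: value=42 (partial response: some storage nodes were unavailable)
	_ = tpl.Execute(os.Stdout, tplData{Value: 42, IsPartial: true})
}
```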
```diff
@@ -14,7 +14,6 @@ import (
 	"github.com/VictoriaMetrics/metrics"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
@@ -172,11 +171,6 @@ const alertManagerPath = "/api/v2/alerts"
 func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg promauth.HTTPClientConfig,
 	relabelCfg *promrelabel.ParsedConfigs, timeout time.Duration,
 ) (*AlertManager, error) {
-
-	if err := httputil.CheckURL(alertManagerURL); err != nil {
-		return nil, fmt.Errorf("invalid alertmanager URL: %w", err)
-	}
-
 	tls := &promauth.TLSConfig{}
 	if authCfg.TLSConfig != nil {
 		tls = authCfg.TLSConfig
```
```diff
@@ -212,18 +212,16 @@ consul_sd_configs:

 	const workers = 500
 	const iterations = 10
-	wg := sync.WaitGroup{}
-	wg.Add(workers)
-	for i := 0; i < workers; i++ {
-		go func(n int) {
-			defer wg.Done()
+	var wg sync.WaitGroup
+	for n := range workers {
+		wg.Go(func() {
 			r := rand.New(rand.NewSource(int64(n)))
-			for i := 0; i < iterations; i++ {
+			for range iterations {
 				rnd := r.Intn(len(paths))
 				_ = cw.reload(paths[rnd]) // update can fail and this is expected
 				_ = cw.notifiers()
 			}
-		}(i)
+		})
 	}
 	wg.Wait()
 }
```
```diff
@@ -11,8 +11,8 @@ import (
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
@@ -229,6 +229,9 @@ func notifiersFromFlags(gen AlertURLGenerator) ([]Notifier, error) {
 			Headers: []string{headers.GetOptionalArg(i)},
 		}

+		if err := httputil.CheckURL(addr); err != nil {
+			return nil, fmt.Errorf("invalid notifier.url %q: %w", addr, err)
+		}
 		addr = strings.TrimSuffix(addr, "/")
 		am, err := NewAlertManager(addr+alertManagerPath, gen, authCfg, nil, sendTimeout.GetOptionalArg(i))
 		if err != nil {
@@ -266,7 +269,7 @@ func GetTargets() map[TargetType][]Target {
 	if getActiveNotifiers == nil {
 		return nil
 	}
-	var targets = make(map[TargetType][]Target)
+	targets := make(map[TargetType][]Target)
 	// use cached targets from configWatcher instead of getActiveNotifiers for the extra target labels
 	if cw != nil {
 		cw.targetsMu.RLock()
@@ -287,7 +290,7 @@ func GetTargets() map[TargetType][]Target {
 }

 // Send sends alerts to all active notifiers
-func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) *vmalertutil.ErrGroup {
+func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) chan error {
 	alertsToSend := make([]Alert, 0, len(alerts))
 	lblss := make([][]prompb.Label, 0, len(alerts))
 	// apply global relabel config first without modifying original alerts in alerts
@@ -300,17 +303,18 @@ func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string
 		lblss = append(lblss, lbls)
 	}

-	errGr := new(vmalertutil.ErrGroup)
 	wg := sync.WaitGroup{}
 	activeNotifiers := getActiveNotifiers()
+	errCh := make(chan error, len(activeNotifiers))
+	defer close(errCh)
 	for i := range activeNotifiers {
 		nt := activeNotifiers[i]
 		wg.Go(func() {
 			if err := nt.Send(ctx, alertsToSend, lblss, notifierHeaders); err != nil {
-				errGr.Add(fmt.Errorf("failed to send alerts to addr %q: %w", nt.Addr(), err))
+				errCh <- fmt.Errorf("failed to send alerts to addr %q: %w", nt.Addr(), err)
 			}
 		})
 	}
 	wg.Wait()
-	return errGr
+	return errCh
 }
```
```diff
@@ -55,9 +55,9 @@ func TestInitNegative(t *testing.T) {
 		*blackHole = oldBlackHole
 	}()

-	f := func(path, addr string, bh bool) {
+	f := func(path string, addr []string, bh bool) {
 		*configPath = path
-		*addrs = flagutil.ArrayString{addr}
+		*addrs = flagutil.ArrayString(addr)
 		*blackHole = bh
 		if err := Init(nil, ""); err == nil {
 			t.Fatalf("expected to get error; got nil instead")
@@ -65,9 +65,12 @@ func TestInitNegative(t *testing.T) {
 	}

 	// *configPath, *addrs and *blackhole are mutually exclusive
-	f("/dummy/path", "127.0.0.1", false)
-	f("/dummy/path", "", true)
-	f("", "127.0.0.1", true)
+	f("/dummy/path", []string{"127.0.0.1"}, false)
+	f("/dummy/path", []string{}, true)
+	f("", []string{"127.0.0.1"}, true)
+	// addr cannot be ""
+	f("", []string{""}, false)
+	f("", []string{"127.0.0.1", ""}, false)
 }

 func TestBlackHole(t *testing.T) {
```

```diff
@@ -202,7 +205,9 @@ alert_relabel_configs:
 		},
 	}
 	errG := Send(context.Background(), firingAlerts, nil)
-	if errG.Err() != nil {
-		t.Fatalf("unexpected error when sending alerts: %s", err)
+	for err := range errG {
+		if err != nil {
+			t.Errorf("unexpected error when sending alerts: %s", err)
+		}
 	}
 }
```
@@ -113,7 +113,7 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
|
||||
input: make(chan prompb.TimeSeries, cfg.MaxQueueSize),
|
||||
}
|
||||
|
||||
for i := 0; i < cc; i++ {
|
||||
for range cc {
|
||||
c.run(ctx)
|
||||
}
|
||||
return c, nil
|
||||
@@ -186,6 +186,11 @@ func (c *Client) run(ctx context.Context) {
|
||||
return
|
||||
case <-ticker.C:
|
||||
c.flush(ctx, wr)
|
||||
// drain the potential stale tick to avoid small or empty flushes after a slow flush.
|
||||
select {
|
||||
case <-ticker.C:
|
||||
default:
|
||||
}
|
||||
case ts, ok := <-c.input:
|
||||
if !ok {
|
||||
continue
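The five inserted lines drain a tick that may have accumulated while `flush` was running, so a slow flush is not immediately followed by a nearly empty one. The drain idiom in isolation (durations are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func slowFlush() { time.Sleep(250 * time.Millisecond) }

func main() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for i := 0; i < 3; i++ {
		<-ticker.C
		slowFlush() // outlives one tick interval, so another tick queues up

		// Non-blocking receive throws away the stale tick; without it the
		// next iteration would fire immediately after the slow flush.
		select {
		case <-ticker.C:
		default:
		}
		fmt.Println("flushed", i)
	}
}
```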
@@ -238,8 +243,10 @@ func (c *Client) flush(ctx context.Context, wr *prompb.WriteRequest) {
defer func() {
sendDuration.Add(time.Since(timeStart).Seconds())
}()

attempts := 0
L:
for attempts := 0; ; attempts++ {
for {
err := c.send(ctx, b)
if err != nil && (errors.Is(err, io.EOF) || netutil.IsTrivialNetworkError(err)) {
// Something in the middle between client and destination might be closing
@@ -281,6 +288,7 @@ L:
time.Sleep(retryInterval)
retryInterval *= 2

attempts++
}

rwErrors.Inc()
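These hunks move the attempt counter out of the `for` header and increment it explicitly next to the backoff. A minimal sketch of the resulting retry shape; the `maxAttempts` bound is invented here so the example terminates:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// sendWithRetry retries with exponential backoff, counting attempts
// explicitly so that early exits from the loop don't lose the counter.
func sendWithRetry(send func() error, maxAttempts int) error {
	retryInterval := 100 * time.Millisecond
	attempts := 0
	for {
		err := send()
		if err == nil {
			return nil
		}
		attempts++
		if attempts >= maxAttempts {
			return fmt.Errorf("giving up after %d attempts: %w", attempts, err)
		}
		time.Sleep(retryInterval)
		retryInterval *= 2 // exponential backoff between attempts
	}
}

func main() {
	calls := 0
	err := sendWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	}, 5)
	fmt.Println("err:", err, "calls:", calls)
}
```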
@@ -44,7 +44,7 @@ func TestClient_Push(t *testing.T) {

r := rand.New(rand.NewSource(1))
const rowsN = int(1e4)
for i := 0; i < rowsN; i++ {
for range rowsN {
s := prompb.TimeSeries{
Samples: []prompb.Sample{{
Value: r.Float64(),
@@ -102,7 +102,7 @@ func TestClient_run_maxBatchSizeDuringShutdown(t *testing.T) {
}

// push time series to the client.
for i := 0; i < pushCnt; i++ {
for range pushCnt {
if err = rwClient.Push(prompb.TimeSeries{}); err != nil {
t.Fatalf("cannot time series to the client: %s", err)
}

@@ -22,7 +22,7 @@ func TestDebugClient_Push(t *testing.T) {

const rowsN = 100
var sent int
for i := 0; i < rowsN; i++ {
for i := range rowsN {
s := prompb.TimeSeries{
Samples: []prompb.Sample{{
Value: float64(i),
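These hunks, like many below, convert counted loops to Go 1.22's range-over-int form. Both spellings side by side:

```go
package main

import "fmt"

func main() {
	const n = 3

	// Since Go 1.22, ranging over an integer iterates 0..n-1.
	for i := range n {
		fmt.Println("index:", i)
	}

	// The index may be dropped entirely when only repetition matters.
	for range n {
		fmt.Println("repeat")
	}
}
```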
@@ -346,6 +346,8 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
ls.processed[l.Name] = l.Value
}

// labels only support limited templating variables,
// including `labels`, `value` and `expr`, to avoid breaking alert states or causing cardinality issue with results
extraLabels, err := notifier.ExecTemplate(qFn, ar.Labels, notifier.AlertTplData{
Labels: ls.origin,
Value: m.Values[0],
@@ -387,11 +389,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
return nil, err
}
alertID := hash(ls.processed)
as, err := ar.expandAnnotationTemplates(s, qFn, time.Time{}, ls)
if err != nil {
return nil, err
}
a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert
a := ar.newAlert(s, time.Time{}, ls.processed, nil) // initial alert

prevT := time.Time{}
for i := range s.Values {
@@ -407,8 +405,6 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
// reset to Pending if there are gaps > EvalInterval between DPs
a.State = notifier.StatePending
a.ActiveAt = at
// re-template the annotations as active timestamp is changed
a.Annotations, _ = ar.expandAnnotationTemplates(s, qFn, at, ls)
a.Start = time.Time{}
} else if at.Sub(a.ActiveAt) >= ar.For && a.State != notifier.StateFiring {
a.State = notifier.StateFiring
@@ -463,7 +459,8 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
}

ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))
isPartial := isPartialResponse(res)
ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartial)
qFn := func(query string) ([]datasource.Metric, error) {
res, _, err := ar.q.Query(ctx, query, ts)
return res.Data, err
@@ -489,7 +486,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
at = a.ActiveAt
}
}
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls)
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls, isPartial)
if err != nil {
// only set error in current state, but do not break alert processing
curState.Err = err
@@ -607,16 +604,17 @@ func (ar *AlertingRule) expandLabelTemplates(m datasource.Metric, qFn templates.
return ls, nil
}

func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet) (map[string]string, error) {
func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet, isPartial bool) (map[string]string, error) {
tplData := notifier.AlertTplData{
Value: m.Values[0],
Type: ar.Type.String(),
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: activeAt,
For: ar.For,
Value: m.Values[0],
Type: ar.Type.String(),
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: activeAt,
For: ar.For,
IsPartial: isPartial,
}
as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
if err != nil {
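The new `IsPartial` field is what the template tests further down read as `$isPartial`. A toy `text/template` run showing the conditional-warning idea; `tplData` here is an illustrative stand-in, not vmalert's actual `notifier.AlertTplData`:

```go
package main

import (
	"os"
	"text/template"
)

// tplData loosely mirrors the data handed to annotation templates.
type tplData struct {
	Value     float64
	IsPartial bool
}

func main() {
	const summary = `Too high connection number ({{ .Value }}).` +
		`{{ if .IsPartial }} WARNING: partial response - verify manually.{{ end }}`
	tpl := template.Must(template.New("summary").Parse(summary))
	// With IsPartial set, the warning suffix is rendered.
	_ = tpl.Execute(os.Stdout, tplData{Value: 42, IsPartial: true})
}
```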
@@ -820,7 +818,9 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
expr := fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
alertForStateMetricName, nameStr, labelsFilter, int(lookback.Seconds()))

res, _, err := q.Query(ctx, expr, ts)
// query ALERTS_FOR_STATE at `ts-1s` instead `ts` to avoid retrieving data written in the current run,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10335
res, _, err := q.Query(ctx, expr, ts.Add(-1*time.Second))
if err != nil {
return fmt.Errorf("failed to execute restore query %q: %w ", expr, err)
}
app/vmalert/rule/alerting_synctest_test.go (new file, 106 lines)
@@ -0,0 +1,106 @@
//go:build synctest

package rule

import (
"context"
"strings"
"testing"
"testing/synctest"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
)

// TestAlertingRule_ActiveAtPreservedInAnnotations ensures that the fix for
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543 is preserved
// while allowing query templates in labels (https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9783)
func TestAlertingRule_ActiveAtPreservedInAnnotations(t *testing.T) {
// wrap into synctest because of time manipulations
synctest.Test(t, func(t *testing.T) {
fq := &datasource.FakeQuerier{}

ar := &AlertingRule{
Name: "TestActiveAtPreservation",
Labels: map[string]string{
"test_query_in_label": `{{ "static_value" }}`,
},
Annotations: map[string]string{
"description": "Alert active since {{ $activeAt }}",
},
alerts: make(map[uint64]*notifier.Alert),
q: fq,
state: &ruleState{
entries: make([]StateEntry, 10),
},
}

// Mock query result - return empty result to make suppress_for_mass_alert = false
// (no need to add anything to fq for empty result)

// Add a metric that should trigger the alert
fq.Add(metricWithValueAndLabels(t, 1, "instance", "server1"))

// First execution - creates new alert
ts1 := time.Now()
_, err := ar.exec(context.TODO(), ts1, 0)
if err != nil {
t.Fatalf("unexpected error on first exec: %s", err)
}

if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}

firstAlert := ar.GetAlerts()[0]
// Verify first execution: activeAt should be ts1 and annotation should reflect it
if !firstAlert.ActiveAt.Equal(ts1) {
t.Fatalf("expected activeAt to be %v, got %v", ts1, firstAlert.ActiveAt)
}

// Extract time from annotation (format will be like "Alert active since 2025-09-30 08:55:13.638551611 -0400 EDT m=+0.002928464")
expectedTimeStr := ts1.Format("2006-01-02 15:04:05")
if !strings.Contains(firstAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("first exec annotation should contain time %s, got: %s", expectedTimeStr, firstAlert.Annotations["description"])
}

// Second execution - should preserve activeAt in annotation

// Ensure different timestamp with different seconds
// sleep is non-blocking thanks to synctest
time.Sleep(2 * time.Second)
ts2 := time.Now()
_, err = ar.exec(context.TODO(), ts2, 0)
if err != nil {
t.Fatalf("unexpected error on second exec: %s", err)
}

// Get the alert again (should be the same alert)
if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}
secondAlert := ar.GetAlerts()[0]

// Critical test: activeAt should still be ts1, not ts2
if !secondAlert.ActiveAt.Equal(ts1) {
t.Fatalf("activeAt should be preserved as %v, but got %v", ts1, secondAlert.ActiveAt)
}

// Critical test: annotation should still contain ts1 time, not ts2
if !strings.Contains(secondAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("second exec annotation should still contain original time %s, got: %s", expectedTimeStr, secondAlert.Annotations["description"])
}

// Additional verification: annotation should NOT contain ts2 time
ts2TimeStr := ts2.Format("2006-01-02 15:04:05")
if strings.Contains(secondAlert.Annotations["description"], ts2TimeStr) {
t.Fatalf("annotation should NOT contain new eval time %s, got: %s", ts2TimeStr, secondAlert.Annotations["description"])
}

// Verify query template in labels still works (this would fail if query templates were broken)
if firstAlert.Labels["test_query_in_label"] != "static_value" {
t.Fatalf("expected test_query_in_label=static_value, got %s", firstAlert.Labels["test_query_in_label"])
}
})
}
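The new test leans on `testing/synctest` (stable in Go 1.25; this repository additionally gates the file behind a `synctest` build tag). Inside a synctest bubble goroutines observe a fake clock, so the `time.Sleep(2 * time.Second)` above costs no real time. A minimal illustration:

```go
//go:build synctest

package example

import (
	"testing"
	"testing/synctest"
	"time"
)

// Inside the bubble, time.Sleep advances virtual time as soon as every
// goroutine in the bubble is idle, so the test is fast and deterministic.
func TestVirtualClock(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now()
		time.Sleep(2 * time.Second) // returns immediately in wall-clock terms
		if elapsed := time.Since(start); elapsed != 2*time.Second {
			t.Fatalf("expected exactly 2s of virtual time, got %s", elapsed)
		}
	})
}
```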
@@ -10,7 +10,6 @@ import (
"strings"
"sync"
"testing"
"testing/synctest"
"time"

"github.com/VictoriaMetrics/metrics"
@@ -664,7 +663,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-pending",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-pending"},
Annotations: map[string]string{"activeAt": "5000"},
Annotations: map[string]string{},
State: notifier.StatePending,
ActiveAt: time.Unix(5, 0),
Value: 1,
@@ -684,7 +683,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-firing",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-firing"},
Annotations: map[string]string{"activeAt": "1000"},
Annotations: map[string]string{},
State: notifier.StateFiring,
ActiveAt: time.Unix(1, 0),
Start: time.Unix(5, 0),
@@ -705,7 +704,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-hold-pending",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-hold-pending"},
Annotations: map[string]string{"activeAt": "5000"},
Annotations: map[string]string{},
State: notifier.StatePending,
ActiveAt: time.Unix(5, 0),
Value: 1,
@@ -1120,7 +1119,7 @@ func TestAlertingRuleLimit_Success(t *testing.T) {
}

func TestAlertingRule_Template(t *testing.T) {
f := func(rule *AlertingRule, metrics []datasource.Metric, alertsExpected map[uint64]*notifier.Alert) {
f := func(rule *AlertingRule, metrics []datasource.Metric, isResponsePartial bool, alertsExpected map[uint64]*notifier.Alert) {
t.Helper()

fakeGroup := Group{
@@ -1133,6 +1132,7 @@ func TestAlertingRule_Template(t *testing.T) {
entries: make([]StateEntry, 10),
}
fq.Add(metrics...)
fq.SetPartialResponse(isResponsePartial)

if _, err := rule.exec(context.TODO(), time.Now(), 0); err != nil {
t.Fatalf("unexpected error: %s", err)
@@ -1163,7 +1163,7 @@ func TestAlertingRule_Template(t *testing.T) {
}, []datasource.Metric{
metricWithValueAndLabels(t, 1, "instance", "foo"),
metricWithValueAndLabels(t, 1, "instance", "bar"),
}, map[uint64]*notifier.Alert{
}, false, map[uint64]*notifier.Alert{
hash(map[string]string{alertNameLabel: "common", "region": "east", "instance": "foo"}): {
Annotations: map[string]string{
"summary": `common: Too high connection number for "foo"`,
@@ -1192,14 +1192,14 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "{{ $labels.instance }}",
},
Annotations: map[string]string{
"summary": `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}"`,
"summary": `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}".{{ if $isPartial }} WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.{{ end }}`,
"description": `{{ $labels.alertname}}: It is {{ $value }} connections for "{{ $labels.instance }}"`,
},
alerts: make(map[uint64]*notifier.Alert),
}, []datasource.Metric{
metricWithValueAndLabels(t, 2, "__name__", "first", "instance", "foo", alertNameLabel, "override"),
metricWithValueAndLabels(t, 10, "__name__", "second", "instance", "bar", alertNameLabel, "override"),
}, map[uint64]*notifier.Alert{
}, false, map[uint64]*notifier.Alert{
hash(map[string]string{alertNameLabel: "override label", "exported_alertname": "override", "instance": "foo"}): {
Labels: map[string]string{
alertNameLabel: "override label",
@@ -1207,7 +1207,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "foo",
},
Annotations: map[string]string{
"summary": `first: Too high connection number for "foo"`,
"summary": `first: Too high connection number for "foo".`,
"description": `override: It is 2 connections for "foo"`,
},
},
@@ -1218,7 +1218,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "bar",
},
Annotations: map[string]string{
"summary": `second: Too high connection number for "bar"`,
"summary": `second: Too high connection number for "bar".`,
"description": `override: It is 10 connections for "bar"`,
},
},
@@ -1231,7 +1231,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "{{ $labels.instance }}",
},
Annotations: map[string]string{
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}`,
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}.{{ if $isPartial }} WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.{{ end }}`,
},
alerts: make(map[uint64]*notifier.Alert),
}, []datasource.Metric{
@@ -1239,7 +1239,7 @@ func TestAlertingRule_Template(t *testing.T) {
alertNameLabel, "originAlertname",
alertGroupNameLabel, "originGroupname",
"instance", "foo"),
}, map[uint64]*notifier.Alert{
}, true, map[uint64]*notifier.Alert{
hash(map[string]string{
alertNameLabel: "OriginLabels",
"exported_alertname": "originAlertname",
@@ -1255,7 +1255,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "foo",
},
Annotations: map[string]string{
"summary": `Alert "originAlertname(originGroupname)" for instance foo`,
"summary": `Alert "originAlertname(originGroupname)" for instance foo. WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.`,
},
},
})
@@ -1385,7 +1385,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert",
"alertname": "ConfigurationReloadFailure",
"alertgroup": "vmalert",
"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}

expectedProcessedLabels := map[string]string{
@@ -1395,7 +1395,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"exported_alertname": "ConfigurationReloadFailure",
"group": "vmalert",
"alertgroup": "vmalert",
"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}

ls, err := ar.toLabels(metric, nil)
@@ -1478,95 +1478,3 @@ func TestAlertingRule_QueryTemplateInLabels(t *testing.T) {
t.Fatalf("expected 'suppress_for_mass_alert' label to be 'true' or 'false', got '%s'", suppressLabel)
}
}

// TestAlertingRule_ActiveAtPreservedInAnnotations ensures that the fix for
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543 is preserved
// while allowing query templates in labels (https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9783)
func TestAlertingRule_ActiveAtPreservedInAnnotations(t *testing.T) {
// wrap into synctest because of time manipulations
synctest.Test(t, func(t *testing.T) {
fq := &datasource.FakeQuerier{}

ar := &AlertingRule{
Name: "TestActiveAtPreservation",
Labels: map[string]string{
"test_query_in_label": `{{ "static_value" }}`,
},
Annotations: map[string]string{
"description": "Alert active since {{ $activeAt }}",
},
alerts: make(map[uint64]*notifier.Alert),
q: fq,
state: &ruleState{
entries: make([]StateEntry, 10),
},
}

// Mock query result - return empty result to make suppress_for_mass_alert = false
// (no need to add anything to fq for empty result)

// Add a metric that should trigger the alert
fq.Add(metricWithValueAndLabels(t, 1, "instance", "server1"))

// First execution - creates new alert
ts1 := time.Now()
_, err := ar.exec(context.TODO(), ts1, 0)
if err != nil {
t.Fatalf("unexpected error on first exec: %s", err)
}

if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}

firstAlert := ar.GetAlerts()[0]
// Verify first execution: activeAt should be ts1 and annotation should reflect it
if !firstAlert.ActiveAt.Equal(ts1) {
t.Fatalf("expected activeAt to be %v, got %v", ts1, firstAlert.ActiveAt)
}

// Extract time from annotation (format will be like "Alert active since 2025-09-30 08:55:13.638551611 -0400 EDT m=+0.002928464")
expectedTimeStr := ts1.Format("2006-01-02 15:04:05")
if !strings.Contains(firstAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("first exec annotation should contain time %s, got: %s", expectedTimeStr, firstAlert.Annotations["description"])
}

// Second execution - should preserve activeAt in annotation

// Ensure different timestamp with different seconds
// sleep is non-blocking thanks to synctest
time.Sleep(2 * time.Second)
ts2 := time.Now()
_, err = ar.exec(context.TODO(), ts2, 0)
if err != nil {
t.Fatalf("unexpected error on second exec: %s", err)
}

// Get the alert again (should be the same alert)
if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}
secondAlert := ar.GetAlerts()[0]

// Critical test: activeAt should still be ts1, not ts2
if !secondAlert.ActiveAt.Equal(ts1) {
t.Fatalf("activeAt should be preserved as %v, but got %v", ts1, secondAlert.ActiveAt)
}

// Critical test: annotation should still contain ts1 time, not ts2
if !strings.Contains(secondAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("second exec annotation should still contain original time %s, got: %s", expectedTimeStr, secondAlert.Annotations["description"])
}

// Additional verification: annotation should NOT contain ts2 time
ts2TimeStr := ts2.Format("2006-01-02 15:04:05")
if strings.Contains(secondAlert.Annotations["description"], ts2TimeStr) {
t.Fatalf("annotation should NOT contain new eval time %s, got: %s", ts2TimeStr, secondAlert.Annotations["description"])
}

// Verify query template in labels still works (this would fail if query templates were broken)
if firstAlert.Labels["test_query_in_label"] != "static_value" {
t.Fatalf("expected test_query_in_label=static_value, got %s", firstAlert.Labels["test_query_in_label"])
}
})
}
@@ -6,6 +6,7 @@ import (
"flag"
"fmt"
"hash/fnv"
"maps"
"net/url"
"sync"
"time"
@@ -18,6 +19,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
@@ -29,8 +31,8 @@ var (
"0 means no limit.")
ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param.")
resendDelay = flag.Duration("rule.resendDelay", 0, "MiniMum amount of time to wait before resending an alert to notifier.")
maxResolveDuration = flag.Duration("rule.maxResolveDuration", 0, "Limits the maxiMum duration for automatic alert expiration, "+
resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier.")
maxResolveDuration = flag.Duration("rule.maxResolveDuration", 0, "Limits the maximum duration for automatic alert expiration, "+
"which by default is 4 times evaluationInterval of the parent group")
evalDelay = flag.Duration("rule.evalDelay", 30*time.Second, "Adjustment of the 'time' parameter for rule evaluation requests to compensate intentional data delay from the datasource. "+
"Normally, should be equal to '-search.latencyOffset' (cmd-line flag configured for VictoriaMetrics single-node or vmselect). "+
@@ -96,9 +98,7 @@ type groupMetrics struct {
// set2 has priority over set1.
func mergeLabels(groupName, ruleName string, set1, set2 map[string]string) map[string]string {
r := map[string]string{}
for k, v := range set1 {
r[k] = v
}
maps.Copy(r, set1)
for k, v := range set2 {
if prevV, ok := r[k]; ok {
logger.Infof("label %q=%q for rule %q.%q overwritten with external label %q=%q",
@@ -374,7 +374,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc

g.infof("started")

eval := func(ctx context.Context, ts time.Time) {
eval := func(ctx context.Context, ts time.Time) time.Time {
g.metrics.iterationTotal.Inc()

start := time.Now()
@@ -382,7 +382,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
if len(g.Rules) < 1 {
g.metrics.iterationDuration.UpdateDuration(start)
g.LastEvaluation = start
return
return ts
}

resolveDuration := getResolveDuration(g.Interval, *resendDelay, *maxResolveDuration)
@@ -396,6 +396,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
}
g.metrics.iterationDuration.UpdateDuration(start)
g.LastEvaluation = start
return ts
}

evalCtx, cancel := context.WithCancel(ctx)
@@ -404,7 +405,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
g.mu.Unlock()
defer g.evalCancel()

eval(evalCtx, evalTS)
realEvalTS := eval(evalCtx, evalTS)

t := time.NewTicker(g.Interval)
defer t.Stop()
@@ -412,7 +413,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
// restore the rules state after the first evaluation
// so only active alerts can be restored.
if rr != nil {
err := g.restore(ctx, rr, evalTS, *remoteReadLookBack)
err := g.restore(ctx, rr, realEvalTS, *remoteReadLookBack)
if err != nil {
logger.Errorf("error while restoring ruleState for group %q: %s", g.Name, err)
}
@@ -483,8 +484,15 @@ func (g *Group) UpdateWith(newGroup *Group) {
// delayBeforeStart calculates delay based on Group ID, so all groups will start at different moments of time.
func (g *Group) delayBeforeStart(ts time.Time, maxDelay time.Duration) time.Duration {
if g.EvalOffset != nil {
offset := *g.EvalOffset
// adjust the offset for negative evalOffset, the rule is:
// `eval_offset: -x` is equivalent to `eval_offset: y` for `interval: x+y`.
// For example, `eval_offset: -6m` is equivalent to `eval_offset: 4m` for `interval: 10m`.
if offset < 0 {
offset += g.Interval
}
// if offset is specified, ignore the maxDelay and return a duration aligned with offset
currentOffsetPoint := ts.Truncate(g.Interval).Add(*g.EvalOffset)
currentOffsetPoint := ts.Truncate(g.Interval).Add(offset)
if currentOffsetPoint.Before(ts) {
// wait until the next offset point
return currentOffsetPoint.Add(g.Interval).Sub(ts)
@@ -493,11 +501,8 @@ func (g *Group) delayBeforeStart(ts time.Time, maxDelay time.Duration) time.Dura
}

// otherwise, return a random duration between [0..min(interval, maxDelay)] based on group ID
interval := g.Interval
if interval > maxDelay {
// artificially limit interval, so groups with big intervals could start sooner.
interval = maxDelay
}
// artificially limit interval, so groups with big intervals could start sooner.
interval := min(g.Interval, maxDelay)
var randSleep time.Duration
randSleep = time.Duration(float64(interval) * (float64(g.GetID()) / (1 << 64)))
sleepOffset := time.Duration(ts.UnixNano() % interval.Nanoseconds())
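The negative-offset rule in the new comment (`eval_offset: -x` equals `eval_offset: interval-x`) can be checked numerically; `normalizeOffset` below is an illustrative extraction, not a repository function:

```go
package main

import (
	"fmt"
	"time"
)

// normalizeOffset folds a negative eval_offset into the interval, so
// eval_offset: -2m with interval: 5m behaves like eval_offset: 3m.
func normalizeOffset(offset, interval time.Duration) time.Duration {
	if offset < 0 {
		offset += interval
	}
	return offset
}

func main() {
	fmt.Println(normalizeOffset(-2*time.Minute, 5*time.Minute))  // 3m0s
	fmt.Println(normalizeOffset(-6*time.Minute, 10*time.Minute)) // 4m0s
}
```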
@@ -755,6 +760,7 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
return fmt.Errorf("rule %q: failed to execute: %w", r, err)
}

var errG vmalertutil.ErrGroup
if e.Rw != nil {
pushToRW := func(tss []prompb.TimeSeries) error {
var lastErr error
@@ -766,20 +772,26 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
return lastErr
}
if err := pushToRW(tss); err != nil {
return err
errG.Add(err)
}
}

ar, ok := r.(*AlertingRule)
if !ok {
return nil
return errG.Err()
}

alerts := ar.alertsToSend(resolveDuration, *resendDelay)
if len(alerts) < 1 {
return nil
return errG.Err()
}

errGr := notifier.Send(ctx, alerts, e.notifierHeaders)
return errGr.Err()
notifierErr := notifier.Send(ctx, alerts, e.notifierHeaders)
for err := range notifierErr {
if err != nil {
errG.Add(fmt.Errorf("rule %q: notifier failure: %w", r, err))
}
}

return errG.Err()
}
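Rather than returning on the first remote-write failure, `exec` now collects errors and still pushes alerts to the notifiers. A stand-in `errGroup` built on `errors.Join` shows the accumulate-then-report shape (vmalert's real type is `vmalertutil.ErrGroup`; this one is a simplification):

```go
package main

import (
	"errors"
	"fmt"
)

// errGroup accumulates errors from independent steps so that one failing
// step does not prevent the following steps from running.
type errGroup struct{ errs []error }

func (g *errGroup) Add(err error) { g.errs = append(g.errs, err) }

func (g *errGroup) Err() error {
	if len(g.errs) == 0 {
		return nil
	}
	return errors.Join(g.errs...)
}

func main() {
	var g errGroup
	if err := errors.New("remote write failed"); err != nil {
		g.Add(err) // keep going: alerts should still reach the notifiers
	}
	if err := errors.New("notifier failure"); err != nil {
		g.Add(err)
	}
	fmt.Println(g.Err())
}
```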
@@ -405,7 +405,8 @@ func TestGroupStart(t *testing.T) {

var cur uint64
prev := g.metrics.iterationTotal.Get()
for i := 0; ; i++ {
i := 0
for {
if i > 40 {
t.Fatalf("group wasn't able to perform %d evaluations during %d eval intervals", n, i)
}
@@ -414,6 +415,7 @@ func TestGroupStart(t *testing.T) {
return
}
time.Sleep(interval)
i++
}
}

@@ -604,6 +606,15 @@ func TestGroupStartDelay(t *testing.T) {
f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")

// test group with negative offset -2min, which is equivalent to 3min offset for 5min interval
offset = -2 * time.Minute
g.EvalOffset = &offset

f("2023-01-01T00:00:15.000+00:00", "2023-01-01T00:03:00.000+00:00")
f("2023-01-01T00:01:00.000+00:00", "2023-01-01T00:03:00.000+00:00")
f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")

maxDelay = time.Minute * 1
g.EvalOffset = nil

@@ -121,7 +121,7 @@ func (s *ruleState) add(e StateEntry) {
func replayRule(r Rule, start, end time.Time, rw remotewrite.RWClient, replayRuleRetryAttempts int) (int, error) {
var err error
var tss []prompb.TimeSeries
for i := 0; i < replayRuleRetryAttempts; i++ {
for i := range replayRuleRetryAttempts {
tss, err = r.execRange(context.Background(), start, end)
if err == nil {
break

@@ -40,7 +40,7 @@ func TestRule_state(t *testing.T) {
}

var last time.Time
for i := 0; i < stateEntriesN*2; i++ {
for range stateEntriesN * 2 {
last = time.Now()
r.state.add(StateEntry{At: last})
}
@@ -65,17 +65,15 @@ func TestRule_stateConcurrent(_ *testing.T) {
r := &AlertingRule{state: &ruleState{entries: make([]StateEntry, 20)}}
const workers = 50
const iterations = 100
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer wg.Done()
for i := 0; i < iterations; i++ {
var wg sync.WaitGroup
for range workers {
wg.Go(func() {
for range iterations {
r.state.add(StateEntry{At: time.Now()})
r.state.getAll()
r.state.getLast()
}
}()
})
}
wg.Wait()
}
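`sync.WaitGroup.Go` (added in Go 1.25) absorbs the `Add(1)` / `go func()` / `defer wg.Done()` triple spelled out in the removed lines:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// wg.Go increments the counter, runs the function in a new goroutine,
	// and marks it done when the function returns.
	for i := range 3 {
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}
	wg.Wait()
}
```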
@@ -19,13 +19,13 @@ func CompareRules(t *testing.T, a, b Rule) error {
case *AlertingRule:
br, ok := b.(*AlertingRule)
if !ok {
return fmt.Errorf("rule %q supposed to be of type AlertingRule", b.ID())
return fmt.Errorf("rule %d supposed to be of type AlertingRule", b.ID())
}
return compareAlertingRules(t, v, br)
case *RecordingRule:
br, ok := b.(*RecordingRule)
if !ok {
return fmt.Errorf("rule %q supposed to be of type RecordingRule", b.ID())
return fmt.Errorf("rule %d supposed to be of type RecordingRule", b.ID())
}
return compareRecordingRules(t, v, br)
default:

@@ -45,7 +45,7 @@ func (eg *ErrGroup) Error() string {
return ""
}
var b strings.Builder
fmt.Fprintf(&b, "errors(%d): ", len(eg.errs))
fmt.Fprintf(&b, "errors(%d): \n", len(eg.errs))
for i, err := range eg.errs {
b.WriteString(err.Error())
if i != len(eg.errs)-1 {

@@ -30,8 +30,8 @@ func TestErrGroup(t *testing.T) {
}

f(nil, "")
f([]error{errors.New("timeout")}, "errors(1): timeout")
f([]error{errors.New("timeout"), errors.New("deadline")}, "errors(2): timeout\ndeadline")
f([]error{errors.New("timeout")}, "errors(1): \ntimeout")
f([]error{errors.New("timeout"), errors.New("deadline")}, "errors(2): \ntimeout\ndeadline")
}

// TestErrGroupConcurrent supposed to test concurrent
@@ -42,7 +42,7 @@ func TestErrGroupConcurrent(_ *testing.T) {

const writersN = 4
payload := make(chan error, writersN)
for i := 0; i < writersN; i++ {
for range writersN {
go func() {
for err := range payload {
eg.Add(err)
@@ -51,7 +51,7 @@ func TestErrGroupConcurrent(_ *testing.T) {
}

const iterations = 500
for i := 0; i < iterations; i++ {
for i := range iterations {
payload <- fmt.Errorf("error %d", i)
if i%10 == 0 {
_ = eg.Err()

@@ -9,6 +9,7 @@
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/rule"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
) %}

{% func Controls(prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) %}
@@ -78,6 +79,8 @@
{% func Welcome(r *http.Request) %}
{%= tpl.Header(r, navItems, "vmalert", getLastConfigError()) %}
<p>
Version {%s buildinfo.Version %} <br>

API:<br>
{% for _, p := range apiLinks %}
{%code p, doc := p[0], p[1] %}
(File diff suppressed because it is too large)
@@ -13,6 +13,7 @@ import (
"net/url"
"os"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@@ -28,6 +29,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
@@ -65,10 +67,11 @@ type AuthConfig struct {
type UserInfo struct {
Name string `yaml:"name,omitempty"`

BearerToken string `yaml:"bearer_token,omitempty"`
AuthToken string `yaml:"auth_token,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`
BearerToken string `yaml:"bearer_token,omitempty"`
JWT *JWTConfig `yaml:"jwt,omitempty"`
AuthToken string `yaml:"auth_token,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`

URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
DiscoverBackendIPs *bool `yaml:"discover_backend_ips,omitempty"`
@@ -89,6 +92,8 @@ type UserInfo struct {

MetricLabels map[string]string `yaml:"metric_labels,omitempty"`

AccessLog *AccessLog `yaml:"access_log,omitempty"`

concurrencyLimitCh chan struct{}
concurrencyLimitReached *metrics.Counter

@@ -101,11 +106,37 @@ type UserInfo struct {
requestsDuration *metrics.Summary
}

// AccessLog represents configuration for access log settings.
type AccessLog struct {
Filters *AccessLogFilters `yaml:"filters"`
}

// AccessLogFilters represents list of filters for access logs printing
type AccessLogFilters struct {
// SkipStatusCodes is a list of HTTP status codes for which access logs will be skipped
SkipStatusCodes []int `yaml:"skip_status_codes"`
}

func (ui *UserInfo) logRequest(r *http.Request, userName string, statusCode int) {
filters := ui.AccessLog.Filters
if filters != nil && len(filters.SkipStatusCodes) > 0 {
if slices.Contains(filters.SkipStatusCodes, statusCode) {
return
}
}

remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Infof("access_log request_host=%q request_uri=%q status_code=%d remote_addr=%s user_agent=%q referer=%q username=%q",
r.Host, requestURI, statusCode, remoteAddr, r.UserAgent(), r.Referer(), userName)
}

// HeadersConf represents config for request and response headers.
type HeadersConf struct {
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
hasAnyPlaceHolders bool
}

func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
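The `skip_status_codes` filter reduces to a `slices.Contains` check before writing the log line; `shouldLog` below is an illustrative distillation:

```go
package main

import (
	"fmt"
	"slices"
)

// shouldLog reports whether a request with the given status code should be
// written to the access log, given the configured skip list.
func shouldLog(skipStatusCodes []int, statusCode int) bool {
	return !slices.Contains(skipStatusCodes, statusCode)
}

func main() {
	skip := []int{200, 202}
	for _, code := range []int{200, 404, 500} {
		fmt.Println(code, "logged:", shouldLog(skip, code))
	}
}
```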
@@ -113,10 +144,8 @@ func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
case ui.concurrencyLimitCh <- struct{}{}:
return nil
default:
ui.concurrencyLimitReached.Inc()

// The per-user limit for the number of concurrent requests is reached.
// Wait until the currently executed requests are finished, so the current request could be executed.
// The number of concurrently executed requests for the given user equals the limt.
// Wait until some of the currently executed requests are finished, so the current request could be executed.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
select {
case ui.concurrencyLimitCh <- struct{}{}:
@@ -124,6 +153,8 @@ func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
case <-ctx.Done():
err := ctx.Err()
if errors.Is(err, context.DeadlineExceeded) {
// The current request couldn't be executed until the request timeout.
ui.concurrencyLimitReached.Inc()
return fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because %d concurrent requests from the user %s are executed",
*maxQueueDuration, ui.getMaxConcurrentRequests(), ui.name())
}
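`beginConcurrencyLimit` treats a buffered channel as a semaphore: a non-blocking fast path first, then a blocking wait bounded by the request context. A self-contained sketch of that two-step acquire (names invented for the example):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// acquire tries a fast non-blocking slot grab, then falls back to waiting
// until either a slot frees up or the request context expires.
func acquire(ctx context.Context, sem chan struct{}) error {
	select {
	case sem <- struct{}{}:
		return nil // fast path: a slot was free
	default:
	}
	select {
	case sem <- struct{}{}:
		return nil // a running request finished and freed a slot
	case <-ctx.Done():
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return fmt.Errorf("timed out waiting for a free slot")
		}
		return ctx.Err()
	}
}

func main() {
	sem := make(chan struct{}, 1)
	sem <- struct{}{} // occupy the only slot
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(acquire(ctx, sem)) // times out: the slot is never released
}
```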
@@ -150,12 +181,22 @@ func (ui *UserInfo) stopHealthChecks() {
if ui == nil {
return
}
if ui.URLPrefix == nil {
return
}

bus := ui.URLPrefix.bus.Load()
bus.stopHealthChecks()
if ui.URLPrefix != nil {
bus := ui.URLPrefix.bus.Load()
bus.stopHealthChecks()
}
if ui.DefaultURL != nil {
bus := ui.DefaultURL.bus.Load()
bus.stopHealthChecks()
}
for i := range ui.URLMaps {
um := &ui.URLMaps[i]
if um.URLPrefix != nil {
bus := um.URLPrefix.bus.Load()
bus.stopHealthChecks()
}
}
}

// Header is `Name: Value` http header, which must be added to the proxied request.
@@ -338,6 +379,7 @@ func (bus *backendURLs) add(u *url.URL) {
url: u,
healthCheckContext: bus.healthChecksContext,
healthCheckWG: &bus.healthChecksWG,
hasPlaceHolders: hasAnyPlaceholders(u),
})
}

@@ -355,6 +397,8 @@ type backendURL struct {
concurrentRequests atomic.Int32

url *url.URL

hasPlaceHolders bool
}

func (bu *backendURL) isBroken() bool {
@@ -363,12 +407,10 @@ func (bu *backendURL) isBroken() bool {

func (bu *backendURL) setBroken() {
if bu.broken.CompareAndSwap(false, true) {
bu.healthCheckWG.Add(1)
go func() {
defer bu.healthCheckWG.Done()
bu.healthCheckWG.Go(func() {
bu.runHealthCheck()
bu.broken.Store(false)
}()
})
}
}

@@ -394,7 +436,7 @@ func (bu *backendURL) runHealthCheck() {
if errors.Is(bu.healthCheckContext.Err(), context.Canceled) {
return
}
logger.Warnf("ignoring the backend at %s for %s becasue of dial error: %s", addr, *failTimeout, err)
logger.Warnf("ignoring the backend at %s for %s because of dial error: %s", addr, *failTimeout, err)
continue
}

@@ -580,7 +622,7 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *

// Slow path - select other backend urls.
n := atomicCounter.Add(1) - 1
for i := uint32(0); i < uint32(len(bus)); i++ {
for i := range uint32(len(bus)) {
idx := (n + i) % uint32(len(bus))
bu := bus[idx]
if bu.isBroken() {
@@ -733,11 +775,9 @@ func initAuthConfig() {
configTimestamp.Set(fasttime.UnixTimestamp())

stopCh = make(chan struct{})
authConfigWG.Add(1)
go func() {
defer authConfigWG.Done()
authConfigWG.Go(func() {
authConfigReloader(sighupCh)
}()
})
}

func stopAuthConfig() {
@@ -793,6 +833,9 @@ var (
// authUsers contains the currently loaded auth users
authUsers atomic.Pointer[map[string]*UserInfo]

// jwt authentication cache
jwtAuthCache atomic.Pointer[jwtCache]

authConfigWG sync.WaitGroup
stopCh chan struct{}
)
@@ -809,7 +852,7 @@ func reloadAuthConfig() (bool, error) {

ok, err := reloadAuthConfigData(data)
if err != nil {
return false, fmt.Errorf("failed to pars -auth.config=%q: %w", *authConfigPath, err)
return false, fmt.Errorf("failed to parse -auth.config=%q: %w", *authConfigPath, err)
}
if !ok {
return false, nil
@@ -832,6 +875,14 @@ func reloadAuthConfigData(data []byte) (bool, error) {
return false, fmt.Errorf("failed to parse auth config: %w", err)
}

jui, err := parseJWTUsers(ac)
if err != nil {
return false, fmt.Errorf("failed to parse JWT users from auth config: %w", err)
}
jwtc := &jwtCache{
users: jui,
}

m, err := parseAuthConfigUsers(ac)
if err != nil {
return false, fmt.Errorf("failed to parse users from auth config: %w", err)
@@ -851,6 +902,7 @@ func reloadAuthConfigData(data []byte) (bool, error) {
authConfig.Store(ac)
authConfigData.Store(&data)
authUsers.Store(&m)
jwtAuthCache.Store(jwtc)

return true, nil
}
@@ -875,12 +927,18 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
if ui.BearerToken != "" {
return nil, fmt.Errorf("field bearer_token can't be specified for unauthorized_user section")
}
if ui.JWT != nil {
return nil, fmt.Errorf("field jwt can't be specified for unauthorized_user section")
}
if ui.AuthToken != "" {
return nil, fmt.Errorf("field auth_token can't be specified for unauthorized_user section")
}
if ui.Name != "" {
return nil, fmt.Errorf("field name can't be specified for unauthorized_user section")
}
if err := parseJWTPlaceholdersForUserInfo(ui, false); err != nil {
return nil, err
}
if err := ui.initURLs(); err != nil {
return nil, err
}
@@ -921,16 +979,27 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
}
for i := range uis {
ui := &uis[i]
// users with jwt tokens are parsed by parseJWTUsers function.
// the function also checks that users with jwt tokens do not have auth tokens, bearer tokens, usernames and passwords.
if ui.JWT != nil {
continue
}

ats, err := getAuthTokens(ui.AuthToken, ui.BearerToken, ui.Username, ui.Password)
if err != nil {
return nil, err
}

for _, at := range ats {
if uiOld := byAuthToken[at]; uiOld != nil {
return nil, fmt.Errorf("duplicate auth token=%q found for username=%q, name=%q; the previous one is set for username=%q, name=%q",
at, ui.Username, ui.Name, uiOld.Username, uiOld.Name)
}
}

if err := parseJWTPlaceholdersForUserInfo(ui, false); err != nil {
return nil, err
}
if err := ui.initURLs(); err != nil {
return nil, err
}
@@ -1030,6 +1099,7 @@ func (ui *UserInfo) initURLs() error {
return err
}
}

for _, e := range ui.URLMaps {
if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 && len(e.SrcQueryArgs) == 0 && len(e.SrcHeaders) == 0 {
return fmt.Errorf("missing `src_paths`, `src_hosts`, `src_query_args` and `src_headers` in `url_map`")
@@ -1089,6 +1159,9 @@ func (ui *UserInfo) name() string {
h := xxhash.Sum64([]byte(ui.AuthToken))
return fmt.Sprintf("auth_token:hash:%016X", h)
}
if ui.JWT != nil {
return `jwt`
}
return ""
}

@@ -276,6 +276,50 @@ users:
url_prefix: http://foo.bar
metric_labels:
not-prometheus-compatible: value
`)
// placeholder in url_prefix
f(`
users:
- username: foo
password: bar
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
// placeholder in a header
f(`
users:
- username: foo
password: bar
headers:
- 'X-Foo: {{a_placeholder}}'
url_prefix: 'http://ahost'
`)
// placeholder in url_prefix
f(`
users:
- username: foo
password: bar
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
// placeholder in a header in url_map
f(`
users:
- username: foo
password: bar
url_map:
- src_paths: ["/select/.*"]
headers:
- 'X-Foo: {{a_placeholder}}'
url_prefix: 'http://ahost'
`)

// placeholder in a header in url_map
f(`
users:
- username: foo
password: bar
url_map:
- src_paths: ["/select/.*"]
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
}

@@ -378,7 +422,7 @@ users:
RetryStatusCodes: []int{500, 501},
LoadBalancingPolicy: "first_available",
MergeQueryArgs: []string{"foo", "bar"},
DropSrcPathPrefixParts: intp(1),
DropSrcPathPrefixParts: new(1),
DiscoverBackendIPs: &discoverBackendIPsTrue,
},
}, nil)
@@ -621,6 +665,47 @@ unauthorized_user:
},
},
})

// skip user info with jwt, it is parsed by parseJWTUsers
f(`
users:
- username: foo
password: bar
url_prefix: http://aaa:343/bbb
- jwt: {skip_verify: true}
url_prefix: http://aaa:343/bbb
`, map[string]*UserInfo{
getHTTPAuthBasicToken("foo", "bar"): {
Username: "foo",
Password: "bar",
URLPrefix: mustParseURL("http://aaa:343/bbb"),
},
}, nil)

// Multiple users with access logs enabled
f(`
users:
- username: foo
url_prefix: http://foo
access_log: {}
- username: bar
url_prefix: https://bar/x/
access_log:
filters:
skip_status_codes: [404]
`, map[string]*UserInfo{
getHTTPAuthBasicToken("foo", ""): {
Username: "foo",
URLPrefix: mustParseURL("http://foo"),
AccessLog: &AccessLog{},
},
getHTTPAuthBasicToken("bar", ""): {
Username: "bar",
URLPrefix: mustParseURL("https://bar/x/"),
AccessLog: &AccessLog{Filters: &AccessLogFilters{SkipStatusCodes: []int{404}}},
},
}, nil)

}

func TestParseAuthConfigPassesTLSVerificationConfig(t *testing.T) {
@@ -831,7 +916,7 @@ func TestBrokenBackend(t *testing.T) {
bus[1].setBroken()

// broken backend should never return while there are healthy backends
for i := 0; i < 1e3; i++ {
for range int(1e3) {
b := up.getBackendURL()
if b.isBroken() {
t.Fatalf("unexpected broken backend %q", b.url)
@@ -963,10 +1048,6 @@ func mustParseURLs(us []string) *URLPrefix {
return up
}

func intp(n int) *int {
return &n
}

func mustNewRegex(s string) *Regex {
var re Regex
if err := yaml.Unmarshal([]byte(s), &re); err != nil {

@@ -125,3 +125,8 @@ unauthorized_user:
- http://vmselect-az1/?deny_partial_response=1
- http://vmselect-az2/?deny_partial_response=1
retry_status_codes: [503, 500]
# log access for requests routed to this user
access_log:
filters:
# except requests with Status Codes below
skip_status_codes: [200, 202]
app/vmauth/jwt.go (new file, 373 lines)
@@ -0,0 +1,373 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
metricsTenantPlaceholder = `{{.MetricsTenant}}`
|
||||
metricsExtraLabelsPlaceholder = `{{.MetricsExtraLabels}}`
|
||||
metricsExtraFiltersPlaceholder = `{{.MetricsExtraFilters}}`
|
||||
|
||||
logsAccountIDPlaceholder = `{{.LogsAccountID}}`
|
||||
logsProjectIDPlaceholder = `{{.LogsProjectID}}`
|
||||
logsExtraFiltersPlaceholder = `{{.LogsExtraFilters}}`
|
||||
logsExtraStreamFiltersPlaceholder = `{{.LogsExtraStreamFilters}}`
|
||||
|
||||
placeholderPrefix = `{{`
|
||||
)
|
||||
|
||||
var allPlaceholders = []string{
|
||||
metricsTenantPlaceholder,
|
||||
metricsExtraLabelsPlaceholder,
|
||||
metricsExtraFiltersPlaceholder,
|
||||
logsAccountIDPlaceholder,
|
||||
logsProjectIDPlaceholder,
|
||||
logsExtraFiltersPlaceholder,
|
||||
logsExtraStreamFiltersPlaceholder,
|
||||
}
|
||||
|
||||
var urlPathPlaceHolders = []string{
|
||||
metricsTenantPlaceholder,
|
||||
logsAccountIDPlaceholder,
|
||||
logsProjectIDPlaceholder,
|
||||
}
|
||||
|
||||
type jwtCache struct {
|
||||
// users contain UserInfo`s from AuthConfig with JWTConfig set
|
||||
users []*UserInfo
|
||||
}
|
||||
|
||||
type JWTConfig struct {
|
||||
PublicKeys []string `yaml:"public_keys,omitempty"`
|
||||
PublicKeyFiles []string `yaml:"public_key_files,omitempty"`
|
||||
SkipVerify bool `yaml:"skip_verify,omitempty"`
|
||||
|
||||
verifierPool *jwt.VerifierPool
|
||||
}
|
||||
|
||||
func parseJWTUsers(ac *AuthConfig) ([]*UserInfo, error) {
|
||||
jui := make([]*UserInfo, 0, len(ac.Users))
|
||||
for _, ui := range ac.Users {
|
||||
jwtToken := ui.JWT
|
||||
if jwtToken == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if ui.AuthToken != "" || ui.BearerToken != "" || ui.Username != "" || ui.Password != "" {
|
||||
return nil, fmt.Errorf("auth_token, bearer_token, username and password cannot be specified if jwt is set")
|
||||
}
|
||||
if len(jwtToken.PublicKeys) == 0 && len(jwtToken.PublicKeyFiles) == 0 && !jwtToken.SkipVerify {
|
||||
return nil, fmt.Errorf("jwt must contain at least a single public key, public_key_files or have skip_verify=true")
|
||||
}
|
||||
|
||||
if len(jwtToken.PublicKeys) > 0 || len(jwtToken.PublicKeyFiles) > 0 {
|
||||
keys := make([]any, 0, len(jwtToken.PublicKeys)+len(jwtToken.PublicKeyFiles))
|
||||
|
||||
for i := range jwtToken.PublicKeys {
|
||||
k, err := jwt.ParseKey([]byte(jwtToken.PublicKeys[i]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
for _, filePath := range jwtToken.PublicKeyFiles {
|
||||
keyData, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read public key from file %q: %w", filePath, err)
|
||||
}
|
||||
k, err := jwt.ParseKey(keyData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse public key from file %q: %w", filePath, err)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
vp, err := jwt.NewVerifierPool(keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jwtToken.verifierPool = vp
|
||||
}
|
||||
if err := parseJWTPlaceholdersForUserInfo(&ui, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ui.initURLs(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metricLabels, err := ui.getMetricLabels()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse metric_labels: %w", err)
|
||||
}
|
||||
ui.requests = ac.ms.GetOrCreateCounter(`vmauth_user_requests_total` + metricLabels)
|
||||
ui.requestErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_errors_total` + metricLabels)
|
||||
ui.backendRequests = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_requests_total` + metricLabels)
|
||||
ui.backendErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_errors_total` + metricLabels)
|
||||
ui.requestsDuration = ac.ms.GetOrCreateSummary(`vmauth_user_request_duration_seconds` + metricLabels)
|
||||
mcr := ui.getMaxConcurrentRequests()
|
||||
ui.concurrencyLimitCh = make(chan struct{}, mcr)
|
||||
ui.concurrencyLimitReached = ac.ms.GetOrCreateCounter(`vmauth_user_concurrent_requests_limit_reached_total` + metricLabels)
|
||||
_ = ac.ms.GetOrCreateGauge(`vmauth_user_concurrent_requests_capacity`+metricLabels, func() float64 {
|
||||
return float64(cap(ui.concurrencyLimitCh))
|
||||
})
|
||||
_ = ac.ms.GetOrCreateGauge(`vmauth_user_concurrent_requests_current`+metricLabels, func() float64 {
|
||||
return float64(len(ui.concurrencyLimitCh))
|
||||
})
|
||||
|
||||
rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
|
||||
}
|
||||
ui.rt = rt
|
||||
|
||||
jui = append(jui, &ui)
|
||||
}
|
||||
|
||||
// TODO: the limitation will be lifted once claim based matching will be implemented
|
||||
if len(jui) > 1 {
|
||||
return nil, fmt.Errorf("multiple users with JWT tokens are not supported; found %d users", len(jui))
|
||||
}
|
||||
|
||||
return jui, nil
|
||||
}
|
||||
|
||||
func getUserInfoByJWTToken(ats []string) (*UserInfo, *jwt.Token) {
	js := *jwtAuthCache.Load()
	if len(js.users) == 0 {
		return nil, nil
	}

	for _, at := range ats {
		if strings.Count(at, ".") != 2 {
			continue
		}

		at, _ = strings.CutPrefix(at, `http_auth:`)

		tkn, err := jwt.NewToken(at, true)
		if err != nil {
			if *logInvalidAuthTokens {
				logger.Infof("cannot parse jwt token: %s", err)
			}
			continue
		}
		if tkn.IsExpired(time.Now()) {
			if *logInvalidAuthTokens {
				// TODO: add more context:
				// token claims with issuer
				logger.Infof("jwt token is expired")
			}
			continue
		}

		for _, ui := range js.users {
			if ui.JWT.SkipVerify {
				return ui, tkn
			}

			if err := ui.JWT.verifierPool.Verify(tkn); err != nil {
				if *logInvalidAuthTokens {
					logger.Infof("cannot verify jwt token: %s", err)
				}
				continue
			}

			return ui, tkn
		}
	}

	return nil, nil
}

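// replaceJWTPlaceholders substitutes the supported {{.*}} placeholders in the
// backend URL path, query args and request headers with values derived from
// the token's VMAccess() claim data; it returns bu.url and hc unchanged when
// there is nothing to template.
//
// Illustrative example (assuming {{.MetricsTenant}} is among the path
// placeholders and a token whose claim yields MetricsAccountID=5 and
// MetricsProjectID=7): a configured url_prefix of
// http://vmselect/select/{{.MetricsTenant}}/prometheus is proxied as
// http://vmselect/select/5:7/prometheus.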
func replaceJWTPlaceholders(bu *backendURL, hc HeadersConf, vma *jwt.VMAccessClaim) (*url.URL, HeadersConf) {
	if !bu.hasPlaceHolders && !hc.hasAnyPlaceHolders {
		return bu.url, hc
	}
	targetURL := bu.url
	data := jwtClaimsData(vma)
	if bu.hasPlaceHolders {
		// template url params and request path
		// make a copy of url
		uCopy := *bu.url
		for _, uph := range urlPathPlaceHolders {
			replacement := data[uph]
			uCopy.Path = strings.ReplaceAll(uCopy.Path, uph, replacement[0])
		}
		query := uCopy.Query()
		var foundAnyQueryPlaceholder bool
		var templatedValues []string
		for param, values := range query {
			templatedValues = templatedValues[:0]
			// Filter values with placeholders in-place and accumulate the replacements.
			// This may change the order of param values; the order isn't guaranteed anyway,
			// and it changes in any case when a single template expands into multiple values.
			var cnt int
			for _, value := range values {
				if dv, ok := data[value]; ok {
					foundAnyQueryPlaceholder = true
					templatedValues = append(templatedValues, dv...)
					continue
				}
				values[cnt] = value
				cnt++
			}
			values = values[:cnt]
			values = append(values, templatedValues...)
			query[param] = values
		}
		if foundAnyQueryPlaceholder {
			uCopy.RawQuery = query.Encode()
		}
		targetURL = &uCopy
	}
	if hc.hasAnyPlaceHolders {
		// make a copy of headers and update only values with placeholder
		rhs := make([]*Header, 0, len(hc.RequestHeaders))
		for _, rh := range hc.RequestHeaders {
			if dv, ok := data[rh.Value]; ok {
				rh := &Header{
					Name:  rh.Name,
					Value: strings.Join(dv, ","),
				}
				rhs = append(rhs, rh)
				continue
			}
			rhs = append(rhs, rh)
		}
		hc.RequestHeaders = rhs
	}

	return targetURL, hc
}

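// jwtClaimsData maps each supported placeholder to its replacement values
// derived from the given VMAccessClaim.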
func jwtClaimsData(vma *jwt.VMAccessClaim) map[string][]string {
	data := map[string][]string{
		// TODO: optimize at parsing stage
		metricsTenantPlaceholder:       {fmt.Sprintf("%d:%d", vma.MetricsAccountID, vma.MetricsProjectID)},
		metricsExtraLabelsPlaceholder:  vma.MetricsExtraLabels,
		metricsExtraFiltersPlaceholder: vma.MetricsExtraFilters,

		// TODO: optimize at parsing stage
		logsAccountIDPlaceholder:          {fmt.Sprintf("%d", vma.LogsAccountID)},
		logsProjectIDPlaceholder:          {fmt.Sprintf("%d", vma.LogsProjectID)},
		logsExtraFiltersPlaceholder:       vma.LogsExtraFilters,
		logsExtraStreamFiltersPlaceholder: vma.LogsExtraStreamFilters,
	}
	return data
}

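// parseJWTPlaceholdersForUserInfo validates placeholder usage in all the URLs
// and headers of the given UserInfo; isAllowed is true only in JWT context.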
func parseJWTPlaceholdersForUserInfo(ui *UserInfo, isAllowed bool) error {
	if ui.URLPrefix != nil {
		if err := validateJWTPlaceholdersForURL(ui.URLPrefix, isAllowed); err != nil {
			return err
		}
	}
	if err := parsePlaceholdersForHC(&ui.HeadersConf, isAllowed); err != nil {
		return err
	}
	if ui.DefaultURL != nil {
		if err := validateJWTPlaceholdersForURL(ui.DefaultURL, isAllowed); err != nil {
			return fmt.Errorf("invalid `default_url` placeholders: %w", err)
		}
	}
	for i := range ui.URLMaps {
		e := &ui.URLMaps[i]
		if e.URLPrefix != nil {
			if err := validateJWTPlaceholdersForURL(e.URLPrefix, isAllowed); err != nil {
				return fmt.Errorf("invalid `url_map` `url_prefix` placeholders: %w", err)
			}
		}
		if err := parsePlaceholdersForHC(&e.HeadersConf, isAllowed); err != nil {
			return fmt.Errorf("invalid `url_map` headers placeholders: %w", err)
		}
	}
	return nil
}

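// validateJWTPlaceholdersForURL checks that the URL path and query arg values
// of every backend URL contain only supported placeholders, and that
// placeholders appear only where they are allowed.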
func validateJWTPlaceholdersForURL(up *URLPrefix, isAllowed bool) error {
	for _, bu := range up.busOriginal {
		ok := strings.Contains(bu.Path, placeholderPrefix)
		if ok && !isAllowed {
			return fmt.Errorf("placeholder: %q is only allowed at JWT token context", bu.Path)
		}
		if ok {
			p := bu.Path
			for _, ph := range allPlaceholders {
				p = strings.ReplaceAll(p, ph, ``)
			}
			if strings.Contains(p, placeholderPrefix) {
				return fmt.Errorf("invalid placeholder found in URL request path: %q, supported values are: %s", bu.Path, strings.Join(allPlaceholders, ", "))
			}
		}
		for param, values := range bu.Query() {
			for _, value := range values {
				ok := strings.Contains(value, placeholderPrefix)
				if ok && !isAllowed {
					return fmt.Errorf("query param: %q with placeholder: %q is only allowed at JWT token context", param, value)
				}
				if ok {
					// possible placeholder
					if !slices.Contains(allPlaceholders, value) {
						return fmt.Errorf("query param: %q has unsupported placeholder string: %q, supported values are: %s", param, value, strings.Join(allPlaceholders, ", "))
					}
				}
			}
		}
	}
	return nil
}

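// parsePlaceholdersForHC validates placeholders in request headers and sets
// hc.hasAnyPlaceHolders; placeholders in response headers are rejected.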
func parsePlaceholdersForHC(hc *HeadersConf, isAllowed bool) error {
	for _, rhs := range hc.RequestHeaders {
		ok := strings.Contains(rhs.Value, placeholderPrefix)
		if ok && !isAllowed {
			return fmt.Errorf("request header: %q placeholder: %q is only supported at JWT context", rhs.Name, rhs.Value)
		}
		if ok {
			if !slices.Contains(allPlaceholders, rhs.Value) {
				return fmt.Errorf("request header: %q has unsupported placeholder: %q, supported values are: %s", rhs.Name, rhs.Value, strings.Join(allPlaceholders, ", "))
			}
			hc.hasAnyPlaceHolders = true
		}
	}
	for _, rhs := range hc.ResponseHeaders {
		if strings.Contains(rhs.Value, placeholderPrefix) {
			return fmt.Errorf("response header placeholders are not supported; found placeholder prefix at header: %q with value: %q", rhs.Name, rhs.Value)
		}
	}
	return nil
}

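// hasAnyPlaceholders reports whether the URL path contains the placeholder
// prefix or any query arg value starts with it.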
func hasAnyPlaceholders(u *url.URL) bool {
	if strings.Contains(u.Path, placeholderPrefix) {
		return true
	}
	if len(u.Query()) == 0 {
		return false
	}
	for _, values := range u.Query() {
		for _, value := range values {
			if strings.HasPrefix(value, placeholderPrefix) {
				return true
			}
		}
	}
	return false
}

app/vmauth/jwt_test.go (new file, 336 lines)
@@ -0,0 +1,336 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"
)

func TestJWTParseAuthConfigFailure(t *testing.T) {
	validRSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiX7oPWKOWRQsGFEWvwZO
mL2PYsdYUsu9nr0qtPCjxQHUJgLfT3rdKlvKpPFYv7ZmKnqTncg36Wz9uiYmWJ7e
IB5Z+fko8kVIMzarCqVvpAJDzYF/pUii68xvuYoK3L9TIOAeyCXv+prwnr2IH+Mw
9AONzWbRrYoO74XyTE9vMU5qmI/L1VPk+PR8lqPOSptLvzsfoaIk2ED4yK2nRB+6
st+k4nccPqbErqHc8aiXnXfugfnr6b+NPFYUzKsDqkymGOokVijrI8B3jNw6c6Do
zphk+D3wgLsXYHfMcZbXIMqffqm/aB8Qg88OpFOkQ3rd2p6R9+hacnZkfkn3Phiw
yQIDAQAB
-----END PUBLIC KEY-----
`
	// ECDSA with the P-521 curve
	validECDSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAU9RmtkCRuYTKCyvLlDn5DtBZOHSe
QTa5j9q/oQVpCKqcXVFrH5dgh0GL+P/ZhkeuowPzCZqntGf0+7wPt9OxSJcADVJm
dv92m540MXss8zdHf5qtE0gsu2Ved0R7Z8a8QwGZ/1mYZ+kFGGbdQTlSvRqDySTq
XOtclIk1uhc03oL9nOQ=
-----END PUBLIC KEY-----
`

	f := func(s string, expErr string) {
		t.Helper()
		ac, err := parseAuthConfig([]byte(s))
		if err != nil {
			if expErr != err.Error() {
				t.Fatalf("unexpected error; got\n%q\nwant\n%q", err.Error(), expErr)
			}
			return
		}
		users, err := parseJWTUsers(ac)
		if err != nil {
			if expErr != err.Error() {
				t.Fatalf("unexpected error; got\n%q\nwant\n%q", err.Error(), expErr)
			}
			return
		}
		t.Fatalf("expecting non-nil error; got %v", users)
	}

	// unauthorized_user cannot be used with jwt
	f(`
unauthorized_user:
  jwt: {skip_verify: true}
  url_prefix: http://foo.bar
`, `field jwt can't be specified for unauthorized_user section`)

	// username and jwt in a single config
	f(`
users:
- username: foo
  jwt: {skip_verify: true}
  url_prefix: http://foo.bar
`, `auth_token, bearer_token, username and password cannot be specified if jwt is set`)
	// bearer_token and jwt in a single config
	f(`
users:
- bearer_token: foo
  jwt: {skip_verify: true}
  url_prefix: http://foo.bar
`, `auth_token, bearer_token, username and password cannot be specified if jwt is set`)
	// auth_token and jwt in a single config
	f(`
users:
- auth_token: "Foo token"
  jwt: {skip_verify: true}
  url_prefix: http://foo.bar
`, `auth_token, bearer_token, username and password cannot be specified if jwt is set`)

	// jwt public_keys or skip_verify must be set, part 1
	f(`
users:
- jwt: {}
  url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)

	// jwt public_keys or skip_verify must be set, part 2
	f(`
users:
- jwt: {public_keys: null}
  url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)

	// jwt public_keys or skip_verify must be set, part 3
	f(`
users:
- jwt: {public_keys: []}
  url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)

	// jwt public_keys, public_key_files or skip_verify must be set
	f(`
users:
- jwt: {public_key_files: []}
  url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)

	// invalid public key, part 1
	f(`
users:
- jwt: {public_keys: [""]}
  url_prefix: http://foo.bar
`, `failed to parse key "": failed to decode PEM block containing public key`)

	// invalid public key, part 2
	f(`
users:
- jwt: {public_keys: ["invalid"]}
  url_prefix: http://foo.bar
`, `failed to parse key "invalid": failed to decode PEM block containing public key`)

	// invalid public key, part 3
	f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
    - %q
    - "invalid"
  url_prefix: http://foo.bar
`, validRSAPublicKey, validECDSAPublicKey), `failed to parse key "invalid": failed to decode PEM block containing public key`)

	// several jwt users
	f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  url_prefix: http://foo.bar
- jwt:
    public_keys:
    - %q
  url_prefix: http://foo.bar
`, validRSAPublicKey, validECDSAPublicKey), `multiple users with JWT tokens are not supported; found 2 users`)

	// public key file doesn't exist
	f(`
users:
- jwt:
    public_key_files:
    - /path/to/nonexistent/file.pem
  url_prefix: http://foo.bar
`, "cannot read public key from file \"/path/to/nonexistent/file.pem\": open /path/to/nonexistent/file.pem: no such file or directory")

	// public key file with invalid PEM data
	publicKeyFile := filepath.Join(t.TempDir(), "a_public_key.pem")
	if err := os.WriteFile(publicKeyFile, []byte(`invalidPEM`), 0o644); err != nil {
		t.Fatalf("failed to write public key file: %s", err)
	}
	f(`
users:
- jwt:
    public_key_files:
    - `+publicKeyFile+`
  url_prefix: http://foo.bar
`, "cannot parse public key from file \""+publicKeyFile+"\": failed to parse key \"invalidPEM\": failed to decode PEM block containing public key")

	// unsupported placeholder in the URL path
	f(`
users:
- jwt:
    skip_verify: true
  url_prefix: http://foo.bar/{{.UnsupportedPlaceholder}}/foo`,
		"invalid placeholder found in URL request path: \"/{{.UnsupportedPlaceholder}}/foo\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
	)
	// unsupported placeholder in a header
	f(`
users:
- jwt:
    skip_verify: true
  headers:
  - "AccountID: {{.UnsupportedPlaceholder}}"
  url_prefix: http://foo.bar
`,
		"request header: \"AccountID\" has unsupported placeholder: \"{{.UnsupportedPlaceholder}}\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
	)

	// spaces in templating not allowed
	f(`
users:
- jwt:
    skip_verify: true
  headers:
  - "AccountID: {{ .LogsAccountID }}"
  url_prefix: http://foo.bar
`,
		"request header: \"AccountID\" has unsupported placeholder: \"{{ .LogsAccountID }}\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
	)
}

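// TestJWTParseAuthConfigSuccess verifies that valid JWT configs are accepted
// and that the verifier pool is initialized only when skip_verify is false.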
func TestJWTParseAuthConfigSuccess(t *testing.T) {
	validRSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiX7oPWKOWRQsGFEWvwZO
mL2PYsdYUsu9nr0qtPCjxQHUJgLfT3rdKlvKpPFYv7ZmKnqTncg36Wz9uiYmWJ7e
IB5Z+fko8kVIMzarCqVvpAJDzYF/pUii68xvuYoK3L9TIOAeyCXv+prwnr2IH+Mw
9AONzWbRrYoO74XyTE9vMU5qmI/L1VPk+PR8lqPOSptLvzsfoaIk2ED4yK2nRB+6
st+k4nccPqbErqHc8aiXnXfugfnr6b+NPFYUzKsDqkymGOokVijrI8B3jNw6c6Do
zphk+D3wgLsXYHfMcZbXIMqffqm/aB8Qg88OpFOkQ3rd2p6R9+hacnZkfkn3Phiw
yQIDAQAB
-----END PUBLIC KEY-----
`
	// ECDSA with the P-521 curve
	validECDSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAU9RmtkCRuYTKCyvLlDn5DtBZOHSe
QTa5j9q/oQVpCKqcXVFrH5dgh0GL+P/ZhkeuowPzCZqntGf0+7wPt9OxSJcADVJm
dv92m540MXss8zdHf5qtE0gsu2Ved0R7Z8a8QwGZ/1mYZ+kFGGbdQTlSvRqDySTq
XOtclIk1uhc03oL9nOQ=
-----END PUBLIC KEY-----
`

	f := func(s string) {
		t.Helper()
		ac, err := parseAuthConfig([]byte(s))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		jui, err := parseJWTUsers(ac)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		for _, ui := range jui {
			if ui.JWT == nil {
				t.Fatalf("unexpected nil JWTConfig")
			}

			if ui.JWT.SkipVerify {
				if ui.JWT.verifierPool != nil {
					t.Fatalf("unexpected non-nil verifier pool for skip_verify=true")
				}
				continue
			}

			if ui.JWT.verifierPool == nil {
				t.Fatalf("unexpected nil verifier pool for non-empty public keys")
			}
		}
	}

	f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  url_prefix: http://foo.bar
`, validRSAPublicKey))

	f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  url_prefix: http://foo.bar
`, validECDSAPublicKey))

	f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
    - %q
  url_prefix: http://foo.bar
`, validRSAPublicKey, validECDSAPublicKey))

	f(`
users:
- jwt:
    skip_verify: true
  url_prefix: http://foo.bar
`)

	// combined with other auth methods
	f(`
users:
- username: foo
  password: bar
  url_prefix: http://foo.bar

- jwt:
    skip_verify: true
  url_prefix: http://foo.bar

- bearer_token: foo
  url_prefix: http://foo.bar
`)

	rsaKeyFile := filepath.Join(t.TempDir(), "rsa_public_key.pem")
	if err := os.WriteFile(rsaKeyFile, []byte(validRSAPublicKey), 0o644); err != nil {
		t.Fatalf("failed to write RSA key file: %s", err)
	}
	ecdsaKeyFile := filepath.Join(t.TempDir(), "ecdsa_public_key.pem")
	if err := os.WriteFile(ecdsaKeyFile, []byte(validECDSAPublicKey), 0o644); err != nil {
		t.Fatalf("failed to write ECDSA key file: %s", err)
	}

	// Test single public key file
	f(fmt.Sprintf(`
users:
- jwt:
    public_key_files:
    - %q
  url_prefix: http://foo.bar
`, rsaKeyFile))

	// Test multiple public key files
	f(fmt.Sprintf(`
users:
- jwt:
    public_key_files:
    - %q
    - %q
  url_prefix: http://foo.bar
`, rsaKeyFile, ecdsaKeyFile))

	// Test combined inline keys and files
	f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
    public_key_files:
    - %q
  url_prefix: http://foo.bar
`, validECDSAPublicKey, rsaKeyFile))
}

@@ -16,6 +16,7 @@ import (
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
@@ -24,6 +25,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/ioutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
@@ -40,27 +42,38 @@ var (
	useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
	maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host. "+
		"See also -maxConcurrentRequests")
	idleConnTimeout = flag.Duration("idleConnTimeout", 50*time.Second, "The timeout for HTTP keep-alive connections to backend services. "+
	maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host")
	idleConnTimeout        = flag.Duration("idleConnTimeout", 50*time.Second, "The timeout for HTTP keep-alive connections to backend services. "+
		"It is recommended setting this value to values smaller than -http.idleConnTimeout set at backend services")
	responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")

	maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process. Other requests are rejected with "+
		"'429 Too Many Requests' http status code. See also -maxQueueDuration, -maxConcurrentPerUserRequests and -maxIdleConnsPerBackend command-line options")
	maxConcurrentPerUserRequests = flag.Int("maxConcurrentPerUserRequests", 300, "The maximum number of concurrent requests vmauth can process per each configured user. "+
		"Other requests are rejected with '429 Too Many Requests' http status code. See also -maxQueueDuration and -maxConcurrentRequests command-line options "+
		"and max_concurrent_requests option in per-user config")
	maxQueueDuration = flag.Duration("maxQueueDuration", 10*time.Second, "The maximum duration the request waits for execution when the number of concurrently executed "+
		"requests reach -maxConcurrentRequests or -maxConcurrentPerUserRequests before returning '429 Too Many Requests' error. "+
		"This allows graceful handling of short spikes in the number of concurrent requests")
	requestBufferSize = flagutil.NewBytes("requestBufferSize", 32*1024, "The size of the buffer for reading the request body before proxying the request to backends. "+
		"This allows reducing the consumption of backend resources when processing requests from clients connected via slow networks. "+
		"Set to 0 to disable request buffering. See https://docs.victoriametrics.com/victoriametrics/vmauth/#request-body-buffering")
	maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size to buffer in memory for potential retries at other backends. "+
		"Request bodies larger than this size cannot be retried if the backend fails. Zero or negative value disables request body buffering and retries. "+
		"See also -requestBufferSize")

	maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process simultaneously. "+
		"Requests exceeding this limit are queued for up to -maxQueueDuration and then rejected with '429 Too Many Requests' http status code if the limit is still reached. "+
		"This protects vmauth itself from overloading and out-of-memory (OOM) failures. See also -maxConcurrentPerUserRequests "+
		"and https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting")
	maxConcurrentPerUserRequests = flag.Int("maxConcurrentPerUserRequests", 100, "The maximum number of concurrent requests vmauth can process per each configured user. "+
		"Requests exceeding this limit are queued for up to -maxQueueDuration and then rejected with '429 Too Many Requests' http status code if the limit is still reached. "+
		"This provides fairness and isolation between users, preventing a single user from consuming all the available resources. "+
		"It works in conjunction with -maxConcurrentRequests, which sets the global limit across all users. "+
		"This default can be overridden for individual users via max_concurrent_requests option in per-user config. "+
		"See https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting")
	maxQueueDuration = flag.Duration("maxQueueDuration", 10*time.Second, "The maximum duration to wait before rejecting incoming requests if concurrency limit "+
		"specified via -maxConcurrentRequests or -maxConcurrentPerUserRequests command-line flags is reached. "+
		"Requests are rejected with '429 Too Many Requests' http status code if the limit is still reached after the -maxQueueDuration duration. "+
		"This allows graceful handling of short spikes in concurrent requests. See https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting")

	reloadAuthKey        = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*")
	logInvalidAuthTokens = flag.Bool("logInvalidAuthTokens", false, "Whether to log requests with invalid auth tokens. "+
		`Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
	failTimeout               = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend")
	maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size, which can be cached and re-tried at other backends. "+
		"Bigger values may require more memory. Zero or negative value disables caching of request body. This may be useful when proxying data ingestion requests")
	failTimeout = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend")

	backendTLSInsecureSkipVerify = flag.Bool("backend.tlsInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to backends over HTTPS. "+
		"See https://docs.victoriametrics.com/victoriametrics/vmauth/#backend-tls-setup")
	backendTLSCAFile = flag.String("backend.TLSCAFile", "", "Optional path to TLS root CA file, which is used for TLS verification when connecting to backends over HTTPS. "+
@@ -161,7 +174,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	// Process requests for unauthorized users
	ui := authConfig.Load().UnauthorizedUser
	if ui != nil {
		processUserRequest(w, r, ui)
		processUserRequest(w, r, ui, nil)
		return true
	}

@@ -169,29 +182,36 @@
		return true
	}

	ui := getUserInfoByAuthTokens(ats)
	if ui == nil {
		uu := authConfig.Load().UnauthorizedUser
		if uu != nil {
			processUserRequest(w, r, uu)
			return true
	if ui := getUserInfoByAuthTokens(ats); ui != nil {
		processUserRequest(w, r, ui, nil)
		return true
	}
	if ui, tkn := getUserInfoByJWTToken(ats); ui != nil {
		if tkn == nil {
			logger.Panicf("BUG: unexpected nil jwt token for user %q", ui.name())
		}

		invalidAuthTokenRequests.Inc()
		if *logInvalidAuthTokens {
			err := fmt.Errorf("cannot authorize request with auth tokens %q", ats)
			err = &httpserver.ErrorWithStatusCode{
				Err:        err,
				StatusCode: http.StatusUnauthorized,
			}
			httpserver.Errorf(w, r, "%s", err)
		} else {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
		}
		processUserRequest(w, r, ui, tkn)
		return true
	}

	processUserRequest(w, r, ui)
	uu := authConfig.Load().UnauthorizedUser
	if uu != nil {
		processUserRequest(w, r, uu, nil)
		return true
	}

	invalidAuthTokenRequests.Inc()
	if *logInvalidAuthTokens {
		err := fmt.Errorf("cannot authorize request with auth tokens %q", ats)
		err = &httpserver.ErrorWithStatusCode{
			Err:        err,
			StatusCode: http.StatusUnauthorized,
		}
		httpserver.Errorf(w, r, "%s", err)
	} else {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
	}
	return true
}

@@ -206,7 +226,37 @@ func getUserInfoByAuthTokens(ats []string) *UserInfo {
	return nil
}

func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
// responseWriterWithStatus is a wrapper around http.ResponseWriter that captures the status code written to the response.
type responseWriterWithStatus struct {
	http.ResponseWriter
	status int
}

// WriteHeader records the status so it can be easily retrieved later
func (rws *responseWriterWithStatus) WriteHeader(status int) {
	rws.status = status
	rws.ResponseWriter.WriteHeader(status)
}

// Flush implements net/http.Flusher interface
//
// This is needed for the copyStreamToClient()
func (rws *responseWriterWithStatus) Flush() {
	flusher, ok := rws.ResponseWriter.(http.Flusher)
	if !ok {
		logger.Panicf("BUG: it is expected http.ResponseWriter (%T) supports http.Flusher interface", rws.ResponseWriter)
	}
	flusher.Flush()
}

// Unwrap returns the original ResponseWriter wrapped by rws.
//
// This is needed for the net/http.ResponseController - see https://pkg.go.dev/net/http#NewResponseController
func (rws *responseWriterWithStatus) Unwrap() http.ResponseWriter {
	return rws.ResponseWriter
}

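// processUserRequest handles a request on behalf of ui; tkn is the parsed JWT
// token for requests authorized via JWT, or nil otherwise.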
func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *jwt.Token) {
	startTime := time.Now()
	defer ui.requestsDuration.UpdateDuration(startTime)

@@ -215,49 +265,131 @@
	ctx, cancel := context.WithTimeout(r.Context(), *maxQueueDuration)
	defer cancel()

	// Limit the concurrency of requests to backends
	userName := ui.name()
	if userName == "" {
		userName = "unauthorized"
	}

	if ui.AccessLog != nil {
		w = &responseWriterWithStatus{ResponseWriter: w}
		defer func() {
			rws := w.(*responseWriterWithStatus)
			ui.logRequest(r, userName, rws.status)
		}()
	}

	// Acquire global concurrency limit.
	if err := beginConcurrencyLimit(ctx); err != nil {
		handleConcurrencyLimitError(w, r, err)
		return
	}
	defer endConcurrencyLimit()

	// Set read deadline for reading the initial chunk for the request body.
	rc := http.NewResponseController(w)
	deadline, ok := ctx.Deadline()
	if !ok {
		logger.Panicf("BUG: expecting valid deadline for the context")
	}
	if err := rc.SetReadDeadline(deadline); err != nil {
		logger.Panicf("BUG: cannot set read deadline: %s", err)
	}

	// Read the initial chunk for the request body.
	bb, err := bufferRequestBody(ctx, r.Body, userName)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	r.Body = bb

	// Disable the read deadline for the rest of the request body.
	if err := rc.SetReadDeadline(time.Time{}); err != nil {
		logger.Panicf("BUG: cannot reset read deadline: %s", err)
	}

	// Acquire concurrency limit for the given user.
	if err := ui.beginConcurrencyLimit(ctx); err != nil {
		handleConcurrencyLimitError(w, r, err)
		return
	}
	defer ui.endConcurrencyLimit()

	// Process the request.
	processRequest(w, r, ui, tkn)
}

func beginConcurrencyLimit(ctx context.Context) error {
	concurrencyLimitOnce.Do(concurrencyLimitInit)
	select {
	case concurrencyLimitCh <- struct{}{}:
		if err := ui.beginConcurrencyLimit(ctx); err != nil {
			handleConcurrencyLimitError(w, r, err)
			<-concurrencyLimitCh
			return
		}
		return nil
	default:
		// The -maxConcurrentRequests are executed. Wait until some of the requests are finished,
		// so the current request could be executed.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
		select {
		case concurrencyLimitCh <- struct{}{}:
			if err := ui.beginConcurrencyLimit(ctx); err != nil {
				handleConcurrencyLimitError(w, r, err)
				<-concurrencyLimitCh
				return
			}
			return nil
		case <-ctx.Done():
			err := ctx.Err()

			concurrentRequestsLimitReached.Inc()

			if errors.Is(err, context.DeadlineExceeded) {
				err = fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because -maxConcurrentRequests=%d concurrent requests are executed",
				// The current request couldn't be executed until the request timeout.
				concurrentRequestsLimitReached.Inc()
				return fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because -maxConcurrentRequests=%d concurrent requests are executed",
					*maxQueueDuration, cap(concurrencyLimitCh))
				handleConcurrencyLimitError(w, r, err)
				return
			}

			err = fmt.Errorf("cannot start executing the request because -maxConcurrentRequests=%d concurrent requests are executed: %w", cap(concurrencyLimitCh), err)
			handleConcurrencyLimitError(w, r, err)
			return
			return fmt.Errorf("cannot start executing the request because -maxConcurrentRequests=%d concurrent requests are executed: %w", cap(concurrencyLimitCh), err)
		}
	}
	processRequest(w, r, ui)
	ui.endConcurrencyLimit()
}

func endConcurrencyLimit() {
	<-concurrencyLimitCh
}

func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
func bufferRequestBody(ctx context.Context, r io.ReadCloser, userName string) (io.ReadCloser, error) {
	if r == nil {
		// This is a GET request with nil reader.
		return nil, nil
	}

	maxBufSize := max(requestBufferSize.IntN(), maxRequestBodySizeToRetry.IntN())
	if maxBufSize <= 0 {
		return r, nil
	}

	lr := ioutil.GetLimitedReader(r, int64(maxBufSize))
	defer ioutil.PutLimitedReader(lr)

	start := time.Now()
	buf, err := io.ReadAll(lr)
	bufferRequestBodyDuration.UpdateDuration(start)

	if err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			rejectSlowClientRequests.Inc()

			d := time.Since(start)

			return nil, &httpserver.ErrorWithStatusCode{
				Err: fmt.Errorf("reject request from the user %s because the request body couldn't be read in -maxQueueDuration=%s; read %d bytes in %s",
					userName, *maxQueueDuration, len(buf), d.Truncate(time.Second)),
				StatusCode: http.StatusBadRequest,
			}
		}

		return nil, &httpserver.ErrorWithStatusCode{
			Err:        fmt.Errorf("cannot read request body: %w", err),
			StatusCode: http.StatusBadRequest,
		}
	}

	bb := newBufferedBody(r, buf, maxBufSize)
	return bb, nil
}

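// processRequest proxies the request to one of the backends behind ui,
// applying JWT placeholder templating when tkn is non-nil.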
func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *jwt.Token) {
	u := normalizeURL(r.URL)
	up, hc := ui.getURLPrefixAndHeaders(u, r.Host, r.Header)
	isDefault := false
@@ -282,28 +414,29 @@
		isDefault = true
	}

	rtb := newReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
	r.Body = rtb

	maxAttempts := up.getBackendsCount()
	for i := 0; i < maxAttempts; i++ {
	for range maxAttempts {
		bu := up.getBackendURL()
		if bu == nil {
			break
		}
		targetURL := bu.url
		// Don't change path and add request_path query param for default route.
		if tkn != nil {
			// for security reasons allow templating only for configured url values and headers
			targetURL, hc = replaceJWTPlaceholders(bu, hc, tkn.VMAccess())
		}
		if isDefault {
			// Don't change path and add request_path query param for default route.
			query := targetURL.Query()
			query.Set("request_path", u.String())
			targetURL.RawQuery = query.Encode()
		} else { // Update path for regular routes.
		} else {
			// Update path for regular routes.
			targetURL = mergeURLs(targetURL, u, up.dropSrcPathPrefixParts, up.mergeQueryArgs)
		}

		wasLocalRetry := false
	again:
		ok, needLocalRetry := tryProcessingRequest(w, r, targetURL, hc, up.retryStatusCodes, ui)
		ok, needLocalRetry := tryProcessingRequest(w, r, targetURL, hc, up.retryStatusCodes, ui, bu)
		if needLocalRetry && !wasLocalRetry {
			wasLocalRetry = true
			goto again
@@ -313,18 +446,19 @@
		if ok {
			return
		}

		bu.setBroken()
		ui.backendErrors.Inc()
	}
	err := &httpserver.ErrorWithStatusCode{
		Err:        fmt.Errorf("all the %d backends for the user %q are unavailable", up.getBackendsCount(), ui.name()),
		Err:        fmt.Errorf("all the %d backends for the user %q are unavailable for proxying the request - check previous WARN logs to see the exact error for each failed backend", up.getBackendsCount(), ui.name()),
		StatusCode: http.StatusBadGateway,
	}
	httpserver.Errorf(w, r, "%s", err)
	ui.requestErrors.Inc()
}

func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int, ui *UserInfo) (bool, bool) {
func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int, ui *UserInfo, bu *backendURL) (bool, bool) {
	ui.backendRequests.Inc()
	req := sanitizeRequestHeaders(r)

@@ -339,27 +473,19 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
	}
	}

	rtb, rtbOK := req.Body.(*readTrackingBody)
	bb, bbOK := req.Body.(*bufferedBody)
	canRetry := !bbOK || bb.canRetry()

	res, err := ui.rt.RoundTrip(req)

	if ctxErr := r.Context().Err(); ctxErr != nil {
		// Override the error returned by the RoundTrip with the context error if it is non-nil.
		// This makes sure the proper logging for canceled and timed out requests - log the real cause of the error
		// instead of the random error, which could be returned from RoundTrip because of canceled or timed out request.
		err = ctxErr
	if errors.Is(r.Context().Err(), context.Canceled) {
		// Do not retry canceled requests.
		clientCanceledRequests.Inc()
		return true, false
	}

	if err != nil {
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			// Do not retry canceled or timed out requests
			remoteAddr := httpserver.GetQuotedRemoteAddr(r)
			requestURI := httpserver.GetRequestURI(r)
			if errors.Is(err, context.DeadlineExceeded) {
				// Timed out request must be counted as errors, since this usually means that the backend is slow.
				logger.Warnf("remoteAddr: %s; requestURI: %s; timeout while proxying the response from %s: %s", remoteAddr, requestURI, targetURL, err)
			}
			return false, false
		}
		if !rtbOK || !rtb.canRetry() {
		if !canRetry {
			// Request body cannot be re-sent to another backend. Return the error to the client then.
			err = &httpserver.ErrorWithStatusCode{
				Err: fmt.Errorf("cannot proxy the request to %s: %w", targetURL, err),
@@ -368,27 +494,32 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
			httpserver.Errorf(w, r, "%s", err)
			ui.backendErrors.Inc()
			ui.requestErrors.Inc()
			bu.setBroken()
			return true, false
		}
		if netutil.IsTrivialNetworkError(err) {
			// Retry request at the same backend on trivial network errors, such as proxy idle timeout misconfiguration or socket close by OS
			if bbOK {
				bb.resetReader()
			}
			return false, true
		}

		// Request body wasn't read yet, this usually means that the backend isn't reachable; retry the request at another backend
		// Retry the request at another backend
		remoteAddr := httpserver.GetQuotedRemoteAddr(r)
		// NOTE: do not use httpserver.GetRequestURI
		// it explicitly reads request body, which may fail retries.
		logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed: %s, retrying the request at another backend", remoteAddr, req.URL, targetURL, err)
		requestURI := httpserver.GetRequestURI(r)
		logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed: %s, retrying the request at another backend", remoteAddr, requestURI, targetURL, err)
		if bbOK {
			bb.resetReader()
		}
		return false, false
	}
	if slices.Contains(retryStatusCodes, res.StatusCode) {
		_ = res.Body.Close()
		if !rtbOK || !rtb.canRetry() {
		if !canRetry {
			// If we get an error from the retry_status_codes list, but cannot execute retry,
			// we consider such a request an error as well.
			err := &httpserver.ErrorWithStatusCode{
				Err: fmt.Errorf("got response status code=%d from %s, but cannot retry the request at another backend, because the request has been already consumed",
				Err: fmt.Errorf("got response status code=%d from %s, but cannot retry the request at another backend, because the request body has been already consumed",
					res.StatusCode, targetURL),
				StatusCode: http.StatusServiceUnavailable,
			}
@@ -397,13 +528,16 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
			ui.requestErrors.Inc()
			return true, false
		}

		// Retry requests at other backends if it matches retryStatusCodes.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4893
		remoteAddr := httpserver.GetQuotedRemoteAddr(r)
		// NOTE: do not use httpserver.GetRequestURI
		// it explicitly reads request body, which may fail retries.
		requestURI := httpserver.GetRequestURI(r)
		logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed, retrying the request at another backend because response status code=%d belongs to retry_status_codes=%d",
			remoteAddr, req.URL, targetURL, res.StatusCode, retryStatusCodes)
			remoteAddr, requestURI, targetURL, res.StatusCode, retryStatusCodes)
		if bbOK {
			bb.resetReader()
		}
		return false, false
	}
	removeHopHeaders(res.Header)
@@ -413,10 +547,16 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url

	err = copyStreamToClient(w, res.Body)
	_ = res.Body.Close()
	if err != nil && !netutil.IsTrivialNetworkError(err) && !errors.Is(err, context.Canceled) {

	if errors.Is(r.Context().Err(), context.Canceled) {
		// Do not retry canceled requests.
		clientCanceledRequests.Inc()
		return true, false
	}

	if err != nil && !netutil.IsTrivialNetworkError(err) {
		remoteAddr := httpserver.GetQuotedRemoteAddr(r)
		requestURI := httpserver.GetRequestURI(r)

		logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying response body from %s: %s", remoteAddr, requestURI, targetURL, err)
		ui.requestErrors.Inc()
		return true, false
@@ -546,6 +686,10 @@ var (
	configReloadRequests     = metrics.NewCounter(`vmauth_http_requests_total{path="/-/reload"}`)
	invalidAuthTokenRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="invalid_auth_token"}`)
	missingRouteRequests     = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
	clientCanceledRequests   = metrics.NewCounter(`vmauth_http_request_errors_total{reason="client_canceled"}`)
	rejectSlowClientRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="reject_slow_client"}`)

	bufferRequestBodyDuration = metrics.NewSummary(`vmauth_buffer_request_body_duration_seconds`)
)

func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, insecureSkipVerifyP *bool) (http.RoundTripper, error) {
@@ -629,10 +773,10 @@ func handleMissingAuthorizationError(w http.ResponseWriter) {
}

func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err error) {
	ctx := r.Context()
	if errors.Is(ctx.Err(), context.Canceled) {
	if errors.Is(r.Context().Err(), context.Canceled) {
		// Do not return any response for the request canceled by the client,
		// since the connection to the client is already closed.
		clientCanceledRequests.Inc()
		return
	}

@@ -644,123 +788,78 @@
	httpserver.Errorf(w, r, "%s", err)
}

// readTrackingBody must be obtained via getReadTrackingBody()
type readTrackingBody struct {
	// maxBodySize is the maximum body size to cache in buf.
// bufferedBody serves two purposes:
// 1. Enables request retries when the body size does not exceed maxBodySize
// by fully buffering the body in memory.
// 2. Prevents slow clients from reducing effective server capacity by
// buffering the request body before acquiring a per-user concurrency slot.
//
// See bufferRequestBody for details on how bufferedBody is used.
type bufferedBody struct {
	// r contains reader for reading the data after buf is read.
	//
	// Bigger bodies cannot be retried.
	maxBodySize int

	// r contains reader for initial data reading
	// r is nil if buf contains all the data.
	r io.ReadCloser

	// buf is a buffer for data read from r. Buf size is limited by maxBodySize.
	// If more than maxBodySize is read from r, then cannotRetry is set to true.
	// buf contains the initial buffer read from r.
	buf []byte

	// readBuf points to the cached data at buf, which must be read in the next call to Read().
	readBuf []byte
	// bufOffset is the offset at buf for already read bytes.
	bufOffset int

	// cannotRetry is set to true when more than maxBodySize bytes are read from r.
	// In this case the read data cannot fit buf, so it cannot be re-read from buf.
	// cannotRetry is set to true after Close() call on non-nil r.
	cannotRetry bool

	// bufComplete is set to true when buf contains complete request body read from r.
	bufComplete bool
}

func newReadTrackingBody(r io.ReadCloser, maxBodySize int) *readTrackingBody {
	// do not use sync.Pool there
	// since http.RoundTrip may still use request body after return
	// See this issue for details https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051
	rtb := &readTrackingBody{}
	if maxBodySize < 0 {
		maxBodySize = 0
func newBufferedBody(r io.ReadCloser, buf []byte, maxBufSize int) *bufferedBody {
	// Do not use sync.Pool here, since http.RoundTrip may still use request body after return.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051

	if len(buf) < maxBufSize {
		// Read the full request body into buf.
		r = nil
	}
	rtb.maxBodySize = maxBodySize

	if r == nil {
		// This is GET request without request body
		r = (*zeroReader)(nil)
	return &bufferedBody{
		r:   r,
		buf: buf,
	}
	rtb.r = r
	return rtb
}

type zeroReader struct{}

func (r *zeroReader) Read(_ []byte) (int, error) {
	return 0, io.EOF
}

func (r *zeroReader) Close() error {
	return nil
}

// Read implements io.Reader interface.
func (rtb *readTrackingBody) Read(p []byte) (int, error) {
	if len(rtb.readBuf) > 0 {
		n := copy(p, rtb.readBuf)
		rtb.readBuf = rtb.readBuf[n:]
func (bb *bufferedBody) Read(p []byte) (int, error) {
	if bb.cannotRetry {
		return 0, fmt.Errorf("cannot read already closed body")
	}
	if bb.bufOffset < len(bb.buf) {
		n := copy(p, bb.buf[bb.bufOffset:])
		bb.bufOffset += n
		return n, nil
	}

	if rtb.r == nil {
		if rtb.bufComplete {
			return 0, io.EOF
		}
		return 0, fmt.Errorf("cannot read client request body after closing client reader")
	if bb.r == nil {
		return 0, io.EOF
	}

	n, err := rtb.r.Read(p)
	if rtb.cannotRetry {
		return n, err
	}

	if len(rtb.buf)+n > rtb.maxBodySize {
		rtb.cannotRetry = true
		return n, err
	}
	rtb.buf = append(rtb.buf, p[:n]...)
	if err == io.EOF {
		rtb.bufComplete = true
	}
	return n, err
	return bb.r.Read(p)
}

func (rtb *readTrackingBody) canRetry() bool {
	if rtb.cannotRetry {
		return false
	}
	if rtb.bufComplete {
		return true
	}
	return rtb.r != nil
func (bb *bufferedBody) canRetry() bool {
	return bb.r == nil
}

// Close implements io.Closer interface.
func (rtb *readTrackingBody) Close() error {
	if !rtb.cannotRetry {
		rtb.readBuf = rtb.buf
	} else {
		rtb.readBuf = nil
func (bb *bufferedBody) Close() error {
	bb.resetReader()
	if bb.r != nil {
		bb.cannotRetry = true
		return bb.r.Close()
	}

	// Close rtb.r only if the request body is completely read or if it is too big.
	// http.Roundtrip performs body.Close call even without any Read calls,
	// so this hack allows us to reuse request body.
	if rtb.bufComplete || rtb.cannotRetry {
		if rtb.r == nil {
			return nil
		}
		err := rtb.r.Close()
		rtb.r = nil
		return err
	}

	return nil
}

func (bb *bufferedBody) resetReader() {
	bb.bufOffset = 0
}

func debugInfo(u *url.URL, r *http.Request) string {
	s := &strings.Builder{}
	fmt.Fprintf(s, " (host: %q; ", r.Host)

File diff suppressed because it is too large
@@ -174,7 +174,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		},
		RetryStatusCodes:       []int{503, 501},
		LoadBalancingPolicy:    "first_available",
		DropSrcPathPrefixParts: intp(2),
		DropSrcPathPrefixParts: new(2),
	}, "/a/b/c", "http://foo.bar/c", `bb: aaa`, `x: y`, []int{503, 501}, "first_available", 2)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar/federate"),
@@ -219,13 +219,13 @@
			},
			RetryStatusCodes:       []int{503, 500, 501},
			LoadBalancingPolicy:    "first_available",
			DropSrcPathPrefixParts: intp(1),
			DropSrcPathPrefixParts: new(1),
		},
		{
			SrcPaths:               getRegexs([]string{"/api/v1/write"}),
			URLPrefix:              mustParseURL("http://vminsert/0/prometheus"),
			RetryStatusCodes:       []int{},
			DropSrcPathPrefixParts: intp(0),
			DropSrcPathPrefixParts: new(0),
		},
		{
			SrcPaths: getRegexs([]string{"/metrics"}),
@@ -242,7 +242,7 @@
			},
		},
		RetryStatusCodes:       []int{502},
		DropSrcPathPrefixParts: intp(2),
		DropSrcPathPrefixParts: new(2),
	}
	f(ui, "http://host42/vmsingle/api/v1/query?query=up&db=foo", "http://vmselect/0/prometheus/api/v1/query?db=foo&query=up",
		"xx: aa\nyy: asdf", "qwe: rty", []int{503, 500, 501}, "first_available", 1)
@@ -259,7 +259,7 @@
			SrcPaths:               getRegexs([]string{"/api/v1/write"}),
			URLPrefix:              mustParseURL("http://vminsert/0/prometheus"),
			RetryStatusCodes:       []int{},
			DropSrcPathPrefixParts: intp(0),
			DropSrcPathPrefixParts: new(0),
		},
		{
			SrcPaths: getRegexs([]string{"/metrics/a/b"}),
@@ -275,7 +275,7 @@
			},
		},
		RetryStatusCodes:       []int{502},
		DropSrcPathPrefixParts: intp(2),
		DropSrcPathPrefixParts: new(2),
	}
	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", []int{}, "least_loaded", 0)
	f(ui, "https://foo-host/metrics/a/b", "http://metrics-server/b", "", "", []int{502}, "least_loaded", 2)

@@ -7,6 +7,8 @@ import (
	"math"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

@@ -45,7 +47,7 @@ func New(retries int, factor float64, minDuration time.Duration) (*Backoff, error) {
// Retry retries cb until it succeeds, a non-recoverable error occurs, or all retry attempts are exhausted
func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
	var attempt uint64
	for i := 0; i < b.retries; i++ {
	for i := range b.retries {
		err := cb()
		if err == nil {
			return attempt, nil
@@ -55,6 +57,7 @@
			return attempt, err // fail fast if not recoverable
		}
		attempt++
		retriesTotal.Inc()
		backoff := float64(b.minDuration) * math.Pow(b.factor, float64(i))
		dur := time.Duration(backoff)
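		// Illustrative numbers (hypothetical settings, derived from the formula above):
		// with minDuration=1s and factor=2 the computed delays grow as 1s, 2s, 4s, ...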
		logger.Errorf("got error: %s on attempt: %d; will retry in %v", err, attempt, dur)
@@ -74,3 +77,7 @@
	}
	return attempt, fmt.Errorf("execution failed after %d retry attempts", b.retries)
}

var (
	retriesTotal = metrics.NewCounter(`vmctl_backoff_retries_total`)
)

@@ -14,6 +14,12 @@ const (
	globalSilent             = "s"
	globalVerbose            = "verbose"
	globalDisableProgressBar = "disable-progress-bar"

	globalPushMetricsURL         = "pushmetrics.url"
	globalPushMetricsInterval    = "pushmetrics.interval"
	globalPushExtraLabels        = "pushmetrics.extraLabel"
	globalPushHeaders            = "pushmetrics.header"
	globalPushDisableCompression = "pushmetrics.disableCompression"
)

var (
@@ -33,6 +39,29 @@ var (
		Value: false,
		Usage: "Whether to disable progress bar during the import.",
	},
	&cli.StringSliceFlag{
		Name:  globalPushMetricsURL,
		Usage: "Optional URL to push metrics. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#push-metrics",
	},
	&cli.DurationFlag{
		Name:  globalPushMetricsInterval,
		Value: 10 * time.Second,
		Usage: "Interval for pushing metrics to every -pushmetrics.url",
	},
	&cli.StringSliceFlag{
		Name: globalPushExtraLabels,
		Usage: "Extra labels to add to pushed metrics. In case of collision, label value defined by flag will have priority. " +
			"Flag can be set multiple times, to add few additional labels. " +
			"For example, -pushmetrics.extraLabel='instance=\"foo\"' adds instance=\"foo\" label to all the metrics pushed to every -pushmetrics.url",
	},
	&cli.StringSliceFlag{
		Name:  globalPushHeaders,
		Usage: "Optional HTTP headers to add to pushed metrics. Flag can be set multiple times, to add few additional headers.",
	},
	&cli.BoolFlag{
		Name:  globalPushDisableCompression,
		Usage: "Whether to disable compression when pushing metrics.",
	},
}
)

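// Illustrative usage of the new global push-metrics flags (hypothetical addresses;
// flag names match the definitions above):
//
//	vmctl vm-native --vm-native-src-addr=http://src:8428 --vm-native-dst-addr=http://dst:8428 \
//	  --pushmetrics.url=http://victoriametrics:8428 --pushmetrics.extraLabel='instance="vmctl"'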
@@ -123,32 +152,32 @@ var (
|
||||
Name: vmExtraLabel,
|
||||
Value: nil,
|
||||
Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" +
|
||||
"will have priority. Flag can be set multiple times, to add few additional labels.",
|
||||
" will have priority. Flag can be set multiple times, to add few additional labels.",
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: vmRateLimit,
|
||||
Usage: "Optional data transfer rate limit in bytes per second.\n" +
|
||||
"By default, the rate limit is disabled. It can be useful for limiting load on configured via '--vmAddr' destination.",
|
||||
"By default, the rate limit is disabled. It can be useful for limiting load on configured via '--vm-addr' destination.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmCertFile,
|
||||
Usage: "Optional path to client-side TLS certificate file to use when connecting to '--vmAddr'",
|
||||
Usage: "Optional path to client-side TLS certificate file to use when connecting to '--vm-addr'",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmKeyFile,
|
||||
Usage: "Optional path to client-side TLS key to use when connecting to '--vmAddr'",
|
||||
Usage: "Optional path to client-side TLS key to use when connecting to '--vm-addr'",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmCAFile,
|
||||
Usage: "Optional path to TLS CA file to use for verifying connections to '--vmAddr'. By default, system CA is used",
|
||||
Usage: "Optional path to TLS CA file to use for verifying connections to '--vm-addr'. By default, system CA is used",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: vmServerName,
|
||||
Usage: "Optional TLS server name to use for connections to '--vmAddr'. By default, the server name from '--vmAddr' is used",
|
||||
Usage: "Optional TLS server name to use for connections to '--vm-addr'. By default, the server name from '--vm-addr' is used",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: vmInsecureSkipVerify,
|
||||
Usage: "Whether to skip tls verification when connecting to '--vmAddr'",
|
||||
Usage: "Whether to skip tls verification when connecting to '--vm-addr'",
|
||||
Value: false,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
@@ -468,7 +497,7 @@ var (
|
||||
Name: vmNativeFilterMatch,
|
||||
Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " +
|
||||
"match all series with \"instance\" label different to \"localhost\".\n" +
|
||||
" See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format",
|
||||
" See more details here https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format",
|
||||
Value: `{__name__!=""}`,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -598,7 +627,7 @@ var (
|
||||
Name: vmExtraLabel,
|
||||
Value: nil,
|
||||
Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" +
|
||||
"will have priority. Flag can be set multiple times, to add few additional labels.",
|
||||
" will have priority. Flag can be set multiple times, to add few additional labels.",
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: vmRateLimit,
|
||||
@@ -625,8 +654,8 @@ var (
|
||||
&cli.BoolFlag{
|
||||
Name: vmNativeDisableBinaryProtocol,
|
||||
Usage: "Whether to use https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-json-line-format " +
|
||||
"instead of https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format API." +
|
||||
"Binary export/import API protocol implies less network and resource usage, as it transfers compressed binary data blocks." +
|
||||
"instead of https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format API. " +
|
||||
"Binary export/import API protocol implies less network and resource usage, as it transfers compressed binary data blocks. " +
|
||||
"Non-binary export/import API is less efficient, but supports deduplication if it is configured on vm-native-src-addr side.",
|
||||
Value: false,
|
||||
},
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
	"log"
	"sync"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
@@ -52,6 +54,7 @@ func (ip *influxProcessor) run(ctx context.Context) error {
		return nil
	}

+	influxSeriesTotal.Add(len(series))
	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing series"), len(series))
	if err := barpool.Start(); err != nil {
		return err
@@ -63,18 +66,18 @@ func (ip *influxProcessor) run(ctx context.Context) error {
	ip.im.ResetStats()

	var wg sync.WaitGroup
-	wg.Add(ip.cc)
-	for i := 0; i < ip.cc; i++ {
-		go func() {
-			defer wg.Done()
+	for range ip.cc {
+		wg.Go(func() {
			for s := range seriesCh {
				if err := ip.do(s); err != nil {
+					influxErrorsTotal.Inc()
					errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err)
					return
				}
+				influxSeriesProcessed.Inc()
				bar.Increment()
			}
-		}()
+		})
	}

	// any error breaks the import
@@ -83,6 +86,7 @@ func (ip *influxProcessor) run(ctx context.Context) error {
		case infErr := <-errCh:
			return fmt.Errorf("influx error: %s", infErr)
		case vmErr := <-ip.im.Errors():
+			influxErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, ip.isVerbose))
		case seriesCh <- s:
		}
@@ -95,6 +99,7 @@ func (ip *influxProcessor) run(ctx context.Context) error {
	// drain import errors channel
	for vmErr := range ip.im.Errors() {
		if vmErr.Err != nil {
+			influxErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, ip.isVerbose))
		}
	}
@@ -169,3 +174,9 @@ func (ip *influxProcessor) do(s *influx.Series) error {
		}
	}
}
+
+var (
+	influxSeriesTotal     = metrics.NewCounter(`vmctl_influx_migration_series_total`)
+	influxSeriesProcessed = metrics.NewCounter(`vmctl_influx_migration_series_processed`)
+	influxErrorsTotal     = metrics.NewCounter(`vmctl_influx_migration_errors_total`)
+)

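Several hunks in this compare collapse the wg.Add / go func() { defer wg.Done() ... }() boilerplate into sync.WaitGroup.Go, which was added in Go 1.25. A minimal sketch of the same fan-out pattern, with illustrative channel and worker names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int)
	var wg sync.WaitGroup
	// wg.Go spawns the goroutine and manages the counter itself,
	// replacing the explicit wg.Add(1)/defer wg.Done() pair.
	for range 4 {
		wg.Go(func() {
			for j := range jobs {
				fmt.Println("processed job", j)
			}
		})
	}
	for j := range 10 {
		jobs <- j
	}
	close(jobs) // workers exit once the channel drains
	wg.Wait()
}
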
@@ -4,6 +4,8 @@ import (
	"sync"
	"time"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
)

@@ -45,9 +47,16 @@ func (l *Limiter) Register(dataLen int) {
			t := timerpool.Get(d)
			<-t.C
			timerpool.Put(t)
+			limiterThrottleEventsTotal.Inc()
		}
		l.budget += limit
		l.deadline = time.Now().Add(time.Second)
	}
	l.budget -= int64(dataLen)
+	limiterBytesProcessed.Add(dataLen)
}
+
+var (
+	limiterBytesProcessed      = metrics.NewCounter(`vmctl_limiter_bytes_processed_total`)
+	limiterThrottleEventsTotal = metrics.NewCounter(`vmctl_limiter_throttle_events_total`)
+)

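The instrumented Limiter above works on a per-second byte budget: when the budget runs out, the caller sleeps until the current one-second window ends, then the budget refills. A self-contained sketch of that idea — field and function names here are illustrative, not vmctl's actual implementation:

package main

import (
	"fmt"
	"time"
)

type limiter struct {
	limit    int64     // bytes allowed per second
	budget   int64     // bytes remaining in the current window
	deadline time.Time // when the current window ends
}

func (l *limiter) register(n int) {
	if l.budget <= 0 {
		// Budget exhausted: wait out the rest of the window...
		if d := time.Until(l.deadline); d > 0 {
			time.Sleep(d)
		}
		// ...then refill for the next window.
		l.budget += l.limit
		l.deadline = time.Now().Add(time.Second)
	}
	l.budget -= int64(n)
}

func main() {
	l := &limiter{limit: 1024, budget: 1024, deadline: time.Now().Add(time.Second)}
	start := time.Now()
	for range 4 {
		l.register(512) // 2 KiB at 1 KiB/s needs roughly one refill window
	}
	fmt.Println("elapsed:", time.Since(start).Round(100*time.Millisecond))
}
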
@@ -2,6 +2,7 @@ package main

import (
	"context"
+	"flag"
	"fmt"
	"log"
	"net/http"
@@ -19,7 +20,9 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
@@ -41,11 +44,20 @@ func main() {
	ctx, cancelCtx := context.WithCancel(context.Background())
	start := time.Now()
	beforeFn := func(c *cli.Context) error {
+		flag.Parse()
+		logger.Init()
		isSilent = c.Bool(globalSilent)
		if c.Bool(globalDisableProgressBar) {
			barpool.Disable(true)
		}
		netutil.EnableIPv6()
+		pushmetrics.InitWith(&pushmetrics.Config{
+			URLs:               c.StringSlice(globalPushMetricsURL),
+			Interval:           c.Duration(globalPushMetricsInterval),
+			ExtraLabels:        c.StringSlice(globalPushExtraLabels),
+			DisableCompression: c.Bool(globalPushDisableCompression),
+			Headers:            c.StringSlice(globalPushHeaders),
+		})
		return nil
	}
	app := &cli.App{
@@ -451,6 +463,7 @@ func main() {
		log.Fatalln(err)
	}
	log.Printf("Total time: %v", time.Since(start))
+	pushmetrics.StopAndPush()
}

func initConfigVM(c *cli.Context) (vm.Config, error) {

@@ -8,6 +8,8 @@ import (
	"net/http"
	"time"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
)

@@ -36,12 +38,15 @@ type Response struct {

// Explore finds metric names by provided filter from api/v1/label/__name__/values
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string, start, end time.Time) ([]string, error) {
+	startTime := time.Now()
+	exploreRequestsTotal.Inc()
	url := fmt.Sprintf("%s/%s", c.Addr, nativeMetricNamesAddr)
	if tenantID != "" {
		url = fmt.Sprintf("%s/select/%s/prometheus/%s", c.Addr, tenantID, nativeMetricNamesAddr)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
+		exploreRequestsErrorsTotal.Inc()
		return nil, fmt.Errorf("cannot create request to %q: %s", url, err)
	}

@@ -53,37 +58,53 @@ func (c *Client) Explore(ctx context.Context, f Filter, tenantID string, start,

	resp, err := c.do(req, http.StatusOK)
	if err != nil {
+		exploreRequestsErrorsTotal.Inc()
+		exploreDuration.UpdateDuration(startTime)
		return nil, fmt.Errorf("series request failed: %s", err)
	}

	var response Response
	if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
+		exploreRequestsErrorsTotal.Inc()
+		exploreDuration.UpdateDuration(startTime)
		return nil, fmt.Errorf("cannot decode series response: %s", err)
	}
+	exploreDuration.UpdateDuration(startTime)
	return response.MetricNames, resp.Body.Close()
}

// ImportPipe uses pipe reader in request to process data
func (c *Client) ImportPipe(ctx context.Context, dstURL string, pr *io.PipeReader) error {
+	startTime := time.Now()
+	importRequestsTotal.Inc()
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, dstURL, pr)
	if err != nil {
+		importRequestsErrorsTotal.Inc()
		return fmt.Errorf("cannot create import request to %q: %s", c.Addr, err)
	}

	importResp, err := c.do(req, http.StatusNoContent)
	if err != nil {
+		importRequestsErrorsTotal.Inc()
+		importDuration.UpdateDuration(startTime)
		return fmt.Errorf("import request failed: %s", err)
	}
	if err := importResp.Body.Close(); err != nil {
+		importRequestsErrorsTotal.Inc()
+		importDuration.UpdateDuration(startTime)
		return fmt.Errorf("cannot close import response body: %s", err)
	}
+	importDuration.UpdateDuration(startTime)
	return nil
}

// ExportPipe makes request by provided filter and return io.ReadCloser which can be used to get data
func (c *Client) ExportPipe(ctx context.Context, url string, f Filter) (io.ReadCloser, error) {
+	startTime := time.Now()
+	exportRequestsTotal.Inc()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
+		exportRequestsErrorsTotal.Inc()
		return nil, fmt.Errorf("cannot create request to %q: %s", c.Addr, err)
	}

@@ -102,8 +123,11 @@ func (c *Client) ExportPipe(ctx context.Context, url string, f Filter) (io.ReadC

	resp, err := c.do(req, http.StatusOK)
	if err != nil {
+		exportRequestsErrorsTotal.Inc()
+		exportDuration.UpdateDuration(startTime)
		return nil, fmt.Errorf("export request failed: %w", err)
	}
+	exportDuration.UpdateDuration(startTime)
	return resp.Body, nil
}

@@ -162,3 +186,16 @@ func (c *Client) do(req *http.Request, expSC int) (*http.Response, error) {
	}
	return resp, err
}
+
+var (
+	importRequestsTotal  = metrics.NewCounter(`vmctl_vm_native_requests_total{type="import"}`)
+	exportRequestsTotal  = metrics.NewCounter(`vmctl_vm_native_requests_total{type="export"}`)
+	exploreRequestsTotal = metrics.NewCounter(`vmctl_vm_native_requests_total{type="explore"}`)
+	importRequestsErrorsTotal  = metrics.NewCounter(`vmctl_vm_native_request_errors_total{type="import"}`)
+	exportRequestsErrorsTotal  = metrics.NewCounter(`vmctl_vm_native_request_errors_total{type="export"}`)
+	exploreRequestsErrorsTotal = metrics.NewCounter(`vmctl_vm_native_request_errors_total{type="explore"}`)
+
+	importDuration  = metrics.NewHistogram(`vmctl_vm_native_import_duration_seconds`)
+	exportDuration  = metrics.NewHistogram(`vmctl_vm_native_export_duration_seconds`)
+	exploreDuration = metrics.NewHistogram(`vmctl_vm_native_explore_duration_seconds`)
+)

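The instrumentation pattern in this client — count the attempt up front, count errors on every exit path, record the duration either way — is easy to reuse with github.com/VictoriaMetrics/metrics. A hedged, self-contained sketch (the metric names and URL are made up for the example):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

var (
	pingRequestsTotal = metrics.NewCounter(`example_ping_requests_total`)
	pingErrorsTotal   = metrics.NewCounter(`example_ping_request_errors_total`)
	pingDuration      = metrics.NewHistogram(`example_ping_duration_seconds`)
)

func ping(url string) error {
	startTime := time.Now()
	pingRequestsTotal.Inc()
	// Deferring UpdateDuration covers both exit paths; the diff above instead
	// updates the histogram explicitly on each path, which is equivalent here.
	defer pingDuration.UpdateDuration(startTime)

	resp, err := http.Get(url)
	if err != nil {
		pingErrorsTotal.Inc()
		return fmt.Errorf("request to %q failed: %w", url, err)
	}
	return resp.Body.Close()
}

func main() {
	_ = ping("http://localhost:8428/health") // hypothetical address
}
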
@@ -7,6 +7,8 @@ import (
	"sync"
	"time"

+	vmetrics "github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/cheggaaa/pb/v3"
@@ -57,6 +59,7 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
	if !prompt(ctx, question) {
		return nil
	}

	op.im.ResetStats()
	var startTime int64
	if op.oc.HardTS != 0 {
@@ -84,23 +87,24 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
	seriesCh := make(chan queryObj, op.otsdbcc)
	errCh := make(chan error)
	// we're going to make serieslist * queryRanges queries, so we should represent that in the progress bar
+	otsdbSeriesTotal.Add(len(serieslist) * queryRanges)
	bar := pb.StartNew(len(serieslist) * queryRanges)
	defer func(bar *pb.ProgressBar) {
		bar.Finish()
	}(bar)
	var wg sync.WaitGroup
-	wg.Add(op.otsdbcc)
-	for i := 0; i < op.otsdbcc; i++ {
-		go func() {
-			defer wg.Done()
+	for range op.otsdbcc {
+		wg.Go(func() {
			for s := range seriesCh {
				if err := op.do(s); err != nil {
+					otsdbErrorsTotal.Inc()
					errCh <- fmt.Errorf("couldn't retrieve series for %s : %s", metric, err)
					return
				}
+				otsdbSeriesProcessed.Inc()
				bar.Increment()
			}
-		}()
+		})
	}
	/*
		Loop through all series for this metric, processing all retentions and time ranges
@@ -117,6 +121,7 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
		case otsdbErr := <-errCh:
			return fmt.Errorf("opentsdb error: %s", otsdbErr)
		case vmErr := <-op.im.Errors():
+			otsdbErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
		case seriesCh <- queryObj{
			Tr: tr, StartTime: startTime,
@@ -141,6 +146,7 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
	op.im.Close()
	for vmErr := range op.im.Errors() {
		if vmErr.Err != nil {
+			otsdbErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
		}
	}
@@ -171,3 +177,9 @@ func (op *otsdbProcessor) do(s queryObj) error {
	}
	return op.im.Input(&ts)
}
+
+var (
+	otsdbSeriesTotal     = vmetrics.NewCounter(`vmctl_opentsdb_migration_series_total`)
+	otsdbSeriesProcessed = vmetrics.NewCounter(`vmctl_opentsdb_migration_series_processed`)
+	otsdbErrorsTotal     = vmetrics.NewCounter(`vmctl_opentsdb_migration_errors_total`)
+)

@@ -109,7 +109,7 @@ func (c Client) FindMetrics(q string) ([]string, error) {
		return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
	}
	if resp.StatusCode != 200 {
-		return nil, fmt.Errorf("bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
+		return nil, fmt.Errorf("bad return from OpenTSDB: %d: %v", resp.StatusCode, resp)
	}
	defer func() { _ = resp.Body.Close() }()
	body, err := io.ReadAll(resp.Body)
@@ -133,7 +133,7 @@ func (c Client) FindSeries(metric string) ([]Meta, error) {
		return nil, fmt.Errorf("failed to set GET request to %q: %s", q, err)
	}
	if resp.StatusCode != 200 {
-		return nil, fmt.Errorf("bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
+		return nil, fmt.Errorf("bad return from OpenTSDB: %d: %v", resp.StatusCode, resp)
	}
	defer func() { _ = resp.Body.Close() }()
	body, err := io.ReadAll(resp.Body)

@@ -4,11 +4,15 @@ import (
	"context"
	"fmt"
	"log"
+	"strings"
	"sync"

+	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
@@ -61,19 +65,19 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
	var it chunkenc.Iterator
	for ss.Next() {
		var name string
-		var labels []vm.LabelPair
+		var labelPairs []vm.LabelPair
		series := ss.At()

-		for _, label := range series.Labels() {
+		series.Labels().Range(func(label labels.Label) {
			if label.Name == "__name__" {
				name = label.Value
-				continue
+				return
			}
-			labels = append(labels, vm.LabelPair{
-				Name:  label.Name,
-				Value: label.Value,
+			labelPairs = append(labelPairs, vm.LabelPair{
+				Name:  strings.Clone(label.Name),
+				Value: strings.Clone(label.Value),
			})
-		}
+		})
		if name == "" {
			return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID)
		}
@@ -99,7 +103,7 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
		}
		ts := vm.TimeSeries{
			Name:       name,
-			LabelPairs: labels,
+			LabelPairs: labelPairs,
			Timestamps: timestamps,
			Values:     values,
		}
@@ -111,6 +115,7 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
}

func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
+	promBlocksTotal.Add(len(blocks))
	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), len(blocks))
	if err := barpool.Start(); err != nil {
		return err
@@ -122,18 +127,18 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
	pp.im.ResetStats()

	var wg sync.WaitGroup
-	wg.Add(pp.cc)
-	for i := 0; i < pp.cc; i++ {
-		go func() {
-			defer wg.Done()
+	for range pp.cc {
+		wg.Go(func() {
			for br := range blockReadersCh {
				if err := pp.do(br); err != nil {
+					promErrorsTotal.Inc()
					errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
					return
				}
+				promBlocksProcessed.Inc()
				bar.Increment()
			}
-		}()
+		})
	}
	// any error breaks the import
	for _, br := range blocks {
@@ -143,6 +148,7 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
			return fmt.Errorf("prometheus error: %s", promErr)
		case vmErr := <-pp.im.Errors():
			close(blockReadersCh)
+			promErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
		case blockReadersCh <- br:
		}
@@ -156,6 +162,7 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
	// drain import errors channel
	for vmErr := range pp.im.Errors() {
		if vmErr.Err != nil {
+			promErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
		}
	}
@@ -165,3 +172,9 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {

	return nil
}
+
+var (
+	promBlocksTotal     = metrics.NewCounter(`vmctl_prometheus_migration_blocks_total`)
+	promBlocksProcessed = metrics.NewCounter(`vmctl_prometheus_migration_blocks_processed`)
+	promErrorsTotal     = metrics.NewCounter(`vmctl_prometheus_migration_errors_total`)
+)

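The switch to series.Labels().Range with strings.Clone above suggests the label strings handed out by the TSDB reader share backing memory that should not be retained past the iteration; cloning detaches only the bytes actually kept. This reasoning is inferred from the hunk, not stated in it. A small illustration of the retention problem strings.Clone solves:

package main

import (
	"fmt"
	"strings"
)

func main() {
	big := strings.Repeat("x", 1<<20) + ":v"
	// A substring shares the backing array, so keeping `leaky` alive keeps
	// the whole ~1 MiB string alive; strings.Clone copies just two bytes.
	leaky := big[len(big)-2:]
	detached := strings.Clone(leaky)
	fmt.Println(len(leaky), len(detached)) // 2 2
}
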
@@ -7,6 +7,8 @@ import (
	"sync"
	"time"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
@@ -51,6 +53,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
		return nil
	}

+	remoteReadRangesTotal.Add(len(ranges))
	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing ranges"), len(ranges))
	if err := barpool.Start(); err != nil {
		return err
@@ -66,18 +69,18 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
	errCh := make(chan error)

	var wg sync.WaitGroup
-	wg.Add(rrp.cc)
-	for i := 0; i < rrp.cc; i++ {
-		go func() {
-			defer wg.Done()
+	for range rrp.cc {
+		wg.Go(func() {
			for r := range rangeC {
				if err := rrp.do(ctx, r); err != nil {
+					remoteReadErrorsTotal.Inc()
					errCh <- fmt.Errorf("request failed for: %s", err)
					return
				}
+				remoteReadRangesProcessed.Inc()
				bar.Increment()
			}
-		}()
+		})
	}

	for _, r := range ranges {
@@ -85,6 +88,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
		case infErr := <-errCh:
			return fmt.Errorf("remote read error: %s", infErr)
		case vmErr := <-rrp.dst.Errors():
+			remoteReadErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, rrp.isVerbose))
		case rangeC <- &remoteread.Filter{
			StartTimestampMs: r[0].UnixMilli(),
@@ -100,6 +104,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
	// drain import errors channel
	for vmErr := range rrp.dst.Errors() {
		if vmErr.Err != nil {
+			remoteReadErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, rrp.isVerbose))
		}
	}
@@ -120,3 +125,9 @@ func (rrp *remoteReadProcessor) do(ctx context.Context, filter *remoteread.Filte
		return nil
	})
}
+
+var (
+	remoteReadRangesTotal     = metrics.NewCounter(`vmctl_remote_read_migration_ranges_total`)
+	remoteReadRangesProcessed = metrics.NewCounter(`vmctl_remote_read_migration_ranges_processed`)
+	remoteReadErrorsTotal     = metrics.NewCounter(`vmctl_remote_read_migration_errors_total`)
+)

@@ -76,11 +76,11 @@ func (ts *TimeSeries) write(w io.Writer) (int, error) {

	pointsCount := len(timestampsBatch)
	cw.printf(`},"timestamps":[`)
-	for i := 0; i < pointsCount-1; i++ {
+	for i := range pointsCount - 1 {
		cw.printf(`%d,`, timestampsBatch[i])
	}
	cw.printf(`%d],"values":[`, timestampsBatch[pointsCount-1])
-	for i := 0; i < pointsCount-1; i++ {
+	for i := range pointsCount - 1 {
		cw.printf(`%v,`, valuesBatch[i])
	}
	cw.printf("%v]}\n", valuesBatch[pointsCount-1])

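Most of the loop rewrites in this compare rely on Go 1.22's integer range form, which iterates from 0 up to n-1. A minimal sketch of the equivalence:

package main

import "fmt"

func main() {
	n := 4
	// Go 1.22+: range over an int yields i = 0 .. n-1,
	// equivalent to the classic for i := 0; i < n; i++ loop.
	for i := range n {
		fmt.Println(i)
	}
	// range without a variable is the idiomatic "repeat n times" loop.
	for range n {
		fmt.Println("tick")
	}
}
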
@@ -12,6 +12,8 @@ import (
	"sync"
	"time"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
@@ -80,6 +82,12 @@ type Importer struct {

	s       *stats
	backoff *backoff.Backoff
+
+	importRequestsTotal       *metrics.Counter
+	importRequestsErrorsTotal *metrics.Counter
+	importSamplesTotal        *metrics.Counter
+	importBytesTotal          *metrics.Counter
+	importDuration            *metrics.Histogram
}

// ResetStats resets im stats.
@@ -147,6 +155,12 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
		input:   make(chan *TimeSeries, cfg.Concurrency*4),
		errors:  make(chan *ImportError, cfg.Concurrency),
		backoff: cfg.Backoff,
+
+		importRequestsTotal:       metrics.GetOrCreateCounter(`vmctl_importer_requests_total`),
+		importRequestsErrorsTotal: metrics.GetOrCreateCounter(`vmctl_importer_request_errors_total`),
+		importSamplesTotal:        metrics.GetOrCreateCounter(`vmctl_importer_samples_total`),
+		importBytesTotal:          metrics.GetOrCreateCounter(`vmctl_importer_bytes_total`),
+		importDuration:            metrics.GetOrCreateHistogram(`vmctl_importer_request_duration_seconds`),
	}
	if err := im.Ping(); err != nil {
		return nil, fmt.Errorf("ping to %q failed: %s", addr, err)
@@ -156,15 +170,13 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
		cfg.BatchSize = 1e5
	}

-	im.wg.Add(int(cfg.Concurrency))
-	for i := 0; i < int(cfg.Concurrency); i++ {
+	for i := range int(cfg.Concurrency) {
		pbPrefix := fmt.Sprintf(`{{ green "VM worker %d:" }}`, i)
		bar := barpool.AddWithTemplate(pbPrefix+pbTpl, 0)

-		go func(bar barpool.Bar) {
-			defer im.wg.Done()
+		im.wg.Go(func() {
			im.startWorker(ctx, bar, cfg.BatchSize, cfg.SignificantFigures, cfg.RoundDigits)
-		}(bar)
+		})
	}
	im.ResetStats()
	return im, nil
@@ -313,9 +325,13 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
		return nil
	}

+	startTime := time.Now()
+	im.importRequestsTotal.Inc()
+
	pr, pw := io.Pipe()
	req, err := http.NewRequest(http.MethodPost, im.importPath, pr)
	if err != nil {
+		im.importRequestsErrorsTotal.Inc()
		return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
	}
	if im.user != "" {
@@ -335,6 +351,7 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
	if im.compress {
		zw, err := gzip.NewWriterLevel(w, 1)
		if err != nil {
+			im.importRequestsErrorsTotal.Inc()
			return fmt.Errorf("unexpected error when creating gzip writer: %s", err)
		}
		w = zw
@@ -346,29 +363,39 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
	for _, ts := range tsBatch {
		n, err := ts.write(bw)
		if err != nil {
+			im.importRequestsErrorsTotal.Inc()
			return fmt.Errorf("write err: %w", err)
		}
		totalBytes += n
		totalSamples += len(ts.Values)
	}
	if err := bw.Flush(); err != nil {
+		im.importRequestsErrorsTotal.Inc()
		return err
	}
	if closer, ok := w.(io.Closer); ok {
		err := closer.Close()
		if err != nil {
+			im.importRequestsErrorsTotal.Inc()
			return err
		}
	}
	if err := pw.Close(); err != nil {
+		im.importRequestsErrorsTotal.Inc()
		return err
	}

	requestErr := <-errCh
	if requestErr != nil {
+		im.importRequestsErrorsTotal.Inc()
+		im.importDuration.UpdateDuration(startTime)
		return fmt.Errorf("import request error for %q: %w", im.addr, requestErr)
	}

+	im.importSamplesTotal.Add(totalSamples)
+	im.importBytesTotal.Add(totalBytes)
+	im.importDuration.UpdateDuration(startTime)
+
	im.s.Lock()
	im.s.bytes += uint64(totalBytes)
	im.s.samples += uint64(totalSamples)

@@ -9,6 +9,8 @@ import (
	"sync"
	"time"

+	"github.com/VictoriaMetrics/metrics"
+
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
@@ -82,13 +84,19 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
		if !prompt(ctx, question) {
			return nil
		}
+		migrationTenantsTotal.Set(uint64(len(tenants)))
	}

	for _, tenantID := range tenants {
		err := p.runBackfilling(ctx, tenantID, ranges)
		if err != nil {
+			migrationErrorsTotal.Inc()
			return fmt.Errorf("migration failed: %s", err)
		}
+
+		if p.interCluster {
+			migrationTenantsProcessed.Inc()
+		}
	}

	log.Println("Import finished!")
@@ -156,6 +164,7 @@ func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcU
	p.s.bytes += uint64(written)
	p.s.requests++
	p.s.Unlock()
+	migrationBytesTransferredTotal.AddInt64(written)

	if err := pw.Close(); err != nil {
		return err
@@ -199,7 +208,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,

	var foundSeriesMsg string
	var requestsToMake int
-	var metrics = map[string][][]time.Time{
+	var metricsMap = map[string][][]time.Time{
		"": ranges,
	}

@@ -211,11 +220,11 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,

	if !p.disablePerMetricRequests {
		format = fmt.Sprintf(nativeWithBackoffTpl, barPrefix)
-		metrics, err = p.explore(ctx, p.src, tenantID, ranges)
+		metricsMap, err = p.explore(ctx, p.src, tenantID, ranges)
		if err != nil {
			return fmt.Errorf("failed to explore metric names: %s", err)
		}
-		if len(metrics) == 0 {
+		if len(metricsMap) == 0 {
			errMsg := "no metrics found"
			if tenantID != "" {
				errMsg = fmt.Sprintf("%s for tenant id: %s", errMsg, tenantID)
@@ -223,10 +232,14 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
			log.Println(errMsg)
			return nil
		}
-		for _, m := range metrics {
+		for _, m := range metricsMap {
			requestsToMake += len(m)
		}
-		foundSeriesMsg = fmt.Sprintf("Found %d unique metric names to import. Total import/export requests to make %d", len(metrics), requestsToMake)
+		foundSeriesMsg = fmt.Sprintf("Found %d unique metric names to import. Total import/export requests to make %d", len(metricsMap), requestsToMake)
+
+		migrationMetricsTotal.Add(len(metricsMap))
	} else {
		requestsToMake = len(ranges)
	}

	if !p.interCluster {
@@ -240,6 +253,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
		log.Print(foundSeriesMsg)
	}

+	migrationRequestsPlanned.Add(requestsToMake)
	bar := barpool.NewSingleProgress(format, requestsToMake)
	bar.Start()
	defer bar.Finish()
@@ -248,10 +262,8 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
	errCh := make(chan error, p.cc)

	var wg sync.WaitGroup
-	for i := 0; i < p.cc; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+	for range p.cc {
+		wg.Go(func() {
			for f := range filterCh {
				if !p.disablePerMetricRequests {
					if err := p.do(ctx, f, srcURL, dstURL, nil); err != nil {
@@ -265,12 +277,13 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
						return
					}
				}
+				migrationRequestsCompleted.Inc()
			}
-		}()
+		})
	}

	// any error breaks the import
-	for mName, mRanges := range metrics {
+	for mName, mRanges := range metricsMap {
		match, err := buildMatchWithFilter(p.filter.Match, mName)
		if err != nil {
			logger.Errorf("failed to build filter %q for metric name %q: %s", p.filter.Match, mName, err)
@@ -290,6 +303,9 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
				}:
			}
		}
+		if !p.disablePerMetricRequests {
+			migrationMetricsProcessed.Inc()
+		}
	}

	close(filterCh)
@@ -398,3 +414,18 @@ func buildMatchWithFilter(filter string, metricName string) (string, error) {
	match := "{" + strings.Join(filters, " or ") + "}"
	return match, nil
}
+
+var (
+	migrationMetricsTotal     = metrics.NewCounter(`vmctl_vm_native_migration_metrics_total`)
+	migrationMetricsProcessed = metrics.NewCounter(`vmctl_vm_native_migration_metrics_processed`)
+
+	migrationRequestsPlanned   = metrics.NewCounter(`vmctl_vm_native_migration_requests_planned`)
+	migrationRequestsCompleted = metrics.NewCounter(`vmctl_vm_native_migration_requests_completed`)
+
+	migrationErrorsTotal = metrics.NewCounter(`vmctl_vm_native_migration_errors_total`)
+
+	migrationTenantsTotal     = metrics.NewCounter(`vmctl_vm_native_migration_tenants_total`)
+	migrationTenantsProcessed = metrics.NewCounter(`vmctl_vm_native_migration_tenants_processed`)
+
+	migrationBytesTransferredTotal = metrics.NewCounter(`vmctl_vm_native_migration_bytes_transferred_total`)
+)

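The metrics-to-metricsMap rename above is not cosmetic: once github.com/VictoriaMetrics/metrics is imported into this file, a local variable named metrics would shadow the package for the rest of the scope. A minimal illustration of the shadowing problem (metric and variable names are made up):

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metrics"
)

var requests = metrics.NewCounter(`example_requests_total`)

func main() {
	// A local variable named `metrics` would shadow the import:
	//   var metrics = map[string]int{} // shadows the package
	//   metrics.NewCounter(...)        // compile error: NewCounter undefined
	metricsMap := map[string]int{"up": 1} // renamed, no shadowing
	requests.Inc()
	fmt.Println(metricsMap, requests.Get())
}
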
@@ -182,6 +182,7 @@ func (ctx *InsertCtx) WriteMetadata(mmpbs []prompb.MetricMetadata) error {
		mm.Type = mmpb.Type
		mm.Unit = bytesutil.ToUnsafeBytes(mmpb.Unit)
	}
+	ctx.mms = mms

	err := vmstorage.AddMetadataRows(mms)
	if err != nil {
@@ -206,6 +207,7 @@ func (ctx *InsertCtx) WritePromMetadata(mmps []prometheus.Metadata) error {
		mm.Help = bytesutil.ToUnsafeBytes(mmpb.Help)
		mm.Type = mmpb.Type
	}
+	ctx.mms = mms

	err := vmstorage.AddMetadataRows(mms)
	if err != nil {

@@ -111,9 +111,7 @@ func InitStreamAggr() {
	saCfgTimestamp.Set(fasttime.UnixTimestamp())

	// Start config reloader.
-	saCfgReloaderWG.Add(1)
-	go func() {
-		defer saCfgReloaderWG.Done()
+	saCfgReloaderWG.Go(func() {
		for {
			select {
			case <-sighupCh:
@@ -122,7 +120,7 @@ func InitStreamAggr() {
			}
			reloadStreamAggrConfig()
		}
-	}()
+	})
}

func reloadStreamAggrConfig() {

@@ -45,15 +45,14 @@ func insertRows(sketches []*datadogsketches.Sketch, extraLabels []prompb.Label)
		ms := sketch.ToSummary()
		for _, m := range ms {
			ctx.Labels = ctx.Labels[:0]
+			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10557
+			ctx.AddLabel("host", sketch.Host) // newly added
			ctx.AddLabel("", m.Name)
			for _, label := range m.Labels {
				ctx.AddLabel(label.Name, label.Value)
			}
			for _, tag := range sketch.Tags {
				name, value := datadogutil.SplitTag(tag)
				if name == "host" {
					name = "exported_host"
				}
				ctx.AddLabel(name, value)
			}
			for j := range extraLabels {

@@ -232,7 +232,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		}
		firehose.WriteSuccessResponse(w, r)
		return true
-	case "zabbixconnector/api/v1/history":
+	case "/zabbixconnector/api/v1/history":
		zabbixconnectorHistoryRequests.Inc()
		if err := zabbixconnector.InsertHandlerForHTTP(r); err != nil {
			zabbixconnectorHistoryErrors.Inc()
@@ -241,7 +241,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
			fmt.Fprintf(w, `{"error":%q}`, err.Error())
			return true
		}
-		w.WriteHeader(http.StatusAccepted)
+		w.WriteHeader(http.StatusOK)
		return true
	case "/newrelic":
		newrelicCheckRequest.Inc()

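The leading-slash fix above matters because, in practice, r.URL.Path for server requests is normalized with a leading "/", so a case arm without it can never match. A tiny sketch of the bug class (the route function is illustrative):

package main

import "fmt"

func route(path string) string {
	switch path {
	case "zabbixconnector/api/v1/history": // unreachable: paths start with "/"
		return "old case"
	case "/zabbixconnector/api/v1/history":
		return "new case"
	}
	return "not found"
}

func main() {
	fmt.Println(route("/zabbixconnector/api/v1/history")) // "new case"
}
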
@@ -142,7 +142,7 @@ type aggrStatePercentile struct {

func newAggrStatePercentile(pointsLen int, n float64) aggrState {
	hs := make([]*histogram.Fast, pointsLen)
-	for i := 0; i < pointsLen; i++ {
+	for i := range pointsLen {
		hs[i] = histogram.NewFast()
	}
	return &aggrStatePercentile{

@@ -9,6 +9,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
@@ -49,7 +50,7 @@ func (ec *evalConfig) newTimestamps(step int64) []int64 {
	pointsLen := ec.pointsLen(step)
	timestamps := make([]int64, pointsLen)
	ts := ec.startTime
-	for i := 0; i < pointsLen; i++ {
+	for i := range pointsLen {
		timestamps[i] = ts
		ts += step
	}
@@ -196,12 +197,17 @@ func newNextSeriesForSearchQuery(ec *evalConfig, sq *storage.SearchQuery, expr g
		pathExpression: safePathExpression(expr),
	}
	s.summarize(aggrAvg, ec.startTime, ec.endTime, ec.storageStep, 0)
-	t := timerpool.Get(30 * time.Second)
+
+	// A negative or zero duration will cause timer.C to return immediately
+	remainingTimeout := ec.deadline.Deadline() - fasttime.UnixTimestamp()
+	t := timerpool.Get(time.Duration(remainingTimeout) * time.Second)
	defer timerpool.Put(t)
+
	select {
	case seriesCh <- s:
	case <-t.C:
-		logger.Errorf("resource leak when processing the %s (full query: %s); please report this error to VictoriaMetrics developers",
+		logger.Errorf("reached timeout when processing the %s (full query: %s), it can be due to the amount of storageNodes configured in vmselect is more than vmselect's available CPU count "+
+			"or vmselect is heavy loaded. Consider adding resources or increasing `-search.maxQueryDuration` or `timeout` parameter in the query.",
			expr.AppendString(nil), ec.originalQuery)
	}
	return nil

@@ -25,7 +25,7 @@ func naturalLess(a, b string) bool {
}

func getNonNumPrefix(s string) (prefix string, tail string) {
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
		ch := s[i]
		if ch >= '0' && ch <= '9' {
			return s[:i], s[i:]

@@ -82,7 +82,7 @@ func RenderHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	if s := r.FormValue("maxDataPoints"); len(s) > 0 {
		n, err := strconv.ParseFloat(s, 64)
		if err != nil {
-			return fmt.Errorf("cannot parse maxDataPoints=%q: %w", maxDataPoints, err)
+			return fmt.Errorf("cannot parse maxDataPoints=%d: %w", maxDataPoints, err)
		}
		if n <= 0 {
			return fmt.Errorf("maxDataPoints must be greater than 0; got %f", n)
@@ -209,7 +209,7 @@ func parseInterval(s string) (int64, error) {
	s = strings.TrimSpace(s)
	prefix := s
	var suffix string
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
		ch := s[i]
		if ch != '-' && ch != '+' && ch != '.' && (ch < '0' || ch > '9') {
			prefix = s[:i]

@@ -1228,7 +1228,7 @@ func transformDelay(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, er
			stepsLocal = len(values)
		}
		copy(values[stepsLocal:], values[:len(values)-stepsLocal])
-		for i := 0; i < stepsLocal; i++ {
+		for i := range stepsLocal {
			values[i] = nan
		}
	}
@@ -1740,7 +1740,7 @@ func transformGroup(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, er

func groupSeriesLists(ec *evalConfig, args []*graphiteql.ArgExpr, expr graphiteql.Expr) (nextSeriesFunc, error) {
	var nextSeriess []nextSeriesFunc
-	for i := 0; i < len(args); i++ {
+	for i := range args {
		nextSeries, err := evalSeriesList(ec, args, "seriesList", i)
		if err != nil {
			for _, f := range nextSeriess {
@@ -3233,7 +3233,7 @@ func transformSeriesByTag(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFu
		return nil, fmt.Errorf("at least one tagExpression must be passed to seriesByTag")
	}
	var tagExpressions []string
-	for i := 0; i < len(args); i++ {
+	for i := range args {
		te, err := getString(args, "tagExpressions", i)
		if err != nil {
			return nil, err
@@ -3633,7 +3633,7 @@ var graphiteToGolangRe = regexp.MustCompile(`\\(\d+)`)

func getNodes(args []*graphiteql.ArgExpr) ([]graphiteql.Expr, error) {
	var nodes []graphiteql.Expr
-	for i := 0; i < len(args); i++ {
+	for i := range args {
		expr := args[i].Expr
		switch expr.(type) {
		case *graphiteql.NumberExpr, *graphiteql.StringExpr:
@@ -3896,27 +3896,9 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
	seriesCh := make(chan *series, goroutines)
	errCh := make(chan error, 1)
	var wg sync.WaitGroup
-	wg.Add(goroutines)
-	go func() {
-		var err error
-		for {
-			s, e := nextSeries()
-			if e != nil || s == nil {
-				err = e
-				break
-			}
-			seriesCh <- s
-		}
-		close(seriesCh)
-		wg.Wait()
-		close(resultCh)
-		errCh <- err
-		close(errCh)
-	}()
	var skipProcessing atomic.Bool
-	for i := 0; i < goroutines; i++ {
-		go func() {
-			defer wg.Done()
+	for range goroutines {
+		wg.Go(func() {
			for s := range seriesCh {
				if skipProcessing.Load() {
					continue
@@ -3934,8 +3916,24 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
				}
			}
		}
-	}()
+	})
	}
+	go func() {
+		var err error
+		for {
+			s, e := nextSeries()
+			if e != nil || s == nil {
+				err = e
+				break
+			}
+			seriesCh <- s
+		}
+		close(seriesCh)
+		wg.Wait()
+		close(resultCh)
+		errCh <- err
+		close(errCh)
+	}()
	wrapper := func() (*series, error) {
		r := <-resultCh
		if r == nil {
@@ -4054,7 +4052,7 @@ func formatPathsFromSeriesExpressions(seriesExpressions []string, sortPaths bool

func newNaNSeries(ec *evalConfig, step int64) *series {
	values := make([]float64, ec.pointsLen(step))
-	for i := 0; i < len(values); i++ {
+	for i := range values {
		values[i] = nan
	}
	return &series{
@@ -5246,7 +5244,7 @@ func transformLinearRegression(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSer

func linearRegressionForSeries(ec *evalConfig, fe *graphiteql.FuncExpr, ss, sourceSeries []*series) (nextSeriesFunc, error) {
	var resp []*series
-	for i := 0; i < len(ss); i++ {
+	for i := range ss {
		source := sourceSeries[i]
		s := ss[i]
		s.Tags["linearRegressions"] = fmt.Sprintf("%d, %d", ec.startTime/1e3, ec.endTime/1e3)
@@ -5260,7 +5258,7 @@ func linearRegressionForSeries(ec *evalConfig, fe *graphiteql.FuncExpr, ss, sour
			continue
		}
		values := s.Values
-		for j := 0; j < len(values); j++ {
+		for j := range values {
			values[j] = offset + (float64(int(s.Timestamps[0])+j*int(s.step)))*factor
		}
		resp = append(resp, s)
@@ -5372,7 +5370,7 @@ func holtWinterConfidenceBands(ec *evalConfig, fe *graphiteql.FuncExpr, args []*
	valuesLen := len(forecastValues)
	upperBand := make([]float64, 0, valuesLen)
	lowerBand := make([]float64, 0, valuesLen)
-	for i := 0; i < valuesLen; i++ {
+	for i := range valuesLen {
		forecastItem := forecastValues[i]
		deviationItem := deviationValues[i]
		if math.IsNaN(forecastItem) || math.IsNaN(deviationItem) {
@@ -5466,7 +5464,7 @@ func transformHoltWintersAberration(ec *evalConfig, fe *graphiteql.FuncExpr) (ne
		return nil, fmt.Errorf("bug, len mismatch for series: %d and upperBand values: %d or lowerBand values: %d", len(values), len(upperBand), len(lowerBand))
	}
	aberration := make([]float64, 0, len(values))
-	for i := 0; i < len(values); i++ {
+	for i := range values {
		v := values[i]
		upperValue := upperBand[i]
		lowerValue := lowerBand[i]

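Note the reordering in nextSeriesConcurrentWrapper: the producer goroutine now starts only after the wg.Go loop. A plausible reason — inferred from the hunk, not stated in it — is that wg.Go increments the counter itself, so a producer that raced ahead could call wg.Wait() before any worker was registered and close the result channel while workers were still about to send into it. A sketch of the safe ordering:

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int)
	results := make(chan int, 8)
	var wg sync.WaitGroup

	// Register all workers first so the WaitGroup counter covers them.
	for range 3 {
		wg.Go(func() {
			for j := range jobs {
				results <- j * j
			}
		})
	}
	// Producer starts last; its wg.Wait() now reliably waits for all workers.
	go func() {
		for j := range 5 {
			jobs <- j
		}
		close(jobs)
		wg.Wait()
		close(results)
	}()
	for r := range results {
		fmt.Println(r)
	}
}
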
@@ -280,7 +280,7 @@ func isMetricExprChar(ch byte) bool {
}

func appendEscapedIdent(dst []byte, s string) []byte {
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
		ch := s[i]
		if isIdentChar(ch) || isMetricExprChar(ch) {
			if i == 0 && !isFirstIdentChar(ch) {

@@ -520,7 +520,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
			fmt.Fprintf(w, "%s", `{"status":"error","msg":"for accessing vmalert flag '-vmalert.proxyURL' must be configured"}`)
			return true
		}
-		proxyVMAlertRequests(w, r)
+		proxyVMAlertRequests(w, r, path)
		return true
	}

@@ -558,7 +558,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
	case "/api/v1/rules", "/rules":
		rulesRequests.Inc()
		if len(*vmalertProxyURL) > 0 {
-			proxyVMAlertRequests(w, r)
+			proxyVMAlertRequests(w, r, path)
			return true
		}
		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules
@@ -568,7 +568,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
	case "/api/v1/alerts", "/alerts":
		alertsRequests.Inc()
		if len(*vmalertProxyURL) > 0 {
-			proxyVMAlertRequests(w, r)
+			proxyVMAlertRequests(w, r, path)
			return true
		}
		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#alerts
@@ -578,7 +578,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
	case "/api/v1/notifiers", "/notifiers":
		notifiersRequests.Inc()
		if len(*vmalertProxyURL) > 0 {
-			proxyVMAlertRequests(w, r)
+			proxyVMAlertRequests(w, r, path)
			return true
		}
		w.Header().Set("Content-Type", "application/json")
@@ -725,7 +725,7 @@ var (
	metricNamesStatsResetErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/admin/status/metric_names_stats/reset"}`)
)

-func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request) {
+func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request, path string) {
	defer func() {
		err := recover()
		if err == nil || err == http.ErrAbortHandler {
@@ -736,8 +736,10 @@ func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request) {
		// Forward other panics to the caller.
		panic(err)
	}()
-	r.Host = vmalertProxyHost
-	vmalertProxy.ServeHTTP(w, r)
+	req := r.Clone(r.Context())
+	req.URL.Path = strings.TrimPrefix(path, "prometheus")
+	req.Host = vmalertProxyHost
+	vmalertProxy.ServeHTTP(w, req)
}

var (

@@ -5,6 +5,8 @@ import (
	"errors"
	"flag"
	"fmt"
+	"math"
+	"slices"
	"sort"
	"sync"
	"sync/atomic"
@@ -296,14 +298,12 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke

	// Start workers and wait until they finish the work.
	var wg sync.WaitGroup
-	for i := range workChs {
-		wg.Add(1)
-		qtChild := qt.NewChild("worker #%d", i)
-		go func(workerID uint) {
-			timeseriesWorker(qtChild, workChs, workerID)
+	for workerID := range workChs {
+		qtChild := qt.NewChild("worker #%d", workerID)
+		wg.Go(func() {
+			timeseriesWorker(qtChild, workChs, uint(workerID))
			qtChild.Done()
-			wg.Done()
-		}(uint(i))
+		})
	}
	wg.Wait()

@@ -492,10 +492,7 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
	}

	// Prepare worker channels.
-	workers := min(len(upws), gomaxprocs)
-	if workers < 1 {
-		workers = 1
-	}
+	workers := max(min(len(upws), gomaxprocs), 1)
	itemsPerWorker := (len(upws) + workers - 1) / workers
	workChs := make([]chan *unpackWork, workers)
	for i := range workChs {
@@ -514,12 +511,10 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s

	// Start workers and wait until they finish the work.
	var wg sync.WaitGroup
-	for i := 0; i < workers; i++ {
-		wg.Add(1)
-		go func(workerID uint) {
-			unpackWorker(workChs, workerID)
-			wg.Done()
-		}(uint(i))
+	for workerID := range workers {
+		wg.Go(func() {
+			unpackWorker(workChs, uint(workerID))
+		})
	}
	wg.Wait()

@@ -582,6 +577,7 @@ func mergeSortBlocks(dst *Result, sbh *sortBlocksHeap, dedupInterval int64) {
		return
	}
	heap.Init(sbh)
+	var dedupSamples int
	for {
		sbs := sbh.sbs
		top := sbs[0]
@@ -597,6 +593,7 @@ func mergeSortBlocks(dst *Result, sbh *sortBlocksHeap, dedupInterval int64) {
		if n := equalSamplesPrefix(top, sbNext); n > 0 && dedupInterval > 0 {
			// Skip n replicated samples at top if deduplication is enabled.
			top.NextIdx = topNextIdx + n
+			dedupSamples += n
		} else {
			// Copy samples from top to dst with timestamps not exceeding tsNext.
			top.NextIdx = topNextIdx + binarySearchTimestamps(top.Timestamps[topNextIdx:], tsNext)
@@ -611,8 +608,8 @@ func mergeSortBlocks(dst *Result, sbh *sortBlocksHeap, dedupInterval int64) {
		}
	}
	timestamps, values := storage.DeduplicateSamples(dst.Timestamps, dst.Values, dedupInterval)
-	dedups := len(dst.Timestamps) - len(timestamps)
-	dedupsDuringSelect.Add(dedups)
+	dedupSamples += len(dst.Timestamps) - len(timestamps)
+	dedupsDuringSelect.Add(dedupSamples)
	dst.Timestamps = timestamps
	dst.Values = values
}
@@ -638,7 +635,7 @@ func equalTimestampsPrefix(a, b []int64) int {

func equalValuesPrefix(a, b []float64) int {
	for i, v := range a {
-		if i >= len(b) || v != b[i] {
+		if i >= len(b) || math.Float64bits(v) != math.Float64bits(b[i]) {
			return i
		}
	}
@@ -833,12 +830,7 @@ func GraphiteTags(qt *querytracer.Tracer, filter string, limit int, deadline sea
}

func hasString(a []string, s string) bool {
-	for _, x := range a {
-		if x == s {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(a, s)
}

// LabelValues returns label values matching the given labelName and sq until the given deadline.

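The switch to math.Float64bits in equalValuesPrefix makes the comparison bitwise rather than numeric. That matters for NaN-encoded staleness markers, which never compare equal under ==, and it also distinguishes +0 from -0 — exactly the cases exercised by the new test below. A short demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	// NaN != NaN under ordinary comparison, so replicated staleness
	// markers would never be detected as equal prefixes.
	fmt.Println(nan == nan)                                     // false
	fmt.Println(math.Float64bits(nan) == math.Float64bits(nan)) // true

	// The flip side: +0 and -0 are numerically equal but differ bitwise.
	negZero := math.Copysign(0, -1)
	fmt.Println(0.0 == negZero)                                     // true
	fmt.Println(math.Float64bits(0.0) == math.Float64bits(negZero)) // false
}
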
@@ -1020,12 +1012,10 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
		mustStop atomic.Bool
	)
	var wg sync.WaitGroup
-	wg.Add(gomaxprocs)
-	for i := 0; i < gomaxprocs; i++ {
-		go func(workerID uint) {
-			defer wg.Done()
+	for workerID := range gomaxprocs {
+		wg.Go(func() {
			for xw := range workCh {
-				if err := f(&xw.mn, &xw.b, tr, workerID); err != nil {
+				if err := f(&xw.mn, &xw.b, tr, uint(workerID)); err != nil {
					errGlobalLock.Lock()
					if errGlobal == nil {
						errGlobal = err
@@ -1036,7 +1026,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
				xw.reset()
				exportWorkPool.Put(xw)
			}
-		}(uint(i))
+		})
	}

	// Feed workers with work

|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
)
|
||||
|
||||
func TestMergeSortBlocks(t *testing.T) {
|
||||
@@ -194,3 +197,111 @@ func TestMergeSortBlocks(t *testing.T) {
|
||||
Values: []float64{7, 24, 26},
|
||||
})
|
||||
}
|
||||
|
||||
func TestEqualSamplesPrefix(t *testing.T) {
|
||||
f := func(a, b *sortBlock, expected int) {
|
||||
t.Helper()
|
||||
|
||||
actual := equalSamplesPrefix(a, b)
|
||||
if actual != expected {
|
||||
t.Fatalf("unexpected result: got %d, want %d", actual, expected)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty blocks
|
||||
f(&sortBlock{}, &sortBlock{}, 0)
|
||||
|
||||
// Identical blocks
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, 4)
|
||||
|
||||
// Non-zero NextIdx
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
NextIdx: 2,
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{10, 20, 3, 4},
|
||||
Values: []float64{50, 60, 7, 8},
|
||||
NextIdx: 2,
|
||||
}, 2)
|
||||
|
||||
// Non-zero NextIdx with mismatch
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
NextIdx: 1,
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{10, 2, 3, 4},
|
||||
Values: []float64{50, 6, 7, 80},
|
||||
NextIdx: 1,
|
||||
}, 2)
|
||||
|
||||
// Different lengths
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2, 3},
|
||||
Values: []float64{5, 6, 7},
|
||||
}, 3)
|
||||
|
||||
// Timestamps diverge
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2, 30, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, 2)
|
||||
|
||||
// Values diverge
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 60, 7, 8},
|
||||
}, 1)
|
||||
|
||||
// Zero matches
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, 6, 7, 8},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{5, 6, 7, 8},
|
||||
Values: []float64{1, 2, 3, 4},
|
||||
}, 0)
|
||||
|
||||
// Compare staleness markers, matching
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, decimal.StaleNaN, 7, 8},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{5, decimal.StaleNaN, 7, 8},
|
||||
}, 4)
|
||||
|
||||
// Special float values: +Inf, -Inf, 0, -0
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{math.Inf(1), math.Inf(-1), math.Copysign(0, +1), math.Copysign(0, -1)},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2, 3, 4},
|
||||
Values: []float64{math.Inf(1), math.Inf(-1), math.Copysign(0, +1), math.Copysign(0, -1)},
|
||||
}, 4)
|
||||
|
||||
// Positive zero vs negative zero (bitwise different)
|
||||
f(&sortBlock{
|
||||
Timestamps: []int64{1, 2},
|
||||
Values: []float64{5, math.Copysign(0, +1)},
|
||||
}, &sortBlock{
|
||||
Timestamps: []int64{1, 2},
|
||||
Values: []float64{5, math.Copysign(0, -1)},
|
||||
}, 1)
|
||||
}
|
||||
|
||||
@@ -10,14 +10,14 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
 	b.Run(fmt.Sprintf("replicationFactor-%d", replicationFactor), func(b *testing.B) {
 		const samplesPerBlock = 8192
 		var blocks []*sortBlock
-		for j := 0; j < 10; j++ {
+		for j := range 10 {
 			timestamps := make([]int64, samplesPerBlock)
 			values := make([]float64, samplesPerBlock)
 			for i := range timestamps {
 				timestamps[i] = int64(j*samplesPerBlock + i)
 				values[i] = float64(j*samplesPerBlock + i)
 			}
-			for i := 0; i < replicationFactor; i++ {
+			for range replicationFactor {
 				blocks = append(blocks, &sortBlock{
 					Timestamps: timestamps,
 					Values: values,
@@ -30,7 +30,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
 	b.Run("overlapped-blocks-bestcase", func(b *testing.B) {
 		const samplesPerBlock = 8192
 		var blocks []*sortBlock
-		for j := 0; j < 10; j++ {
+		for j := range 10 {
 			timestamps := make([]int64, samplesPerBlock)
 			values := make([]float64, samplesPerBlock)
 			for i := range timestamps {
@@ -45,7 +45,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
 		for j := 1; j < len(blocks); j++ {
 			prev := blocks[j-1].Timestamps
 			curr := blocks[j].Timestamps
-			for i := 0; i < samplesPerBlock/2; i++ {
+			for i := range samplesPerBlock / 2 {
 				prev[i+samplesPerBlock/2], curr[i] = curr[i], prev[i+samplesPerBlock/2]
 			}
 		}
@@ -54,7 +54,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
 	b.Run("overlapped-blocks-worstcase", func(b *testing.B) {
 		const samplesPerBlock = 8192
 		var blocks []*sortBlock
-		for j := 0; j < 5; j++ {
+		for j := range 5 {
 			timestamps := make([]int64, samplesPerBlock)
 			values := make([]float64, samplesPerBlock)
 			for i := range timestamps {

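These hunks, like many below, modernize classic three-clause loops into Go 1.22's range-over-integer form, which counts from 0 to n-1 and lets the index variable be dropped entirely when only the repetition matters. A tiny self-contained illustration:

package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++ { ... }
	for i := range 3 {
		fmt.Println("iteration", i)
	}
	// The index can be omitted entirely when unused.
	for range 2 {
		fmt.Println("repeat")
	}
}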
@@ -6,11 +6,13 @@ import (
 	"math"
 	"net/http"
 	"runtime"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
+	"unicode/utf8"

 	"github.com/VictoriaMetrics/metrics"
 	"github.com/VictoriaMetrics/metricsql"
@@ -527,6 +529,14 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName s
 		return err
 	}
 	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxLabelsAPISeries)
+
+	if strings.HasPrefix(labelName, "U__") {
+		// This label seems to be Unicode-encoded according to the Prometheus spec.
+		// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
+		// Spec: https://github.com/prometheus/proposals/blob/main/proposals/0028-utf8.md
+		labelName = unescapePrometheusLabelName(labelName)
+	}
+
 	labelValues, err := netstorage.LabelValues(qt, labelName, sq, limit, cp.deadline)
 	if err != nil {
 		return fmt.Errorf("cannot obtain values for label %q: %w", labelName, err)
@@ -1004,14 +1014,7 @@ func removeEmptyValuesAndTimeseries(tss []netstorage.Result) []netstorage.Result
 	dst := tss[:0]
 	for i := range tss {
 		ts := &tss[i]
-		hasNaNs := false
-		for _, v := range ts.Values {
-			if math.IsNaN(v) {
-				hasNaNs = true
-				break
-			}
-		}
-		if !hasNaNs {
+		if !slices.ContainsFunc(ts.Values, math.IsNaN) {
 			// Fast path: nothing to remove.
 			if len(ts.Values) > 0 {
 				dst = append(dst, *ts)
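The rewrite above collapses a hand-rolled scan-and-flag loop into `slices.ContainsFunc` from the standard library (Go 1.21+), which short-circuits on the first element satisfying the predicate, exactly like the removed manual loop. A minimal equivalent:

package main

import (
	"fmt"
	"math"
	"slices"
)

func main() {
	// true: a NaN is present, found without scanning past it.
	fmt.Println(slices.ContainsFunc([]float64{5, math.NaN(), 7}, math.IsNaN))
	// false: no element satisfies the predicate.
	fmt.Println(slices.ContainsFunc([]float64{1, 2}, math.IsNaN))
}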
@@ -1336,3 +1339,70 @@ func calculateMaxUniqueTimeSeriesForResource(maxConcurrentRequests, remainingMem
 func GetMaxUniqueTimeSeries() int {
 	return maxUniqueTimeseriesValue
 }
+
+// copied from https://github.com/prometheus/common/blob/adea6285c1c7447fcb7bfdeb6abfc6eff893e0a7/model/metric.go#L483
+// it's not possible to use direct import due to increased binary size
+func unescapePrometheusLabelName(name string) string {
+	// lower function taken from strconv.atoi.
+	lower := func(c byte) byte {
+		return c | ('x' - 'X')
+	}
+	if len(name) == 0 {
+		return name
+	}
+	escapedName, found := strings.CutPrefix(name, "U__")
+	if !found {
+		return name
+	}
+
+	var unescaped strings.Builder
+TOP:
+	for i := 0; i < len(escapedName); i++ {
+		// All non-underscores are treated normally.
+		if escapedName[i] != '_' {
+			unescaped.WriteByte(escapedName[i])
+			continue
+		}
+		i++
+		if i >= len(escapedName) {
+			return name
+		}
+		// A double underscore is a single underscore.
+		if escapedName[i] == '_' {
+			unescaped.WriteByte('_')
+			continue
+		}
+		// We think we are in a UTF-8 code, process it.
+		var utf8Val uint
+		for j := 0; i < len(escapedName); j++ {
+			// This is too many characters for a utf8 value based on the MaxRune
+			// value of '\U0010FFFF'.
+			if j >= 6 {
+				return name
+			}
+			// Found a closing underscore, convert to a rune, check validity, and append.
+			if escapedName[i] == '_' {
+				utf8Rune := rune(utf8Val)
+				if !utf8.ValidRune(utf8Rune) {
+					return name
+				}
+				unescaped.WriteRune(utf8Rune)
+				continue TOP
+			}
+			r := lower(escapedName[i])
+			utf8Val *= 16
+			switch {
+			case r >= '0' && r <= '9':
+				utf8Val += uint(r) - '0'
+			case r >= 'a' && r <= 'f':
+				utf8Val += uint(r) - 'a' + 10
+			default:
+				return name
+			}
+			i++
+		}
+		// Didn't find closing underscore, invalid.
+		return name
+	}
+	return unescaped.String()
+}

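For a feel of the escaping scheme this decoder reverses: a `U__`-prefixed name encodes each non-legacy rune as `_<hex>_` and a literal underscore as `__`; anything malformed is returned unchanged. The calls below are hand-traced through the logic above (not taken from the upstream test suite) and assume the function is in scope:

// 0x2e is '.', so the escape _2e_ decodes to a dot.
fmt.Println(unescapePrometheusLabelName("U__http_2e_server")) // "http.server"
// A double underscore decodes to a single literal underscore.
fmt.Println(unescapePrometheusLabelName("U__foo__bar")) // "foo_bar"
// No U__ prefix, or an unterminated hex escape: returned as-is.
fmt.Println(unescapePrometheusLabelName("plain_label"))  // "plain_label"
fmt.Println(unescapePrometheusLabelName("U__broken_2e")) // "U__broken_2e"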
@@ -742,7 +742,7 @@ func getRangeTopKTimeseries(tss []*timeseries, modifier *metricsql.ModifierExpr,

 func reverseSeries(tss []*timeseries) {
 	j := len(tss)
-	for i := 0; i < len(tss)/2; i++ {
+	for i := range len(tss) / 2 {
 		j--
 		tss[i], tss[j] = tss[j], tss[i]
 	}
@@ -983,7 +983,7 @@ func getPerPointIQRBounds(tss []*timeseries) ([]float64, []float64) {
 	var qs []float64
 	lower := make([]float64, pointsLen)
 	upper := make([]float64, pointsLen)
-	for i := 0; i < pointsLen; i++ {
+	for i := range pointsLen {
 		values = values[:0]
 		for _, ts := range tss {
 			v := ts.Values[i]

@@ -53,7 +53,7 @@ func TestIncrementalAggr(t *testing.T) {
 		Values: valuesExpected,
 	}}
 	// run the test multiple times to make sure there are no side effects on concurrency
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		iafc := newIncrementalAggrFuncContext(ae, callbacks)
 		tssSrcCopy := copyTimeseries(tssSrc)
 		if err := testIncrementalParallelAggr(iafc, tssSrcCopy, tssExpected); err != nil {
@@ -103,15 +103,13 @@ func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssEx
 	workersCount := netstorage.MaxWorkers()
 	tsCh := make(chan *timeseries)
 	var wg sync.WaitGroup
-	wg.Add(workersCount)
-	for i := 0; i < workersCount; i++ {
-		go func(workerID uint) {
-			defer wg.Done()
+	for workerID := range workersCount {
+		wg.Go(func() {
 			for ts := range tsCh {
 				runtime.Gosched() // allow other goroutines performing the work
-				iafc.updateTimeseries(ts, workerID)
+				iafc.updateTimeseries(ts, uint(workerID))
 			}
-		}(uint(i))
+		})
 	}
 	for _, ts := range tssSrc {
 		tsCh <- ts

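This worker-pool rewrite (and the similar ones below) leans on two newer language/stdlib features: `sync.WaitGroup.Go` (Go 1.25) wraps the `Add(1)` / `go func()` / `defer Done()` boilerplate, and since Go 1.22 each loop iteration gets a fresh loop variable, so the worker ID can be captured by the closure directly instead of being passed as an argument. A condensed before/after sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	var wg sync.WaitGroup

	// Old pattern: manual counter management plus explicit parameter
	// passing to avoid capturing a shared loop variable.
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func(id int) {
			defer wg.Done()
			fmt.Println("old-style worker", id)
		}(i)
	}
	wg.Wait()

	// New pattern: WaitGroup.Go handles Add/Done, and the per-iteration
	// loop variable is safe to capture directly.
	for id := range workers {
		wg.Go(func() {
			fmt.Println("new-style worker", id)
		})
	}
	wg.Wait()
}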
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"math"
 	"regexp"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -477,22 +478,18 @@ func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSec
 	var tssFirst []*timeseries
 	var errFirst error
 	qtFirst := qt.NewChild("expr1")
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		tssFirst, errFirst = evalExpr(qtFirst, ec, exprFirst)
 		qtFirst.Done()
-	}()
+	})

 	var tssSecond []*timeseries
 	var errSecond error
 	qtSecond := qt.NewChild("expr2")
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		tssSecond, errSecond = evalExpr(qtSecond, ec, exprSecond)
 		qtSecond.Done()
-	}()
+	})

 	wg.Wait()
 	if errFirst != nil {
@@ -710,17 +707,13 @@ func evalExprsInParallel(qt *querytracer.Tracer, ec *EvalConfig, es []metricsql.
 	qt.Printf("eval function args in parallel")
 	var wg sync.WaitGroup
 	for i, e := range es {
-		wg.Add(1)
 		qtChild := qt.NewChild("eval arg %d", i)
-		go func(e metricsql.Expr, i int) {
-			defer func() {
-				qtChild.Done()
-				wg.Done()
-			}()
+		wg.Go(func() {
+			defer qtChild.Done()
 			rv, err := evalExpr(qtChild, ec, e)
 			rvs[i] = rv
 			errs[i] = err
-		}(e, i)
+		})
 	}
 	wg.Wait()
 	for _, err := range errs {
@@ -785,7 +778,8 @@ func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
 // - rollupFunc(m) if iafc is nil
 // - aggrFunc(rollupFunc(m)) if iafc isn't nil
 func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr,
-	re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
+	re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext,
+) ([]*timeseries, error) {
 	if re.At == nil {
 		return evalRollupFuncWithoutAt(qt, ec, funcName, rf, expr, re, iafc)
 	}
@@ -835,7 +829,8 @@ func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf
 }

 func evalRollupFuncWithoutAt(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
-	expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
+	expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext,
+) ([]*timeseries, error) {
 	funcName = strings.ToLower(funcName)
 	ecNew := ec
 	var offset int64
@@ -1017,16 +1012,14 @@ func doParallel(tss []*timeseries, f func(ts *timeseries, values []float64, time
 	}

 	var wg sync.WaitGroup
-	wg.Add(workers)
-	for i := 0; i < workers; i++ {
-		go func(workerID uint) {
-			defer wg.Done()
+	for workerID := range workers {
+		wg.Go(func() {
 			var tmpValues []float64
 			var tmpTimestamps []int64
 			for ts := range workChs[workerID] {
-				tmpValues, tmpTimestamps = f(ts, tmpValues, tmpTimestamps, workerID)
+				tmpValues, tmpTimestamps = f(ts, tmpValues, tmpTimestamps, uint(workerID))
 			}
-		}(uint(i))
+		})
 	}
 	wg.Wait()
 }
@@ -1058,7 +1051,8 @@ func removeNanValues(dstValues []float64, dstTimestamps []int64, values []float6

 // evalInstantRollup evaluates instant rollup where ec.Start == ec.End.
 func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
-	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window int64) ([]*timeseries, error) {
+	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window int64,
+) ([]*timeseries, error) {
 	if ec.Start != ec.End {
 		logger.Panicf("BUG: evalInstantRollup cannot be called on non-empty time range; got %s", ec.timeRangeString())
 	}
@@ -1083,10 +1077,12 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 		rollupResultCacheV.DeleteInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
 	}
 	getCachedSeries := func(qt *querytracer.Tracer) ([]*timeseries, int64, error) {
+		rollupResultCacheV.rollupResultCacheRequests.Inc()
 	again:
 		offset := int64(0)
 		tssCached := rollupResultCacheV.GetInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
 		if len(tssCached) == 0 {
+			rollupResultCacheV.rollupResultCacheMisses.Inc()
 			// Cache miss. Re-populate the missing data.
 			start := int64(fasttime.UnixTimestamp()*1000) - cacheTimestampOffset.Milliseconds()
 			offset = timestamp - start
@@ -1129,6 +1125,7 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 			deleteCachedSeries(qt)
 			goto again
 		}
+		rollupResultCacheV.rollupResultCachePartialHits.Inc()
 		ec.QueryStats.addSeriesFetched(len(tssCached))
 		return tssCached, offset, nil
 	}
@@ -1169,6 +1166,61 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 			},
 		}
 		return evalExpr(qt, ec, be)
+	// the cached rate result could be inaccurate in edge cases, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10098
+	case "rate":
+		if iafc != nil {
+			if !strings.EqualFold(iafc.ae.Name, "sum") {
+				qt.Printf("do not apply instant rollup optimization for incremental aggregate %s()", iafc.ae.Name)
+				return evalAt(qt, timestamp, window)
+			}
+			qt.Printf("optimized calculation for sum(rate(m[d])) as (sum(increase(m[d])) / d)")
+			afe := expr.(*metricsql.AggrFuncExpr)
+			fe := afe.Args[0].(*metricsql.FuncExpr)
+			feIncrease := *fe
+			feIncrease.Name = "increase"
+			// copy RollupExpr to drop possible offset,
+			// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
+			newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
+			newArg.Offset = nil
+			feIncrease.Args = []metricsql.Expr{newArg}
+			d := newArg.Window.Duration(ec.Step)
+			if d == 0 {
+				d = ec.Step
+			}
+			afeIncrease := *afe
+			afeIncrease.Args = []metricsql.Expr{&feIncrease}
+			be := &metricsql.BinaryOpExpr{
+				Op: "/",
+				KeepMetricNames: true,
+				Left: &afeIncrease,
+				Right: &metricsql.NumberExpr{
+					N: float64(d) / 1000,
+				},
+			}
+			return evalExpr(qt, ec, be)
+		}
+		qt.Printf("optimized calculation for instant rollup rate(m[d]) as (increase(m[d]) / d)")
+		fe := expr.(*metricsql.FuncExpr)
+		feIncrease := *fe
+		feIncrease.Name = "increase"
+		// copy RollupExpr to drop possible offset,
+		// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
+		newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
+		newArg.Offset = nil
+		feIncrease.Args = []metricsql.Expr{newArg}
+		d := newArg.Window.Duration(ec.Step)
+		if d == 0 {
+			d = ec.Step
+		}
+		be := &metricsql.BinaryOpExpr{
+			Op: "/",
+			KeepMetricNames: fe.KeepMetricNames,
+			Left: &feIncrease,
+			Right: &metricsql.NumberExpr{
+				N: float64(d) / 1000,
+			},
+		}
+		return evalExpr(qt, ec, be)
 	case "max_over_time":
 		if iafc != nil {
 			if !strings.EqualFold(iafc.ae.Name, "max") {
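The new `case "rate"` branch rests on the identity rate(m[d]) = increase(m[d]) / d, with the window d converted from milliseconds to seconds — hence the `float64(d) / 1000` divisor. A quick worked check of that arithmetic (the numbers are illustrative, not from the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	// A 5m lookbehind window, in milliseconds as metricsql stores it.
	d := (5 * time.Minute).Milliseconds() // 300000
	divisor := float64(d) / 1000          // 300 seconds
	// If increase(m[5m]) is 1500 over the window, the average per-second
	// rate is 1500 / 300 = 5, matching rate(m[5m]).
	fmt.Println(1500.0 / divisor) // 5
}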
@@ -1537,16 +1589,11 @@ func assertInstantValues(tss []*timeseries) {
 	}
 }

-var (
-	rollupResultCacheFullHits = metrics.NewCounter(`vm_rollup_result_cache_full_hits_total`)
-	rollupResultCachePartialHits = metrics.NewCounter(`vm_rollup_result_cache_partial_hits_total`)
-	rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
-
-	memoryIntensiveQueries = metrics.NewCounter(`vm_memory_intensive_queries_total`)
-)
+var memoryIntensiveQueries = metrics.NewCounter(`vm_memory_intensive_queries_total`)

 func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
-	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
+	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr,
+) ([]*timeseries, error) {
 	window, err := windowExpr.NonNegativeDuration(ec.Step)
 	if err != nil {
 		return nil, fmt.Errorf("cannot parse lookbehind window in square brackets at %s: %w", expr.AppendString(nil), err)
@@ -1582,19 +1629,20 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
 	}

 	// Search for cached results.
+	rollupResultCacheV.rollupResultCacheRequests.Inc()
 	tssCached, start := rollupResultCacheV.GetSeries(qt, ec, expr, window)
 	ec.QueryStats.addSeriesFetched(len(tssCached))
 	if start > ec.End {
 		qt.Printf("the result is fully cached")
-		rollupResultCacheFullHits.Inc()
+		rollupResultCacheV.rollupResultCacheFullHits.Inc()
 		return tssCached, nil
 	}
 	if start > ec.Start {
 		qt.Printf("partial cache hit")
-		rollupResultCachePartialHits.Inc()
+		rollupResultCacheV.rollupResultCachePartialHits.Inc()
 	} else {
 		qt.Printf("cache miss")
-		rollupResultCacheMiss.Inc()
+		rollupResultCacheV.rollupResultCacheMisses.Inc()
 	}

 	// Fetch missing results, which aren't cached yet.
@@ -1630,7 +1678,8 @@
 //
 // pointsPerSeries is used only for estimating the needed memory for query processing
 func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
-	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window, pointsPerSeries int64) ([]*timeseries, error) {
+	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window, pointsPerSeries int64,
+) ([]*timeseries, error) {
 	if qt.Enabled() {
 		qt = qt.NewChild("rollup %s: timeRange=%s, step=%d, window=%d", expr.AppendString(nil), ec.timeRangeString(), ec.Step, window)
 		defer qt.Done()
@@ -1720,6 +1769,7 @@ func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName stri
 		return nil, err
 	}
 	defer rml.Put(uint64(rollupMemorySize))
+	qs.addMemoryUsage(rollupMemorySize)
 	qt.Printf("the rollup evaluation needs an estimated %d bytes of RAM for %d series and %d points per series (summary %d points)",
 		rollupMemorySize, timeseriesLen, pointsPerSeries, rollupPoints)

@@ -1753,7 +1803,8 @@ func maxSilenceInterval() int64 {

 func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool,
 	iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
-	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
+	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64,
+) ([]*timeseries, error) {
 	qt = qt.NewChild("rollup %s() with incremental aggregation %s() over %d series; rollupConfigs=%s", funcName, iafc.ae.Name, rss.Len(), rcs)
 	defer qt.Done()
 	var samplesScannedTotal atomic.Uint64
@@ -1792,7 +1843,8 @@ func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string,
 }

 func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
-	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
+	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64,
+) ([]*timeseries, error) {
 	qt = qt.NewChild("rollup %s() over %d series; rollupConfigs=%s", funcName, rss.Len(), rcs)
 	defer qt.Done()

@@ -1832,7 +1884,8 @@ func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, k
 }

 func doRollupForTimeseries(funcName string, keepMetricNames bool, rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName,
-	valuesSrc []float64, timestampsSrc []int64, sharedTimestamps []int64) uint64 {
+	valuesSrc []float64, timestampsSrc []int64, sharedTimestamps []int64,
+) uint64 {
 	tsDst.MetricName.CopyFrom(mnSrc)
 	if len(rc.TagValue) > 0 {
 		tsDst.MetricName.AddTag("rollup", rc.TagValue)
@@ -1938,14 +1991,7 @@ func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]flo
 		return values, timestamps
 	}
 	// Remove Prometheus staleness marks, so non-default rollup functions don't hit NaN values.
-	hasStaleSamples := false
-	for _, v := range values {
-		if decimal.IsStaleNaN(v) {
-			hasStaleSamples = true
-			break
-		}
-	}
-	if !hasStaleSamples {
+	if !slices.ContainsFunc(values, decimal.IsStaleNaN) {
 		// Fast path: values have no Prometheus staleness marks.
 		return values, timestamps
 	}

@@ -37,7 +37,7 @@ func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly boo
 	if querystats.Enabled() {
 		startTime := time.Now()
 		defer func() {
-			querystats.RegisterQuery(q, ec.End-ec.Start, startTime)
+			querystats.RegisterQuery(q, ec.End-ec.Start, startTime, ec.QueryStats.memoryUsage())
 			ec.QueryStats.addExecutionTimeMsec(startTime)
 		}()
 	}
@@ -313,7 +313,7 @@ func escapeDots(s string) string {
 		return s
 	}
 	result := make([]byte, 0, len(s)+2*dotsCount)
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
 		if s[i] == '.' && (i == 0 || s[i-1] != '\\') && (i+1 == len(s) || i+1 < len(s) && s[i+1] != '*' && s[i+1] != '+' && s[i+1] != '{') {
 			// Escape a dot if the following conditions are met:
 			// - if it isn't escaped already, i.e. if there is no `\` char before the dot.

@@ -67,7 +67,7 @@ func TestExecSuccess(t *testing.T) {
 		Deadline: searchutil.NewDeadline(time.Now(), time.Minute, ""),
 		RoundDigits: 100,
 	}
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		result, err := Exec(nil, ec, q, false)
 		if err != nil {
 			t.Fatalf(`unexpected error when executing %q: %s`, q, err)
@@ -4018,6 +4018,12 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(scalar)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(123, 456, time())`
+		resultExpected := []netstorage.Result{}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-no-le)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6, label_set(100, "foo", "bar"))`
@@ -4030,6 +4036,12 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(single-value-no-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(123,456, label_set(100, "foo", "bar"))`
+		resultExpected := []netstorage.Result{}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-invalid-le)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6, label_set(100, "le", "foobar"))`
@@ -4042,6 +4054,12 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(single-value-invalid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(50, 60, label_set(100, "le", "foobar"))`
+		resultExpected := []netstorage.Result{}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-inf-le)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6, label_set(100, "le", "+Inf"))`
@@ -4183,6 +4201,28 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(single-value-valid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(0, 100, label_set(100, "le", "200"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0.5, 0.5, 0.5, 0.5, 0.5, 0.5},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_fraction(single-value-valid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(200, 300, label_set(100, "le", "200"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0, 0, 0, 0, 0, 0},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram_quantile(0.6, label_set(100, "le", "200"), "foobar"))`
@@ -4212,7 +4252,7 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r1, r2, r3}
 		f(q, resultExpected)
 	})
-	t.Run(`histogram_quantile(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
+	t.Run(`histogram_share(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram_share(120, label_set(100, "le", "200"), "foobar"))`
 		r1 := netstorage.Result{
@@ -4311,7 +4351,37 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
-	t.Run(`histogram_share(single-value-valid-le-mid-le)`, func(t *testing.T) {
+	t.Run(`histogram_fraction(single-value-valid-le-max-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(0,100, (
+			label_set(100, "le", "100"),
+			label_set(40, "le", "50"),
+			label_set(0, "le", "10"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{1, 1, 1, 1, 1, 1},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_fraction(single-value-valid-le-min-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(0,10, (
+			label_set(100, "le", "100"),
+			label_set(40, "le", "50"),
+			label_set(0, "le", "10"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0, 0, 0, 0, 0, 0},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_share(single-value-valid-le-mid-le-1)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_share(105, (
 			label_set(100, "le", "200"),
@@ -4325,6 +4395,34 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_share(single-value-valid-le-mid-le-2)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_share(55, (
+			label_set(100, "le", "200"),
+			label_set(0, "le", "55"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0, 0, 0, 0, 0, 0},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_fraction(single-value-valid-le-mid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(55,105, (
+			label_set(100, "le", "200"),
+			label_set(0, "le", "55"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-valid-le-min-phi-no-zero-bucket)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0, label_set(100, "le", "200"))`
@@ -4358,6 +4456,17 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(scalar-phi)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(25, time() / 8, label_set(100, "le", "200"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0.5, 0.625, 0.75, 0.875, 0.875, 0.875},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(duplicate-le)`, func(t *testing.T) {
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
 		t.Parallel()
@@ -4439,6 +4548,36 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r1, r2}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(valid)`, func(t *testing.T) {
+		t.Parallel()
+		q := `sort(histogram_fraction(0, 25,
+			label_set(90, "foo", "bar", "le", "10")
+			or label_set(100, "foo", "bar", "le", "30")
+			or label_set(300, "foo", "bar", "le", "+Inf")
+			or label_set(200, "tag", "xx", "le", "10")
+			or label_set(300, "tag", "xx", "le", "30")
+		))`
+		r1 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0.325, 0.325, 0.325, 0.325, 0.325, 0.325},
+			Timestamps: timestampsExpected,
+		}
+		r1.MetricName.Tags = []storage.Tag{{
+			Key: []byte("foo"),
+			Value: []byte("bar"),
+		}}
+		r2 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666},
+			Timestamps: timestampsExpected,
+		}
+		r2.MetricName.Tags = []storage.Tag{{
+			Key: []byte("tag"),
+			Value: []byte("xx"),
+		}}
+		resultExpected := []netstorage.Result{r1, r2}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(negative-bucket-count)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6,
@@ -4555,6 +4694,25 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(normal-bucket-count)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(22,35,
+			label_set(0, "foo", "bar", "le", "10")
+			or label_set(100, "foo", "bar", "le", "30")
+			or label_set(300, "foo", "bar", "le", "+Inf")
+		)`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values: []float64{0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333},
+			Timestamps: timestampsExpected,
+		}
+		r.MetricName.Tags = []storage.Tag{{
+			Key: []byte("foo"),
+			Value: []byte("bar"),
+		}}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(normal-bucket-count, boundsLabel)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram_quantile(0.2,
@@ -9827,7 +9985,7 @@ func TestExecError(t *testing.T) {
 		Deadline: searchutil.NewDeadline(time.Now(), time.Minute, ""),
 		RoundDigits: 100,
 	}
-	for i := 0; i < 4; i++ {
+	for range 4 {
 		rv, err := Exec(nil, ec, q, false)
 		if err == nil {
 			t.Fatalf(`expecting non-nil error on %q`, q)

@@ -55,7 +55,7 @@ type parseCache struct {

 func newParseCache() *parseCache {
 	pc := new(parseCache)
-	for i := 0; i < parseBucketCount; i++ {
+	for i := range parseBucketCount {
 		pc.buckets[i] = newParseBucket()
 	}
 	return pc
@@ -75,7 +75,7 @@ func (pc *parseCache) get(q string) *parseCacheValue {

 func (pc *parseCache) requests() uint64 {
 	var n uint64
-	for i := 0; i < parseBucketCount; i++ {
+	for i := range parseBucketCount {
 		n += pc.buckets[i].requests.Load()
 	}
 	return n
@@ -83,7 +83,7 @@ func (pc *parseCache) requests() uint64 {

 func (pc *parseCache) misses() uint64 {
 	var n uint64
-	for i := 0; i < parseBucketCount; i++ {
+	for i := range parseBucketCount {
 		n += pc.buckets[i].misses.Load()
 	}
 	return n
@@ -91,7 +91,7 @@ func (pc *parseCache) misses() uint64 {

 func (pc *parseCache) len() uint64 {
 	var n uint64
-	for i := 0; i < parseBucketCount; i++ {
+	for i := range parseBucketCount {
 		n += pc.buckets[i].len()
 	}
 	return n

@@ -17,7 +17,7 @@ func testGetParseCacheValue(q string) *parseCacheValue {

 func testGenerateQueries(items int) []string {
 	queries := make([]string, items)
-	for i := 0; i < items; i++ {
+	for i := range items {
 		queries[i] = fmt.Sprintf(`node_time_seconds{instance="node%d", job="job%d"}`, i, i)
 	}
 	return queries
@@ -102,7 +102,7 @@ func TestParseCacheBucketOverflow(t *testing.T) {
 	v := testGetParseCacheValue(queries[0])

 	// Fill bucket
-	for i := 0; i < parseBucketMaxLen; i++ {
+	for i := range parseBucketMaxLen {
 		b.put(queries[i], v)
 	}
 	expectedLen = uint64(parseBucketMaxLen)

@@ -15,7 +15,7 @@ func BenchmarkCachePutNoOverFlow(b *testing.B) {
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
-			for i := 0; i < items; i++ {
+			for i := range items {
 				pc.put(queries[i], v)
 			}
 		}
@@ -32,14 +32,14 @@ func BenchmarkCacheGetNoOverflow(b *testing.B) {
 	queries := testGenerateQueries(items)
 	v := testGetParseCacheValue(queries[0])

-	for i := 0; i < len(queries); i++ {
+	for i := range queries {
 		pc.put(queries[i], v)
 	}
 	b.ResetTimer()
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
-			for i := 0; i < items; i++ {
+			for i := range items {
 				if v := pc.get(queries[i]); v == nil {
 					b.Errorf("unexpected nil value obtained from cache for query: %s ", queries[i])
 				}
@@ -59,7 +59,7 @@ func BenchmarkCachePutGetNoOverflow(b *testing.B) {
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
-			for i := 0; i < items; i++ {
+			for i := range items {
 				pc.put(queries[i], v)
 				if res := pc.get(queries[i]); res == nil {
 					b.Errorf("unexpected nil value obtained from cache for query: %s ", queries[i])
@@ -79,7 +79,7 @@ func BenchmarkCachePutOverflow(b *testing.B) {
 	queries := testGenerateQueries(items)
 	v := testGetParseCacheValue(queries[0])

-	for i := 0; i < parseCacheMaxLen; i++ {
+	for i := range parseCacheMaxLen {
 		c.put(queries[i], v)
 	}

@@ -105,7 +105,7 @@ func BenchmarkCachePutGetOverflow(b *testing.B) {
 	queries := testGenerateQueries(items)
 	v := testGetParseCacheValue(queries[0])

-	for i := 0; i < parseCacheMaxLen; i++ {
+	for i := range parseCacheMaxLen {
 		c.put(queries[i], v)
 	}

@@ -141,8 +141,8 @@ var testSimpleQueries = []string{

 func BenchmarkParsePromQLWithCacheSimple(b *testing.B) {
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		for j := 0; j < len(testSimpleQueries); j++ {
+	for range b.N {
+		for j := range testSimpleQueries {
 			_, err := parsePromQLWithCache(testSimpleQueries[j])
 			if err != nil {
 				b.Errorf("unexpected error: %s", err)
@@ -155,7 +155,7 @@ func BenchmarkParsePromQLWithCacheSimpleParallel(b *testing.B) {
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
-			for i := 0; i < len(testSimpleQueries); i++ {
+			for i := range testSimpleQueries {
 				_, err := parsePromQLWithCache(testSimpleQueries[i])
 				if err != nil {
 					b.Errorf("unexpected error: %s", err)
@@ -210,8 +210,8 @@ var testComplexQueries = []string{

 func BenchmarkParsePromQLWithCacheComplex(b *testing.B) {
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		for j := 0; j < len(testComplexQueries); j++ {
+	for range b.N {
+		for j := range testComplexQueries {
 			_, err := parsePromQLWithCache(testComplexQueries[j])
 			if err != nil {
 				b.Errorf("unexpected error: %s", err)
@@ -224,7 +224,7 @@ func BenchmarkParsePromQLWithCacheComplexParallel(b *testing.B) {
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
-			for i := 0; i < len(testComplexQueries); i++ {
+			for i := range testComplexQueries {
 				_, err := parsePromQLWithCache(testComplexQueries[i])
 				if err != nil {
 					b.Errorf("unexpected error: %s", err)

@@ -13,6 +13,8 @@ type QueryStats struct {
 	ExecutionDuration atomic.Pointer[time.Duration]
 	// SeriesFetched contains the number of series fetched from storage or cache.
 	SeriesFetched atomic.Int64
+	// MemoryUsage contains the estimated memory consumption of the query
+	MemoryUsage atomic.Int64

 	at *auth.Token

@@ -53,3 +55,17 @@ func (qs *QueryStats) addExecutionTimeMsec(startTime time.Time) {
 	d := time.Since(startTime)
 	qs.ExecutionDuration.Store(&d)
 }
+
+func (qs *QueryStats) addMemoryUsage(memoryUsage int64) {
+	if qs == nil {
+		return
+	}
+	qs.MemoryUsage.Store(memoryUsage)
+}
+
+func (qs *QueryStats) memoryUsage() int64 {
+	if qs == nil {
+		return 0
+	}
+	return qs.MemoryUsage.Load()
+}

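Both new accessors tolerate a nil receiver, so call sites don't need to check whether per-query stats collection is enabled: calling a method on a nil pointer is legal in Go as long as the method never dereferences it. A minimal illustration of the pattern:

package main

import "fmt"

type stats struct {
	n int64
}

// add is a no-op on a nil *stats, sparing callers an `if s != nil` check.
func (s *stats) add(v int64) {
	if s == nil {
		return
	}
	s.n += v
}

func main() {
	var disabled *stats // nil: collection disabled
	disabled.add(42)    // no panic, no effect

	enabled := &stats{}
	enabled.add(42)
	fmt.Println(enabled.n) // 42
}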
@@ -534,7 +534,10 @@ type rollupFuncArg struct {
 	timestamps []int64

 	// Real value preceding values.
-	// Is populated if preceding value is within the rc.LookbackDelta.
+	// Is populated if the preceding sample falls within the rc.LookbackDelta range, or if rc.LookbackDelta is not set.
+	//
+	// It provides an additional check and value for rollup functions such as increase(), changes(),
+	// when the prevValue is NaN due to a gap or a small lookback window.
 	realPrevValue float64

 	// Real value which goes after values.
@@ -713,7 +716,11 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
 	// Extend dstValues in order to remove mallocs below.
 	dstValues = decimal.ExtendFloat64sCapacity(dstValues, len(rc.Timestamps))

-	// Use step as the scrape interval for instant queries (when start == end).
+	// Set maxPrevInterval for subsequent rfa.prevValue calculations in rollupFunc:
+	// For instant queries, use rc.Step directly as maxPrevInterval.
+	// For range queries, rc.Step is typically too small to serve as the lookback window between two rollup points.
+	// Instead, estimate the scrape interval from raw sample timestamps (using the 0.6 quantile of the last 20 intervals)
+	// and slightly inflate the scrape interval to set maxPrevInterval, allowing for some tolerance to jitter.
 	maxPrevInterval := rc.Step
 	if rc.Start < rc.End {
 		scrapeInterval := getScrapeInterval(timestamps, rc.Step)
@@ -729,22 +736,21 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
 		}
 	}
 	window := rc.Window
-	// Adjust lookbehind window only if it isn't set explicitly, e.g. rate(foo).
-	// In the case of missing lookbehind window it should be adjusted in order to return non-empty graph
-	// when the window doesn't cover at least two raw samples (this is what most users expect).
-	//
-	// If the user explicitly sets the lookbehind window to some fixed value, e.g. rate(foo[1s]),
-	// then it is expected he knows what he is doing. Do not adjust the lookbehind window then.
-	//
-	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3483
 	if window <= 0 {
 		window = rc.Step
 		if rc.MayAdjustWindow && window < maxPrevInterval {
+			// Adjust lookbehind window only if it isn't set explicitly, e.g. rate(foo).
+			// In the case of missing lookbehind window it should be adjusted in order to return non-empty graph
+			// when the window doesn't cover at least two raw samples (this is what most users expect).
+			//
+			// If the user explicitly sets the lookbehind window to some fixed value, e.g. rate(foo[1s]),
+			// then it is expected he knows what he is doing. Do not adjust the lookbehind window then.
+			//
+			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3483
 			window = maxPrevInterval
 		}
+		// Artificial window cannot exceed explicit rc.LookbackDelta, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/784
 		if rc.isDefaultRollup && rc.LookbackDelta > 0 && window > rc.LookbackDelta {
-			// Implicit window exceeds -search.maxStalenessInterval, so limit it to -search.maxStalenessInterval
-			// according to https://github.com/VictoriaMetrics/VictoriaMetrics/issues/784
 			window = rc.LookbackDelta
 		}
 	}
@@ -869,17 +875,17 @@ func getScrapeInterval(timestamps []int64, defaultInterval int64) int64 {
 		return defaultInterval
 	}

-	// Estimate scrape interval as 0.6 quantile for the first 20 intervals.
-	tsPrev := timestamps[0]
-	timestamps = timestamps[1:]
+	// Estimate scrape interval as 0.6 quantile of the last 20 intervals.
+	tsPrev := timestamps[len(timestamps)-1]
+	timestamps = timestamps[:len(timestamps)-1]
 	if len(timestamps) > 20 {
-		timestamps = timestamps[:20]
+		timestamps = timestamps[len(timestamps)-20:]
 	}
 	a := getFloat64s()
 	intervals := a.A[:0]
-	for _, ts := range timestamps {
-		intervals = append(intervals, float64(ts-tsPrev))
-		tsPrev = ts
+	for i := len(timestamps) - 1; i >= 0; i-- {
+		intervals = append(intervals, float64(tsPrev-timestamps[i]))
+		tsPrev = timestamps[i]
 	}
 	scrapeInterval := int64(quantile(0.6, intervals))
 	a.A = intervals
@@ -2107,9 +2113,15 @@ func rollupChanges(rfa *rollupFuncArg) float64 {
 		if len(values) == 0 {
 			return nan
 		}
-		prevValue = values[0]
-		values = values[1:]
-		n++
+		// Assume that the value didn't change during the current gap
+		// if realPrevValue exists.
+		if !math.IsNaN(rfa.realPrevValue) {
+			prevValue = rfa.realPrevValue
+		} else {
+			n++
+			prevValue = values[0]
+			values = values[1:]
+		}
 	}
 	for _, v := range values {
 		if v != prevValue {

@@ -83,9 +83,11 @@ func checkRollupResultCacheReset() {

 const checkRollupResultCacheResetInterval = 5 * time.Second

-var needRollupResultCacheReset atomic.Bool
-var checkRollupResultCacheResetOnce sync.Once
-var rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]
+var (
+	needRollupResultCacheReset atomic.Bool
+	checkRollupResultCacheResetOnce sync.Once
+	rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]
+)

 var rollupResultCacheV = &rollupResultCache{
 	c: workingsetcache.New(1024 * 1024), // This is a cache for testing.
@@ -178,6 +180,12 @@ func InitRollupResultCache(cachePath string) {

 	rollupResultCacheV = &rollupResultCache{
 		c: c,
+
+		rollupResultCacheRequests: metrics.GetOrCreateCounter(`vm_rollup_result_cache_requests_total`),
+		rollupResultCacheFullHits: metrics.GetOrCreateCounter(`vm_rollup_result_cache_full_hits_total`),
+		rollupResultCachePartialHits: metrics.GetOrCreateCounter(`vm_rollup_result_cache_partial_hits_total`),
+		rollupResultCacheMisses: metrics.GetOrCreateCounter(`vm_rollup_result_cache_miss_total`),
+		rollupResultCacheResets: metrics.GetOrCreateCounter(`vm_rollup_result_cache_resets_total`),
 	}
 }

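The counters move from package-level `metrics.NewCounter` variables to struct fields created with `metrics.GetOrCreateCounter`, which returns the already-registered counter when one with the same name exists instead of panicking on double registration — a better fit for an object that can be constructed more than once. A small sketch of the distinction (the type and metric name here are illustrative):

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metrics"
)

type cache struct {
	requests *metrics.Counter
}

func newCache() *cache {
	return &cache{
		// Idempotent: a second newCache() reuses the same registered
		// counter, whereas a second metrics.NewCounter with an identical
		// name would panic.
		requests: metrics.GetOrCreateCounter(`example_cache_requests_total`),
	}
}

func main() {
	a, b := newCache(), newCache()
	a.requests.Inc()
	b.requests.Inc()
	fmt.Println(a.requests == b.requests) // true: same underlying counter
}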
@@ -193,13 +201,18 @@ func StopRollupResultCache() {

 type rollupResultCache struct {
 	c *workingsetcache.Cache
-}
-
-var rollupResultCacheResets = metrics.NewCounter(`vm_cache_resets_total{type="promql/rollupResult"}`)
+
+	rollupResultCacheRequests *metrics.Counter
+	rollupResultCacheFullHits *metrics.Counter
+	rollupResultCachePartialHits *metrics.Counter
+	rollupResultCacheMisses *metrics.Counter
+
+	rollupResultCacheResets *metrics.Counter
+}

 // ResetRollupResultCache resets rollup result cache.
 func ResetRollupResultCache() {
-	rollupResultCacheResets.Inc()
+	rollupResultCacheV.rollupResultCacheResets.Inc()
 	rollupResultCacheKeyPrefix.Add(1)
 	logger.Infof("rollupResult cache has been cleared")
 }
@@ -726,7 +739,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
 	entriesLen := int(encoding.UnmarshalUint32(src))
 	src = src[4:]
 	mi.entries = slicesutil.SetLength(mi.entries, entriesLen)
-	for i := 0; i < entriesLen; i++ {
+	for i := range entriesLen {
 		tail, err := mi.entries[i].Unmarshal(src)
 		if err != nil {
 			return fmt.Errorf("cannot unmarshal entry #%d: %w", i, err)
Some files were not shown because too many files have changed in this diff.