Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 16:59:40 +03:00)

Compare commits: compressor...test-tmp (357 commits)
Commit list: 7d7d17d192 ... 47b7487b5f (only abbreviated commit SHAs are present; the author, date, and message columns are empty in this mirror view).
33  .github/workflows/main.yml  vendored

@@ -85,9 +85,38 @@ jobs:
          restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-

      - name: Run tests
-       run: make ${{ matrix.scenario}}
+       run: GOGC=10 make ${{ matrix.scenario}}

      - name: Publish coverage
-       uses: codecov/codecov-action@v4
+       uses: codecov/codecov-action@v5
        with:
          file: ./coverage.txt

+  integration-test:
+    name: integration-test
+    needs: [lint, test]
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Code checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        id: go
+        uses: actions/setup-go@v5
+        with:
+          cache: false
+          go-version: stable
+
+      - name: Cache Go artifacts
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/bin
+            ~/go/pkg/mod
+          key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
+          restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
+
+      - name: Run integration tests
+        run: make integration-test
2  LICENSE

@@ -175,7 +175,7 @@

 END OF TERMS AND CONDITIONS

-Copyright 2019-2024 VictoriaMetrics, Inc.
+Copyright 2019-2025 VictoriaMetrics, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
12  Makefile

@@ -513,19 +513,19 @@ check-all: fmt vet golangci-lint govulncheck
clean-checkers: remove-golangci-lint remove-govulncheck

test:
-	DISABLE_FSYNC_FOR_TESTING=1 go test ./lib/... ./app/...
+	go test ./lib/... ./app/...

test-race:
-	DISABLE_FSYNC_FOR_TESTING=1 go test -race ./lib/... ./app/...
+	go test -race ./lib/... ./app/...

test-pure:
-	DISABLE_FSYNC_FOR_TESTING=1 CGO_ENABLED=0 go test ./lib/... ./app/...
+	CGO_ENABLED=0 go test ./lib/... ./app/...

test-full:
-	DISABLE_FSYNC_FOR_TESTING=1 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

test-full-386:
-	DISABLE_FSYNC_FOR_TESTING=1 GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

integration-test: victoria-metrics vmagent vmalert vmauth
	go test ./apptest/... -skip="^TestCluster.*"

@@ -567,7 +567,7 @@ golangci-lint: install-golangci-lint
	golangci-lint run

install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.60.3
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.63.4

remove-golangci-lint:
	rm -rf `which golangci-lint`
16  README.md

@@ -1,12 +1,14 @@
# VictoriaMetrics

-[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
-[](https://hub.docker.com/r/victoriametrics/victoria-metrics)
-[](https://slack.victoriametrics.com/)
-[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
-[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
-[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
-[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
+(nine replacement badge image lines; their image markdown was not captured in this mirror view)

<picture>
  <source srcset="docs/logo_white.webp" media="(prefers-color-scheme: dark)">
@@ -14,6 +14,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"

@@ -39,9 +40,24 @@ var (
		"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
		"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
		"Smaller intervals increase disk IO load. Minimum supported value is 1s")
+	maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmsingle can receive per second. Data ingestion is paused when the limit is exceeded. "+
+		"By default there are no limits on samples ingestion rate.")
+	finalDedupScheduleInterval = flag.Duration("storage.finalDedupScheduleCheckInterval", time.Hour, "The interval for checking when final deduplication process should be started."+
+		"Storage unconditionally adds 25% jitter to the interval value on each check evaluation."+
+		" Changing the interval to the bigger values may delay downsampling, deduplication for historical data."+
+		" See also https://docs.victoriametrics.com/#deduplication")
)

func main() {
+	// VictoriaMetrics is optimized for reduced memory allocations,
+	// so it can run with the reduced GOGC in order to reduce the used memory,
+	// while keeping CPU usage spent in GC at low levels.
+	//
+	// Some workloads may need increased GOGC values. Then such values can be set via GOGC environment variable.
+	// It is recommended increasing GOGC if go_memstats_gc_cpu_fraction metric exposed at /metrics page
+	// exceeds 0.05 for extended periods of time.
+	cgroup.SetGOGC(30)
+
	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)
	flag.Usage = usage
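The comment above explains the GOGC trade-off. As a rough, hypothetical sketch of the "apply a default unless the operator overrides it" pattern (cgroup.SetGOGC itself is not shown in this diff, so this is an assumption about its behaviour):

```go
package main

import (
	"os"
	"runtime/debug"
)

// setDefaultGOGC lowers the GC target percentage unless the operator
// explicitly configured GOGC via the environment.
func setDefaultGOGC(pct int) {
	if os.Getenv("GOGC") != "" {
		return // respect the operator-provided value
	}
	debug.SetGCPercent(pct)
}

func main() {
	setDefaultGOGC(30) // mirrors the cgroup.SetGOGC(30) call above
}
```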
@@ -74,8 +90,13 @@ func main() {
	startTime := time.Now()
	storage.SetDedupInterval(*minScrapeInterval)
	storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
+	if *finalDedupScheduleInterval < time.Hour {
+		logger.Fatalf("-dedup.finalDedupScheduleCheckInterval cannot be smaller than 1 hour; got %s", *finalDedupScheduleInterval)
+	}
+	storage.SetFinalDedupScheduleInterval(*finalDedupScheduleInterval)
	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
	vmselect.Init()
+	vminsertcommon.StartIngestionRateLimiter(*maxIngestionRate)
	vminsert.Init()

	startSelfScraper()

@@ -97,6 +118,7 @@ func main() {
	}
	logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
	vminsert.Stop()
+	vminsertcommon.StopIngestionRateLimiter()

	vmstorage.Stop()
	vmselect.Stop()
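The -storage.finalDedupScheduleCheckInterval flag above mentions an unconditional 25% jitter on every check. A hypothetical sketch of how such a jittered delay could be computed (the storage-side implementation is not part of this diff):

```go
package main

import (
	"math/rand"
	"time"
)

// nextDedupCheckDelay returns the base interval plus up to 25% random jitter,
// so multiple storage instances do not start final deduplication in lockstep.
func nextDedupCheckDelay(base time.Duration) time.Duration {
	jitter := time.Duration(rand.Int63n(int64(base) / 4))
	return base + jitter
}

func main() {
	_ = nextDedupCheckDelay(time.Hour) // somewhere between 1h and 1h15m
}
```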
@@ -10,9 +10,10 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -48,7 +49,7 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
var bb bytesutil.ByteBuffer
|
||||
var rows prometheus.Rows
|
||||
var mrs []storage.MetricRow
|
||||
var labels []prompb.Label
|
||||
var labels []prompbmarshal.Label
|
||||
t := time.NewTicker(scrapeInterval)
|
||||
f := func(currentTime time.Time, sendStaleMarkers bool) {
|
||||
currentTimestamp := currentTime.UnixNano() / 1e6
|
||||
@@ -68,6 +69,10 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
t := &r.Tags[j]
|
||||
labels = addLabel(labels, t.Key, t.Value)
|
||||
}
|
||||
if timeserieslimits.IsExceeding(labels) {
|
||||
// Skip metric with exceeding labels.
|
||||
continue
|
||||
}
|
||||
if len(mrs) < cap(mrs) {
|
||||
mrs = mrs[:len(mrs)+1]
|
||||
} else {
|
||||
@@ -99,11 +104,11 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
}
|
||||
}
|
||||
|
||||
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
|
||||
func addLabel(dst []prompbmarshal.Label, key, value string) []prompbmarshal.Label {
|
||||
if len(dst) < cap(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, prompb.Label{})
|
||||
dst = append(dst, prompbmarshal.Label{})
|
||||
}
|
||||
lb := &dst[len(dst)-1]
|
||||
lb.Name = key
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
@@ -21,6 +22,11 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
)
|
||||
|
||||
var (
|
||||
datadogStreamFields = flagutil.NewArrayString("datadog.streamFields", "Datadog tags to be used as stream fields.")
|
||||
datadogIgnoreFields = flagutil.NewArrayString("datadog.ignoreFields", "Datadog tags to ignore.")
|
||||
)
|
||||
|
||||
var parserPool fastjson.ParserPool
|
||||
|
||||
// RequestHandler processes Datadog insert requests
|
||||
@@ -79,17 +85,21 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(cp.StreamFields) == 0 {
|
||||
cp.StreamFields = *datadogStreamFields
|
||||
}
|
||||
if len(cp.IgnoreFields) == 0 {
|
||||
cp.IgnoreFields = *datadogIgnoreFields
|
||||
}
|
||||
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := readLogsRequest(ts, data, lmp.AddRow)
|
||||
lmp := cp.NewLogMessageProcessor("datadog")
|
||||
err = readLogsRequest(ts, data, lmp)
|
||||
lmp.MustClose()
|
||||
if n > 0 {
|
||||
rowsIngestedTotal.Add(n)
|
||||
}
|
||||
if err != nil {
|
||||
logger.Warnf("cannot decode log message in /api/v2/logs request: %s, stream fields: %s", err, cp.StreamFields)
|
||||
return true
|
||||
@@ -105,47 +115,118 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
|
||||
|
||||
var (
|
||||
v2LogsRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/datadog/api/v2/logs"}`)
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="datadog"}`)
|
||||
v2LogsRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
|
||||
)
|
||||
|
||||
// datadog message field has two formats:
|
||||
// - regular log message with string text
|
||||
// - nested json format for serverless plugins
|
||||
// which has folowing format:
|
||||
// {"message": {"message": "text","lamdba": {"arn": "string","requestID": "string"}, "timestamp": int64} }
|
||||
//
|
||||
// See https://github.com/DataDog/datadog-lambda-extension/blob/28b90c7e4e985b72d60b5f5a5147c69c7ac693c4/bottlecap/src/logs/lambda/mod.rs#L24
|
||||
func appendMsgFields(fields []logstorage.Field, v *fastjson.Value) ([]logstorage.Field, error) {
|
||||
switch v.Type() {
|
||||
case fastjson.TypeString:
|
||||
val := v.GetStringBytes()
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
case fastjson.TypeObject:
|
||||
var firstErr error
|
||||
v.GetObject().Visit(func(k []byte, v *fastjson.Value) {
|
||||
if firstErr != nil {
|
||||
return
|
||||
}
|
||||
switch bytesutil.ToUnsafeString(k) {
|
||||
case "message":
|
||||
val := v.GetStringBytes()
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
case "status":
|
||||
val := v.GetStringBytes()
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "status",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
case "lamdba":
|
||||
obj, err := v.Object()
|
||||
if err != nil {
|
||||
firstErr = err
|
||||
firstErr = fmt.Errorf("unexpected lambda value type for %q:%q; want object", k, v)
|
||||
return
|
||||
}
|
||||
obj.Visit(func(k []byte, v *fastjson.Value) {
|
||||
if firstErr != nil {
|
||||
return
|
||||
}
|
||||
val, err := v.StringBytes()
|
||||
if err != nil {
|
||||
firstErr = fmt.Errorf("unexpected lambda label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: bytesutil.ToUnsafeString(k),
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
})
|
||||
default:
|
||||
return fields, fmt.Errorf("unsupported message type %q", v.Type().String())
|
||||
}
|
||||
return fields, nil
|
||||
}
|
||||
|
||||
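For illustration, the nested serverless message shape described above can be fed through appendMsgFields roughly like this (all values are made up; the sketch assumes it lives in the same package as the handler):

```go
package datadog

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/valyala/fastjson"
)

// exampleNestedMessage shows how the object form of the "message" field is
// flattened: the inner "message" becomes _msg, "status" stays as status, and
// the lambda attributes become individual fields.
func exampleNestedMessage() {
	var p fastjson.Parser
	v, err := p.Parse(`{"message":"hello from lambda","status":"info","lamdba":{"arn":"arn:aws:lambda:...","requestID":"req-1"}}`)
	if err != nil {
		panic(err)
	}
	var fields []logstorage.Field
	fields, err = appendMsgFields(fields, v)
	if err != nil {
		panic(err)
	}
	fmt.Println(fields) // _msg, status, arn and requestID fields
}
```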
// readLogsRequest parses data according to DataDog logs format
|
||||
// https://docs.datadoghq.com/api/latest/logs/#send-logs
|
||||
func readLogsRequest(ts int64, data []byte, processLogMessage func(int64, []logstorage.Field)) (int, error) {
|
||||
func readLogsRequest(ts int64, data []byte, lmp insertutils.LogMessageProcessor) error {
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
return fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
}
|
||||
records, err := v.Array()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot extract array from parsed JSON: %w", err)
|
||||
return fmt.Errorf("cannot extract array from parsed JSON: %w", err)
|
||||
}
|
||||
|
||||
var fields []logstorage.Field
|
||||
for m, r := range records {
|
||||
for _, r := range records {
|
||||
o, err := r.Object()
|
||||
if err != nil {
|
||||
return m + 1, fmt.Errorf("could not extract log record: %w", err)
|
||||
return fmt.Errorf("could not extract log record: %w", err)
|
||||
}
|
||||
o.Visit(func(k []byte, v *fastjson.Value) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
val, e := v.StringBytes()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
switch string(k) {
|
||||
switch bytesutil.ToUnsafeString(k) {
|
||||
case "message":
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
fields, err = appendMsgFields(fields, v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
case "timestamp":
|
||||
val, e := v.Int64()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("failed to parse timestamp for %q:%q", k, v)
|
||||
}
|
||||
if val > 0 {
|
||||
ts = val * 1e6
|
||||
}
|
||||
case "ddtags":
|
||||
// https://docs.datadoghq.com/getting_started/tagging/
|
||||
val, e := v.StringBytes()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
var pair []byte
|
||||
idx := 0
|
||||
for idx >= 0 {
|
||||
@@ -172,14 +253,22 @@ func readLogsRequest(ts int64, data []byte, processLogMessage func(int64, []logs
|
||||
}
|
||||
}
|
||||
default:
|
||||
val, e := v.StringBytes()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: bytesutil.ToUnsafeString(k),
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
}
|
||||
})
|
||||
processLogMessage(ts, fields)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lmp.AddRow(ts, fields, nil)
|
||||
fields = fields[:0]
|
||||
}
|
||||
return len(records), nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package datadog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
)
|
||||
|
||||
func TestReadLogsRequestFailure(t *testing.T) {
|
||||
@@ -15,16 +13,12 @@ func TestReadLogsRequestFailure(t *testing.T) {
|
||||
|
||||
ts := time.Now().UnixNano()
|
||||
|
||||
processLogMessage := func(timestamp int64, fields []logstorage.Field) {
|
||||
t.Fatalf("unexpected call to processLogMessage with timestamp=%d, fields=%s", timestamp, fields)
|
||||
}
|
||||
|
||||
rows, err := readLogsRequest(ts, []byte(data), processLogMessage)
|
||||
if err == nil {
|
||||
lmp := &insertutils.TestLogMessageProcessor{}
|
||||
if err := readLogsRequest(ts, []byte(data), lmp); err == nil {
|
||||
t.Fatalf("expecting non-empty error")
|
||||
}
|
||||
if rows != 0 {
|
||||
t.Fatalf("unexpected non-zero rows=%d", rows)
|
||||
if err := lmp.Verify(nil, ""); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
f("foobar")
|
||||
@@ -39,30 +33,16 @@ func TestReadLogsRequestSuccess(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
ts := time.Now().UnixNano()
|
||||
var result string
|
||||
processLogMessage := func(_ int64, fields []logstorage.Field) {
|
||||
a := make([]string, len(fields))
|
||||
for i, f := range fields {
|
||||
a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
|
||||
}
|
||||
if len(result) > 0 {
|
||||
result = result + "\n"
|
||||
}
|
||||
s := "{" + strings.Join(a, ",") + "}"
|
||||
result += s
|
||||
var timestampsExpected []int64
|
||||
for i := 0; i < rowsExpected; i++ {
|
||||
timestampsExpected = append(timestampsExpected, ts)
|
||||
}
|
||||
|
||||
// Read the request without compression
|
||||
rows, err := readLogsRequest(ts, []byte(data), processLogMessage)
|
||||
if err != nil {
|
||||
lmp := &insertutils.TestLogMessageProcessor{}
|
||||
if err := readLogsRequest(ts, []byte(data), lmp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
}
|
||||
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
if err := lmp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,6 +54,12 @@ func TestReadLogsRequestSuccess(t *testing.T) {
|
||||
"hostname":"127.0.0.1",
|
||||
"message":"bar",
|
||||
"service":"test"
|
||||
}, {
|
||||
"ddsource":"nginx",
|
||||
"ddtags":"tag1:value1,tag2:value2",
|
||||
"hostname":"127.0.0.1",
|
||||
"message":{"message": "nested"},
|
||||
"service":"test"
|
||||
}, {
|
||||
"ddsource":"nginx",
|
||||
"ddtags":"tag1:value1,tag2:value2",
|
||||
@@ -106,8 +92,9 @@ func TestReadLogsRequestSuccess(t *testing.T) {
|
||||
"service":"test"
|
||||
}
|
||||
]`
|
||||
rowsExpected := 6
|
||||
rowsExpected := 7
|
||||
resultExpected := `{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"bar","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"nested","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"foobar","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"baz","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -101,9 +99,10 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
lmp := cp.NewLogMessageProcessor("elasticsearch_bulk")
|
||||
isGzip := r.Header.Get("Content-Encoding") == "gzip"
|
||||
n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
|
||||
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
|
||||
n, err := readBulkRequest(streamName, r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
|
||||
@@ -129,11 +128,10 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
|
||||
var (
|
||||
bulkRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)
|
||||
bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
|
||||
)
|
||||
|
||||
func readBulkRequest(r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func readBulkRequest(streamName string, r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
|
||||
|
||||
if isGzip {
|
||||
@@ -148,48 +146,29 @@ func readBulkRequest(r io.Reader, isGzip bool, timeField string, msgFields []str
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
lb := lineBufferPool.Get()
|
||||
defer lineBufferPool.Put(lb)
|
||||
|
||||
lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
|
||||
sc := bufio.NewScanner(wcr)
|
||||
sc.Buffer(lb.B, len(lb.B))
|
||||
lr := insertutils.NewLineReader(streamName, wcr)
|
||||
|
||||
n := 0
|
||||
nCheckpoint := 0
|
||||
for {
|
||||
ok, err := readBulkLine(sc, timeField, msgFields, lmp)
|
||||
ok, err := readBulkLine(lr, timeField, msgFields, lmp)
|
||||
wcr.DecConcurrency()
|
||||
if err != nil || !ok {
|
||||
rowsIngestedTotal.Add(n - nCheckpoint)
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
if batchSize := n - nCheckpoint; n >= 1000 {
|
||||
rowsIngestedTotal.Add(batchSize)
|
||||
nCheckpoint = n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var lineBufferPool bytesutil.ByteBufferPool
|
||||
|
||||
func readBulkLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
func readBulkLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
var line []byte
|
||||
|
||||
// Read the command, must be "create" or "index"
|
||||
for len(line) == 0 {
|
||||
if !sc.Scan() {
|
||||
if err := sc.Err(); err != nil {
|
||||
if errors.Is(err, bufio.ErrTooLong) {
|
||||
return false, fmt.Errorf(`cannot read "create" or "index" command, since its size exceeds -insert.maxLineSizeBytes=%d`,
|
||||
insertutils.MaxLineSizeBytes.IntN())
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
if !lr.NextLine() {
|
||||
err := lr.Err()
|
||||
return false, err
|
||||
}
|
||||
line = sc.Bytes()
|
||||
line = lr.Line
|
||||
}
|
||||
lineStr := bytesutil.ToUnsafeString(line)
|
||||
if !strings.Contains(lineStr, `"create"`) && !strings.Contains(lineStr, `"index"`) {
|
||||
@@ -197,16 +176,18 @@ func readBulkLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp i
|
||||
}
|
||||
|
||||
// Decode log message
|
||||
if !sc.Scan() {
|
||||
if err := sc.Err(); err != nil {
|
||||
if errors.Is(err, bufio.ErrTooLong) {
|
||||
return false, fmt.Errorf("cannot read log message, since its size exceeds -insert.maxLineSizeBytes=%d", insertutils.MaxLineSizeBytes.IntN())
|
||||
}
|
||||
if !lr.NextLine() {
|
||||
if err := lr.Err(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
|
||||
}
|
||||
line = sc.Bytes()
|
||||
line = lr.Line
|
||||
if len(line) == 0 {
|
||||
// Special case - the line could be too long, so it was skipped.
|
||||
// Continue parsing next lines.
|
||||
return true, nil
|
||||
}
|
||||
p := logstorage.GetJSONParser()
|
||||
if err := p.ParseLogMessage(line); err != nil {
|
||||
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
|
||||
@@ -220,7 +201,7 @@ func readBulkLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp i
|
||||
ts = time.Now().UnixNano()
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgFields, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
lmp.AddRow(ts, p.Fields, nil)
|
||||
logstorage.PutJSONParser(p)
|
||||
|
||||
return true, nil
|
||||
|
||||
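As a usage sketch, a minimal create-plus-document bulk payload can be pushed through readBulkRequest the same way the tests below do (field values are made up; the sketch assumes it sits in the elasticsearch package):

```go
package elasticsearch

import (
	"bytes"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

// exampleBulkIngest feeds one "create"+document pair through readBulkRequest.
func exampleBulkIngest() (int, error) {
	data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","message":"foobar","log.file.path":"/var/log/auth.log"}
`
	tlp := &insertutils.TestLogMessageProcessor{}
	r := bytes.NewBufferString(data)
	// streamName is only used for error context; no gzip; "@timestamp" is the
	// time field and "message" is renamed to _msg.
	return readBulkRequest("example", r, false, "@timestamp", []string{"message"}, tlp)
}
```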
@@ -15,7 +15,7 @@ func TestReadBulkRequest_Failure(t *testing.T) {
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
rows, err := readBulkRequest(r, false, "_time", []string{"_msg"}, tlp)
|
||||
rows, err := readBulkRequest("test", r, false, "_time", []string{"_msg"}, tlp)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-empty error")
|
||||
}
|
||||
@@ -33,7 +33,7 @@ foobar`)
|
||||
}
|
||||
|
||||
func TestReadBulkRequest_Success(t *testing.T) {
|
||||
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
msgFields := []string{"non_existing_foo", msgField, "non_exiting_bar"}
|
||||
@@ -41,14 +41,14 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
|
||||
// Read the request without compression
|
||||
r := bytes.NewBufferString(data)
|
||||
rows, err := readBulkRequest(r, false, timeField, msgFields, tlp)
|
||||
rows, err := readBulkRequest("test", r, false, timeField, msgFields, tlp)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
if rows != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -56,22 +56,22 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
tlp = &insertutils.TestLogMessageProcessor{}
|
||||
compressedData := compressData(data)
|
||||
r = bytes.NewBufferString(compressedData)
|
||||
rows, err = readBulkRequest(r, true, timeField, msgFields, tlp)
|
||||
rows, err = readBulkRequest("test", r, true, timeField, msgFields, tlp)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
if rows != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatalf("verification failure after compression: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify an empty data
|
||||
f("", "_time", "_msg", 0, nil, "")
|
||||
f("\n", "_time", "_msg", 0, nil, "")
|
||||
f("\n\n", "_time", "_msg", 0, nil, "")
|
||||
f("", "_time", "_msg", nil, "")
|
||||
f("\n", "_time", "_msg", nil, "")
|
||||
f("\n\n", "_time", "_msg", nil, "")
|
||||
|
||||
// Verify non-empty data
|
||||
data := `{"create":{"_index":"filebeat-8.8.0"}}
|
||||
@@ -85,13 +85,12 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
`
|
||||
timeField := "@timestamp"
|
||||
msgField := "message"
|
||||
rowsExpected := 4
|
||||
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000}
|
||||
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
|
||||
{"_msg":"baz"}
|
||||
{"_msg":"xyz","x":"y"}
|
||||
{"_msg":"qwe rty"}`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, timeField, msgField, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func compressData(s string) string {
|
||||
|
||||
@@ -41,7 +41,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
|
||||
r := &bytes.Reader{}
|
||||
for pb.Next() {
|
||||
r.Reset(dataBytes)
|
||||
_, err := readBulkRequest(r, isGzip, timeField, msgFields, blp)
|
||||
_, err := readBulkRequest("test", r, isGzip, timeField, msgFields, blp)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
|
||||
@@ -137,10 +137,12 @@ GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignore

// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
-	// AddRow must add row to the LogMessageProcessor with the given timestamp and the given fields.
+	// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.
	//
+	// If streamFields is non-nil, then the given streamFields must be used as log stream fields instead of pre-configured fields.
+	//
	// The LogMessageProcessor implementation cannot hold references to fields, since the caller can re-use them.
-	AddRow(timestamp int64, fields []logstorage.Field)
+	AddRow(timestamp int64, fields, streamFields []logstorage.Field)

	// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
	MustClose()

@@ -154,6 +156,9 @@ type logMessageProcessor struct {

	cp *CommonParams
	lr *logstorage.LogRows
+
+	rowsIngestedTotal  *metrics.Counter
+	bytesIngestedTotal *metrics.Counter
}

func (lmp *logMessageProcessor) initPeriodicFlush() {

@@ -183,18 +188,24 @@
}

// AddRow adds new log message to lmp with the given timestamp and fields.
-func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
+//
+// If streamFields is non-nil, then it is used as log stream fields instead of the pre-configured stream fields.
+func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
	lmp.mu.Lock()
	defer lmp.mu.Unlock()

+	lmp.rowsIngestedTotal.Inc()
+	n := logstorage.EstimatedJSONRowLen(fields)
+	lmp.bytesIngestedTotal.Add(n)
+
	if len(fields) > *MaxFieldsPerLine {
-		rf := logstorage.RowFormatter(fields)
-		logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
+		line := logstorage.MarshalFieldsToJSON(nil, fields)
+		logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, line)
		rowsDroppedTotalTooManyFields.Inc()
		return
	}

-	lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields)
+	lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields, streamFields)
	if lmp.cp.Debug {
		s := lmp.lr.GetRowString(0)
		lmp.lr.ResetKeepSettings()

@@ -227,12 +238,17 @@ func (lmp *logMessageProcessor) MustClose() {
// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
-func (cp *CommonParams) NewLogMessageProcessor() LogMessageProcessor {
+func (cp *CommonParams) NewLogMessageProcessor(protocolName string) LogMessageProcessor {
	lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.ExtraFields, *defaultMsgValue)
+	rowsIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_rows_ingested_total{type=%q}", protocolName))
+	bytesIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
	lmp := &logMessageProcessor{
		cp: cp,
		lr: lr,

+		rowsIngestedTotal:  rowsIngestedTotal,
+		bytesIngestedTotal: bytesIngestedTotal,
+
		stopCh: make(chan struct{}),
	}
	lmp.initPeriodicFlush()
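To make the new interface concrete, a minimal hypothetical implementation of LogMessageProcessor under the two-slice AddRow signature might look like this (illustrative only; production code obtains processors via CommonParams.NewLogMessageProcessor as shown above):

```go
package example

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// printProcessor is a toy LogMessageProcessor that just prints every row.
type printProcessor struct{}

// AddRow handles the row synchronously and keeps no reference to fields,
// as the interface contract requires.
func (p *printProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
	line := logstorage.MarshalFieldsToJSON(nil, fields)
	fmt.Printf("ts=%d stream_fields=%d row=%s\n", timestamp, len(streamFields), line)
}

// MustClose has nothing to flush, since AddRow processes rows immediately.
func (p *printProcessor) MustClose() {}
```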
@@ -8,8 +8,10 @@ import (

var (
	// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
-	MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")
+	MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers; "+
+		"see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have")

	// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
-	MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers")
+	MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers; "+
+		"see https://docs.victoriametrics.com/victorialogs/faq/#how-many-fields-a-single-log-entry-may-contain")
)
146  app/vlinsert/insertutils/line_reader.go  (new file)
@@ -0,0 +1,146 @@
|
||||
package insertutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
|
||||
)
|
||||
|
||||
// LineReader reads newline-delimited lines from the underlying reader
|
||||
type LineReader struct {
|
||||
// Line contains the next line read after the call to NextLine
|
||||
//
|
||||
// The Line contents is valid until the next call to NextLine.
|
||||
Line []byte
|
||||
|
||||
// name is the LineReader name
|
||||
name string
|
||||
|
||||
// r is the underlying reader to read data from
|
||||
r io.Reader
|
||||
|
||||
// buf is a buffer for reading the next line
|
||||
buf []byte
|
||||
|
||||
// bufOffset is the offset at buf to read the next line from
|
||||
bufOffset int
|
||||
|
||||
// err is the last error when reading data from r
|
||||
err error
|
||||
|
||||
// eofReached is set to true when all the data is read from r
|
||||
eofReached bool
|
||||
}
|
||||
|
||||
// NewLineReader returns LineReader for r.
|
||||
func NewLineReader(name string, r io.Reader) *LineReader {
|
||||
return &LineReader{
|
||||
name: name,
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
// NextLine reads the next line from the underlying reader.
|
||||
//
|
||||
// It returns true if the next line is successfully read into Line.
|
||||
// If the line length exceeds MaxLineSizeBytes, then this line is skipped
|
||||
// and an empty line is returned instead.
|
||||
//
|
||||
// If false is returned, then no more lines left to read from r.
|
||||
// Check for Err in this case.
|
||||
func (lr *LineReader) NextLine() bool {
|
||||
for {
|
||||
if lr.bufOffset >= len(lr.buf) {
|
||||
if lr.err != nil || lr.eofReached {
|
||||
return false
|
||||
}
|
||||
if !lr.readMoreData() {
|
||||
return false
|
||||
}
|
||||
if lr.bufOffset >= len(lr.buf) && lr.eofReached {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
buf := lr.buf[lr.bufOffset:]
|
||||
if n := bytes.IndexByte(buf, '\n'); n >= 0 {
|
||||
lr.Line = buf[:n]
|
||||
lr.bufOffset += n + 1
|
||||
return true
|
||||
}
|
||||
if lr.eofReached {
|
||||
lr.Line = buf
|
||||
lr.bufOffset += len(buf)
|
||||
return true
|
||||
}
|
||||
if !lr.readMoreData() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the last error after NextLine call.
|
||||
func (lr *LineReader) Err() error {
|
||||
if lr.err == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%s: %s", lr.name, lr.err)
|
||||
}
|
||||
|
||||
func (lr *LineReader) readMoreData() bool {
|
||||
if lr.bufOffset > 0 {
|
||||
lr.buf = append(lr.buf[:0], lr.buf[lr.bufOffset:]...)
|
||||
lr.bufOffset = 0
|
||||
}
|
||||
|
||||
bufLen := len(lr.buf)
|
||||
if bufLen >= MaxLineSizeBytes.IntN() {
|
||||
logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; line contents=%q", lr.name, MaxLineSizeBytes.IntN(), lr.buf)
|
||||
tooLongLinesSkipped.Inc()
|
||||
return lr.skipUntilNextLine()
|
||||
}
|
||||
|
||||
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
|
||||
n, err := lr.r.Read(lr.buf[bufLen:])
|
||||
lr.buf = lr.buf[:bufLen+n]
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
lr.eofReached = true
|
||||
return true
|
||||
}
|
||||
lr.err = fmt.Errorf("cannot read the next line: %s", err)
|
||||
}
|
||||
return n > 0
|
||||
}
|
||||
|
||||
var tooLongLinesSkipped = metrics.NewCounter("vl_too_long_lines_skipped_total")
|
||||
|
||||
func (lr *LineReader) skipUntilNextLine() bool {
|
||||
for {
|
||||
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
|
||||
n, err := lr.r.Read(lr.buf)
|
||||
lr.buf = lr.buf[:n]
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
lr.eofReached = true
|
||||
lr.buf = lr.buf[:0]
|
||||
return true
|
||||
}
|
||||
lr.err = fmt.Errorf("cannot skip the current line: %s", err)
|
||||
return false
|
||||
}
|
||||
if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
|
||||
// Include \n in the buf, so too long line is replaced with an empty line.
|
||||
// This is needed for maintaining synchorinzation consistency between lines
|
||||
// in protocols such as Elasticsearch bulk import.
|
||||
lr.buf = append(lr.buf[:0], lr.buf[n:]...)
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
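A small usage sketch of the new LineReader (the name argument only prefixes error messages; input values are made up):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func main() {
	r := strings.NewReader("foo\nbar\n\nbaz")
	lr := insertutils.NewLineReader("example-stream", r)
	for lr.NextLine() {
		// lr.Line is only valid until the next NextLine call, so copy it if needed.
		fmt.Printf("line: %q\n", lr.Line)
	}
	if err := lr.Err(); err != nil {
		fmt.Println("read error:", err)
	}
}
```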
161  app/vlinsert/insertutils/line_reader_test.go  (new file)
@@ -0,0 +1,161 @@
|
||||
package insertutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLineReader_Success(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
lr := NewLineReader("foo", r)
|
||||
var lines []string
|
||||
for lr.NextLine() {
|
||||
lines = append(lines, string(lr.Line))
|
||||
}
|
||||
if err := lr.Err(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lr.NextLine() {
|
||||
t.Fatalf("expecting error on the second call to NextLine()")
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("\n", []string{""})
|
||||
f("\n\n", []string{"", ""})
|
||||
f("foo", []string{"foo"})
|
||||
f("foo\n", []string{"foo"})
|
||||
f("\nfoo", []string{"", "foo"})
|
||||
f("foo\n\n", []string{"foo", ""})
|
||||
f("foo\nbar", []string{"foo", "bar"})
|
||||
f("foo\nbar\n", []string{"foo", "bar"})
|
||||
f("\nfoo\n\nbar\n\n", []string{"", "foo", "", "bar", ""})
|
||||
}
|
||||
|
||||
func TestLineReader_SkipUntilNextLine(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
lr := NewLineReader("foo", r)
|
||||
var lines []string
|
||||
for lr.NextLine() {
|
||||
lines = append(lines, string(lr.Line))
|
||||
}
|
||||
if err := lr.Err(); err != nil {
|
||||
t.Fatalf("unexpected error for data=%q: %s", data, err)
|
||||
}
|
||||
if lr.NextLine() {
|
||||
t.Fatalf("expecting error on the second call to NextLine()")
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines for data=%q\ngot\n%q\nwant\n%q", data, lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
|
||||
longLineLen := MaxLineSizeBytes.IntN() + overflow
|
||||
longLine := string(make([]byte, longLineLen))
|
||||
|
||||
// Single long line
|
||||
data := longLine
|
||||
f(data, nil)
|
||||
|
||||
// Multiple long lines
|
||||
data = longLine + "\n" + longLine
|
||||
f(data, []string{""})
|
||||
|
||||
data = longLine + "\n" + longLine + "\n"
|
||||
f(data, []string{"", ""})
|
||||
|
||||
// Long line in the middle
|
||||
data = "foo\n" + longLine + "\nbar"
|
||||
f(data, []string{"foo", "", "bar"})
|
||||
|
||||
// Multiple long lines in the middle
|
||||
data = "foo\n" + longLine + "\n" + longLine + "\nbar"
|
||||
f(data, []string{"foo", "", "", "bar"})
|
||||
|
||||
// Long line in the end
|
||||
data = "foo\n" + longLine
|
||||
f(data, []string{"foo"})
|
||||
|
||||
// Long line in the end
|
||||
data = "foo\n" + longLine + "\n"
|
||||
f(data, []string{"foo", ""})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineReader_Failure(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
fr := &failureReader{
|
||||
r: bytes.NewBufferString(data),
|
||||
}
|
||||
lr := NewLineReader("foo", fr)
|
||||
var lines []string
|
||||
for lr.NextLine() {
|
||||
lines = append(lines, string(lr.Line))
|
||||
}
|
||||
if err := lr.Err(); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if lr.NextLine() {
|
||||
t.Fatalf("expecting error on the second call to NextLine()")
|
||||
}
|
||||
if err := lr.Err(); err == nil {
|
||||
t.Fatalf("expecting non-nil error on the second call")
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("foo", nil)
|
||||
f("foo\n", []string{"foo"})
|
||||
f("\n", []string{""})
|
||||
f("foo\nbar", []string{"foo"})
|
||||
f("foo\nbar\n", []string{"foo", "bar"})
|
||||
f("\nfoo\nbar\n\n", []string{"", "foo", "bar", ""})
|
||||
|
||||
// long line
|
||||
longLineLen := MaxLineSizeBytes.IntN()
|
||||
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
|
||||
longLine := string(make([]byte, longLineLen+overflow))
|
||||
|
||||
data := longLine
|
||||
f(data, nil)
|
||||
|
||||
data = "foo\n" + longLine
|
||||
f(data, []string{"foo"})
|
||||
|
||||
data = longLine + "\nfoo"
|
||||
f(data, []string{""})
|
||||
|
||||
data = longLine + "\nfoo\n"
|
||||
f(data, []string{"", "foo"})
|
||||
}
|
||||
}
|
||||
|
||||
type failureReader struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r *failureReader) Read(p []byte) (int, error) {
|
||||
n, _ := r.r.Read(p)
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
return 0, fmt.Errorf("some error")
|
||||
}
|
||||
@@ -15,7 +15,10 @@ type TestLogMessageProcessor struct {
|
||||
}
|
||||
|
||||
// AddRow adds row with the given timestamp and fields to tlp
|
||||
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
|
||||
if streamFields != nil {
|
||||
panic(fmt.Errorf("BUG: streamFields must be nil; got %v", streamFields))
|
||||
}
|
||||
tlp.timestamps = append(tlp.timestamps, timestamp)
|
||||
tlp.rows = append(tlp.rows, string(logstorage.MarshalFieldsToJSON(nil, fields)))
|
||||
}
|
||||
@@ -25,10 +28,10 @@ func (tlp *TestLogMessageProcessor) MustClose() {
|
||||
}
|
||||
|
||||
// Verify verifies the number of rows, timestamps and results after AddRow calls.
|
||||
func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected []int64, resultExpected string) error {
|
||||
func (tlp *TestLogMessageProcessor) Verify(timestampsExpected []int64, resultExpected string) error {
|
||||
result := strings.Join(tlp.rows, "\n")
|
||||
if len(tlp.rows) != rowsExpected {
|
||||
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), rowsExpected, result, resultExpected)
|
||||
if len(tlp.rows) != len(timestampsExpected) {
|
||||
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), len(timestampsExpected), result, resultExpected)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tlp.timestamps, timestampsExpected) {
|
||||
@@ -45,7 +48,7 @@ func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected
|
||||
type BenchmarkLogMessageProcessor struct{}
|
||||
|
||||
// AddRow implements LogMessageProcessor interface.
|
||||
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _ []logstorage.Field) {
|
||||
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _, _ []logstorage.Field) {
|
||||
}
|
||||
|
||||
// MustClose implements LogMessageProcessor interface.
|
||||
|
||||
@@ -2,20 +2,19 @@ package insertutils

import (
	"fmt"
-	"math"
	"strconv"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

-// ExtractTimestampRFC3339NanoFromFields extracts RFC3339 timestamp in nanoseconds from the field with the name timeField at fields.
+// ExtractTimestampFromFields extracts timestamp in nanoseconds from the field with the name timeField at fields.
//
// The value for the timeField is set to empty string after returning from the function,
// so it could be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with timeField name or if the timeField value is empty.
-func ExtractTimestampRFC3339NanoFromFields(timeField string, fields []logstorage.Field) (int64, error) {
+func ExtractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
	for i := range fields {
		f := &fields[i]
		if f.Name != timeField {

@@ -48,22 +47,24 @@ func parseTimestamp(s string) (int64, error) {
	return nsecs, nil
}

-// ParseUnixTimestamp parses s as unix timestamp in either seconds or milliseconds and returns the parsed timestamp in nanoseconds.
+// ParseUnixTimestamp parses s as unix timestamp in seconds, milliseconds, microseconds or nanoseconds and returns the parsed timestamp in nanoseconds.
func ParseUnixTimestamp(s string) (int64, error) {
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
	}
	if n < (1<<31) && n >= (-1<<31) {
-		// The timestamp is in seconds. Convert it to milliseconds
-		n *= 1e3
+		// The timestamp is in seconds.
+		return n * 1e9, nil
	}
-	if n > int64(math.MaxInt64)/1e6 {
-		return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
+	if n < 1e3*(1<<31) && n >= 1e3*(-1<<31) {
+		// The timestamp is in milliseconds.
+		return n * 1e6, nil
	}
-	if n < int64(math.MinInt64)/1e6 {
-		return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
+	if n < 1e6*(1<<31) && n >= 1e6*(-1<<31) {
+		// The timestamp is in microseconds.
+		return n * 1e3, nil
	}
-	n *= 1e6
+	// The timestamp is in nanoseconds
	return n, nil
}
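The new ParseUnixTimestamp infers the unit from the magnitude of the value: |n| below 2^31 is treated as seconds, below 1e3*2^31 as milliseconds, below 1e6*2^31 as microseconds, and anything larger as nanoseconds. A small usage sketch of the exported function (the inputs reuse values from the tests below):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func main() {
	// The same instant expressed in four resolutions; all come back in nanoseconds.
	for _, s := range []string{
		"1718773640",          // seconds
		"1718773640123",       // milliseconds
		"1718773640123456",    // microseconds
		"1718773640123456789", // nanoseconds
	} {
		ns, err := insertutils.ParseUnixTimestamp(s)
		fmt.Println(s, "->", ns, err)
	}
}
```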
@@ -6,11 +6,11 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
|
||||
func TestExtractTimestampFromFields_Success(t *testing.T) {
|
||||
f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
|
||||
t.Helper()
|
||||
|
||||
nsecs, err := ExtractTimestampRFC3339NanoFromFields(timeField, fields)
|
||||
nsecs, err := ExtractTimestampFromFields(timeField, fields)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
@@ -51,6 +51,18 @@ func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
|
||||
{Name: "foo", Value: "bar"},
|
||||
}, 1718773640123456789)
|
||||
|
||||
// Unix timestamp in nanoseconds
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "time", Value: "1718773640123456789"},
|
||||
}, 1718773640123456789)
|
||||
|
||||
// Unix timestamp in microseconds
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "time", Value: "1718773640123456"},
|
||||
}, 1718773640123456000)
|
||||
|
||||
// Unix timestamp in milliseconds
|
||||
f("time", []logstorage.Field{
|
||||
{Name: "foo", Value: "bar"},
|
||||
@@ -64,14 +76,14 @@ func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
|
||||
}, 1718773640000000000)
|
||||
}
|
||||
|
||||
func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
|
||||
func TestExtractTimestampFromFields_Error(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
fields := []logstorage.Field{
|
||||
{Name: "time", Value: s},
|
||||
}
|
||||
nsecs, err := ExtractTimestampRFC3339NanoFromFields("time", fields)
|
||||
nsecs, err := ExtractTimestampFromFields("time", fields)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
@@ -80,6 +92,7 @@ func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// invalid time
|
||||
f("foobar")
|
||||
|
||||
// incomplete time
|
||||
|
||||
@@ -120,8 +120,8 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
|
||||
return
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := parseJournaldRequest(data, lmp, cp)
|
||||
lmp := cp.NewLogMessageProcessor("journald")
|
||||
err = parseJournaldRequest(data, lmp, cp)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
@@ -129,8 +129,6 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedJournaldTotal.Add(n)
|
||||
|
||||
// update requestJournaldDuration only for successfully parsed requests
|
||||
// There is no need in updating requestJournaldDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -138,16 +136,14 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
rowsIngestedJournaldTotal = metrics.NewCounter(`vl_rows_ingested_total{type="journald", format="journald"}`)
|
||||
requestsJournaldTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload"}`)
|
||||
|
||||
requestsJournaldTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload",format="journald"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload",format="journald"}`)
|
||||
|
||||
requestJournaldDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/journald/upload",format="journald"}`)
|
||||
requestJournaldDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/journald/upload"}`)
|
||||
)
|
||||
|
||||
// See https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
|
||||
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) (rowsIngested int, err error) {
|
||||
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) error {
|
||||
var fields []logstorage.Field
|
||||
var ts int64
|
||||
var size uint64
|
||||
@@ -170,15 +166,14 @@ func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
rowsIngested++
|
||||
lmp.AddRow(ts, fields, nil)
|
||||
fields = fields[:0]
|
||||
}
|
||||
// skip newline separator
|
||||
data = data[1:]
|
||||
continue
|
||||
case idx < 0:
|
||||
return rowsIngested, fmt.Errorf("missing new line separator, unread data left=%d", len(data))
|
||||
return fmt.Errorf("missing new line separator, unread data left=%d", len(data))
|
||||
}
|
||||
|
||||
idx = bytes.IndexByte(line, '=')
|
||||
@@ -191,46 +186,46 @@ func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *
|
||||
} else {
|
||||
name = bytesutil.ToUnsafeString(line)
|
||||
if len(data) == 0 {
|
||||
return rowsIngested, fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
|
||||
return fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
|
||||
}
|
||||
// size of binary data is encoded as little-endian uint64 at the beginning
|
||||
idx, err := binary.Decode(data, binary.LittleEndian, &size)
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
|
||||
return fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
|
||||
}
|
||||
// skip binary data size
|
||||
data = data[idx:]
|
||||
if size == 0 {
|
||||
return rowsIngested, fmt.Errorf("unexpected zero binary data size decoded %d", size)
|
||||
return fmt.Errorf("unexpected zero binary data size decoded %d", size)
|
||||
}
|
||||
if int(size) > len(data) {
|
||||
return rowsIngested, fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
|
||||
return fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
|
||||
}
|
||||
value = bytesutil.ToUnsafeString(data[:size])
|
||||
data = data[int(size):]
|
||||
// binary data must have a newline separator before the next line or field
|
||||
if len(data) == 0 {
|
||||
return rowsIngested, fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
|
||||
return fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
|
||||
}
|
||||
lastB := data[0]
|
||||
if lastB != '\n' {
|
||||
return rowsIngested, fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
|
||||
return fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
|
||||
}
|
||||
data = data[1:]
|
||||
}
|
||||
// https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
|
||||
if len(name) > journaldEntryMaxNameLen {
|
||||
return rowsIngested, fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
|
||||
return fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
|
||||
}
|
||||
if !allowedJournaldEntryNameChars.MatchString(name) {
|
||||
return rowsIngested, fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
|
||||
return fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
|
||||
}
|
||||
if name == cp.TimeField {
|
||||
ts, err = strconv.ParseInt(value, 10, 64)
|
||||
n, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse Journald timestamp, %w", err)
|
||||
return fmt.Errorf("failed to parse Journald timestamp, %w", err)
|
||||
}
|
||||
ts *= 1e3
|
||||
ts = n * 1e3
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -249,8 +244,7 @@ func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
rowsIngested++
|
||||
lmp.AddRow(ts, fields, nil)
|
||||
}
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
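
For reference, the journal export format parsed above (see the systemd link) represents each entry as NAME=VALUE lines terminated by an empty line. A hypothetical text-only entry looks like:

__REALTIME_TIMESTAMP=1718773640123456
_HOSTNAME=host1
PRIORITY=6
MESSAGE=user logged in

A binary-valued field omits the `=`: the field name is followed by a newline, a little-endian uint64 length and that many raw bytes, then another newline, which is exactly the branch handled above.
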
|
||||
|
||||
@@ -14,12 +14,11 @@ func TestPushJournaldOk(t *testing.T) {
|
||||
TimeField: "__REALTIME_TIMESTAMP",
|
||||
MsgFields: []string{"MESSAGE"},
|
||||
}
|
||||
n, err := parseJournaldRequest([]byte(src), tlp, cp)
|
||||
if err != nil {
|
||||
if err := parseJournaldRequest([]byte(src), tlp, cp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -50,8 +49,7 @@ func TestPushJournald_Failure(t *testing.T) {
|
||||
TimeField: "__REALTIME_TIMESTAMP",
|
||||
MsgFields: []string{"MESSAGE"},
|
||||
}
|
||||
_, err := parseJournaldRequest([]byte(data), tlp, cp)
|
||||
if err == nil {
|
||||
if err := parseJournaldRequest([]byte(data), tlp, cp); err == nil {
|
||||
t.Fatalf("expected non nil error")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package jsonline
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -10,7 +8,6 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
@@ -52,8 +49,9 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reader = zr
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
err = processStreamInternal(reader, cp.TimeField, cp.MsgFields, lmp)
|
||||
lmp := cp.NewLogMessageProcessor("jsonline")
|
||||
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
|
||||
err = processStreamInternal(streamName, reader, cp.TimeField, cp.MsgFields, lmp)
|
||||
lmp.MustClose()
|
||||
|
||||
if err != nil {
|
||||
@@ -66,20 +64,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func processStreamInternal(r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
|
||||
func processStreamInternal(streamName string, r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
lb := lineBufferPool.Get()
|
||||
defer lineBufferPool.Put(lb)
|
||||
|
||||
lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
|
||||
sc := bufio.NewScanner(wcr)
|
||||
sc.Buffer(lb.B, len(lb.B))
|
||||
lr := insertutils.NewLineReader(streamName, wcr)
|
||||
|
||||
n := 0
|
||||
for {
|
||||
ok, err := readLine(sc, timeField, msgFields, lmp)
|
||||
ok, err := readLine(lr, timeField, msgFields, lmp)
|
||||
wcr.DecConcurrency()
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
@@ -89,45 +82,35 @@ func processStreamInternal(r io.Reader, timeField string, msgFields []string, lm
|
||||
return nil
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func readLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
func readLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
var line []byte
|
||||
for len(line) == 0 {
|
||||
if !sc.Scan() {
|
||||
if err := sc.Err(); err != nil {
|
||||
if errors.Is(err, bufio.ErrTooLong) {
|
||||
return false, fmt.Errorf(`cannot read json line, since its size exceeds -insert.maxLineSizeBytes=%d`, insertutils.MaxLineSizeBytes.IntN())
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
if !lr.NextLine() {
|
||||
err := lr.Err()
|
||||
return false, err
|
||||
}
|
||||
line = sc.Bytes()
|
||||
line = lr.Line
|
||||
}
|
||||
|
||||
p := logstorage.GetJSONParser()
|
||||
if err := p.ParseLogMessage(line); err != nil {
|
||||
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
|
||||
}
|
||||
ts, err := insertutils.ExtractTimestampRFC3339NanoFromFields(timeField, p.Fields)
|
||||
ts, err := insertutils.ExtractTimestampFromFields(timeField, p.Fields)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("cannot get timestamp: %w", err)
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgFields, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
lmp.AddRow(ts, p.Fields, nil)
|
||||
logstorage.PutJSONParser(p)
|
||||
|
||||
return true, nil
|
||||
}
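
For context, this handler ingests newline-delimited JSON objects. A hypothetical request, assuming the default VictoriaLogs listen address and the default _time/_msg field names; the second line relies on the new unix-timestamp support in ExtractTimestampFromFields:

curl -X POST http://localhost:9428/insert/jsonline --data-binary '
{"_time":"2023-06-06T04:48:11.735Z","_msg":"foobar","log.level":"info"}
{"_time":"1718773640123","_msg":"baz"}
'
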
|
||||
|
||||
var lineBufferPool bytesutil.ByteBufferPool
|
||||
|
||||
var (
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
|
||||
|
||||
requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)
|
||||
|
||||
|
||||
@@ -8,17 +8,17 @@ import (
|
||||
)
|
||||
|
||||
func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
msgFields := []string{msgField}
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, timeField, msgFields, tlp); err != nil {
|
||||
if err := processStreamInternal("test", r, timeField, msgFields, tlp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -29,12 +29,11 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
`
|
||||
timeField := "@timestamp"
|
||||
msgField := "message"
|
||||
rowsExpected := 3
|
||||
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000}
|
||||
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
|
||||
{"_msg":"baz"}
|
||||
{"_msg":"xyz","x":"y"}`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, timeField, msgField, timestampsExpected, resultExpected)
|
||||
|
||||
// Non-existing msgField
|
||||
data = `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
|
||||
@@ -42,11 +41,10 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
`
|
||||
timeField = "@timestamp"
|
||||
msgField = "foobar"
|
||||
rowsExpected = 2
|
||||
timestampsExpected = []int64{1686026891735000000, 1686023292735000000}
|
||||
resultExpected = `{"log.offset":"71770","log.file.path":"/var/log/auth.log","message":"foobar"}
|
||||
{"message":"baz"}`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, timeField, msgField, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
@@ -55,7 +53,7 @@ func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, "time", nil, tlp); err == nil {
|
||||
if err := processStreamInternal("test", r, "time", nil, tlp); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,16 +53,15 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := parseJSONRequest(data, lmp)
|
||||
lmp := cp.NewLogMessageProcessor("loki_json")
|
||||
useDefaultStreamFields := len(cp.StreamFields) == 0
|
||||
err = parseJSONRequest(data, lmp, useDefaultStreamFields)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse Loki json request: %s; data=%s", err, data)
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedJSONTotal.Add(n)
|
||||
|
||||
// update requestJSONDuration only for successfully parsed requests
|
||||
// There is no need in updating requestJSONDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -70,31 +69,29 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
rowsIngestedJSONTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="json"}`)
|
||||
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
)
|
||||
|
||||
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
return fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
}
|
||||
|
||||
streamsV := v.Get("streams")
|
||||
if streamsV == nil {
|
||||
return 0, fmt.Errorf("missing `streams` item in the parsed JSON")
|
||||
return fmt.Errorf("missing `streams` item in the parsed JSON")
|
||||
}
|
||||
streams, err := streamsV.Array()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
|
||||
return fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
|
||||
}
|
||||
|
||||
currentTimestamp := time.Now().UnixNano()
|
||||
var commonFields []logstorage.Field
|
||||
rowsIngested := 0
|
||||
for _, stream := range streams {
|
||||
// populate common labels from `stream` dict
|
||||
commonFields = commonFields[:0]
|
||||
@@ -103,7 +100,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
if labelsV != nil {
|
||||
o, err := labelsV.Object()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
|
||||
return fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
|
||||
}
|
||||
labels = o
|
||||
}
|
||||
@@ -119,37 +116,37 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("error when parsing `stream` object: %w", err)
|
||||
return fmt.Errorf("error when parsing `stream` object: %w", err)
|
||||
}
|
||||
|
||||
// populate messages from `values` array
|
||||
linesV := stream.Get("values")
|
||||
if linesV == nil {
|
||||
return rowsIngested, fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
|
||||
return fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
|
||||
}
|
||||
lines, err := linesV.Array()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
|
||||
return fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
|
||||
}
|
||||
|
||||
fields := commonFields
|
||||
for _, line := range lines {
|
||||
lineA, err := line.Array()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
|
||||
return fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
|
||||
}
|
||||
if len(lineA) < 2 || len(lineA) > 3 {
|
||||
return rowsIngested, fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
|
||||
return fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
|
||||
}
|
||||
|
||||
// parse timestamp
|
||||
timestamp, err := lineA[0].StringBytes()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
|
||||
return fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
|
||||
}
|
||||
ts, err := parseLokiTimestamp(bytesutil.ToUnsafeString(timestamp))
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
|
||||
return fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
|
||||
}
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
@@ -158,7 +155,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
// parse log message
|
||||
msg, err := lineA[1].StringBytes()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
|
||||
return fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
|
||||
}
|
||||
|
||||
fields = append(fields[:len(commonFields)], logstorage.Field{
|
||||
@@ -170,7 +167,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
if len(lineA) > 2 {
|
||||
structuredMetadata, err := lineA[2].Object()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
|
||||
return fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
|
||||
}
|
||||
|
||||
structuredMetadata.Visit(func(k []byte, v *fastjson.Value) {
|
||||
@@ -186,15 +183,18 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
|
||||
return fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
|
||||
}
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
var streamFields []logstorage.Field
|
||||
if useDefaultStreamFields {
|
||||
streamFields = commonFields
|
||||
}
|
||||
lmp.AddRow(ts, fields, streamFields)
|
||||
}
|
||||
rowsIngested += len(lines)
|
||||
}
|
||||
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
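
For reference, the body parsed here follows the Loki JSON push format: a `streams` array whose items carry a `stream` label object and a `values` array of [timestamp, message, optional structured metadata] entries. A hypothetical minimal payload (label and metadata names are made up):

{
  "streams": [
    {
      "stream": {"app": "nginx", "env": "prod"},
      "values": [
        ["1718773640123456789", "GET /index.html 200", {"trace_id": "abc123"}]
      ]
    }
  ]
}
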
|
||||
|
||||
func parseLokiTimestamp(s string) (int64, error) {
|
||||
|
||||
@@ -11,12 +11,11 @@ func TestParseJSONRequest_Failure(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
if err == nil {
|
||||
if err := parseJSONRequest([]byte(s), tlp, false); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if n != 0 {
|
||||
t.Fatalf("unexpected number of parsed lines: %d; want 0", n)
|
||||
if err := tlp.Verify(nil, ""); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
f(``)
|
||||
@@ -66,11 +65,10 @@ func TestParseJSONRequest_Success(t *testing.T) {
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
if err != nil {
|
||||
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,8 +28,7 @@ func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
data := getJSONBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseJSONRequest(data, blp)
|
||||
if err != nil {
|
||||
if err := parseJSONRequest(data, blp, false); err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,16 +44,15 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := parseProtobufRequest(data, lmp)
|
||||
lmp := cp.NewLogMessageProcessor("loki_protobuf")
|
||||
useDefaultStreamFields := len(cp.StreamFields) == 0
|
||||
err = parseProtobufRequest(data, lmp, useDefaultStreamFields)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedProtobufTotal.Add(n)
|
||||
|
||||
// update requestProtobufDuration only for successfully parsed requests
|
||||
// There is no need in updating requestProtobufDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -61,18 +60,17 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
)
|
||||
|
||||
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
|
||||
bb := bytesBufPool.Get()
|
||||
defer bytesBufPool.Put(bb)
|
||||
|
||||
buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
|
||||
return fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
|
||||
}
|
||||
bb.B = buf
|
||||
|
||||
@@ -81,13 +79,12 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
|
||||
err = req.UnmarshalProtobuf(bb.B)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse request body: %w", err)
|
||||
return fmt.Errorf("cannot parse request body: %w", err)
|
||||
}
|
||||
|
||||
fields := getFields()
|
||||
defer putFields(fields)
|
||||
|
||||
rowsIngested := 0
|
||||
streams := req.Streams
|
||||
currentTimestamp := time.Now().UnixNano()
|
||||
for i := range streams {
|
||||
@@ -96,7 +93,7 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
// Labels are same for all entries in the stream.
|
||||
fields.fields, err = parsePromLabels(fields.fields[:0], stream.Labels)
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
|
||||
return fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
|
||||
}
|
||||
commonFieldsLen := len(fields.fields)
|
||||
|
||||
@@ -122,11 +119,14 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
ts = currentTimestamp
|
||||
}
|
||||
|
||||
lmp.AddRow(ts, fields.fields)
|
||||
var streamFields []logstorage.Field
|
||||
if useDefaultStreamFields {
|
||||
streamFields = fields.fields[:commonFieldsLen]
|
||||
}
|
||||
lmp.AddRow(ts, fields.fields, streamFields)
|
||||
}
|
||||
rowsIngested += len(stream.Entries)
|
||||
}
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFields() *fields {
|
||||
|
||||
@@ -15,7 +15,10 @@ type testLogMessageProcessor struct {
|
||||
pr PushRequest
|
||||
}
|
||||
|
||||
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
|
||||
if streamFields != nil {
|
||||
panic(fmt.Errorf("unexpected non-nil streamFields: %v", streamFields))
|
||||
}
|
||||
msg := ""
|
||||
for _, f := range fields {
|
||||
if f.Name == "_msg" {
|
||||
@@ -50,23 +53,21 @@ func TestParseProtobufRequest_Success(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &testLogMessageProcessor{}
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
if err != nil {
|
||||
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if n != len(tlp.pr.Streams) {
|
||||
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), n)
|
||||
if len(tlp.pr.Streams) != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), len(timestampsExpected))
|
||||
}
|
||||
|
||||
data := tlp.pr.MarshalProtobuf(nil)
|
||||
encodedData := snappy.Encode(nil, data)
|
||||
|
||||
tlp2 := &insertutils.TestLogMessageProcessor{}
|
||||
n, err = parseProtobufRequest(encodedData, tlp2)
|
||||
if err != nil {
|
||||
if err := parseProtobufRequest(encodedData, tlp2, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp2.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp2.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,8 +31,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
body := getProtobufBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseProtobufRequest(body, blp)
|
||||
if err != nil {
|
||||
if err := parseProtobufRequest(body, blp, false); err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package vlinsert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
@@ -34,9 +35,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path = strings.TrimPrefix(path, "/insert")
|
||||
path = strings.ReplaceAll(path, "//", "/")
|
||||
|
||||
if path == "/jsonline" {
|
||||
switch path {
|
||||
case "/jsonline":
|
||||
jsonline.RequestHandler(w, r)
|
||||
return true
|
||||
case "/ready":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(200)
|
||||
fmt.Fprintf(w, `{"status":"ok"}`)
|
||||
return true
|
||||
}
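
The new /ready case gives orchestrators a cheap readiness probe; a usage example, assuming the default listen address:

curl http://localhost:9428/insert/ready
{"status":"ok"}
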
|
||||
switch {
|
||||
case strings.HasPrefix(path, "/elasticsearch/"):
|
||||
|
||||
@@ -66,16 +66,15 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
return
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
n, err := pushProtobufRequest(data, lmp)
|
||||
lmp := cp.NewLogMessageProcessor("opentelelemtry_protobuf")
|
||||
useDefaultStreamFields := len(cp.StreamFields) == 0
|
||||
err = pushProtobufRequest(data, lmp, useDefaultStreamFields)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse OpenTelemetry protobuf request: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedProtobufTotal.Add(n)
|
||||
|
||||
// update requestProtobufDuration only for successfully parsed requests
|
||||
// There is no need in updating requestProtobufDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -83,22 +82,19 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="opentelemetry",format="protobuf"}`)
|
||||
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
|
||||
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
|
||||
)
|
||||
|
||||
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
|
||||
var req pb.ExportLogsServiceRequest
|
||||
if err := req.UnmarshalProtobuf(data); err != nil {
|
||||
errorsTotal.Inc()
|
||||
return 0, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
|
||||
return fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
|
||||
}
|
||||
|
||||
var rowsIngested int
|
||||
var commonFields []logstorage.Field
|
||||
for _, rl := range req.ResourceLogs {
|
||||
attributes := rl.Resource.Attributes
|
||||
@@ -109,16 +105,14 @@ func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int,
|
||||
}
|
||||
commonFieldsLen := len(commonFields)
|
||||
for _, sc := range rl.ScopeLogs {
|
||||
var scopeIngested int
|
||||
commonFields, scopeIngested = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp)
|
||||
rowsIngested += scopeIngested
|
||||
commonFields = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp, useDefaultStreamFields)
|
||||
}
|
||||
}
|
||||
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor) ([]logstorage.Field, int) {
|
||||
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) []logstorage.Field {
|
||||
fields := commonFields
|
||||
for _, lr := range sc.LogRecords {
|
||||
fields = fields[:len(commonFields)]
|
||||
@@ -137,7 +131,11 @@ func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field,
|
||||
Value: lr.FormatSeverity(),
|
||||
})
|
||||
|
||||
lmp.AddRow(lr.ExtractTimestampNano(), fields)
|
||||
var streamFields []logstorage.Field
|
||||
if useDefaultStreamFields {
|
||||
streamFields = commonFields
|
||||
}
|
||||
lmp.AddRow(lr.ExtractTimestampNano(), fields, streamFields)
|
||||
}
|
||||
return fields, len(sc.LogRecords)
|
||||
return fields
|
||||
}
|
||||
|
||||
@@ -16,12 +16,11 @@ func TestPushProtoOk(t *testing.T) {
|
||||
|
||||
pData := lr.MarshalProtobuf(nil)
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
n, err := pushProtobufRequest(pData, tlp)
|
||||
if err != nil {
|
||||
if err := pushProtobufRequest(pData, tlp, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,8 +27,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
body := getProtobufBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := pushProtobufRequest(body, blp)
|
||||
if err != nil {
|
||||
if err := pushProtobufRequest(body, blp, false); err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
}
|
||||
@@ -314,7 +314,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod st
|
||||
}
|
||||
bb.B = bb.B[:n]
|
||||
udpRequestsTotal.Inc()
|
||||
if err := processStream(bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
if err := processStream("udp", bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
logger.Errorf("syslog: cannot process UDP data from %s at %s: %s", remoteAddr, localAddr, err)
|
||||
}
|
||||
}
|
||||
@@ -354,7 +354,7 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod stri
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
|
||||
if err := processStream(c, compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
if err := processStream("tcp", c, compressMethod, useLocalTimestamp, cp); err != nil {
|
||||
logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
|
||||
}
|
||||
|
||||
@@ -369,12 +369,12 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod stri
|
||||
}
|
||||
|
||||
// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
|
||||
func processStream(r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
|
||||
func processStream(protocol string, r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor()
|
||||
lmp := cp.NewLogMessageProcessor("syslog_" + protocol)
|
||||
err := processStreamInternal(r, compressMethod, useLocalTimestamp, lmp)
|
||||
lmp.MustClose()
|
||||
|
||||
@@ -436,7 +436,6 @@ func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertut
|
||||
return fmt.Errorf("cannot read line #%d: %s", n, err)
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
return slr.Error()
|
||||
}
|
||||
@@ -561,14 +560,14 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
|
||||
if useLocalTimestamp {
|
||||
ts = time.Now().UnixNano()
|
||||
} else {
|
||||
nsecs, err := insertutils.ExtractTimestampRFC3339NanoFromFields("timestamp", p.Fields)
|
||||
nsecs, err := insertutils.ExtractTimestampFromFields("timestamp", p.Fields)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot get timestamp from syslog line %q: %w", line, err)
|
||||
}
|
||||
ts = nsecs
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgFields, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
lmp.AddRow(ts, p.Fields, nil)
|
||||
logstorage.PutSyslogParser(p)
|
||||
|
||||
return nil
|
||||
@@ -577,8 +576,6 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
|
||||
var msgFields = []string{"message"}
|
||||
|
||||
var (
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="syslog"}`)
|
||||
|
||||
errorsTotal = metrics.NewCounter(`vl_errors_total{type="syslog"}`)
|
||||
|
||||
udpRequestsTotal = metrics.NewCounter(`vl_udp_reqests_total{type="syslog"}`)
|
||||
|
||||
@@ -75,7 +75,7 @@ func TestSyslogLineReader_Failure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
f := func(data string, currentYear, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
f := func(data string, currentYear int, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
MustInit()
|
||||
@@ -89,7 +89,7 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
if err := processStreamInternal(r, "", false, tlp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -99,12 +99,11 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
|
||||
`
|
||||
currentYear := 2023
|
||||
rowsExpected := 3
|
||||
timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
|
||||
resultExpected := `{"format":"rfc3164","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
|
||||
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
|
||||
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
|
||||
f(data, currentYear, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, currentYear, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
@@ -175,7 +176,7 @@ func writeCompactObject(w io.Writer, fields []logstorage.Field) error {
|
||||
_, err := fmt.Fprintf(w, "%s\n", fields[0].Value)
|
||||
return err
|
||||
}
|
||||
if len(fields) == 2 && fields[0].Name == "_time" || fields[1].Name == "_time" {
|
||||
if len(fields) == 2 && (fields[0].Name == "_time" || fields[1].Name == "_time") {
|
||||
// Write _time\tfieldValue as is
|
||||
if fields[0].Name == "_time" {
|
||||
_, err := fmt.Fprintf(w, "%s\t%s\n", fields[0].Value, fields[1].Value)
|
||||
@@ -234,5 +235,11 @@ func getJSONString(s string) string {
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error when marshaling string to JSON: %w", err))
|
||||
}
|
||||
return string(data)
|
||||
return jsonHTMLReplacer.Replace(string(data))
|
||||
}
|
||||
|
||||
var jsonHTMLReplacer = strings.NewReplacer(
|
||||
`\u003c`, "\u003c",
|
||||
`\u003e`, "\u003e",
|
||||
`\u0026`, "\u0026",
|
||||
)
|
||||
|
||||
@@ -270,7 +270,7 @@ func printCommandsHelp(w io.Writer) {
|
||||
\h - show this help
|
||||
\s - singleline json output mode
|
||||
\m - multiline json output mode
|
||||
\c - compact output
|
||||
\c - compact output mode
|
||||
\logfmt - logfmt output mode
|
||||
\wrap_long_lines - toggles wrapping long lines
|
||||
\tail <query> - live tail <query> results
|
||||
|
||||
@@ -45,6 +45,8 @@ var (
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
u64FieldsPerLog = flag.Int("u64FieldsPerLog", 1, "The number of fields with uint64 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
i64FieldsPerLog = flag.Int("i64FieldsPerLog", 1, "The number of fields with int64 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
floatFieldsPerLog = flag.Int("floatFieldsPerLog", 1, "The number of fields with float64 values to generate per each log entry; "+
|
||||
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
|
||||
ipFieldsPerLog = flag.Int("ipFieldsPerLog", 1, "The number of fields with IPv4 values to generate per each log entry; "+
|
||||
@@ -254,6 +256,9 @@ func generateLogsAtTimestamp(bw *bufio.Writer, workerID int, ts int64, firstStre
|
||||
for j := 0; j < *u64FieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"u64_%d":"%d"`, j, rand.Uint64())
|
||||
}
|
||||
for j := 0; j < *i64FieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"i64_%d":"%d"`, j, int64(rand.Uint64()))
|
||||
}
|
||||
for j := 0; j < *floatFieldsPerLog; j++ {
|
||||
fmt.Fprintf(bw, `,"float_%d":"%v"`, j, math.Round(10_000*rand.Float64())/1000)
|
||||
}
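
With the new i64FieldsPerLog flag, each generated entry gains int64-valued fields next to the uint64 and float ones; a hypothetical generated line (other fields elided):

{"_msg":"message 42","u64_0":"1234567890123456789","i64_0":"-987654321","float_0":"7.364"}
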
|
||||
|
||||
app/vlselect/logsql/facets_response.qtpl (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
{% import (
|
||||
"slices"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
{% func FacetsResponse(m map[string][]facetEntry) %}
|
||||
{
|
||||
{% code
|
||||
sortedKeys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
slices.Sort(sortedKeys)
|
||||
%}
|
||||
"facets":[
|
||||
{% if len(sortedKeys) > 0 %}
|
||||
{%= facetsLine(m, sortedKeys[0]) %}
|
||||
{% for _, k := range sortedKeys[1:] %}
|
||||
,{%= facetsLine(m, k) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func facetsLine(m map[string][]facetEntry, k string) %}
|
||||
{
|
||||
"field_name":{%q= k %},
|
||||
"values":[
|
||||
{% code fes := m[k] %}
|
||||
{% if len(fes) > 0 %}
|
||||
{%= facetLine(fes[0]) %}
|
||||
{% for _, fe := range fes[1:] %}
|
||||
,{%= facetLine(fe) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func facetLine(fe facetEntry) %}
|
||||
{
|
||||
"field_value":{%q= fe.value %},
|
||||
"hits":{%s= fe.hits %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
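
Rendered, the template above produces a response of the following shape (field names and hit counts are made up; note that hits is emitted as a raw number via %s=, not a quoted string):

{
  "facets": [
    {
      "field_name": "level",
      "values": [
        {"field_value": "info", "hits": 1023},
        {"field_value": "error", "hits": 17}
      ]
    }
  ]
}
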
|
||||
app/vlselect/logsql/facets_response.qtpl.go (new file, 178 lines)
@@ -0,0 +1,178 @@
|
||||
// Code generated by qtc from "facets_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:1
|
||||
package logsql
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:1
|
||||
import (
|
||||
"slices"
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
func StreamFacetsResponse(qw422016 *qt422016.Writer, m map[string][]facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:10
|
||||
sortedKeys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
slices.Sort(sortedKeys)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:15
|
||||
qw422016.N().S(`"facets":[`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:17
|
||||
if len(sortedKeys) > 0 {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:18
|
||||
streamfacetsLine(qw422016, m, sortedKeys[0])
|
||||
//line app/vlselect/logsql/facets_response.qtpl:19
|
||||
for _, k := range sortedKeys[1:] {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:19
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:20
|
||||
streamfacetsLine(qw422016, m, k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:21
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:22
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:22
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
func WriteFacetsResponse(qq422016 qtio422016.Writer, m map[string][]facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
StreamFacetsResponse(qw422016, m)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
func FacetsResponse(m map[string][]facetEntry) string {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
WriteFacetsResponse(qb422016, m)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:27
|
||||
func streamfacetsLine(qw422016 *qt422016.Writer, m map[string][]facetEntry, k string) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:27
|
||||
qw422016.N().S(`{"field_name":`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:29
|
||||
qw422016.N().Q(k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:29
|
||||
qw422016.N().S(`,"values":[`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:31
|
||||
fes := m[k]
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:32
|
||||
if len(fes) > 0 {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:33
|
||||
streamfacetLine(qw422016, fes[0])
|
||||
//line app/vlselect/logsql/facets_response.qtpl:34
|
||||
for _, fe := range fes[1:] {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:34
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:35
|
||||
streamfacetLine(qw422016, fe)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:36
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:37
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:37
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
func writefacetsLine(qq422016 qtio422016.Writer, m map[string][]facetEntry, k string) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
streamfacetsLine(qw422016, m, k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
func facetsLine(m map[string][]facetEntry, k string) string {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
writefacetsLine(qb422016, m, k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:42
|
||||
func streamfacetLine(qw422016 *qt422016.Writer, fe facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:42
|
||||
qw422016.N().S(`{"field_value":`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:44
|
||||
qw422016.N().Q(fe.value)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:44
|
||||
qw422016.N().S(`,"hits":`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:45
|
||||
qw422016.N().S(fe.hits)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:45
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
func writefacetLine(qq422016 qtio422016.Writer, fe facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
streamfacetLine(qw422016, fe)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
func facetLine(fe facetEntry) string {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
writefacetLine(qb422016, fe)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
@@ -23,6 +25,82 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
)
|
||||
|
||||
// ProcessFacetsRequest handles /select/logsql/facets request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-facets
|
||||
func ProcessFacetsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
maxValuesPerField, err := httputils.GetInt(r, "max_values_per_field")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
maxValueLen, err := httputils.GetInt(r, "max_value_len")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
keepConstFields := httputils.GetBool(r, "keep_const_fields")
|
||||
|
||||
q.DropAllPipes()
|
||||
q.AddFacetsPipe(limit, maxValuesPerField, maxValueLen, keepConstFields)
|
||||
|
||||
var mLock sync.Mutex
|
||||
m := make(map[string][]facetEntry)
|
||||
writeBlock := func(_ uint, _ []int64, columns []logstorage.BlockColumn) {
|
||||
if len(columns) == 0 || len(columns[0].Values) == 0 {
|
||||
return
|
||||
}
|
||||
if len(columns) != 3 {
|
||||
logger.Panicf("BUG: expecting 3 columns; got %d columns", len(columns))
|
||||
}
|
||||
|
||||
fieldNames := columns[0].Values
|
||||
fieldValues := columns[1].Values
|
||||
hits := columns[2].Values
|
||||
|
||||
bb := blockResultPool.Get()
|
||||
for i := range fieldNames {
|
||||
fieldName := strings.Clone(fieldNames[i])
|
||||
fieldValue := strings.Clone(fieldValues[i])
|
||||
hitsStr := strings.Clone(hits[i])
|
||||
|
||||
mLock.Lock()
|
||||
m[fieldName] = append(m[fieldName], facetEntry{
|
||||
value: fieldValue,
|
||||
hits: hitsStr,
|
||||
})
|
||||
mLock.Unlock()
|
||||
}
|
||||
blockResultPool.Put(bb)
|
||||
}
|
||||
|
||||
// Execute the query
|
||||
if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
|
||||
httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write response
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteFacetsResponse(w, m)
|
||||
}
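
A hypothetical call to the new endpoint, assuming the default listen address and a LogsQL expression passed via the standard query argument:

curl http://localhost:9428/select/logsql/facets \
  -d 'query=_time:1h error' \
  -d 'limit=10' \
  -d 'max_values_per_field=5'
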
|
||||
|
||||
type facetEntry struct {
|
||||
value string
|
||||
hits string
|
||||
}
|
||||
|
||||
// ProcessHitsRequest handles /select/logsql/hits request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
|
||||
@@ -610,13 +688,13 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
|
||||
m := make(map[string]*statsSeries)
|
||||
var mLock sync.Mutex
|
||||
|
||||
timestamp := q.GetTimestamp()
|
||||
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
|
||||
clonedColumnNames := make([]string, len(columns))
|
||||
for i, c := range columns {
|
||||
clonedColumnNames[i] = strings.Clone(c.Name)
|
||||
}
|
||||
for i := range timestamps {
|
||||
timestamp := q.GetTimestamp()
|
||||
labels := make([]logstorage.Field, 0, len(byFields))
|
||||
for j, c := range columns {
|
||||
if c.Name == "_time" {
|
||||
@@ -610,13 +688,13 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
	m := make(map[string]*statsSeries)
	var mLock sync.Mutex

	timestamp := q.GetTimestamp()
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		clonedColumnNames := make([]string, len(columns))
		for i, c := range columns {
			clonedColumnNames[i] = strings.Clone(c.Name)
		}
		for i := range timestamps {
			timestamp := q.GetTimestamp()
			labels := make([]logstorage.Field, 0, len(byFields))
			for j, c := range columns {
				if c.Name == "_time" {
@@ -1017,18 +1095,20 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
	}

	// Parse optional extra_filters
	extraFilters, err := getExtraFilters(r, "extra_filters")
	extraFiltersStr := r.FormValue("extra_filters")
	extraFilters, err := parseExtraFilters(extraFiltersStr)
	if err != nil {
		return nil, nil, err
	}
	q.AddExtraFilters(extraFilters)

	// Parse optional extra_stream_filters
	extraStreamFilters, err := getExtraFilters(r, "extra_stream_filters")
	extraStreamFiltersStr := r.FormValue("extra_stream_filters")
	extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
	if err != nil {
		return nil, nil, err
	}
	q.AddExtraStreamFilters(extraStreamFilters)
	q.AddExtraFilters(extraStreamFilters)

	return q, tenantIDs, nil
}
|
||||
@@ -1046,15 +1126,114 @@ func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
	return nsecs, true, nil
}

func getExtraFilters(r *http.Request, argName string) ([]logstorage.Field, error) {
	s := r.FormValue(argName)
func parseExtraFilters(s string) (*logstorage.Filter, error) {
	if s == "" {
		return nil, nil
	}

	var p logstorage.JSONParser
	if err := p.ParseLogMessage([]byte(s)); err != nil {
		return nil, fmt.Errorf("cannot parse %s: %w", argName, err)
	if !strings.HasPrefix(s, `{"`) {
		return logstorage.ParseFilter(s)
	}
	return p.Fields, nil

	// Extra filters in the form {"field":"value",...}.
	kvs, err := parseExtraFiltersJSON(s)
	if err != nil {
		return nil, err
	}

	filters := make([]string, len(kvs))
	for i, kv := range kvs {
		if len(kv.values) == 1 {
			filters[i] = fmt.Sprintf("%q:=%q", kv.key, kv.values[0])
		} else {
			orValues := make([]string, len(kv.values))
			for j, v := range kv.values {
				orValues[j] = fmt.Sprintf("%q", v)
			}
			filters[i] = fmt.Sprintf("%q:in(%s)", kv.key, strings.Join(orValues, ","))
		}
	}
	s = strings.Join(filters, " ")
	return logstorage.ParseFilter(s)
}

func parseExtraStreamFilters(s string) (*logstorage.Filter, error) {
	if s == "" {
		return nil, nil
	}
	if !strings.HasPrefix(s, `{"`) {
		return logstorage.ParseFilter(s)
	}

	// Extra stream filters in the form {"field":"value",...}.
	kvs, err := parseExtraFiltersJSON(s)
	if err != nil {
		return nil, err
	}

	filters := make([]string, len(kvs))
	for i, kv := range kvs {
		if len(kv.values) == 1 {
			filters[i] = fmt.Sprintf("%q=%q", kv.key, kv.values[0])
		} else {
			orValues := make([]string, len(kv.values))
			for j, v := range kv.values {
				orValues[j] = regexp.QuoteMeta(v)
			}
			filters[i] = fmt.Sprintf("%q=~%q", kv.key, strings.Join(orValues, "|"))
		}
	}
	s = "{" + strings.Join(filters, ",") + "}"
	return logstorage.ParseFilter(s)
}

type extraFilter struct {
	key    string
	values []string
}

func parseExtraFiltersJSON(s string) ([]extraFilter, error) {
	v, err := fastjson.Parse(s)
	if err != nil {
		return nil, err
	}
	o := v.GetObject()

	var errOuter error
	var filters []extraFilter
	o.Visit(func(k []byte, v *fastjson.Value) {
		if errOuter != nil {
			return
		}
		switch v.Type() {
		case fastjson.TypeString:
			filters = append(filters, extraFilter{
				key:    string(k),
				values: []string{string(v.GetStringBytes())},
			})
		case fastjson.TypeArray:
			a := v.GetArray()
			if len(a) == 0 {
				return
			}
			orValues := make([]string, len(a))
			for i, av := range a {
				ov, err := av.StringBytes()
				if err != nil {
					errOuter = fmt.Errorf("cannot obtain string item at the array for key %q; item: %s", k, av)
					return
				}
				orValues[i] = string(ov)
			}
			filters = append(filters, extraFilter{
				key:    string(k),
				values: orValues,
			})
		default:
			errOuter = fmt.Errorf("unexpected type of value for key %q: %s; value: %s", k, v.Type(), v)
		}
	})
	if errOuter != nil {
		return nil, errOuter
	}
	return filters, nil
}

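The following is a standalone sketch (not part of the diff) of the string-building step used by parseExtraFilters and parseExtraStreamFilters above: a single value becomes an exact match, while a list of values becomes an in(...) filter (or a "value1|value2" regexp for stream filters). In the real code, logstorage.ParseFilter then parses and normalizes the resulting expression.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// extraFilterExpr mirrors the per-key conversion in parseExtraFilters.
func extraFilterExpr(key string, values []string) string {
	if len(values) == 1 {
		return fmt.Sprintf("%q:=%q", key, values[0])
	}
	quoted := make([]string, len(values))
	for i, v := range values {
		quoted[i] = fmt.Sprintf("%q", v)
	}
	return fmt.Sprintf("%q:in(%s)", key, strings.Join(quoted, ","))
}

// extraStreamFilterExpr mirrors the per-key conversion in parseExtraStreamFilters.
func extraStreamFilterExpr(key string, values []string) string {
	if len(values) == 1 {
		return fmt.Sprintf("%q=%q", key, values[0])
	}
	escaped := make([]string, len(values))
	for i, v := range values {
		escaped[i] = regexp.QuoteMeta(v)
	}
	return fmt.Sprintf("%q=~%q", key, strings.Join(escaped, "|"))
}

func main() {
	fmt.Println(extraFilterExpr("foo", []string{"bar", "baz"}))       // "foo":in("bar","baz")
	fmt.Println(extraStreamFilterExpr("foo", []string{"bar", "baz"})) // "foo"=~"bar|baz"
}
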
103
app/vlselect/logsql/logsql_test.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package logsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseExtraFilters_Success(t *testing.T) {
|
||||
f := func(s, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
f, err := parseExtraFilters(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in parseExtraFilters: %s", err)
|
||||
}
|
||||
result := f.String()
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", "")
|
||||
|
||||
// JSON string
|
||||
f(`{"foo":"bar"}`, `foo:=bar`)
|
||||
f(`{"foo":["bar","baz"]}`, `foo:in(bar,baz)`)
|
||||
f(`{"z":"=b ","c":["d","e,"],"a":[],"_msg":"x"}`, `z:="=b " c:in(d,"e,") =x`)
|
||||
|
||||
// LogsQL filter
|
||||
f(`foobar`, `foobar`)
|
||||
f(`foo:bar`, `foo:bar`)
|
||||
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
|
||||
}
|
||||
|
||||
func TestParseExtraFilters_Failure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
_, err := parseExtraFilters(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// Invalid JSON
|
||||
f(`{"foo"}`)
|
||||
f(`[1,2]`)
|
||||
f(`{"foo":[1]}`)
|
||||
|
||||
// Invalid LogsQL filter
|
||||
f(`foo:(bar`)
|
||||
|
||||
// excess pipe
|
||||
f(`foo | count()`)
|
||||
}
|
||||
|
||||
func TestParseExtraStreamFilters_Success(t *testing.T) {
|
||||
f := func(s, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
f, err := parseExtraStreamFilters(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in parseExtraStreamFilters: %s", err)
|
||||
}
|
||||
result := f.String()
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", "")
|
||||
|
||||
// JSON string
|
||||
f(`{"foo":"bar"}`, `{foo="bar"}`)
|
||||
f(`{"foo":["bar","baz"]}`, `{foo=~"bar|baz"}`)
|
||||
f(`{"z":"b","c":["d","e|\""],"a":[],"_msg":"x"}`, `{z="b",c=~"d|e\\|\"",_msg="x"}`)
|
||||
|
||||
// LogsQL filter
|
||||
f(`foobar`, `foobar`)
|
||||
f(`foo:bar`, `foo:bar`)
|
||||
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
|
||||
}
|
||||
|
||||
func TestParseExtraStreamFilters_Failure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
_, err := parseExtraStreamFilters(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// Invalid JSON
|
||||
f(`{"foo"}`)
|
||||
f(`[1,2]`)
|
||||
f(`{"foo":[1]}`)
|
||||
|
||||
// Invalid LogsQL filter
|
||||
f(`foo:(bar`)
|
||||
|
||||
// excess pipe
|
||||
f(`foo | count()`)
|
||||
}
|
||||
@@ -177,6 +177,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
func processSelectRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, path string) bool {
|
||||
httpserver.EnableCORS(w, r)
|
||||
switch path {
|
||||
case "/select/logsql/facets":
|
||||
logsqlFacetsRequests.Inc()
|
||||
logsql.ProcessFacetsRequest(ctx, w, r)
|
||||
return true
|
||||
case "/select/logsql/field_names":
|
||||
logsqlFieldNamesRequests.Inc()
|
||||
logsql.ProcessFieldNamesRequest(ctx, w, r)
|
||||
@@ -236,6 +240,7 @@ func getMaxQueryDuration(r *http.Request) time.Duration {
|
||||
}
|
||||
|
||||
var (
|
||||
logsqlFacetsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/facets"}`)
|
||||
logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
|
||||
logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
|
||||
logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.faf86aa5.css",
|
||||
"main.js": "./static/js/main.b204330a.js",
|
||||
"main.css": "./static/css/main.02a1c6cb.css",
|
||||
"main.js": "./static/js/main.55c8060b.js",
|
||||
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.faf86aa5.css",
|
||||
"static/js/main.b204330a.js"
|
||||
"static/css/main.02a1c6cb.css",
|
||||
"static/js/main.55c8060b.js"
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.b204330a.js"></script><link href="./static/css/main.faf86aa5.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.55c8060b.js"></script><link href="./static/css/main.02a1c6cb.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
1
app/vlselect/vmui/static/css/main.02a1c6cb.css
Normal file
File diff suppressed because one or more lines are too long
2
app/vlselect/vmui/static/js/main.55c8060b.js
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load Diff
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
@@ -45,6 +46,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -77,6 +79,9 @@ var (
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running vmagent. The following files are checked: "+
|
||||
"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig, -remoteWrite.streamAggr.config . "+
|
||||
"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 0, "The maximum number of labels per time series to be accepted. Series with superfluous labels are ignored. In this case the vm_rows_ignored_total{reason=\"too_many_labels\"} metric at /metrics page is incremented")
|
||||
maxLabelNameLen = flag.Int("maxLabelNameLen", 0, "The maximum length of label names in the accepted time series. Series with longer label name are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_name\"} metric at /metrics page is incremented")
|
||||
maxLabelValueLen = flag.Int("maxLabelValueLen", 0, "The maximum length of label values in the accepted time series. Series with longer label value are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_value\"} metric at /metrics page is incremented")
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -93,6 +98,15 @@ var (
|
||||
)
|
||||
|
||||
func main() {
|
||||
// vmagent is optimized for reduced memory allocations,
|
||||
// so it can run with the reduced GOGC in order to reduce the used memory,
|
||||
// while keeping CPU usage spent in GC at low levels.
|
||||
//
|
||||
// Some workloads may need increased GOGC values. Then such values can be set via GOGC environment variable.
|
||||
// It is recommended increasing GOGC if go_memstats_gc_cpu_fraction metric exposed at /metrics page
|
||||
// exceeds 0.05 for extended periods of time.
|
||||
cgroup.SetGOGC(30)
|
||||
|
||||
// Write flags and help message to stdout, since it is easier to grep or pipe.
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
flag.Usage = usage
|
||||
@@ -100,6 +114,7 @@ func main() {
|
||||
remotewrite.InitSecretFlags()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
timeserieslimits.Init(*maxLabelsPerTimeseries, *maxLabelNameLen, *maxLabelValueLen)
|
||||
|
||||
if promscrape.IsDryRun() {
|
||||
if err := promscrape.CheckConfig(); err != nil {
|
||||
@@ -498,7 +513,7 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
|
||||
httpserver.Errorf(w, r, `unsupported multitenant prefix: %q; expected "insert"`, p.Prefix)
|
||||
return true
|
||||
}
|
||||
at, err := auth.NewToken(p.AuthToken)
|
||||
at, err := auth.NewTokenPossibleMultitenant(p.AuthToken)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot obtain auth token: %s", err)
|
||||
return true
|
||||
@@ -510,7 +525,13 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
statusCode := http.StatusNoContent
|
||||
if strings.HasPrefix(p.Suffix, "prometheus/api/v1/import/prometheus/metrics/job/") {
|
||||
// Return 200 status code for pushgateway requests.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3636
|
||||
statusCode = http.StatusOK
|
||||
}
|
||||
w.WriteHeader(statusCode)
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(p.Suffix, "datadog/") {
|
||||
|
||||
@@ -7,13 +7,10 @@ import (
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
@@ -21,6 +18,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
|
||||
@@ -30,6 +28,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
@@ -99,9 +98,6 @@ var (
|
||||
// rwctxsGlobal contains statically populated entries when -remoteWrite.url is specified.
|
||||
rwctxsGlobal []*remoteWriteCtx
|
||||
|
||||
// Data without tenant id is written to defaultAuthToken if -enableMultitenantHandlers is specified.
|
||||
defaultAuthToken = &auth.Token{}
|
||||
|
||||
// ErrQueueFullHTTPRetry must be returned when TryPush() returns false.
|
||||
ErrQueueFullHTTPRetry = &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("remote storage systems cannot keep up with the data ingestion rate; retry the request later " +
|
||||
@@ -209,7 +205,7 @@ func Init() {
|
||||
|
||||
initStreamAggrConfigGlobal()
|
||||
|
||||
rwctxsGlobal = newRemoteWriteCtxs(nil, *remoteWriteURLs)
|
||||
rwctxsGlobal = newRemoteWriteCtxs(*remoteWriteURLs)
|
||||
|
||||
disableOnDiskQueues := []bool(*disableOnDiskQueue)
|
||||
disableOnDiskQueueAny = slices.Contains(disableOnDiskQueues, true)
|
||||
@@ -294,7 +290,7 @@ var (
|
||||
relabelConfigTimestamp = metrics.NewCounter(`vmagent_relabel_config_last_reload_success_timestamp_seconds`)
|
||||
)
|
||||
|
||||
func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
|
||||
func newRemoteWriteCtxs(urls []string) []*remoteWriteCtx {
|
||||
if len(urls) == 0 {
|
||||
logger.Panicf("BUG: urls must be non-empty")
|
||||
}
|
||||
@@ -316,11 +312,6 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
|
||||
logger.Fatalf("invalid -remoteWrite.url=%q: %s", remoteWriteURL, err)
|
||||
}
|
||||
sanitizedURL := fmt.Sprintf("%d:secret-url", i+1)
|
||||
if at != nil {
|
||||
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
|
||||
remoteWriteURL.Path = fmt.Sprintf("%s/insert/%d:%d/prometheus/api/v1/write", remoteWriteURL.Path, at.AccountID, at.ProjectID)
|
||||
sanitizedURL = fmt.Sprintf("%s:%d:%d", sanitizedURL, at.AccountID, at.ProjectID)
|
||||
}
|
||||
if *showRemoteWriteURL {
|
||||
sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
|
||||
}
|
||||
@@ -411,11 +402,6 @@ func TryPush(at *auth.Token, wr *prompbmarshal.WriteRequest) bool {
|
||||
func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnFailure bool) bool {
|
||||
tss := wr.Timeseries
|
||||
|
||||
if at == nil && MultitenancyEnabled() {
|
||||
// Write data to default tenant if at isn't set when multitenancy is enabled.
|
||||
at = defaultAuthToken
|
||||
}
|
||||
|
||||
var tenantRctx *relabelCtx
|
||||
if at != nil {
|
||||
// Convert at to (vm_account_id, vm_project_id) labels.
|
||||
@@ -485,6 +471,15 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnF
|
||||
rowsCountAfterRelabel := getRowsCount(tssBlock)
|
||||
rowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
|
||||
}
|
||||
if timeserieslimits.Enabled() {
|
||||
tmpBlock := tssBlock[:0]
|
||||
for _, ts := range tssBlock {
|
||||
if !timeserieslimits.IsExceeding(ts.Labels) {
|
||||
tmpBlock = append(tmpBlock, ts)
|
||||
}
|
||||
}
|
||||
tssBlock = tmpBlock
|
||||
}
|
||||
sortLabelsIfNeeded(tssBlock)
|
||||
tssBlock = limitSeriesCardinality(tssBlock)
|
||||
if sas.IsEnabled() {
|
||||
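The series-limits filtering added to tryPush above uses the common Go idiom of reusing the backing array of a slice (tmpBlock := tssBlock[:0]) to drop elements without allocating a second slice. Below is a small self-contained sketch of the same idiom, with a plain predicate standing in for timeserieslimits.IsExceeding; it is illustrative only and not part of the diff.

package main

import "fmt"

// filterInPlace keeps only the elements for which keep returns true,
// reusing the backing array of the input slice (the same pattern as the
// timeserieslimits filtering shown above).
func filterInPlace(items []int, keep func(int) bool) []int {
	tmp := items[:0]
	for _, it := range items {
		if keep(it) {
			tmp = append(tmp, it)
		}
	}
	return tmp
}

func main() {
	vals := []int{1, 2, 3, 4, 5}
	vals = filterInPlace(vals, func(v int) bool { return v%2 == 1 })
	fmt.Println(vals) // [1 3 5]
}
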
@@ -729,29 +724,14 @@ func logSkippedSeries(labels []prompbmarshal.Label, flagName string, flagValue i
|
||||
select {
|
||||
case <-logSkippedSeriesTicker.C:
|
||||
// Do not use logger.WithThrottler() here, since this will increase CPU usage
|
||||
// because every call to logSkippedSeries will result to a call to labelsToString.
|
||||
logger.Warnf("skip series %s because %s=%d reached", labelsToString(labels), flagName, flagValue)
|
||||
// because every call to logSkippedSeries will result to a call to prompbmarshal.LabelsToString.
|
||||
logger.Warnf("skip series %s because %s=%d reached", prompbmarshal.LabelsToString(labels), flagName, flagValue)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
var logSkippedSeriesTicker = time.NewTicker(5 * time.Second)
|
||||
|
||||
func labelsToString(labels []prompbmarshal.Label) string {
|
||||
var b []byte
|
||||
b = append(b, '{')
|
||||
for i, label := range labels {
|
||||
b = append(b, label.Name...)
|
||||
b = append(b, '=')
|
||||
b = strconv.AppendQuote(b, label.Value)
|
||||
if i+1 < len(labels) {
|
||||
b = append(b, ',')
|
||||
}
|
||||
}
|
||||
b = append(b, '}')
|
||||
return string(b)
|
||||
}
|
||||
|
||||
var (
|
||||
globalRowsPushedBeforeRelabel = metrics.NewCounter("vmagent_remotewrite_global_rows_pushed_before_relabel_total")
|
||||
rowsDroppedByGlobalRelabel = metrics.NewCounter("vmagent_remotewrite_global_relabel_metrics_dropped_total")
|
||||
|
||||
@@ -51,9 +51,14 @@ Examples:
|
||||
Usage: `Optional external URL to template in rule's labels or annotations.`,
|
||||
Required: false,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "loggerLevel",
|
||||
Usage: `Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "ERROR").`,
|
||||
Required: false,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
if failed := unittest.UnitTest(c.StringSlice("files"), c.Bool("disableAlertgroupLabel"), c.StringSlice("external.label"), c.String("external.url")); failed {
|
||||
if failed := unittest.UnitTest(c.StringSlice("files"), c.Bool("disableAlertgroupLabel"), c.StringSlice("external.label"), c.String("external.url"), c.String("loggerLevel")); failed {
|
||||
return fmt.Errorf("unittest failed")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
@@ -48,7 +49,7 @@ Outer:
|
||||
}
|
||||
var expSamples []parsedSample
|
||||
for _, s := range mt.ExpSamples {
|
||||
expLb := datasource.Labels{}
|
||||
expLb := []prompbmarshal.Label{}
|
||||
if s.Labels != "" {
|
||||
metricsqlExpr, err := metricsql.Parse(s.Labels)
|
||||
if err != nil {
|
||||
@@ -64,7 +65,7 @@ Outer:
|
||||
}
|
||||
if len(metricsqlMetricExpr.LabelFilterss) > 0 {
|
||||
for _, l := range metricsqlMetricExpr.LabelFilterss[0] {
|
||||
expLb = append(expLb, datasource.Label{
|
||||
expLb = append(expLb, prompbmarshal.Label{
|
||||
Name: l.Label,
|
||||
Value: l.Value,
|
||||
})
|
||||
|
||||
4
app/vmalert-tool/unittest/testdata/failed-test-with-missing-rulefile.yaml
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
rule_files:
|
||||
- non-existing-file.yaml
|
||||
|
||||
tests: []
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -46,17 +47,24 @@ var (
|
||||
testRemoteWritePath = "http://127.0.0.1" + httpListenAddr
|
||||
testHealthHTTPPath = "http://127.0.0.1" + httpListenAddr + "/health"
|
||||
|
||||
testLogLevel = "ERROR"
|
||||
disableAlertgroupLabel bool
|
||||
)
|
||||
|
||||
const (
|
||||
testStoragePath = "vmalert-unittest"
|
||||
testLogLevel = "ERROR"
|
||||
)
|
||||
|
||||
// UnitTest runs unittest for files
|
||||
func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, externalURL string) bool {
|
||||
if err := templates.Load([]string{}, true); err != nil {
|
||||
func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, externalURL, logLevel string) bool {
|
||||
if logLevel != "" {
|
||||
testLogLevel = logLevel
|
||||
}
|
||||
eu, err := url.Parse(externalURL)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to parse external URL: %w", err)
|
||||
}
|
||||
if err := templates.Load([]string{}, *eu); err != nil {
|
||||
logger.Fatalf("failed to load template: %v", err)
|
||||
}
|
||||
storagePath = filepath.Join(os.TempDir(), testStoragePath)
|
||||
@@ -74,8 +82,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
|
||||
logger.Fatalf("failed to load test files %q: %v", files, err)
|
||||
}
|
||||
if len(testfiles) == 0 {
|
||||
fmt.Println("no test file found")
|
||||
return false
|
||||
logger.Fatalf("no test file found")
|
||||
}
|
||||
|
||||
labels := make(map[string]string)
|
||||
@@ -97,8 +104,8 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
|
||||
var failed bool
|
||||
for fileName, file := range testfiles {
|
||||
if err := ruleUnitTest(fileName, file, labels); err != nil {
|
||||
fmt.Println(" FAILED")
|
||||
fmt.Printf("\nfailed to run unit test for file %q: \n%v", fileName, err)
|
||||
fmt.Println("FAILED")
|
||||
fmt.Printf("failed to run unit test for file %q: \n%v", fileName, err)
|
||||
failed = true
|
||||
} else {
|
||||
fmt.Println(" SUCCESS")
|
||||
@@ -109,7 +116,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
|
||||
}
|
||||
|
||||
func ruleUnitTest(filename string, content []byte, externalLabels map[string]string) []error {
|
||||
fmt.Println("\nUnit Testing: ", filename)
|
||||
fmt.Println("\n\nUnit Testing: ", filename)
|
||||
var unitTestInp unitTestFile
|
||||
if err := yaml.UnmarshalStrict(content, &unitTestInp); err != nil {
|
||||
return []error{fmt.Errorf("failed to unmarshal file: %w", err)}
|
||||
@@ -139,6 +146,9 @@ func ruleUnitTest(filename string, content []byte, externalLabels map[string]str
|
||||
if err != nil {
|
||||
return []error{fmt.Errorf("failed to parse `rule_files`: %w", err)}
|
||||
}
|
||||
if len(testGroups) == 0 {
|
||||
return []error{fmt.Errorf("found no rule group in %v", unitTestInp.RuleFiles)}
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for _, t := range unitTestInp.Tests {
|
||||
|
||||
@@ -1,30 +1,21 @@
|
||||
package unittest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := templates.Load([]string{}, true); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestUnitTest_Failure(t *testing.T) {
|
||||
f := func(files []string) {
|
||||
t.Helper()
|
||||
|
||||
failed := UnitTest(files, false, nil, "")
|
||||
failed := UnitTest(files, false, nil, "", "")
|
||||
if !failed {
|
||||
t.Fatalf("expecting failed test")
|
||||
}
|
||||
}
|
||||
|
||||
// failing test
|
||||
f([]string{"./testdata/failed-test-with-missing-rulefile.yaml"})
|
||||
|
||||
f([]string{"./testdata/failed-test.yaml"})
|
||||
}
|
||||
|
||||
@@ -32,7 +23,7 @@ func TestUnitTest_Success(t *testing.T) {
|
||||
f := func(disableGroupLabel bool, files []string, externalLabels []string, externalURL string) {
|
||||
t.Helper()
|
||||
|
||||
failed := UnitTest(files, disableGroupLabel, externalLabels, externalURL)
|
||||
failed := UnitTest(files, disableGroupLabel, externalLabels, externalURL, "")
|
||||
if failed {
|
||||
t.Fatalf("unexpected failed test")
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
|
||||
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
|
||||
@@ -7,7 +7,7 @@ groups:
|
||||
labels:
|
||||
label: bar
|
||||
annotations:
|
||||
summary: "{{ $value }"
|
||||
summary: "{{ }}"
|
||||
description: "{{$labels}}"
|
||||
- alert: UnkownAnnotationsFunction
|
||||
for: 5m
|
||||
|
||||
@@ -46,8 +46,8 @@ const (
|
||||
graphitePrefix = "/graphite"
|
||||
)
|
||||
|
||||
func (s *Client) setGraphiteReqParams(r *http.Request, query string) {
|
||||
if s.appendTypePrefix {
|
||||
func (c *Client) setGraphiteReqParams(r *http.Request, query string) {
|
||||
if c.appendTypePrefix {
|
||||
r.URL.Path += graphitePrefix
|
||||
}
|
||||
r.URL.Path += graphitePath
|
||||
@@ -58,7 +58,7 @@ func (s *Client) setGraphiteReqParams(r *http.Request, query string) {
|
||||
q.Set("target", query)
|
||||
q.Set("until", "now")
|
||||
|
||||
for k, vs := range s.extraParams {
|
||||
for k, vs := range c.extraParams {
|
||||
if q.Has(k) { // extraParams are prior to params in URL
|
||||
q.Del(k)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
@@ -16,7 +17,8 @@ import (
|
||||
var (
|
||||
disablePathAppend = flag.Bool("remoteRead.disablePathAppend", false, "Whether to disable automatic appending of '/api/v1/query' or '/select/logsql/stats_query' path "+
|
||||
"to the configured -datasource.url and -remoteRead.url")
|
||||
disableStepParam = flag.Bool("datasource.disableStepParam", false, "Whether to disable adding 'step' param to the issued instant queries. "+
|
||||
disableStepParam = flag.Bool("datasource.disableStepParam", false, "Whether to disable adding 'step' param in instant queries to the configured -datasource.url and -remoteRead.url. "+
|
||||
"Only valid for prometheus datasource. "+
|
||||
"This might be useful when using vmalert with datasources that do not support 'step' param for instant queries, like Google Managed Prometheus. "+
|
||||
"It is not recommended to enable this flag if you use vmalert with VictoriaMetrics.")
|
||||
)
|
||||
@@ -81,14 +83,14 @@ func (pi *promInstant) Unmarshal(b []byte) error {
|
||||
labels := metric.GetObject()
|
||||
|
||||
r := &pi.ms[i]
|
||||
r.Labels = make([]Label, 0, labels.Len())
|
||||
r.Labels = make([]prompbmarshal.Label, 0, labels.Len())
|
||||
labels.Visit(func(key []byte, v *fastjson.Value) {
|
||||
lv, errLocal := v.StringBytes()
|
||||
if errLocal != nil {
|
||||
err = fmt.Errorf("error when parsing label value %q: %s", v, errLocal)
|
||||
return
|
||||
}
|
||||
r.Labels = append(r.Labels, Label{
|
||||
r.Labels = append(r.Labels, prompbmarshal.Label{
|
||||
Name: string(key),
|
||||
Value: string(lv),
|
||||
})
|
||||
@@ -218,8 +220,8 @@ func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *Client) setPrometheusInstantReqParams(r *http.Request, query string, timestamp time.Time) {
|
||||
if s.appendTypePrefix {
|
||||
func (c *Client) setPrometheusInstantReqParams(r *http.Request, query string, timestamp time.Time) {
|
||||
if c.appendTypePrefix {
|
||||
r.URL.Path += "/prometheus"
|
||||
}
|
||||
if !*disablePathAppend {
|
||||
@@ -227,22 +229,22 @@ func (s *Client) setPrometheusInstantReqParams(r *http.Request, query string, ti
|
||||
}
|
||||
q := r.URL.Query()
|
||||
q.Set("time", timestamp.Format(time.RFC3339))
|
||||
if !*disableStepParam && s.evaluationInterval > 0 { // set step as evaluationInterval by default
|
||||
if !*disableStepParam && c.evaluationInterval > 0 { // set step as evaluationInterval by default
|
||||
// always convert to seconds to keep compatibility with older
|
||||
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
|
||||
q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
|
||||
q.Set("step", fmt.Sprintf("%ds", int(c.evaluationInterval.Seconds())))
|
||||
}
|
||||
if !*disableStepParam && s.queryStep > 0 { // override step with user-specified value
|
||||
if !*disableStepParam && c.queryStep > 0 { // override step with user-specified value
|
||||
// always convert to seconds to keep compatibility with older
|
||||
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
|
||||
q.Set("step", fmt.Sprintf("%ds", int(s.queryStep.Seconds())))
|
||||
q.Set("step", fmt.Sprintf("%ds", int(c.queryStep.Seconds())))
|
||||
}
|
||||
r.URL.RawQuery = q.Encode()
|
||||
s.setReqParams(r, query)
|
||||
c.setReqParams(r, query)
|
||||
}
|
||||
|
||||
func (s *Client) setPrometheusRangeReqParams(r *http.Request, query string, start, end time.Time) {
|
||||
if s.appendTypePrefix {
|
||||
func (c *Client) setPrometheusRangeReqParams(r *http.Request, query string, start, end time.Time) {
|
||||
if c.appendTypePrefix {
|
||||
r.URL.Path += "/prometheus"
|
||||
}
|
||||
if !*disablePathAppend {
|
||||
@@ -251,11 +253,11 @@ func (s *Client) setPrometheusRangeReqParams(r *http.Request, query string, star
|
||||
q := r.URL.Query()
|
||||
q.Add("start", start.Format(time.RFC3339))
|
||||
q.Add("end", end.Format(time.RFC3339))
|
||||
if s.evaluationInterval > 0 { // set step as evaluationInterval by default
|
||||
if c.evaluationInterval > 0 { // set step as evaluationInterval by default
|
||||
// always convert to seconds to keep compatibility with older
|
||||
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
|
||||
q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
|
||||
q.Set("step", fmt.Sprintf("%ds", int(c.evaluationInterval.Seconds())))
|
||||
}
|
||||
r.URL.RawQuery = q.Encode()
|
||||
s.setReqParams(r, query)
|
||||
c.setReqParams(r, query)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -144,12 +145,12 @@ func TestVMInstantQuery(t *testing.T) {
|
||||
}
|
||||
expected := []Metric{
|
||||
{
|
||||
Labels: []Label{{Value: "vm_rows", Name: "__name__"}, {Value: "bar", Name: "foo"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "vm_rows", Name: "__name__"}, {Value: "bar", Name: "foo"}},
|
||||
Timestamps: []int64{1583786142},
|
||||
Values: []float64{13763},
|
||||
},
|
||||
{
|
||||
Labels: []Label{{Value: "vm_requests", Name: "__name__"}, {Value: "baz", Name: "foo"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "vm_requests", Name: "__name__"}, {Value: "baz", Name: "foo"}},
|
||||
Timestamps: []int64{1583786140},
|
||||
Values: []float64{2000},
|
||||
},
|
||||
@@ -214,7 +215,7 @@ func TestVMInstantQuery(t *testing.T) {
|
||||
}
|
||||
exp := []Metric{
|
||||
{
|
||||
Labels: []Label{{Value: "constantLine(10)", Name: "name"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "constantLine(10)", Name: "name"}},
|
||||
Timestamps: []int64{1611758403},
|
||||
Values: []float64{10},
|
||||
},
|
||||
@@ -236,12 +237,12 @@ func TestVMInstantQuery(t *testing.T) {
|
||||
}
|
||||
expected = []Metric{
|
||||
{
|
||||
Labels: []Label{{Value: "total", Name: "stats_result"}, {Value: "bar", Name: "foo"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "total", Name: "stats_result"}, {Value: "bar", Name: "foo"}},
|
||||
Timestamps: []int64{1583786142},
|
||||
Values: []float64{13763},
|
||||
},
|
||||
{
|
||||
Labels: []Label{{Value: "total", Name: "stats_result"}, {Value: "baz", Name: "foo"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "total", Name: "stats_result"}, {Value: "baz", Name: "foo"}},
|
||||
Timestamps: []int64{1583786140},
|
||||
Values: []float64{2000},
|
||||
},
|
||||
@@ -444,7 +445,7 @@ func TestVMRangeQuery(t *testing.T) {
|
||||
t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
|
||||
}
|
||||
expected := Metric{
|
||||
Labels: []Label{{Value: "vm_rows", Name: "__name__"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "vm_rows", Name: "__name__"}},
|
||||
Timestamps: []int64{1583786142},
|
||||
Values: []float64{13763},
|
||||
}
|
||||
@@ -475,7 +476,7 @@ func TestVMRangeQuery(t *testing.T) {
|
||||
t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
|
||||
}
|
||||
expected = Metric{
|
||||
Labels: []Label{{Value: "total", Name: "stats_result"}},
|
||||
Labels: []prompbmarshal.Label{{Value: "total", Name: "stats_result"}},
|
||||
Timestamps: []int64{1583786142},
|
||||
Values: []float64{10},
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Client) setVLogsInstantReqParams(r *http.Request, query string, timestamp time.Time) {
|
||||
func (c *Client) setVLogsInstantReqParams(r *http.Request, query string, timestamp time.Time) {
|
||||
// there is no type path prefix in victorialogs APIs right now, ignore appendTypePrefix.
|
||||
if !*disablePathAppend {
|
||||
r.URL.Path += "/select/logsql/stats_query"
|
||||
@@ -16,15 +16,15 @@ func (s *Client) setVLogsInstantReqParams(r *http.Request, query string, timesta
|
||||
q.Set("time", timestamp.Format(time.RFC3339))
|
||||
// set the `start` and `end` params if applyIntervalAsTimeFilter is enabled(time filter is missing in the rule expr),
|
||||
// so the query will be executed in time range [timestamp - evaluationInterval, timestamp].
|
||||
if s.applyIntervalAsTimeFilter && s.evaluationInterval > 0 {
|
||||
q.Set("start", timestamp.Add(-s.evaluationInterval).Format(time.RFC3339))
|
||||
if c.applyIntervalAsTimeFilter && c.evaluationInterval > 0 {
|
||||
q.Set("start", timestamp.Add(-c.evaluationInterval).Format(time.RFC3339))
|
||||
q.Set("end", timestamp.Format(time.RFC3339))
|
||||
}
|
||||
r.URL.RawQuery = q.Encode()
|
||||
s.setReqParams(r, query)
|
||||
c.setReqParams(r, query)
|
||||
}
|
||||
|
||||
func (s *Client) setVLogsRangeReqParams(r *http.Request, query string, start, end time.Time) {
|
||||
func (c *Client) setVLogsRangeReqParams(r *http.Request, query string, start, end time.Time) {
|
||||
// there is no type path prefix in victorialogs APIs right now, ignore appendTypePrefix.
|
||||
if !*disablePathAppend {
|
||||
r.URL.Path += "/select/logsql/stats_query_range"
|
||||
@@ -33,11 +33,11 @@ func (s *Client) setVLogsRangeReqParams(r *http.Request, query string, start, en
|
||||
q.Add("start", start.Format(time.RFC3339))
|
||||
q.Add("end", end.Format(time.RFC3339))
|
||||
// set step as evaluationInterval by default
|
||||
if s.evaluationInterval > 0 {
|
||||
q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
|
||||
if c.evaluationInterval > 0 {
|
||||
q.Set("step", fmt.Sprintf("%ds", int(c.evaluationInterval.Seconds())))
|
||||
}
|
||||
r.URL.RawQuery = q.Encode()
|
||||
s.setReqParams(r, query)
|
||||
c.setReqParams(r, query)
|
||||
}
|
||||
|
||||
func parseVLogsResponse(req *http.Request, resp *http.Response) (res Result, err error) {
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
// Querier interface wraps Query and QueryRange methods
|
||||
@@ -55,7 +57,7 @@ type QuerierParams struct {
|
||||
|
||||
// Metric is the basic entity which should be return by datasource
|
||||
type Metric struct {
|
||||
Labels []Label
|
||||
Labels []prompbmarshal.Label
|
||||
Timestamps []int64
|
||||
Values []float64
|
||||
}
|
||||
@@ -72,22 +74,9 @@ func (m *Metric) SetLabel(key, value string) {
|
||||
m.AddLabel(key, value)
|
||||
}
|
||||
|
||||
// SetLabels sets the given map as Metric labels
|
||||
func (m *Metric) SetLabels(ls map[string]string) {
|
||||
var i int
|
||||
m.Labels = make([]Label, len(ls))
|
||||
for k, v := range ls {
|
||||
m.Labels[i] = Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// AddLabel appends the given label to the label set
|
||||
func (m *Metric) AddLabel(key, value string) {
|
||||
m.Labels = append(m.Labels, Label{Name: key, Value: value})
|
||||
m.Labels = append(m.Labels, prompbmarshal.Label{Name: key, Value: value})
|
||||
}
|
||||
|
||||
// DelLabel deletes the given label from the label set
|
||||
@@ -110,14 +99,8 @@ func (m *Metric) Label(key string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Label represents metric's label
|
||||
type Label struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
// Labels is collection of Label
|
||||
type Labels []Label
|
||||
type Labels []prompbmarshal.Label
|
||||
|
||||
func (ls Labels) Len() int { return len(ls) }
|
||||
func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
|
||||
@@ -172,7 +155,7 @@ func LabelCompare(a, b Labels) int {
|
||||
// ConvertToLabels convert map to Labels
|
||||
func ConvertToLabels(m map[string]string) (labelset Labels) {
|
||||
for k, v := range m {
|
||||
labelset = append(labelset, Label{
|
||||
labelset = append(labelset, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
|
||||
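Since Metric.Labels now holds prompbmarshal.Label values instead of the removed local Label type, callers build labels via that type or the unchanged AddLabel helper. A hedged, illustrative sketch in the style of the datasource package (not part of the diff):

package datasource

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"

// exampleMetric shows how a Metric is constructed after the switch from the
// local Label type to prompbmarshal.Label. Illustrative only.
func exampleMetric() Metric {
	m := Metric{
		Labels: []prompbmarshal.Label{
			{Name: "__name__", Value: "up"},
			{Name: "instance", Value: "localhost:9090"},
		},
		Timestamps: []int64{1583780000},
		Values:     []float64{1},
	}
	m.AddLabel("job", "node") // still appends, now a prompbmarshal.Label
	return m
}
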
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
)
|
||||
|
||||
@@ -48,22 +47,15 @@ var (
|
||||
oauth2TokenURL = flag.String("datasource.oauth2.tokenUrl", "", "Optional OAuth2 tokenURL to use for -datasource.url")
|
||||
oauth2Scopes = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")
|
||||
|
||||
lookBack = flag.Duration("datasource.lookback", 0, `Deprecated: please adjust "-search.latencyOffset" at datasource side `+
|
||||
`or specify "latency_offset" in rule group's params. Lookback defines how far into the past to look when evaluating queries. `+
|
||||
`For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
|
||||
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries. "+
|
||||
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries to the configured -datasource.url and -remoteRead.url. Only valid for prometheus datasource. "+
|
||||
"For example, if -datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query. "+
|
||||
"If set to 0, rule's evaluation interval will be used instead.")
|
||||
queryTimeAlignment = flag.Bool("datasource.queryTimeAlignment", true, `Deprecated: please use "eval_alignment" in rule group instead. `+
|
||||
`Whether to align "time" parameter with evaluation interval. `+
|
||||
"Alignment supposed to produce deterministic results despite number of vmalert replicas or time they were started. "+
|
||||
"See more details at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
|
||||
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
|
||||
idleConnectionTimeout = flag.Duration("datasource.idleConnTimeout", 50*time.Second, `Defines a duration for idle (keep-alive connections) to exist. Consider setting this value less than "-http.idleConnTimeout". It must prevent possible "write: broken pipe" and "read: connection reset by peer" errors.`)
|
||||
disableKeepAlive = flag.Bool("datasource.disableKeepAlive", false, `Whether to disable long-lived connections to the datasource. `+
|
||||
`If true, disables HTTP keep-alive and will only use the connection to the server for a single HTTP request.`)
|
||||
roundDigits = flag.Int("datasource.roundDigits", 0, `Adds "round_digits" GET param to datasource requests. `+
|
||||
`In VM "round_digits" limits the number of digits after the decimal point in response values.`)
|
||||
roundDigits = flag.Int("datasource.roundDigits", 0, `Adds "round_digits" GET param to datasource requests which limits the number of digits after the decimal point in response values. `+
|
||||
`Only valid for VictoriaMetrics as the datasource.`)
|
||||
)
|
||||
|
||||
// InitSecretFlags must be called after flag.Parse and before any logging
|
||||
@@ -90,12 +82,6 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
|
||||
if *addr == "" {
|
||||
return nil, fmt.Errorf("datasource.url is empty")
|
||||
}
|
||||
if !*queryTimeAlignment {
|
||||
logger.Warnf("flag `-datasource.queryTimeAlignment` is deprecated and will be removed in next releases. Please use `eval_alignment` in rule group instead.")
|
||||
}
|
||||
if *lookBack != 0 {
|
||||
logger.Warnf("flag `-datasource.lookback` is deprecated and will be removed in next releases. Please adjust `-search.latencyOffset` at datasource side or specify `latency_offset` in rule group's params. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
|
||||
}
|
||||
|
||||
tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
||||
if err != nil {
|
||||
|
||||
@@ -3,6 +3,8 @@ package datasource
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
func TestPromInstant_UnmarshalPositive(t *testing.T) {
|
||||
@@ -21,7 +23,7 @@ func TestPromInstant_UnmarshalPositive(t *testing.T) {
|
||||
|
||||
f(`[{"metric":{"__name__":"up"},"value":[1583780000,"42"]}]`, []Metric{
|
||||
{
|
||||
Labels: []Label{{Name: "__name__", Value: "up"}},
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "up"}},
|
||||
Timestamps: []int64{1583780000},
|
||||
Values: []float64{42},
|
||||
},
|
||||
@@ -31,17 +33,17 @@ func TestPromInstant_UnmarshalPositive(t *testing.T) {
|
||||
{"metric":{"__name__":"foo"},"value":[1583780001,"7"]},
|
||||
{"metric":{"__name__":"baz", "instance":"bar"},"value":[1583780002,"8"]}]`, []Metric{
|
||||
{
|
||||
Labels: []Label{{Name: "__name__", Value: "up"}},
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "up"}},
|
||||
Timestamps: []int64{1583780000},
|
||||
Values: []float64{42},
|
||||
},
|
||||
{
|
||||
Labels: []Label{{Name: "__name__", Value: "foo"}},
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "foo"}},
|
||||
Timestamps: []int64{1583780001},
|
||||
Values: []float64{7},
|
||||
},
|
||||
{
|
||||
Labels: []Label{{Name: "__name__", Value: "baz"}, {Name: "instance", Value: "bar"}},
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "baz"}, {Name: "instance", Value: "bar"}},
|
||||
Timestamps: []int64{1583780002},
|
||||
Values: []float64{8},
|
||||
},
|
||||
|
||||
@@ -78,12 +78,13 @@ absolute path to all .tpl files in root.
|
||||
externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
|
||||
"In case of conflicts, original labels are kept with prefix `exported_`.")
|
||||
|
||||
remoteReadIgnoreRestoreErrors = flag.Bool("remoteRead.ignoreRestoreErrors", true, "Whether to ignore errors from remote storage when restoring alerts state on startup. DEPRECATED - this flag has no effect and will be removed in the next releases.")
|
||||
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.")
|
||||
)
|
||||
|
||||
var alertURLGeneratorFn notifier.AlertURLGenerator
|
||||
var (
|
||||
alertURLGeneratorFn notifier.AlertURLGenerator
|
||||
extURL *url.URL
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Write flags and help message to stdout, since it is easier to grep or pipe.
|
||||
@@ -97,13 +98,15 @@ func main() {
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
|
||||
if !*remoteReadIgnoreRestoreErrors {
|
||||
logger.Warnf("flag `remoteRead.ignoreRestoreErrors` is deprecated and will be removed in next releases.")
|
||||
var err error
|
||||
extURL, err = getExternalURL(*externalURL)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to init external.url %q: %s", *externalURL, err)
|
||||
}
|
||||
|
||||
err := templates.Load(*ruleTemplatesPath, true)
|
||||
err = templates.Load(*ruleTemplatesPath, *extURL)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to parse %q: %s", *ruleTemplatesPath, err)
|
||||
logger.Fatalf("failed to load template %q: %s", *ruleTemplatesPath, err)
|
||||
}
|
||||
|
||||
if *dryRun {
|
||||
@@ -117,12 +120,7 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
eu, err := getExternalURL(*externalURL)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to init `-external.url`: %s", err)
|
||||
}
|
||||
|
||||
alertURLGeneratorFn, err = getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
|
||||
alertURLGeneratorFn, err = getAlertURLGenerator(extURL, *externalAlertSource, *validateTemplates)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to init `external.alert.source`: %s", err)
|
||||
}
|
||||
@@ -310,7 +308,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
|
||||
}
|
||||
templated, err := alert.ExecTemplate(qFn, alert.Labels, m)
|
||||
if err != nil {
|
||||
logger.Errorf("can not exec source template %s", err)
|
||||
logger.Errorf("cannot template alert source: %s", err)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", externalURL, templated["tpl"])
|
||||
}, nil
|
||||
@@ -365,7 +363,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
|
||||
logger.Errorf("failed to reload notifier config: %s", err)
|
||||
continue
|
||||
}
|
||||
err := templates.Load(*ruleTemplatesPath, false)
|
||||
err := templates.Load(*ruleTemplatesPath, *extURL)
|
||||
if err != nil {
|
||||
setConfigError(err)
|
||||
logger.Errorf("failed to load new templates: %s", err)
|
||||
|
||||
@@ -74,7 +74,10 @@ func TestGetAlertURLGenerator(t *testing.T) {
|
||||
|
||||
func TestConfigReload(t *testing.T) {
|
||||
originalRulePath := *rulePath
|
||||
originalExternalURL := extURL
|
||||
extURL = &url.URL{}
|
||||
defer func() {
|
||||
extURL = originalExternalURL
|
||||
*rulePath = originalRulePath
|
||||
}()
|
||||
|
||||
|
||||
@@ -160,8 +160,8 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
|
||||
// it is important to call InterruptEval before the update, because cancel fn
|
||||
// can be re-assigned during the update.
|
||||
item.old.InterruptEval()
|
||||
go func(old *rule.Group, new *rule.Group) {
|
||||
old.UpdateWith(new)
|
||||
go func(oldGroup *rule.Group, newGroup *rule.Group) {
|
||||
oldGroup.UpdateWith(newGroup)
|
||||
wg.Done()
|
||||
}(item.old, item.new)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -18,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
|
||||
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
|
||||
@@ -127,7 +127,7 @@ func ExecTemplate(q templates.QueryFn, annotations map[string]string, tplData Al
|
||||
|
||||
// ValidateTemplates validate annotations for possible template error, uses empty data for template population
|
||||
func ValidateTemplates(annotations map[string]string) error {
|
||||
tmpl, err := templates.Get()
|
||||
tmpl, err := templates.GetWithFuncs(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -146,12 +146,21 @@ func templateAnnotations(annotations map[string]string, data AlertTplData, tmpl
|
||||
tData := tplData{data, externalLabels, externalURL}
|
||||
header := strings.Join(tplHeaders, "")
|
||||
for key, text := range annotations {
|
||||
// simple check to skip text without template
|
||||
if !strings.Contains(text, "{{") || !strings.Contains(text, "}}") {
|
||||
r[key] = text
|
||||
continue
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
builder.Reset()
|
||||
builder.Grow(len(header) + len(text))
|
||||
builder.WriteString(header)
|
||||
builder.WriteString(text)
|
||||
if err := templateAnnotation(&buf, builder.String(), tData, tmpl, execute); err != nil {
|
||||
// clone a new template for each parse to avoid collision
|
||||
ctmpl, _ := tmpl.Clone()
|
||||
ctmpl = ctmpl.Option("missingkey=zero")
|
||||
if err := templateAnnotation(&buf, builder.String(), tData, ctmpl, execute); err != nil {
|
||||
r[key] = text
|
||||
eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
|
||||
continue
|
||||
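The change above clones the shared template and re-applies the "missingkey=zero" option before each per-annotation Parse, since (as the removed comment notes) Clone() does not carry template options over. A minimal standalone sketch of that Clone + Option pattern with the standard text/template package, assuming a trivial base template:

package main

import (
	"os"
	textTpl "text/template"
)

func main() {
	base := textTpl.Must(textTpl.New("base").Parse(`hello`))

	// Clone the shared template before parsing per-annotation text, so repeated
	// parses don't collide in the same template namespace.
	ctmpl, err := base.Clone()
	if err != nil {
		panic(err)
	}
	// Clone() does not copy options, so missingkey must be set again; with
	// "missingkey=zero" a missing map key renders as the zero value instead
	// of "<no value>".
	ctmpl = ctmpl.Option("missingkey=zero")

	t := textTpl.Must(ctmpl.Parse(`value: {{ .Missing }}`))
	_ = t.Execute(os.Stdout, map[string]string{}) // prints "value: "
}
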
@@ -167,14 +176,8 @@ type tplData struct {
|
||||
ExternalURL string
|
||||
}
|
||||
|
||||
func templateAnnotation(dst io.Writer, text string, data tplData, tmpl *textTpl.Template, execute bool) error {
|
||||
tpl, err := tmpl.Clone()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cloning template before parse annotation: %w", err)
|
||||
}
|
||||
// Clone() doesn't copy tpl Options, so we set them manually
|
||||
tpl = tpl.Option("missingkey=zero")
|
||||
tpl, err = tpl.Parse(text)
|
||||
func templateAnnotation(dst io.Writer, text string, data tplData, tpl *textTpl.Template, execute bool) error {
|
||||
tpl, err := tpl.Parse(text)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing annotation template: %w", err)
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestAlertExecTemplate(t *testing.T) {
|
||||
qFn := func(_ string) ([]datasource.Metric, error) {
|
||||
return []datasource.Metric{
|
||||
{
|
||||
Labels: []datasource.Label{
|
||||
Labels: []prompbmarshal.Label{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "baz", Value: "qux"},
|
||||
},
|
||||
@@ -41,7 +41,7 @@ func TestAlertExecTemplate(t *testing.T) {
|
||||
Timestamps: []int64{1},
|
||||
},
|
||||
{
|
||||
Labels: []datasource.Label{
|
||||
Labels: []prompbmarshal.Label{
|
||||
{Name: "foo", Value: "garply"},
|
||||
{Name: "baz", Value: "fred"},
|
||||
},
|
||||
@@ -75,7 +75,13 @@ func TestAlertExecTemplate(t *testing.T) {
|
||||
Labels: map[string]string{
|
||||
"instance": "localhost",
|
||||
},
|
||||
}, map[string]string{}, map[string]string{})
|
||||
}, map[string]string{
|
||||
"summary": "it's a test summary",
|
||||
"description": "it's a test description",
|
||||
}, map[string]string{
|
||||
"summary": "it's a test summary",
|
||||
"description": "it's a test description",
|
||||
})
|
||||
|
||||
// label-template
|
||||
f(&Alert{
|
||||
@@ -93,6 +99,19 @@ func TestAlertExecTemplate(t *testing.T) {
|
||||
"description": "It is 10000 connections for localhost for more than 5m0s",
|
||||
})
|
||||
|
||||
// label template override
|
||||
f(&Alert{
|
||||
Value: 1e4,
|
||||
}, map[string]string{
|
||||
"summary": `{{- define "default.template" -}} {{ printf "summary" }} {{- end -}} {{ template "default.template" . }}`,
|
||||
"description": `{{ template "default.template" . }}`,
|
||||
"value": `{{$value }}`,
|
||||
}, map[string]string{
|
||||
"summary": "summary",
|
||||
"description": "",
|
||||
"value": "10000",
|
||||
})
|
||||
|
||||
// expression-template
|
||||
f(&Alert{
|
||||
Expr: `vm_rows{"label"="bar"}<0`,
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
)
|
||||
|
||||
@@ -69,7 +70,17 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert, headers map[st
|
||||
|
||||
func (am *AlertManager) send(ctx context.Context, alerts []Alert, headers map[string]string) error {
|
||||
b := &bytes.Buffer{}
|
||||
writeamRequest(b, alerts, am.argFunc, am.relabelConfigs)
|
||||
alertsToSend := alerts[:0]
|
||||
lblss := make([][]prompbmarshal.Label, 0, len(alerts))
|
||||
for _, a := range alerts {
|
||||
lbls := a.applyRelabelingIfNeeded(am.relabelConfigs)
|
||||
if len(lbls) == 0 {
|
||||
continue
|
||||
}
|
||||
alertsToSend = append(alertsToSend, a)
|
||||
lblss = append(lblss, lbls)
|
||||
}
|
||||
writeamRequest(b, alertsToSend, am.argFunc, lblss)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, am.addr.String(), b)
|
||||
if err != nil {
|
||||
|
||||
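In the `send()` change above, relabeling is now applied once per alert before serialization: alerts whose label set is emptied by relabeling are dropped, and the surviving label sets are collected index-aligned with the alerts passed to `writeamRequest`. The following is a hedged, self-contained sketch of that filtering step; the `Alert`/`Label` types and the `relabel` helper are placeholders rather than the real vmalert types.

```go
package main

import "fmt"

type Label struct{ Name, Value string }

type Alert struct {
	Name   string
	Labels []Label
}

// relabel stands in for applyRelabelingIfNeeded; here it simply returns the
// labels unchanged to keep the example self-contained.
func relabel(a Alert) []Label { return a.Labels }

// filterAlerts keeps only alerts whose relabeled label set is non-empty and
// returns the surviving label sets index-aligned with those alerts.
func filterAlerts(alerts []Alert) ([]Alert, [][]Label) {
	alertsToSend := make([]Alert, 0, len(alerts))
	lblss := make([][]Label, 0, len(alerts))
	for _, a := range alerts {
		lbls := relabel(a)
		if len(lbls) == 0 {
			// relabeling removed every label - nothing to send for this alert
			continue
		}
		alertsToSend = append(alertsToSend, a)
		lblss = append(lblss, lbls)
	}
	return alertsToSend, lblss
}

func main() {
	alerts := []Alert{
		{Name: "a1", Labels: []Label{{"rule", "test"}}},
		{Name: "a2"}, // dropped: no labels survive
	}
	send, lblss := filterAlerts(alerts)
	fmt.Println(len(send), len(lblss)) // 1 1
}
```

Index alignment matters because the generated `amRequest` template now looks up `lblss[i]` for `alerts[i]` instead of relabeling inside the template.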
@@ -1,15 +1,14 @@
|
||||
{% import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
) %}
|
||||
{% stripspace %}
|
||||
|
||||
{% func amRequest(alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) %}
|
||||
{% func amRequest(alerts []Alert, generatorURL func(Alert) string, lblss [][]prompbmarshal.Label) %}
|
||||
[
|
||||
{% for i, alert := range alerts %}
|
||||
{% code lbls := alert.applyRelabelingIfNeeded(relabelCfg) %}
|
||||
{% if len(lbls) == 0 %} {% continue %} {% endif %}
|
||||
{% code lbls := lblss[i] %}
|
||||
{
|
||||
"startsAt":{%q= alert.Start.Format(time.RFC3339Nano) %},
|
||||
"generatorURL": {%q= generatorURL(alert) %},
|
||||
|
||||
@@ -8,7 +8,7 @@ package notifier
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:8
|
||||
@@ -25,122 +25,116 @@ var (
|
||||
)
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:8
|
||||
func streamamRequest(qw422016 *qt422016.Writer, alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) {
|
||||
func streamamRequest(qw422016 *qt422016.Writer, alerts []Alert, generatorURL func(Alert) string, lblss [][]prompbmarshal.Label) {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:8
|
||||
qw422016.N().S(`[`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:10
|
||||
for i, alert := range alerts {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:11
|
||||
lbls := alert.applyRelabelingIfNeeded(relabelCfg)
|
||||
lbls := lblss[i]
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:12
|
||||
if len(lbls) == 0 {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:12
|
||||
continue
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:12
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:12
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:11
|
||||
qw422016.N().S(`{"startsAt":`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:14
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:13
|
||||
qw422016.N().Q(alert.Start.Format(time.RFC3339Nano))
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:14
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:13
|
||||
qw422016.N().S(`,"generatorURL":`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:15
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:14
|
||||
qw422016.N().Q(generatorURL(alert))
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:15
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:14
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:15
|
||||
if !alert.End.IsZero() {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:15
|
||||
qw422016.N().S(`"endsAt":`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:17
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
||||
qw422016.N().Q(alert.End.Format(time.RFC3339Nano))
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:17
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:16
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:18
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:17
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:18
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:17
|
||||
qw422016.N().S(`"labels": {`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:20
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:19
|
||||
ll := len(lbls)
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:20
|
||||
for idx, l := range lbls {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
qw422016.N().Q(l.Name)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
qw422016.N().Q(l.Value)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
if idx != ll-1 {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:21
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:23
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:23
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:22
|
||||
qw422016.N().S(`},"annotations": {`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:26
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:25
|
||||
c := len(alert.Annotations)
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:27
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:26
|
||||
for k, v := range alert.Annotations {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:27
|
||||
c = c - 1
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
qw422016.N().Q(k)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
qw422016.N().Q(v)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
if c > 0 {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:28
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:30
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:30
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:29
|
||||
qw422016.N().S(`}}`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:33
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:32
|
||||
if i != len(alerts)-1 {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:33
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:32
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:33
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:32
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:34
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:33
|
||||
}
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:34
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:33
|
||||
qw422016.N().S(`]`)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
}
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
func writeamRequest(qq422016 qtio422016.Writer, alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
func writeamRequest(qq422016 qtio422016.Writer, alerts []Alert, generatorURL func(Alert) string, lblss [][]prompbmarshal.Label) {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
streamamRequest(qw422016, alerts, generatorURL, relabelCfg)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
streamamRequest(qw422016, alerts, generatorURL, lblss)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
}
|
||||
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
func amRequest(alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) string {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
func amRequest(alerts []Alert, generatorURL func(Alert) string, lblss [][]prompbmarshal.Label) string {
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
writeamRequest(qb422016, alerts, generatorURL, relabelCfg)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
writeamRequest(qb422016, alerts, generatorURL, lblss)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
return qs422016
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:36
|
||||
//line app/vmalert/notifier/alertmanager_request.qtpl:35
|
||||
}
|
||||
|
||||
@@ -105,6 +105,16 @@ func TestAlertManager_Send(t *testing.T) {
|
||||
if r.Header.Get(headerKey) != "bar" {
|
||||
t.Fatalf("expected header %q to be set to %q; got %q instead", headerKey, "bar", r.Header.Get(headerKey))
|
||||
}
|
||||
case 4:
|
||||
var a []struct {
|
||||
Labels map[string]string `json:"labels"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&a); err != nil {
|
||||
t.Fatalf("can not unmarshal data into alert %s", err)
|
||||
}
|
||||
if len(a) != 1 {
|
||||
t.Fatalf("expected 1 alert in array got %d", len(a))
|
||||
}
|
||||
}
|
||||
})
|
||||
srv := httptest.NewServer(mux)
|
||||
@@ -168,7 +178,20 @@ func TestAlertManager_Send(t *testing.T) {
|
||||
t.Fatalf("unexpected error %s", err)
|
||||
}
|
||||
|
||||
if c != 3 {
|
||||
t.Fatalf("expected 3 calls(count from zero) to server got %d", c)
|
||||
if err := am.Send(context.Background(), []Alert{
|
||||
{
|
||||
Name: "alert1",
|
||||
Labels: map[string]string{"rule": "test"},
|
||||
},
|
||||
{
|
||||
Name: "alert2",
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
}, map[string]string{}); err != nil {
|
||||
t.Fatalf("unexpected error %s", err)
|
||||
}
|
||||
|
||||
if c != 4 {
|
||||
t.Fatalf("expected 4 calls(count from zero) to server got %d", c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
@@ -93,13 +92,11 @@ var (
|
||||
func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (func() []Notifier, error) {
|
||||
externalURL = extURL
|
||||
externalLabels = extLabels
|
||||
eu, err := url.Parse(externalURL)
|
||||
_, err := url.Parse(externalURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse external URL: %w", err)
|
||||
}
|
||||
|
||||
templates.UpdateWithFuncs(templates.FuncsWithExternalURL(eu))
|
||||
|
||||
if *blackHole {
|
||||
if len(*addrs) > 0 || *configPath != "" {
|
||||
return nil, fmt.Errorf("only one of -notifier.blackhole, -notifier.url and -notifier.config flags must be specified")
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
package notifier
|
||||
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
|
||||
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect."+
|
||||
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with MetricsQL. It can be single node VictoriaMetrics or vmselect."+
|
||||
"Remote read is used to restore alerts state."+
|
||||
"This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
|
||||
"Supports address in the form of IP address with a port (e.g., http://127.0.0.1:8428) or DNS SRV record. "+
|
||||
|
||||
@@ -27,7 +27,7 @@ var defaultConcurrency = cgroup.AvailableCPUs() * 2
|
||||
|
||||
const (
|
||||
defaultMaxBatchSize = 1e4
|
||||
defaultMaxQueueSize = 1e6
|
||||
defaultMaxQueueSize = 1e5
|
||||
defaultFlushInterval = 2 * time.Second
|
||||
defaultWriteTimeout = 30 * time.Second
|
||||
)
|
||||
|
||||
@@ -36,7 +36,7 @@ var (
|
||||
|
||||
maxQueueSize = flag.Int("remoteWrite.maxQueueSize", defaultMaxQueueSize, "Defines the max number of pending datapoints to remote write endpoint")
|
||||
maxBatchSize = flag.Int("remoteWrite.maxBatchSize", defaultMaxBatchSize, "Defines max number of timeseries to be flushed at once")
|
||||
concurrency = flag.Int("remoteWrite.concurrency", defaultConcurrency, "Defines number of writers for concurrent writing into remote write endpoint")
|
||||
concurrency = flag.Int("remoteWrite.concurrency", defaultConcurrency, "Defines number of writers for concurrent writing into remote write endpoint. Default value depends on the number of available CPU cores.")
|
||||
flushInterval = flag.Duration("remoteWrite.flushInterval", defaultFlushInterval, "Defines interval of flushes to remote write endpoint")
|
||||
|
||||
tlsInsecureSkipVerify = flag.Bool("remoteWrite.tlsInsecureSkipVerify", false, "Whether to skip tls verification when connecting to -remoteWrite.url")
|
||||
|
||||
@@ -14,8 +14,10 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
)
|
||||
|
||||
// AlertingRule is basic alert entity
|
||||
@@ -454,13 +456,16 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
ar.logDebugf(ts, a, "created in state PENDING")
|
||||
}
|
||||
var numActivePending int
|
||||
var tss []prompbmarshal.TimeSeries
|
||||
for h, a := range ar.alerts {
|
||||
// if the alert wasn't updated in this iteration,
|
||||
// it means the alert is already resolved
|
||||
if _, ok := updated[h]; !ok {
|
||||
if a.State == notifier.StatePending {
|
||||
// alert was in Pending state - it is not
|
||||
// active anymore
|
||||
// alert was in Pending state - it is not active anymore
|
||||
// add stale time series
|
||||
tss = append(tss, pendingAlertStaleTimeSeries(a.Labels, ts.Unix(), true)...)
|
||||
|
||||
delete(ar.alerts, h)
|
||||
ar.logDebugf(ts, a, "PENDING => DELETED: is absent in current evaluation round")
|
||||
continue
|
||||
@@ -478,6 +483,9 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
if ts.Sub(a.KeepFiringSince) >= ar.KeepFiringFor {
|
||||
a.State = notifier.StateInactive
|
||||
a.ResolvedAt = ts
|
||||
// add stale time series
|
||||
tss = append(tss, firingAlertStaleTimeSeries(a.Labels, ts.Unix())...)
|
||||
|
||||
ar.logDebugf(ts, a, "FIRING => INACTIVE: is absent in current evaluation round")
|
||||
continue
|
||||
}
|
||||
@@ -489,6 +497,10 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
a.State = notifier.StateFiring
|
||||
a.Start = ts
|
||||
alertsFired.Inc()
|
||||
if ar.For > 0 {
|
||||
// add stale time series
|
||||
tss = append(tss, pendingAlertStaleTimeSeries(a.Labels, ts.Unix(), false)...)
|
||||
}
|
||||
ar.logDebugf(ts, a, "PENDING => FIRING: %s since becoming active at %v", ts.Sub(a.ActiveAt), a.ActiveAt)
|
||||
}
|
||||
}
|
||||
@@ -497,7 +509,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
curState.Err = fmt.Errorf("exec exceeded limit of %d with %d alerts", limit, numActivePending)
|
||||
return nil, curState.Err
|
||||
}
|
||||
return ar.toTimeSeries(ts.Unix()), nil
|
||||
return append(tss, ar.toTimeSeries(ts.Unix())...), nil
|
||||
}
|
||||
|
||||
func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.QueryFn, ts time.Time) (*labelSet, map[string]string, error) {
|
||||
@@ -522,6 +534,7 @@ func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.Query
|
||||
return ls, as, nil
|
||||
}
|
||||
|
||||
// toTimeSeries creates `ALERTS` and `ALERTS_FOR_STATE` for active alerts
|
||||
func (ar *AlertingRule) toTimeSeries(timestamp int64) []prompbmarshal.TimeSeries {
|
||||
var tss []prompbmarshal.TimeSeries
|
||||
for _, a := range ar.alerts {
|
||||
@@ -601,26 +614,89 @@ func (ar *AlertingRule) alertToTimeSeries(a *notifier.Alert, timestamp int64) []
|
||||
}
|
||||
|
||||
func alertToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
|
||||
labels := make(map[string]string)
|
||||
labels := make([]prompbmarshal.Label, 0, len(a.Labels)+2)
|
||||
for k, v := range a.Labels {
|
||||
labels[k] = v
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
// __name__ has already been dropped, no need to check for duplication
|
||||
labels = append(labels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
if ol := promrelabel.GetLabelByName(labels, alertStateLabel); ol != nil {
|
||||
ol.Value = a.State.String()
|
||||
} else {
|
||||
labels = append(labels, prompbmarshal.Label{Name: alertStateLabel, Value: a.State.String()})
|
||||
}
|
||||
labels["__name__"] = alertMetricName
|
||||
labels[alertStateLabel] = a.State.String()
|
||||
return newTimeSeries([]float64{1}, []int64{timestamp}, labels)
|
||||
}
|
||||
|
||||
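`alertToTimeSeries` above switches from a `map[string]string` to an ordered `[]prompbmarshal.Label` slice and uses `promrelabel.GetLabelByName` to override an existing `alertstate` label instead of appending a duplicate. The sketch below shows the same override-or-append pattern with a local `Label` type so it stays self-contained; it is an illustration, not the library code.

```go
package main

import "fmt"

type Label struct{ Name, Value string }

// getLabelByName mirrors what promrelabel.GetLabelByName is used for above:
// it returns a pointer into the slice so the caller can overwrite in place.
func getLabelByName(labels []Label, name string) *Label {
	for i := range labels {
		if labels[i].Name == name {
			return &labels[i]
		}
	}
	return nil
}

// setLabel overrides the label value if the name already exists,
// otherwise it appends a new label.
func setLabel(labels []Label, name, value string) []Label {
	if l := getLabelByName(labels, name); l != nil {
		l.Value = value
		return labels
	}
	return append(labels, Label{Name: name, Value: value})
}

func main() {
	labels := []Label{{"alertname", "HighLatency"}, {"alertstate", "foo"}}
	labels = setLabel(labels, "alertstate", "firing")
	fmt.Println(labels) // [{alertname HighLatency} {alertstate firing}]
}
```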
// alertForToTimeSeries returns a timeseries that represents
|
||||
// alertForToTimeSeries returns a time series that represents
|
||||
// the state of active alerts, where the value is the time when the alert became active
|
||||
func alertForToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
|
||||
labels := make(map[string]string)
|
||||
labels := make([]prompbmarshal.Label, 0, len(a.Labels)+1)
|
||||
for k, v := range a.Labels {
|
||||
labels[k] = v
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
labels["__name__"] = alertForStateMetricName
|
||||
// __name__ has already been dropped, no need to check for duplication
|
||||
labels = append(labels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
return newTimeSeries([]float64{float64(a.ActiveAt.Unix())}, []int64{timestamp}, labels)
|
||||
}
|
||||
|
||||
// pendingAlertStaleTimeSeries returns stale `ALERTS` and `ALERTS_FOR_STATE` time series
|
||||
// for alerts which changed their state from Pending to Inactive or Firing.
|
||||
func pendingAlertStaleTimeSeries(ls map[string]string, timestamp int64, includeAlertForState bool) []prompbmarshal.TimeSeries {
|
||||
var result []prompbmarshal.TimeSeries
|
||||
baseLabels := make([]prompbmarshal.Label, 0, len(ls)+1)
|
||||
for k, v := range ls {
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
alertsLabels := make([]prompbmarshal.Label, 0, len(ls)+2)
|
||||
alertsLabels = append(alertsLabels, baseLabels...)
|
||||
// __name__ has already been dropped, no need to check for duplication
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: alertStateLabel, Value: notifier.StatePending.String()})
|
||||
result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsLabels))
|
||||
|
||||
if includeAlertForState {
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, baseLabels))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// firingAlertStaleTimeSeries returns stale `ALERTS` and `ALERTS_FOR_STATE` time series
|
||||
// for alerts which changed their state from Firing to Inactive.
|
||||
func firingAlertStaleTimeSeries(ls map[string]string, timestamp int64) []prompbmarshal.TimeSeries {
|
||||
baseLabels := make([]prompbmarshal.Label, 0, len(ls)+1)
|
||||
for k, v := range ls {
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
alertsLabels := make([]prompbmarshal.Label, 0, len(ls)+2)
|
||||
alertsLabels = append(alertsLabels, baseLabels...)
|
||||
// __name__ has already been dropped, no need to check for duplication
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: alertStateLabel, Value: notifier.StateFiring.String()})
|
||||
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
|
||||
return []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsLabels),
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, baseLabels),
|
||||
}
|
||||
}
|
||||
|
||||
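Both stale-series helpers above emit samples whose value is `decimal.StaleNaN`, the staleness marker that tells remote storage a previously exported `ALERTS` / `ALERTS_FOR_STATE` series has ended. The sketch below assembles such a marker with simplified local types; the NaN bit pattern is the Prometheus stale-NaN value that `decimal.StaleNaN` is assumed to correspond to, and the `TimeSeries`/`Sample` types are stand-ins for `prompbmarshal`.

```go
package main

import (
	"fmt"
	"math"
)

// staleNaN is the Prometheus staleness-marker bit pattern (assumption: the
// same value decimal.StaleNaN wraps); it is a NaN distinguishable by its bits.
var staleNaN = math.Float64frombits(0x7ff0000000000002)

type Label struct{ Name, Value string }

type Sample struct {
	Value     float64
	Timestamp int64 // milliseconds
}

type TimeSeries struct {
	Labels  []Label
	Samples []Sample
}

// staleSeries marks a previously exported series as stale at the given time,
// so downstream storage stops treating the old value as current.
func staleSeries(labels []Label, tsMillis int64) TimeSeries {
	return TimeSeries{
		Labels:  labels,
		Samples: []Sample{{Value: staleNaN, Timestamp: tsMillis}},
	}
}

func main() {
	s := staleSeries([]Label{{"__name__", "ALERTS"}, {"alertstate", "pending"}}, 1730160000000)
	fmt.Println(math.IsNaN(s.Samples[0].Value)) // true, but with the stale bit pattern
}
```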
// restore restores the value of ActiveAt field for active alerts,
|
||||
// based on previously written time series `alertForStateMetricName`.
|
||||
// Only rules with For > 0 can be restored.
|
||||
@@ -641,7 +717,8 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
|
||||
for k, v := range ar.Labels {
|
||||
labelsFilter += fmt.Sprintf(",%s=%q", k, v)
|
||||
}
|
||||
expr := fmt.Sprintf("last_over_time(%s{%s%s}[%ds])",
|
||||
// use `default_rollup()` instead of `last_over_time()` here to account for possible staleness markers
|
||||
expr := fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
|
||||
alertForStateMetricName, nameStr, labelsFilter, int(lookback.Seconds()))
|
||||
|
||||
res, _, err := q.Query(ctx, expr, ts)
|
||||
|
||||
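The restore query above is a plain string built with `fmt.Sprintf`; `default_rollup` is used instead of `last_over_time` because, per the comment, it accounts for staleness markers like the ones emitted above. A small illustration of how such an expression is assembled, with example metric and label filters only:

```go
package main

import "fmt"

// restoreExpr builds a restore query in the same shape as the diff above;
// metric and filter values here are illustrative.
func restoreExpr(metric, nameFilter, labelsFilter string, lookbackSeconds int) string {
	return fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
		metric, nameFilter, labelsFilter, lookbackSeconds)
}

func main() {
	expr := restoreExpr("ALERTS_FOR_STATE", `alertname="foo"`, `,env="dev"`, 3600)
	fmt.Println(expr)
	// default_rollup(ALERTS_FOR_STATE{alertname="foo",env="dev"}[3600s])
}
```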
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
)
|
||||
@@ -28,7 +29,7 @@ func TestAlertingRuleToTimeSeries(t *testing.T) {
|
||||
rule.alerts[alert.ID] = alert
|
||||
tss := rule.toTimeSeries(timestamp.Unix())
|
||||
if err := compareTimeSeries(t, tssExpected, tss); err != nil {
|
||||
t.Fatalf("timeseries mismatch: %s", err)
|
||||
t.Fatalf("timeseries mismatch for rule %q: %s", rule.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,14 +37,23 @@ func TestAlertingRuleToTimeSeries(t *testing.T) {
|
||||
State: notifier.StateFiring,
|
||||
ActiveAt: timestamp.Add(time.Second),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": alertMetricName,
|
||||
alertStateLabel: notifier.StateFiring.String(),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertMetricName,
|
||||
},
|
||||
{
|
||||
Name: alertStateLabel,
|
||||
Value: notifier.StateFiring.String(),
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
|
||||
[]int64{timestamp.UnixNano()},
|
||||
map[string]string{
|
||||
"__name__": alertForStateMetricName,
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertForStateMetricName,
|
||||
},
|
||||
}),
|
||||
})
|
||||
|
||||
@@ -54,18 +64,40 @@ func TestAlertingRuleToTimeSeries(t *testing.T) {
|
||||
"instance": "bar",
|
||||
},
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": alertMetricName,
|
||||
alertStateLabel: notifier.StateFiring.String(),
|
||||
"job": "foo",
|
||||
"instance": "bar",
|
||||
}),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()},
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertMetricName,
|
||||
},
|
||||
{
|
||||
Name: alertStateLabel,
|
||||
Value: notifier.StateFiring.String(),
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
{
|
||||
Name: "instance",
|
||||
Value: "bar",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
|
||||
[]int64{timestamp.UnixNano()},
|
||||
map[string]string{
|
||||
"__name__": alertForStateMetricName,
|
||||
"job": "foo",
|
||||
"instance": "bar",
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertForStateMetricName,
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
{
|
||||
Name: "instance",
|
||||
Value: "bar",
|
||||
},
|
||||
}),
|
||||
})
|
||||
|
||||
@@ -73,18 +105,29 @@ func TestAlertingRuleToTimeSeries(t *testing.T) {
|
||||
State: notifier.StateFiring, ActiveAt: timestamp.Add(time.Second),
|
||||
Labels: map[string]string{
|
||||
alertStateLabel: "foo",
|
||||
"__name__": "bar",
|
||||
},
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": alertMetricName,
|
||||
alertStateLabel: notifier.StateFiring.String(),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertMetricName,
|
||||
},
|
||||
{
|
||||
Name: alertStateLabel,
|
||||
Value: notifier.StateFiring.String(),
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
|
||||
[]int64{timestamp.UnixNano()},
|
||||
map[string]string{
|
||||
"__name__": alertForStateMetricName,
|
||||
alertStateLabel: "foo",
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertForStateMetricName,
|
||||
},
|
||||
{
|
||||
Name: alertStateLabel,
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
})
|
||||
|
||||
@@ -92,14 +135,23 @@ func TestAlertingRuleToTimeSeries(t *testing.T) {
|
||||
State: notifier.StateFiring,
|
||||
ActiveAt: timestamp.Add(time.Second),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": alertMetricName,
|
||||
alertStateLabel: notifier.StateFiring.String(),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertMetricName,
|
||||
},
|
||||
{
|
||||
Name: alertStateLabel,
|
||||
Value: notifier.StateFiring.String(),
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
|
||||
[]int64{timestamp.UnixNano()},
|
||||
map[string]string{
|
||||
"__name__": alertForStateMetricName,
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertForStateMetricName,
|
||||
},
|
||||
}),
|
||||
})
|
||||
|
||||
@@ -107,12 +159,21 @@ func TestAlertingRuleToTimeSeries(t *testing.T) {
|
||||
State: notifier.StatePending,
|
||||
ActiveAt: timestamp.Add(time.Second),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": alertMetricName,
|
||||
alertStateLabel: notifier.StatePending.String(),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertMetricName,
|
||||
},
|
||||
{
|
||||
Name: alertStateLabel,
|
||||
Value: notifier.StatePending.String(),
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": alertForStateMetricName,
|
||||
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: alertForStateMetricName,
|
||||
},
|
||||
}),
|
||||
})
|
||||
}
|
||||
@@ -124,7 +185,9 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
alert *notifier.Alert
|
||||
}
|
||||
|
||||
f := func(rule *AlertingRule, steps [][]datasource.Metric, alertsExpected map[int][]testAlert) {
|
||||
ts, _ := time.Parse(time.RFC3339, "2024-10-29T00:00:00Z")
|
||||
|
||||
f := func(rule *AlertingRule, steps [][]datasource.Metric, alertsExpected map[int][]testAlert, tssExpected map[int][]prompbmarshal.TimeSeries) {
|
||||
t.Helper()
|
||||
|
||||
fq := &datasource.FakeQuerier{}
|
||||
@@ -134,13 +197,19 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
Name: "TestRule_Exec",
|
||||
}
|
||||
rule.GroupID = fakeGroup.ID()
|
||||
ts := time.Now()
|
||||
for i, step := range steps {
|
||||
fq.Reset()
|
||||
fq.Add(step...)
|
||||
if _, err := rule.exec(context.TODO(), ts, 0); err != nil {
|
||||
tss, err := rule.exec(context.TODO(), ts, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
// check generated time series
|
||||
if _, ok := tssExpected[i]; ok {
|
||||
if err := compareTimeSeries(t, tssExpected[i], tss); err != nil {
|
||||
t.Fatalf("generated time series mismatch for rule %q in step %d: %s", rule.Name, i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// shift the execution timestamp before the next iteration
|
||||
ts = ts.Add(defaultStep)
|
||||
@@ -174,13 +243,25 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
f(newTestAlertingRule("empty", 0), [][]datasource.Metric{}, nil)
|
||||
f(newTestAlertingRule("empty", 0), [][]datasource.Metric{}, nil, nil)
|
||||
|
||||
f(newTestAlertingRule("empty labels", 0), [][]datasource.Metric{
|
||||
f(newTestAlertingRule("empty_labels", 0), [][]datasource.Metric{
|
||||
{datasource.Metric{Values: []float64{1}, Timestamps: []int64{1}}},
|
||||
}, map[int][]testAlert{
|
||||
0: {{alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
})
|
||||
},
|
||||
map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "empty_labels"}, {Name: "alertstate", Value: "firing"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "empty_labels"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>inactive", 0), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
@@ -194,6 +275,37 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
2: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
3: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(2 * defaultStep).Unix()), Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>inactive=>firing", 0), [][]datasource.Metric{
|
||||
@@ -210,7 +322,7 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
3: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
5: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
})
|
||||
}, nil)
|
||||
|
||||
f(newTestAlertingRule("multiple-firing", 0), [][]datasource.Metric{
|
||||
{
|
||||
@@ -224,7 +336,7 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
{labels: []string{"name", "foo1"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
{labels: []string{"name", "foo2"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
},
|
||||
})
|
||||
}, nil)
|
||||
|
||||
// 1: fire first alert
|
||||
// 2: fire second alert, set first inactive
|
||||
@@ -233,27 +345,77 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo1")},
|
||||
{metricWithLabels(t, "name", "foo2")},
|
||||
},
|
||||
map[int][]testAlert{
|
||||
0: {
|
||||
{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
}, map[int][]testAlert{
|
||||
0: {
|
||||
{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
},
|
||||
1: {
|
||||
{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}},
|
||||
{labels: []string{"name", "foo1"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
},
|
||||
2: {
|
||||
{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}},
|
||||
{labels: []string{"name", "foo1"}, alert: ¬ifier.Alert{State: notifier.StateInactive}},
|
||||
{labels: []string{"name", "foo2"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
1: {
|
||||
{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}},
|
||||
{labels: []string{"name", "foo1"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
2: {
|
||||
{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}},
|
||||
{labels: []string{"name", "foo1"}, alert: ¬ifier.Alert{State: notifier.StateInactive}},
|
||||
{labels: []string{"name", "foo2"}, alert: ¬ifier.Alert{State: notifier.StateFiring}},
|
||||
},
|
||||
1: {
|
||||
// stale time series for foo, `firing -> inactive`
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
})
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
// new time series for foo1
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(defaultStep).Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
// stale time series for foo1
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
// new time series for foo2
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo2"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo2"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(2 * defaultStep).Unix()), Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("for-pending", time.Minute), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
}, map[int][]testAlert{
|
||||
0: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
})
|
||||
}, nil)
|
||||
|
||||
f(newTestAlertingRule("for-fired", defaultStep), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
@@ -261,17 +423,75 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
}, map[int][]testAlert{
|
||||
0: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
// stale time series for `pending -> firing`
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(defaultStep).Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("for-pending=>empty", time.Second), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo", "a1", "b1", "a2", "b2", "a3", "b3")},
|
||||
{metricWithLabels(t, "name", "foo", "a1", "b1", "a2", "b2", "a3", "b3")},
|
||||
// empty step to delete pending alerts
|
||||
{},
|
||||
}, map[int][]testAlert{
|
||||
0: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
0: {{labels: []string{"name", "foo", "a1", "b1", "a2", "b2", "a3", "b3"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
1: {{labels: []string{"name", "foo", "a1", "b1", "a2", "b2", "a3", "b3"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
2: {},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
// stale time series for `pending -> inactive`
|
||||
2: {
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("for-pending=>firing=>inactive=>pending=>firing", defaultStep), [][]datasource.Metric{
|
||||
@@ -287,7 +507,7 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
2: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
3: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
})
|
||||
}, nil)
|
||||
|
||||
f(newTestAlertingRuleWithCustomFields("for-pending=>firing=>keepfiring=>firing", defaultStep, 0, defaultStep, nil), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
@@ -300,7 +520,7 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
2: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
3: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
})
|
||||
}, nil)
|
||||
|
||||
f(newTestAlertingRuleWithCustomFields("for-pending=>firing=>keepfiring=>keepfiring=>inactive=>pending=>firing", defaultStep, 0, 2*defaultStep, nil), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
@@ -321,7 +541,7 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
5: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
6: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func TestAlertingRuleExecRange(t *testing.T) {
|
||||
@@ -477,7 +697,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
|
||||
{Values: []float64{1, 1, 1}, Timestamps: []int64{1, 3, 5}},
|
||||
{
|
||||
Values: []float64{1, 1}, Timestamps: []int64{1, 5},
|
||||
Labels: []datasource.Label{{Name: "foo", Value: "bar"}},
|
||||
Labels: []prompbmarshal.Label{{Name: "foo", Value: "bar"}},
|
||||
},
|
||||
}, []*notifier.Alert{
|
||||
{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
|
||||
@@ -523,7 +743,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
|
||||
{Values: []float64{1, 1}, Timestamps: []int64{1, 100}},
|
||||
{
|
||||
Values: []float64{1, 1}, Timestamps: []int64{1, 5},
|
||||
Labels: []datasource.Label{{Name: "foo", Value: "bar"}},
|
||||
Labels: []prompbmarshal.Label{{Name: "foo", Value: "bar"}},
|
||||
},
|
||||
}, []*notifier.Alert{
|
||||
{
|
||||
@@ -629,7 +849,7 @@ func TestGroup_Restore(t *testing.T) {
|
||||
|
||||
// one active alert with state restore
|
||||
ts := time.Now().Truncate(time.Hour)
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
|
||||
stateMetric("foo", ts))
|
||||
fn(
|
||||
[]config.Rule{{Alert: "foo", Expr: "foo", For: promutils.NewDuration(time.Second)}},
|
||||
@@ -642,7 +862,7 @@ func TestGroup_Restore(t *testing.T) {
|
||||
|
||||
// two rules, two active alerts, one with state restored
|
||||
ts = time.Now().Truncate(time.Hour)
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
|
||||
stateMetric("bar", ts))
|
||||
fn(
|
||||
[]config.Rule{
|
||||
@@ -662,9 +882,9 @@ func TestGroup_Restore(t *testing.T) {
|
||||
|
||||
// two rules, two active alerts, two with state restored
|
||||
ts = time.Now().Truncate(time.Hour)
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
|
||||
stateMetric("foo", ts))
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
|
||||
stateMetric("bar", ts))
|
||||
fn(
|
||||
[]config.Rule{
|
||||
@@ -684,7 +904,7 @@ func TestGroup_Restore(t *testing.T) {
|
||||
|
||||
// one active alert but wrong state restore
|
||||
ts = time.Now().Truncate(time.Hour)
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertname="bar",alertgroup="TestRestore"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertname="bar",alertgroup="TestRestore"}[3600s])`,
|
||||
stateMetric("wrong alert", ts))
|
||||
fn(
|
||||
[]config.Rule{{Alert: "foo", Expr: "foo", For: promutils.NewDuration(time.Second)}},
|
||||
@@ -697,7 +917,7 @@ func TestGroup_Restore(t *testing.T) {
|
||||
|
||||
// one active alert with labels
|
||||
ts = time.Now().Truncate(time.Hour)
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
|
||||
stateMetric("foo", ts, "env", "dev"))
|
||||
fn(
|
||||
[]config.Rule{{Alert: "foo", Expr: "foo", Labels: map[string]string{"env": "dev"}, For: promutils.NewDuration(time.Second)}},
|
||||
@@ -710,7 +930,7 @@ func TestGroup_Restore(t *testing.T) {
|
||||
|
||||
// one active alert with restore labels mismatch
|
||||
ts = time.Now().Truncate(time.Hour)
|
||||
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
|
||||
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
|
||||
stateMetric("foo", ts, "env", "dev", "team", "foo"))
|
||||
fn(
|
||||
[]config.Rule{{Alert: "foo", Expr: "foo", Labels: map[string]string{"env": "dev"}, For: promutils.NewDuration(time.Second)}},
|
||||
@@ -1047,7 +1267,7 @@ func newTestAlertingRuleWithCustomFields(name string, waitFor, evalInterval, kee
|
||||
|
||||
func TestAlertingRule_ToLabels(t *testing.T) {
|
||||
metric := datasource.Metric{
|
||||
Labels: []datasource.Label{
|
||||
Labels: []prompbmarshal.Label{
|
||||
{Name: "instance", Value: "0.0.0.0:8800"},
|
||||
{Name: "group", Value: "vmalert"},
|
||||
{Name: "alertname", Value: "ConfigurationReloadFailure"},
|
||||
|
||||
@@ -8,12 +8,9 @@ import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
|
||||
"github.com/cheggaaa/pb/v3"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
|
||||
@@ -21,7 +18,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
@@ -350,10 +346,9 @@ func (g *Group) Start(ctx context.Context, nts func() []notifier.Notifier, rw re
|
||||
}
|
||||
|
||||
e := &executor{
|
||||
Rw: rw,
|
||||
Notifiers: nts,
|
||||
notifierHeaders: g.NotifierHeaders,
|
||||
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
|
||||
Rw: rw,
|
||||
Notifiers: nts,
|
||||
notifierHeaders: g.NotifierHeaders,
|
||||
}
|
||||
|
||||
g.infof("started")
|
||||
@@ -426,8 +421,6 @@ func (g *Group) Start(ctx context.Context, nts func() []notifier.Notifier, rw re
|
||||
continue
|
||||
}
|
||||
|
||||
// ensure that staleness is tracked for existing rules only
|
||||
e.purgeStaleSeries(g.Rules)
|
||||
e.notifierHeaders = g.NotifierHeaders
|
||||
g.mu.Unlock()
|
||||
|
||||
@@ -450,8 +443,8 @@ func (g *Group) Start(ctx context.Context, nts func() []notifier.Notifier, rw re
|
||||
}
|
||||
|
||||
// UpdateWith inserts a new group into updateCh
|
||||
func (g *Group) UpdateWith(new *Group) {
|
||||
g.updateCh <- new
|
||||
func (g *Group) UpdateWith(newGroup *Group) {
|
||||
g.updateCh <- newGroup
|
||||
}
|
||||
|
||||
// DeepCopy returns a deep copy of group
|
||||
@@ -539,10 +532,9 @@ func (g *Group) Replay(start, end time.Time, rw remotewrite.RWClient, maxDataPoi
|
||||
// ExecOnce evaluates all the rules under group for once with given timestamp.
|
||||
func (g *Group) ExecOnce(ctx context.Context, nts func() []notifier.Notifier, rw remotewrite.RWClient, evalTS time.Time) chan error {
|
||||
e := &executor{
|
||||
Rw: rw,
|
||||
Notifiers: nts,
|
||||
notifierHeaders: g.NotifierHeaders,
|
||||
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
|
||||
Rw: rw,
|
||||
Notifiers: nts,
|
||||
notifierHeaders: g.NotifierHeaders,
|
||||
}
|
||||
if len(g.Rules) < 1 {
|
||||
return nil
|
||||
@@ -633,13 +625,6 @@ type executor struct {
|
||||
notifierHeaders map[string]string
|
||||
|
||||
Rw remotewrite.RWClient
|
||||
|
||||
previouslySentSeriesToRWMu sync.Mutex
|
||||
// previouslySentSeriesToRW stores series sent to RW on previous iteration
|
||||
// map[ruleID]map[ruleLabels][]prompb.Label
|
||||
// where `ruleID` is ID of the Rule within a Group
|
||||
// and `ruleLabels` is []prompb.Label marshalled to a string
|
||||
previouslySentSeriesToRW map[uint64]map[string][]prompbmarshal.Label
|
||||
}
|
||||
|
||||
// execConcurrently executes rules concurrently if concurrency>1
|
||||
@@ -706,11 +691,6 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
|
||||
if err := pushToRW(tss); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
staleSeries := e.getStaleSeries(r, tss, ts)
|
||||
if err := pushToRW(staleSeries); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ar, ok := r.(*AlertingRule)
|
||||
@@ -737,79 +717,3 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
|
||||
wg.Wait()
|
||||
return errGr.Err()
|
||||
}
|
||||
|
||||
var bbPool bytesutil.ByteBufferPool
|
||||
|
||||
// getStaleSeries checks whether there are stale series from previously sent ones.
|
||||
func (e *executor) getStaleSeries(r Rule, tss []prompbmarshal.TimeSeries, timestamp time.Time) []prompbmarshal.TimeSeries {
|
||||
bb := bbPool.Get()
|
||||
defer bbPool.Put(bb)
|
||||
|
||||
ruleLabels := make(map[string][]prompbmarshal.Label, len(tss))
|
||||
for _, ts := range tss {
|
||||
// convert labels to strings, so we can compare with previously sent series
|
||||
bb.B = labelsToString(bb.B, ts.Labels)
|
||||
ruleLabels[string(bb.B)] = ts.Labels
|
||||
bb.Reset()
|
||||
}
|
||||
|
||||
rID := r.ID()
|
||||
var staleS []prompbmarshal.TimeSeries
|
||||
// check whether there are series which disappeared and need to be marked as stale
|
||||
e.previouslySentSeriesToRWMu.Lock()
|
||||
for key, labels := range e.previouslySentSeriesToRW[rID] {
|
||||
if _, ok := ruleLabels[key]; ok {
|
||||
continue
|
||||
}
|
||||
// previously sent series are missing in current series, so we mark them as stale
|
||||
ss := newTimeSeriesPB([]float64{decimal.StaleNaN}, []int64{timestamp.Unix()}, labels)
|
||||
staleS = append(staleS, ss)
|
||||
}
|
||||
// set previous series to current
|
||||
e.previouslySentSeriesToRW[rID] = ruleLabels
|
||||
e.previouslySentSeriesToRWMu.Unlock()
|
||||
|
||||
return staleS
|
||||
}
|
||||
|
||||
// purgeStaleSeries deletes references in tracked
|
||||
// previouslySentSeriesToRW list to Rules which aren't present
|
||||
// in the given activeRules list. The method is used when the list
|
||||
// of loaded rules has changed and executor has to remove
|
||||
// references to non-existing rules.
|
||||
func (e *executor) purgeStaleSeries(activeRules []Rule) {
|
||||
newPreviouslySentSeriesToRW := make(map[uint64]map[string][]prompbmarshal.Label)
|
||||
|
||||
e.previouslySentSeriesToRWMu.Lock()
|
||||
|
||||
for _, rule := range activeRules {
|
||||
id := rule.ID()
|
||||
prev, ok := e.previouslySentSeriesToRW[id]
|
||||
if ok {
|
||||
// keep previous series for staleness detection
|
||||
newPreviouslySentSeriesToRW[id] = prev
|
||||
}
|
||||
}
|
||||
e.previouslySentSeriesToRW = nil
|
||||
e.previouslySentSeriesToRW = newPreviouslySentSeriesToRW
|
||||
|
||||
e.previouslySentSeriesToRWMu.Unlock()
|
||||
}
|
||||
|
||||
func labelsToString(dst []byte, labels []prompbmarshal.Label) []byte {
|
||||
dst = append(dst, '{')
|
||||
for i, label := range labels {
|
||||
if len(label.Name) == 0 {
|
||||
dst = append(dst, "__name__"...)
|
||||
} else {
|
||||
dst = append(dst, label.Name...)
|
||||
}
|
||||
dst = append(dst, '=')
|
||||
dst = strconv.AppendQuote(dst, label.Value)
|
||||
if i < len(labels)-1 {
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
}
|
||||
dst = append(dst, '}')
|
||||
return dst
|
||||
}
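// Illustrative sketch (not part of the diff): labelsToString builds a
// deterministic key such as {__name__="job:foo",job="foo"}, which getStaleSeries
// uses to index previously sent series. A minimal standalone mimic with a local
// label type (assumed names), runnable on its own:
package main

import (
	"fmt"
	"strconv"
)

type label struct{ Name, Value string }

func labelsKey(labels []label) string {
	dst := []byte{'{'}
	for i, l := range labels {
		name := l.Name
		if name == "" {
			name = "__name__" // an empty name means the metric name, as in labelsToString
		}
		dst = append(dst, name...)
		dst = append(dst, '=')
		dst = strconv.AppendQuote(dst, l.Value)
		if i < len(labels)-1 {
			dst = append(dst, ',')
		}
	}
	dst = append(dst, '}')
	return string(dst)
}

func main() {
	fmt.Println(labelsKey([]label{{"__name__", "job:foo"}, {"job", "foo"}}))
	// {__name__="job:foo",job="foo"}
}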
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,8 +17,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
)
|
||||
|
||||
@@ -29,7 +27,7 @@ func init() {
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := templates.Load([]string{}, true); err != nil {
|
||||
if err := templates.Load([]string{}, url.URL{}); err != nil {
|
||||
fmt.Println("failed to load template for test")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -383,153 +381,6 @@ func TestGetResolveDuration(t *testing.T) {
|
||||
f(2*time.Minute, 0, 1*time.Minute, 8*time.Minute)
|
||||
}
|
||||
|
||||
func TestGetStaleSeries(t *testing.T) {
|
||||
ts := time.Now()
|
||||
e := &executor{
|
||||
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
|
||||
}
|
||||
f := func(r Rule, labels, expLabels [][]prompbmarshal.Label) {
|
||||
t.Helper()
|
||||
|
||||
var tss []prompbmarshal.TimeSeries
|
||||
for _, l := range labels {
|
||||
tss = append(tss, newTimeSeriesPB([]float64{1}, []int64{ts.Unix()}, l))
|
||||
}
|
||||
staleS := e.getStaleSeries(r, tss, ts)
|
||||
if staleS == nil && expLabels == nil {
|
||||
return
|
||||
}
|
||||
if len(staleS) != len(expLabels) {
|
||||
t.Fatalf("expected to get %d stale series, got %d",
|
||||
len(expLabels), len(staleS))
|
||||
}
|
||||
for i, exp := range expLabels {
|
||||
got := staleS[i]
|
||||
if !reflect.DeepEqual(exp, got.Labels) {
|
||||
t.Fatalf("expected to get labels: \n%v;\ngot instead: \n%v",
|
||||
exp, got.Labels)
|
||||
}
|
||||
if len(got.Samples) != 1 {
|
||||
t.Fatalf("expected to have 1 sample; got %d", len(got.Samples))
|
||||
}
|
||||
if !decimal.IsStaleNaN(got.Samples[0].Value) {
|
||||
t.Fatalf("expected sample value to be %v; got %v", decimal.StaleNaN, got.Samples[0].Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// warn: keep in mind that the executor holds state, so the sequence of f calls matters
|
||||
|
||||
// single series
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")},
|
||||
nil)
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")},
|
||||
nil)
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
nil,
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")})
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
nil,
|
||||
nil)
|
||||
|
||||
// multiple series
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{
|
||||
toPromLabels(t, "__name__", "job:foo", "job", "foo"),
|
||||
toPromLabels(t, "__name__", "job:foo", "job", "bar"),
|
||||
},
|
||||
nil)
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")})
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
|
||||
nil)
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
nil,
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")})
|
||||
|
||||
// multiple rules and series
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{
|
||||
toPromLabels(t, "__name__", "job:foo", "job", "foo"),
|
||||
toPromLabels(t, "__name__", "job:foo", "job", "bar"),
|
||||
},
|
||||
nil)
|
||||
f(&AlertingRule{RuleID: 2},
|
||||
[][]prompbmarshal.Label{
|
||||
toPromLabels(t, "__name__", "job:foo", "job", "foo"),
|
||||
toPromLabels(t, "__name__", "job:foo", "job", "bar"),
|
||||
},
|
||||
nil)
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")})
|
||||
f(&AlertingRule{RuleID: 1},
|
||||
[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
|
||||
nil)
|
||||
}
|
||||
|
||||
func TestPurgeStaleSeries(t *testing.T) {
|
||||
ts := time.Now()
|
||||
labels := toPromLabels(t, "__name__", "job:foo", "job", "foo")
|
||||
tss := []prompbmarshal.TimeSeries{newTimeSeriesPB([]float64{1}, []int64{ts.Unix()}, labels)}
|
||||
|
||||
f := func(curRules, newRules, expStaleRules []Rule) {
|
||||
t.Helper()
|
||||
e := &executor{
|
||||
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
|
||||
}
|
||||
// seed executor with series for
|
||||
// current rules
|
||||
for _, rule := range curRules {
|
||||
e.getStaleSeries(rule, tss, ts)
|
||||
}
|
||||
|
||||
e.purgeStaleSeries(newRules)
|
||||
|
||||
if len(e.previouslySentSeriesToRW) != len(expStaleRules) {
|
||||
t.Fatalf("expected to get %d stale series, got %d",
|
||||
len(expStaleRules), len(e.previouslySentSeriesToRW))
|
||||
}
|
||||
|
||||
for _, exp := range expStaleRules {
|
||||
if _, ok := e.previouslySentSeriesToRW[exp.ID()]; !ok {
|
||||
t.Fatalf("expected to have rule %d; got nil instead", exp.ID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f(nil, nil, nil)
|
||||
f(
|
||||
nil,
|
||||
[]Rule{&AlertingRule{RuleID: 1}},
|
||||
nil,
|
||||
)
|
||||
f(
|
||||
[]Rule{&AlertingRule{RuleID: 1}},
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
f(
|
||||
[]Rule{&AlertingRule{RuleID: 1}},
|
||||
[]Rule{&AlertingRule{RuleID: 2}},
|
||||
nil,
|
||||
)
|
||||
f(
|
||||
[]Rule{&AlertingRule{RuleID: 1}, &AlertingRule{RuleID: 2}},
|
||||
[]Rule{&AlertingRule{RuleID: 2}},
|
||||
[]Rule{&AlertingRule{RuleID: 2}},
|
||||
)
|
||||
f(
|
||||
[]Rule{&AlertingRule{RuleID: 1}, &AlertingRule{RuleID: 2}},
|
||||
[]Rule{&AlertingRule{RuleID: 1}, &AlertingRule{RuleID: 2}},
|
||||
[]Rule{&AlertingRule{RuleID: 1}, &AlertingRule{RuleID: 2}},
|
||||
)
|
||||
}
|
||||
|
||||
func TestFaultyNotifier(t *testing.T) {
|
||||
fq := &datasource.FakeQuerier{}
|
||||
fq.Add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "bar"))
|
||||
@@ -580,8 +431,7 @@ func TestFaultyRW(t *testing.T) {
|
||||
}
|
||||
|
||||
e := &executor{
|
||||
Rw: &remotewrite.Client{},
|
||||
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
|
||||
Rw: &remotewrite.Client{},
|
||||
}
|
||||
|
||||
err := e.exec(context.Background(), r, time.Now(), 0, 10)
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
package rule
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
func BenchmarkGetStaleSeries(b *testing.B) {
|
||||
ts := time.Now()
|
||||
n := 100
|
||||
payload := make([]prompbmarshal.TimeSeries, 0, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s := fmt.Sprintf("%d", i)
|
||||
labels := toPromLabels(b,
|
||||
"__name__", "foo", ""+
|
||||
"instance", s,
|
||||
"job", s,
|
||||
"state", s,
|
||||
)
|
||||
payload = append(payload, newTimeSeriesPB([]float64{1}, []int64{ts.Unix()}, labels))
|
||||
}
|
||||
|
||||
e := &executor{
|
||||
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
|
||||
}
|
||||
ar := &AlertingRule{RuleID: 1}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
e.getStaleSeries(ar, payload, ts)
|
||||
}
|
||||
}
|
||||
@@ -3,16 +3,17 @@ package rule
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
)
|
||||
|
||||
// RecordingRule is a Rule that supposed
|
||||
@@ -34,6 +35,8 @@ type RecordingRule struct {
|
||||
// during evaluations
|
||||
state *ruleState
|
||||
|
||||
lastEvaluation map[string]struct{}
|
||||
|
||||
metrics *recordingRuleMetrics
|
||||
}
|
||||
|
||||
@@ -113,7 +116,7 @@ func (rr *RecordingRule) execRange(ctx context.Context, start, end time.Time) ([
|
||||
var tss []prompbmarshal.TimeSeries
|
||||
for _, s := range res.Data {
|
||||
ts := rr.toTimeSeries(s)
|
||||
key := stringifyLabels(ts)
|
||||
key := stringifyLabels(ts.Labels)
|
||||
if _, ok := duplicates[key]; ok {
|
||||
return nil, fmt.Errorf("original metric %v; resulting labels %q: %w", s.Labels, key, errDuplicate)
|
||||
}
|
||||
@@ -155,28 +158,47 @@ func (rr *RecordingRule) exec(ctx context.Context, ts time.Time, limit int) ([]p
|
||||
return nil, curState.Err
|
||||
}
|
||||
|
||||
duplicates := make(map[string]struct{}, len(qMetrics))
|
||||
curEvaluation := make(map[string]struct{}, len(qMetrics))
|
||||
lastEvaluation := rr.lastEvaluation
|
||||
var tss []prompbmarshal.TimeSeries
|
||||
for _, r := range qMetrics {
|
||||
ts := rr.toTimeSeries(r)
|
||||
key := stringifyLabels(ts)
|
||||
if _, ok := duplicates[key]; ok {
|
||||
key := stringifyLabels(ts.Labels)
|
||||
if _, ok := curEvaluation[key]; ok {
|
||||
curState.Err = fmt.Errorf("original metric %v; resulting labels %q: %w", r, key, errDuplicate)
|
||||
return nil, curState.Err
|
||||
}
|
||||
duplicates[key] = struct{}{}
|
||||
curEvaluation[key] = struct{}{}
|
||||
delete(lastEvaluation, key)
|
||||
tss = append(tss, ts)
|
||||
}
|
||||
// check for stale time series
|
||||
for k := range lastEvaluation {
|
||||
tss = append(tss, prompbmarshal.TimeSeries{
|
||||
Labels: stringToLabels(k),
|
||||
Samples: []prompbmarshal.Sample{
|
||||
{Value: decimal.StaleNaN, Timestamp: ts.UnixNano() / 1e6},
|
||||
}})
|
||||
}
|
||||
rr.lastEvaluation = curEvaluation
|
||||
return tss, nil
|
||||
}
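// Illustrative sketch of the staleness tracking added above (assumptions: plain
// string keys stand in for the stringified label sets; a string slice stands in
// for the emitted StaleNaN series). The rule remembers the keys produced on the
// previous evaluation and marks every key that disappeared as stale.
package main

import "fmt"

func diffStale(last map[string]struct{}, cur []string) (stale []string, next map[string]struct{}) {
	next = make(map[string]struct{}, len(cur))
	for _, k := range cur {
		next[k] = struct{}{}
		delete(last, k) // still produced on this evaluation, so not stale
	}
	for k := range last {
		stale = append(stale, k) // disappeared since the last evaluation -> StaleNaN sample
	}
	return stale, next
}

func main() {
	last := map[string]struct{}{`job=foo,`: {}, `job=bar,`: {}}
	stale, _ := diffStale(last, []string{`job=foo,`})
	fmt.Println(stale) // [job=bar,]
}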
func stringifyLabels(ts prompbmarshal.TimeSeries) string {
|
||||
labels := ts.Labels
|
||||
if len(labels) > 1 {
|
||||
sort.Slice(labels, func(i, j int) bool {
|
||||
return labels[i].Name < labels[j].Name
|
||||
})
|
||||
func stringToLabels(s string) []prompbmarshal.Label {
|
||||
labels := strings.Split(s, ",")
|
||||
rLabels := make([]prompbmarshal.Label, 0, len(labels))
|
||||
for i := range labels {
|
||||
if label := strings.Split(labels[i], "="); len(label) == 2 {
|
||||
rLabels = append(rLabels, prompbmarshal.Label{
|
||||
Name: label[0],
|
||||
Value: label[1],
|
||||
})
|
||||
}
|
||||
}
|
||||
return rLabels
|
||||
}
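// Illustrative round-trip sketch (assumption: stringifyLabels joins pairs as
// "name=value," — implied by stringToLabels splitting on "," and "=" above;
// values containing those characters are not expected in this keying scheme).
package main

import (
	"fmt"
	"strings"
)

func toKey(labels [][2]string) string {
	b := strings.Builder{}
	for _, l := range labels {
		b.WriteString(l[0])
		b.WriteString("=")
		b.WriteString(l[1])
		b.WriteString(",")
	}
	return b.String()
}

func fromKey(s string) [][2]string {
	var out [][2]string
	for _, pair := range strings.Split(s, ",") {
		if kv := strings.Split(pair, "="); len(kv) == 2 {
			out = append(out, [2]string{kv[0], kv[1]})
		}
	}
	return out
}

func main() {
	key := toKey([][2]string{{"__name__", "foobarbaz"}, {"job", "bar"}})
	fmt.Println(key)          // __name__=foobarbaz,job=bar,
	fmt.Println(fromKey(key)) // [[__name__ foobarbaz] [job bar]]
}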
func stringifyLabels(labels []prompbmarshal.Label) string {
|
||||
b := strings.Builder{}
|
||||
for i, l := range labels {
|
||||
b.WriteString(l.Name)
|
||||
@@ -190,19 +212,27 @@ func stringifyLabels(ts prompbmarshal.TimeSeries) string {
|
||||
}
|
||||
|
||||
func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompbmarshal.TimeSeries {
|
||||
labels := make(map[string]string)
|
||||
for _, l := range m.Labels {
|
||||
labels[l.Name] = l.Value
|
||||
if preN := promrelabel.GetLabelByName(m.Labels, "__name__"); preN != nil {
|
||||
preN.Value = rr.Name
|
||||
} else {
|
||||
m.Labels = append(m.Labels, prompbmarshal.Label{
|
||||
Name: "__name__",
|
||||
Value: rr.Name,
|
||||
})
|
||||
}
|
||||
labels["__name__"] = rr.Name
|
||||
// override existing labels with configured ones
|
||||
for k, v := range rr.Labels {
|
||||
if _, ok := labels[k]; ok && labels[k] != v {
|
||||
labels[fmt.Sprintf("exported_%s", k)] = labels[k]
|
||||
for k := range rr.Labels {
|
||||
prevLabel := promrelabel.GetLabelByName(m.Labels, k)
|
||||
if prevLabel != nil && prevLabel.Value != rr.Labels[k] {
|
||||
// Rename the prevLabel to "exported_" + label.Name
|
||||
prevLabel.Name = fmt.Sprintf("exported_%s", prevLabel.Name)
|
||||
}
|
||||
labels[k] = v
|
||||
m.Labels = append(m.Labels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: rr.Labels[k],
|
||||
})
|
||||
}
|
||||
return newTimeSeries(m.Values, m.Timestamps, labels)
|
||||
ts := newTimeSeries(m.Values, m.Timestamps, m.Labels)
|
||||
return ts
|
||||
}
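// Illustrative sketch of the label override above (plain maps instead of
// prompbmarshal.Label for brevity): a label configured on the rule wins, and a
// conflicting label coming from the metric is preserved under an "exported_"
// prefix, matching the exported_source case in the test below.
package main

import "fmt"

func applyRuleLabels(metricLabels, ruleLabels map[string]string) map[string]string {
	out := make(map[string]string, len(metricLabels)+len(ruleLabels))
	for k, v := range metricLabels {
		out[k] = v
	}
	for k, v := range ruleLabels {
		if prev, ok := out[k]; ok && prev != v {
			out["exported_"+k] = prev // keep the original value under exported_<name>
		}
		out[k] = v
	}
	return out
}

func main() {
	got := applyRuleLabels(
		map[string]string{"job": "bar", "source": "origin"},
		map[string]string{"source": "test"},
	)
	fmt.Println(got) // map[exported_source:origin job:bar source:test]
}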
|
||||
|
||||
// updateWith copies all significant fields.
|
||||
|
||||
@@ -9,59 +9,131 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
func TestRecordingRule_Exec(t *testing.T) {
|
||||
f := func(rule *RecordingRule, metrics []datasource.Metric, tssExpected []prompbmarshal.TimeSeries) {
|
||||
ts, _ := time.Parse(time.RFC3339, "2024-10-29T00:00:00Z")
|
||||
const defaultStep = 5 * time.Millisecond
|
||||
|
||||
f := func(rule *RecordingRule, steps [][]datasource.Metric, tssExpected [][]prompbmarshal.TimeSeries) {
|
||||
t.Helper()
|
||||
|
||||
fq := &datasource.FakeQuerier{}
|
||||
fq.Add(metrics...)
|
||||
rule.q = fq
|
||||
rule.state = &ruleState{
|
||||
entries: make([]StateEntry, 10),
|
||||
}
|
||||
tss, err := rule.exec(context.TODO(), time.Now(), 0)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected RecordingRule.exec error: %s", err)
|
||||
}
|
||||
if err := compareTimeSeries(t, tssExpected, tss); err != nil {
|
||||
t.Fatalf("timeseries missmatch: %s", err)
|
||||
for i, step := range steps {
|
||||
fq.Reset()
|
||||
fq.Add(step...)
|
||||
rule.q = fq
|
||||
rule.state = &ruleState{
|
||||
entries: make([]StateEntry, 10),
|
||||
}
|
||||
tss, err := rule.exec(context.TODO(), ts, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("fail to test rule %s: unexpected error: %s", rule.Name, err)
|
||||
}
|
||||
if err := compareTimeSeries(t, tssExpected[i], tss); err != nil {
|
||||
t.Fatalf("fail to test rule %s: time series mismatch on step %d: %s", rule.Name, i, err)
|
||||
}
|
||||
|
||||
ts = ts.Add(defaultStep)
|
||||
}
|
||||
}
|
||||
|
||||
timestamp := time.Now()
|
||||
|
||||
f(&RecordingRule{
|
||||
Name: "foo",
|
||||
}, []datasource.Metric{
|
||||
}, [][]datasource.Metric{{
|
||||
metricWithValueAndLabels(t, 10, "__name__", "bar"),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{10}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foo",
|
||||
}}, [][]prompbmarshal.TimeSeries{{
|
||||
newTimeSeries([]float64{10}, []int64{ts.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
})
|
||||
}})
|
||||
|
||||
f(&RecordingRule{
|
||||
Name: "foobarbaz",
|
||||
}, []datasource.Metric{
|
||||
metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "foo"),
|
||||
metricWithValueAndLabels(t, 2, "__name__", "bar", "job", "bar"),
|
||||
metricWithValueAndLabels(t, 3, "__name__", "baz", "job", "baz"),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foobarbaz",
|
||||
"job": "foo",
|
||||
}),
|
||||
newTimeSeries([]float64{2}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foobarbaz",
|
||||
"job": "bar",
|
||||
}),
|
||||
newTimeSeries([]float64{3}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foobarbaz",
|
||||
"job": "baz",
|
||||
}),
|
||||
}, [][]datasource.Metric{
|
||||
{
|
||||
metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "foo"),
|
||||
metricWithValueAndLabels(t, 2, "__name__", "bar", "job", "bar"),
|
||||
},
|
||||
{
|
||||
metricWithValueAndLabels(t, 10, "__name__", "foo", "job", "foo"),
|
||||
},
|
||||
{
|
||||
metricWithValueAndLabels(t, 10, "__name__", "foo", "job", "bar"),
|
||||
},
|
||||
}, [][]prompbmarshal.TimeSeries{
|
||||
{
|
||||
newTimeSeries([]float64{1}, []int64{ts.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{2}, []int64{ts.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "bar",
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
newTimeSeries([]float64{10}, []int64{ts.Add(defaultStep).UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
// stale time series
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{ts.Add(defaultStep).UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "bar",
|
||||
},
|
||||
}),
|
||||
},
|
||||
{
|
||||
newTimeSeries([]float64{10}, []int64{ts.Add(2 * defaultStep).UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "bar",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{ts.Add(2 * defaultStep).UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
f(&RecordingRule{
|
||||
@@ -69,22 +141,44 @@ func TestRecordingRule_Exec(t *testing.T) {
|
||||
Labels: map[string]string{
|
||||
"source": "test",
|
||||
},
|
||||
}, []datasource.Metric{
|
||||
}, [][]datasource.Metric{{
|
||||
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo"),
|
||||
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin"),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{2}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "job:foo",
|
||||
"job": "foo",
|
||||
"source": "test",
|
||||
}}, [][]prompbmarshal.TimeSeries{{
|
||||
newTimeSeries([]float64{2}, []int64{ts.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "job:foo",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
{
|
||||
Name: "source",
|
||||
Value: "test",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "job:foo",
|
||||
"job": "bar",
|
||||
"source": "test",
|
||||
"exported_source": "origin",
|
||||
}),
|
||||
})
|
||||
newTimeSeries([]float64{1}, []int64{ts.UnixNano()},
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "job:foo",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "source",
|
||||
Value: "test",
|
||||
},
|
||||
{
|
||||
Name: "exported_source",
|
||||
Value: "origin",
|
||||
},
|
||||
}),
|
||||
}})
|
||||
}
|
||||
|
||||
func TestRecordingRule_ExecRange(t *testing.T) {
|
||||
@@ -110,9 +204,13 @@ func TestRecordingRule_ExecRange(t *testing.T) {
|
||||
}, []datasource.Metric{
|
||||
metricWithValuesAndLabels(t, []float64{10, 20, 30}, "__name__", "bar"),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{10, 20, 30}, []int64{timestamp.UnixNano(), timestamp.UnixNano(), timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foo",
|
||||
}),
|
||||
newTimeSeries([]float64{10, 20, 30}, []int64{timestamp.UnixNano(), timestamp.UnixNano(), timestamp.UnixNano()},
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
})
|
||||
|
||||
f(&RecordingRule{
|
||||
@@ -122,18 +220,36 @@ func TestRecordingRule_ExecRange(t *testing.T) {
|
||||
metricWithValuesAndLabels(t, []float64{2, 3}, "__name__", "bar", "job", "bar"),
|
||||
metricWithValuesAndLabels(t, []float64{4, 5, 6}, "__name__", "baz", "job", "baz"),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foobarbaz",
|
||||
"job": "foo",
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{2, 3}, []int64{timestamp.UnixNano(), timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foobarbaz",
|
||||
"job": "bar",
|
||||
newTimeSeries([]float64{2, 3}, []int64{timestamp.UnixNano(), timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "bar",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{4, 5, 6},
|
||||
[]int64{timestamp.UnixNano(), timestamp.UnixNano(), timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "foobarbaz",
|
||||
"job": "baz",
|
||||
[]int64{timestamp.UnixNano(), timestamp.UnixNano(), timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "foobarbaz",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "baz",
|
||||
},
|
||||
}),
|
||||
})
|
||||
|
||||
@@ -146,16 +262,35 @@ func TestRecordingRule_ExecRange(t *testing.T) {
|
||||
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo"),
|
||||
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar"),
|
||||
}, []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{2}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "job:foo",
|
||||
"job": "foo",
|
||||
"source": "test",
|
||||
}),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
|
||||
"__name__": "job:foo",
|
||||
"job": "bar",
|
||||
"source": "test",
|
||||
newTimeSeries([]float64{2}, []int64{timestamp.UnixNano()}, []prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "job:foo",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "foo",
|
||||
},
|
||||
{
|
||||
Name: "source",
|
||||
Value: "test",
|
||||
},
|
||||
}),
|
||||
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()},
|
||||
[]prompbmarshal.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "job:foo",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "source",
|
||||
Value: "test",
|
||||
},
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
@@ -87,7 +88,7 @@ func metricWithLabels(t *testing.T, labels ...string) datasource.Metric {
|
||||
}
|
||||
m := datasource.Metric{Values: []float64{1}, Timestamps: []int64{1}}
|
||||
for i := 0; i < len(labels); i += 2 {
|
||||
m.Labels = append(m.Labels, datasource.Label{
|
||||
m.Labels = append(m.Labels, prompbmarshal.Label{
|
||||
Name: labels[i],
|
||||
Value: labels[i+1],
|
||||
})
|
||||
@@ -95,21 +96,6 @@ func metricWithLabels(t *testing.T, labels ...string) datasource.Metric {
|
||||
return m
|
||||
}
|
||||
|
||||
func toPromLabels(t testing.TB, labels ...string) []prompbmarshal.Label {
|
||||
t.Helper()
|
||||
if len(labels) == 0 || len(labels)%2 != 0 {
|
||||
t.Fatalf("expected to get even number of labels")
|
||||
}
|
||||
var ls []prompbmarshal.Label
|
||||
for i := 0; i < len(labels); i += 2 {
|
||||
ls = append(ls, prompbmarshal.Label{
|
||||
Name: labels[i],
|
||||
Value: labels[i+1],
|
||||
})
|
||||
}
|
||||
return ls
|
||||
}
|
||||
|
||||
func compareTimeSeries(t *testing.T, a, b []prompbmarshal.TimeSeries) error {
|
||||
t.Helper()
|
||||
if len(a) != len(b) {
|
||||
@@ -122,7 +108,7 @@ func compareTimeSeries(t *testing.T, a, b []prompbmarshal.TimeSeries) error {
|
||||
}
|
||||
for i, exp := range expTS.Samples {
|
||||
got := gotTS.Samples[i]
|
||||
if got.Value != exp.Value {
|
||||
if got.Value != exp.Value && (!decimal.IsStaleNaN(got.Value) || !decimal.IsStaleNaN(exp.Value)) {
|
||||
return fmt.Errorf("expected value %.2f; got %.2f", exp.Value, got.Value)
|
||||
}
|
||||
// timestamp validation isn't always correct for now.
|
||||
|
||||
@@ -9,10 +9,14 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
)
|
||||
|
||||
func newTimeSeries(values []float64, timestamps []int64, labels map[string]string) prompbmarshal.TimeSeries {
|
||||
// newTimeSeries first sorts given labels, then returns new time series.
|
||||
func newTimeSeries(values []float64, timestamps []int64, labels []prompbmarshal.Label) prompbmarshal.TimeSeries {
|
||||
promrelabel.SortLabels(labels)
|
||||
ts := prompbmarshal.TimeSeries{
|
||||
Labels: labels,
|
||||
Samples: make([]prompbmarshal.Sample, len(values)),
|
||||
}
|
||||
for i := range values {
|
||||
@@ -21,34 +25,6 @@ func newTimeSeries(values []float64, timestamps []int64, labels map[string]strin
|
||||
Timestamp: time.Unix(timestamps[i], 0).UnixNano() / 1e6,
|
||||
}
|
||||
}
|
||||
keys := make([]string, 0, len(labels))
|
||||
for k := range labels {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys) // make order deterministic
|
||||
for _, key := range keys {
|
||||
ts.Labels = append(ts.Labels, prompbmarshal.Label{
|
||||
Name: key,
|
||||
Value: labels[key],
|
||||
})
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// newTimeSeriesPB creates prompbmarshal.TimeSeries with given
|
||||
// values, timestamps and labels.
|
||||
// It expects that labels are already sorted.
|
||||
func newTimeSeriesPB(values []float64, timestamps []int64, labels []prompbmarshal.Label) prompbmarshal.TimeSeries {
|
||||
ts := prompbmarshal.TimeSeries{
|
||||
Samples: make([]prompbmarshal.Sample, len(values)),
|
||||
}
|
||||
for i := range values {
|
||||
ts.Samples[i] = prompbmarshal.Sample{
|
||||
Value: values[i],
|
||||
Timestamp: time.Unix(timestamps[i], 0).UnixNano() / 1e6,
|
||||
}
|
||||
}
|
||||
ts.Labels = labels
|
||||
return ts
|
||||
}
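// Usage sketch (relies only on the signatures visible in this diff): newTimeSeries
// sorts the given labels itself via promrelabel.SortLabels, while newTimeSeriesPB
// trusts the caller to pass labels that are already sorted by name.
package rule

import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

func exampleSeries() []prompbmarshal.TimeSeries {
	now := time.Now().Unix()
	unsorted := []prompbmarshal.Label{{Name: "job", Value: "foo"}, {Name: "__name__", Value: "up"}}
	sorted := []prompbmarshal.Label{{Name: "__name__", Value: "up"}, {Name: "job", Value: "foo"}}
	return []prompbmarshal.TimeSeries{
		newTimeSeries([]float64{1}, []int64{now}, unsorted), // labels are sorted internally
		newTimeSeriesPB([]float64{1}, []int64{now}, sorted), // labels must already be sorted
	}
}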
@@ -54,10 +54,9 @@ func newTemplate() *textTpl.Template {
|
||||
}
|
||||
|
||||
// Load func loads templates from multiple globs specified in pathPatterns and either
|
||||
// sets them directly to current template if it's undefined or with overwrite=true
|
||||
// or sets replacement templates and adds templates with new names to a current
|
||||
func Load(pathPatterns []string, overwrite bool) error {
|
||||
var err error
|
||||
// sets them directly to current template if it's the first init;
|
||||
// or sets replacement templates and waits for Reload() to replace the current template with the replacement.
|
||||
func Load(pathPatterns []string, externalURL url.URL) error {
|
||||
tmpl := newTemplate()
|
||||
for _, tp := range pathPatterns {
|
||||
p, err := doublestar.FilepathGlob(tp)
|
||||
@@ -79,36 +78,12 @@ func Load(pathPatterns []string, overwrite bool) error {
|
||||
}
|
||||
tplMu.Lock()
|
||||
defer tplMu.Unlock()
|
||||
if masterTmpl.current == nil || overwrite {
|
||||
masterTmpl.replacement = nil
|
||||
masterTmpl.current = newTemplate()
|
||||
} else {
|
||||
masterTmpl.replacement = newTemplate()
|
||||
if err = copyTemplates(tmpl, masterTmpl.replacement, overwrite); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return copyTemplates(tmpl, masterTmpl.current, overwrite)
|
||||
}
|
||||
tmpl = tmpl.Funcs(funcsWithExternalURL(externalURL))
|
||||
|
||||
func copyTemplates(from *textTpl.Template, to *textTpl.Template, overwrite bool) error {
|
||||
if from == nil {
|
||||
return nil
|
||||
}
|
||||
if to == nil {
|
||||
to = newTemplate()
|
||||
}
|
||||
tmpl, err := from.Clone()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range tmpl.Templates() {
|
||||
if to.Lookup(t.Name()) == nil || overwrite {
|
||||
to, err = to.AddParseTree(t.Name(), t.Tree)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add template %q: %w", t.Name(), err)
|
||||
}
|
||||
}
|
||||
if masterTmpl.current == nil {
|
||||
masterTmpl.current = tmpl
|
||||
} else {
|
||||
masterTmpl.replacement = tmpl
|
||||
}
|
||||
return nil
|
||||
}
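// Usage sketch (hedged; uses only the Load and Reload signatures visible in this
// diff): the first Load call becomes the current template, later calls are staged
// as a replacement, and Reload promotes the staged replacement to current.
package templates

import "net/url"

func exampleReload(pathPatterns []string, externalURL url.URL) error {
	if err := Load(pathPatterns, externalURL); err != nil { // first call: becomes current
		return err
	}
	if err := Load(pathPatterns, externalURL); err != nil { // later call: staged as replacement
		return err
	}
	Reload() // swap the replacement in as the current template
	return nil
}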
|
||||
@@ -153,13 +128,6 @@ func datasourceMetricsToTemplateMetrics(ms []datasource.Metric) []metric {
|
||||
// for templating functions.
|
||||
type QueryFn func(query string) ([]datasource.Metric, error)
|
||||
|
||||
// UpdateWithFuncs updates existing or sets a new function map for a template
|
||||
func UpdateWithFuncs(funcs textTpl.FuncMap) {
|
||||
tplMu.Lock()
|
||||
defer tplMu.Unlock()
|
||||
masterTmpl.current = masterTmpl.current.Funcs(funcs)
|
||||
}
|
||||
|
||||
// GetWithFuncs returns a copy of current template with additional FuncMap
|
||||
// provided with funcs argument
|
||||
func GetWithFuncs(funcs textTpl.FuncMap) (*textTpl.Template, error) {
|
||||
@@ -169,16 +137,11 @@ func GetWithFuncs(funcs textTpl.FuncMap) (*textTpl.Template, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Clone() doesn't copy tpl Options, so we set them manually
|
||||
tmpl = tmpl.Option("missingkey=zero")
|
||||
return tmpl.Funcs(funcs), nil
|
||||
}
|
||||
|
||||
// Get returns a copy of a template
|
||||
func Get() (*textTpl.Template, error) {
|
||||
tplMu.RLock()
|
||||
defer tplMu.RUnlock()
|
||||
return masterTmpl.current.Clone()
|
||||
}
|
||||
|
||||
// FuncsWithQuery returns a function map that depends on metric data
|
||||
func FuncsWithQuery(query QueryFn) textTpl.FuncMap {
|
||||
return textTpl.FuncMap{
|
||||
@@ -196,8 +159,8 @@ func FuncsWithQuery(query QueryFn) textTpl.FuncMap {
|
||||
}
|
||||
}
|
||||
|
||||
// FuncsWithExternalURL returns a function map that depends on externalURL value
|
||||
func FuncsWithExternalURL(externalURL *url.URL) textTpl.FuncMap {
|
||||
// funcsWithExternalURL returns a function map that depends on externalURL value
|
||||
func funcsWithExternalURL(externalURL url.URL) textTpl.FuncMap {
|
||||
return textTpl.FuncMap{
|
||||
"externalURL": func() string {
|
||||
return externalURL.String()
|
||||
|
||||
@@ -2,6 +2,7 @@ package templates
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
textTpl "text/template"
|
||||
@@ -152,7 +153,7 @@ func TestTemplatesLoad_Failure(t *testing.T) {
|
||||
f := func(pathPatterns []string, expectedErrStr string) {
|
||||
t.Helper()
|
||||
|
||||
err := Load(pathPatterns, false)
|
||||
err := Load(pathPatterns, url.URL{})
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
@@ -171,128 +172,17 @@ func TestTemplatesLoad_Failure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTemplatesLoad_Success(t *testing.T) {
|
||||
f := func(initialTmpl textTemplate, pathPatterns []string, overwrite bool, expectedTmpl textTemplate) {
|
||||
f := func(pathPatterns []string, expectedTmpl textTemplate) {
|
||||
t.Helper()
|
||||
|
||||
masterTmplOrig := masterTmpl
|
||||
masterTmpl = initialTmpl
|
||||
defer func() {
|
||||
masterTmpl = masterTmplOrig
|
||||
}()
|
||||
|
||||
if err := Load(pathPatterns, overwrite); err != nil {
|
||||
if err := Load(pathPatterns, url.URL{}); err != nil {
|
||||
t.Fatalf("cannot load templates: %s", err)
|
||||
}
|
||||
|
||||
if !equalTemplates(masterTmpl.replacement, expectedTmpl.replacement) {
|
||||
t.Fatalf("unexpected replacement template\ngot\n%+v\nwant\n%+v", masterTmpl.replacement, expectedTmpl.replacement)
|
||||
}
|
||||
if !equalTemplates(masterTmpl.current, expectedTmpl.current) {
|
||||
t.Fatalf("unexpected current template\ngot\n%+v\nwant\n%+v", masterTmpl.current, expectedTmpl.current)
|
||||
}
|
||||
}
|
||||
|
||||
// non existing path undefined template override
|
||||
initialTmpl := mkTemplate(nil, nil)
|
||||
pathPatterns := []string{
|
||||
"templates/non-existing/good-*.tpl",
|
||||
"templates/absent/good-*.tpl",
|
||||
}
|
||||
overwrite := true
|
||||
expectedTmpl := mkTemplate(``, nil)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
|
||||
// non existing path defined template override
|
||||
initialTmpl = mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
pathPatterns = []string{
|
||||
"templates/non-existing/good-*.tpl",
|
||||
"templates/absent/good-*.tpl",
|
||||
}
|
||||
overwrite = true
|
||||
expectedTmpl = mkTemplate(``, nil)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
|
||||
// existing path undefined template override
|
||||
initialTmpl = mkTemplate(nil, nil)
|
||||
pathPatterns = []string{
|
||||
"templates/other/nested/good0-*.tpl",
|
||||
"templates/test/good0-*.tpl",
|
||||
}
|
||||
overwrite = false
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
|
||||
// existing path defined template override
|
||||
initialTmpl = mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" "world" }}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
pathPatterns = []string{
|
||||
"templates/other/nested/good0-*.tpl",
|
||||
"templates/test/good0-*.tpl",
|
||||
}
|
||||
overwrite = false
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" "world" }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, `
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
}
|
||||
|
||||
func TestTemplatesReload(t *testing.T) {
|
||||
f := func(initialTmpl, expectedTmpl textTemplate) {
|
||||
t.Helper()
|
||||
|
||||
masterTmplOrig := masterTmpl
|
||||
masterTmpl = initialTmpl
|
||||
defer func() {
|
||||
masterTmpl = masterTmplOrig
|
||||
}()
|
||||
|
||||
Reload()
|
||||
|
||||
if !equalTemplates(masterTmpl.replacement, expectedTmpl.replacement) {
|
||||
@@ -303,46 +193,47 @@ func TestTemplatesReload(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// empty current and replacement templates
|
||||
f(mkTemplate(nil, nil), mkTemplate(nil, nil))
|
||||
// non existing path
|
||||
pathPatterns := []string{
|
||||
"templates/non-existing/good-*.tpl",
|
||||
"templates/absent/good-*.tpl",
|
||||
}
|
||||
expectedTmpl := mkTemplate(``, nil)
|
||||
f(pathPatterns, expectedTmpl)
|
||||
|
||||
// empty current template only
|
||||
f(mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil), mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil))
|
||||
|
||||
// empty replacement template only
|
||||
f(mkTemplate(nil, `
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`), mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil))
|
||||
|
||||
// defined both templates
|
||||
f(mkTemplate(`
|
||||
// existing path
|
||||
pathPatterns = []string{
|
||||
"templates/test/good0-*.tpl",
|
||||
}
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{- printf "value" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
f(pathPatterns, expectedTmpl)
|
||||
|
||||
// existing path defined template override
|
||||
pathPatterns = []string{
|
||||
"templates/other/nested/good0-*.tpl",
|
||||
}
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "before" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, `
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "after" -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`), mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "after" -}}
|
||||
{{- end -}}
|
||||
`, nil))
|
||||
`, nil)
|
||||
f(pathPatterns, expectedTmpl)
|
||||
}
|
||||
|
||||
@@ -2,11 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/rule"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRecordingToApi(t *testing.T) {
|
||||
|
||||
@@ -7,11 +7,13 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -67,6 +69,7 @@ type UserInfo struct {
|
||||
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
|
||||
DiscoverBackendIPs *bool `yaml:"discover_backend_ips,omitempty"`
|
||||
URLMaps []URLMap `yaml:"url_map,omitempty"`
|
||||
DumpRequestOnErrors bool `yaml:"dump_request_on_errors,omitempty"`
|
||||
HeadersConf HeadersConf `yaml:",inline"`
|
||||
MaxConcurrentRequests int `yaml:"max_concurrent_requests,omitempty"`
|
||||
DefaultURL *URLPrefix `yaml:"default_url,omitempty"`
|
||||
@@ -347,6 +350,7 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
hostToAddrs := make(map[string][]string)
|
||||
for _, bu := range up.busOriginal {
|
||||
host := bu.Hostname()
|
||||
port := bu.Port()
|
||||
if hostToAddrs[host] != nil {
|
||||
// IPs for the given host have already been discovered
|
||||
continue
|
||||
@@ -363,7 +367,11 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
} else {
|
||||
resolvedAddrs = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
resolvedAddrs[i] = fmt.Sprintf("%s:%d", addr.Target, addr.Port)
|
||||
hostPort := port
|
||||
if hostPort == "" && addr.Port > 0 {
|
||||
hostPort = strconv.FormatUint(uint64(addr.Port), 10)
|
||||
}
|
||||
resolvedAddrs[i] = net.JoinHostPort(addr.Target, hostPort)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -374,7 +382,7 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
} else {
|
||||
resolvedAddrs = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
resolvedAddrs[i] = addr.String()
|
||||
resolvedAddrs[i] = net.JoinHostPort(addr.String(), port)
|
||||
}
|
||||
}
|
||||
}
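// Illustrative sketch of why the discovery code above moved to net.JoinHostPort:
// unlike plain "%s:%s" formatting, it brackets IPv6 literals so the resulting
// host:port is valid, which is what TestDiscoverBackendIPsWithIPV6 below checks.
package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("10.0.10.13", "8080"))               // 10.0.10.13:8080
	fmt.Println(net.JoinHostPort("2607:f8b0:400a:80b::200e", "8080")) // [2607:f8b0:400a:80b::200e]:8080
	fmt.Println(net.JoinHostPort("vmselect.local", "8481"))           // vmselect.local:8481
}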
|
||||
@@ -388,17 +396,9 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
var busNew []*backendURL
|
||||
for _, bu := range up.busOriginal {
|
||||
host := bu.Hostname()
|
||||
port := bu.Port()
|
||||
for _, addr := range hostToAddrs[host] {
|
||||
buCopy := *bu
|
||||
buCopy.Host = addr
|
||||
if port != "" {
|
||||
if n := strings.IndexByte(buCopy.Host, ':'); n >= 0 {
|
||||
// Drop the discovered port and substitute it the port specified in bu.
|
||||
buCopy.Host = buCopy.Host[:n]
|
||||
}
|
||||
buCopy.Host += ":" + port
|
||||
}
|
||||
busNew = append(busNew, &backendURL{
|
||||
url: &buCopy,
|
||||
})
|
||||
@@ -462,17 +462,12 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
|
||||
|
||||
// Slow path - select other backend urls.
|
||||
n := atomicCounter.Add(1) - 1
|
||||
buMin := bus[n%uint32(len(bus))]
|
||||
for i := uint32(0); i < uint32(len(bus)); i++ {
|
||||
idx := (n + i) % uint32(len(bus))
|
||||
bu := bus[idx]
|
||||
if bu.isBroken() {
|
||||
continue
|
||||
}
|
||||
if buMin.isBroken() {
|
||||
// verify that buMin isn't set as broken
|
||||
buMin = bu
|
||||
}
|
||||
if bu.concurrentRequests.Load() == 0 {
|
||||
// Fast path - return the backend with zero concurrently executed requests.
|
||||
// Do not use CompareAndSwap() instead of Load(), since it is much slower on systems with many CPU cores.
|
||||
@@ -482,12 +477,13 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
|
||||
}
|
||||
|
||||
// Slow path - return the backend with the minimum number of concurrently executed requests.
|
||||
buMin := bus[n%uint32(len(bus))]
|
||||
minRequests := buMin.concurrentRequests.Load()
|
||||
for _, bu := range bus {
|
||||
if bu.isBroken() {
|
||||
continue
|
||||
}
|
||||
if n := bu.concurrentRequests.Load(); n < minRequests {
|
||||
if n := bu.concurrentRequests.Load(); n < minRequests || buMin.isBroken() {
|
||||
buMin = bu
|
||||
minRequests = n
|
||||
}
|
||||
@@ -786,10 +782,11 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
|
||||
|
||||
func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
|
||||
uis := ac.Users
|
||||
if len(uis) == 0 && ac.UnauthorizedUser == nil {
|
||||
return nil, fmt.Errorf("Missing `users` or `unauthorized_user` sections")
|
||||
}
|
||||
byAuthToken := make(map[string]*UserInfo, len(uis))
|
||||
if len(uis) == 0 && ac.UnauthorizedUser == nil {
|
||||
// fast path for empty configuration
|
||||
return byAuthToken, nil
|
||||
}
|
||||
for i := range uis {
|
||||
ui := &uis[i]
|
||||
ats, err := getAuthTokens(ui.AuthToken, ui.BearerToken, ui.Username, ui.Password)
|
||||
@@ -864,22 +861,23 @@ func (ui *UserInfo) initURLs() error {
|
||||
loadBalancingPolicy := *defaultLoadBalancingPolicy
|
||||
dropSrcPathPrefixParts := 0
|
||||
discoverBackendIPs := *discoverBackendIPsGlobal
|
||||
if ui.RetryStatusCodes != nil {
|
||||
retryStatusCodes = ui.RetryStatusCodes
|
||||
}
|
||||
if ui.LoadBalancingPolicy != "" {
|
||||
loadBalancingPolicy = ui.LoadBalancingPolicy
|
||||
}
|
||||
if ui.DropSrcPathPrefixParts != nil {
|
||||
dropSrcPathPrefixParts = *ui.DropSrcPathPrefixParts
|
||||
}
|
||||
if ui.DiscoverBackendIPs != nil {
|
||||
discoverBackendIPs = *ui.DiscoverBackendIPs
|
||||
}
|
||||
|
||||
if ui.URLPrefix != nil {
|
||||
if err := ui.URLPrefix.sanitizeAndInitialize(); err != nil {
|
||||
return err
|
||||
}
|
||||
if ui.RetryStatusCodes != nil {
|
||||
retryStatusCodes = ui.RetryStatusCodes
|
||||
}
|
||||
if ui.LoadBalancingPolicy != "" {
|
||||
loadBalancingPolicy = ui.LoadBalancingPolicy
|
||||
}
|
||||
if ui.DropSrcPathPrefixParts != nil {
|
||||
dropSrcPathPrefixParts = *ui.DropSrcPathPrefixParts
|
||||
}
|
||||
if ui.DiscoverBackendIPs != nil {
|
||||
discoverBackendIPs = *ui.DiscoverBackendIPs
|
||||
}
|
||||
ui.URLPrefix.retryStatusCodes = retryStatusCodes
|
||||
ui.URLPrefix.dropSrcPathPrefixParts = dropSrcPathPrefixParts
|
||||
ui.URLPrefix.discoverBackendIPs = discoverBackendIPs
|
||||
|
||||
@@ -3,12 +3,14 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
)
|
||||
|
||||
func TestParseAuthConfigFailure(t *testing.T) {
|
||||
@@ -24,16 +26,10 @@ func TestParseAuthConfigFailure(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Empty config
|
||||
f(``)
|
||||
|
||||
// Invalid entry
|
||||
f(`foobar`)
|
||||
f(`foobar: baz`)
|
||||
|
||||
// Empty users
|
||||
f(`users: []`)
|
||||
|
||||
// Missing url_prefix
|
||||
f(`
|
||||
users:
|
||||
@@ -302,6 +298,12 @@ func TestParseAuthConfigSuccess(t *testing.T) {
|
||||
|
||||
insecureSkipVerifyTrue := true
|
||||
|
||||
// Empty config
|
||||
f(``, map[string]*UserInfo{})
|
||||
|
||||
// Empty users
|
||||
f(`users: []`, map[string]*UserInfo{})
|
||||
|
||||
// Single user
|
||||
f(`
|
||||
users:
|
||||
@@ -799,6 +801,75 @@ func TestBrokenBackend(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiscoverBackendIPsWithIPV6(t *testing.T) {
|
||||
f := func(actualUrl, expectedUrl string) {
|
||||
t.Helper()
|
||||
up := mustParseURL(actualUrl)
|
||||
up.discoverBackendIPs = true
|
||||
up.loadBalancingPolicy = "least_loaded"
|
||||
|
||||
up.discoverBackendAddrsIfNeeded()
|
||||
pbus := up.bus.Load()
|
||||
bus := *pbus
|
||||
|
||||
if len(bus) != 1 {
|
||||
t.Fatalf("expected url list to be of size 1; got %d instead", len(bus))
|
||||
}
|
||||
|
||||
got := bus[0].url.Host
|
||||
if got != expectedUrl {
|
||||
t.Fatalf(`expected url to be %q; got %q instead`, expectedUrl, bus[0].url.Host)
|
||||
}
|
||||
}
|
||||
|
||||
// Discover backendURL with SRV hostnames
|
||||
customResolver := &fakeResolver{
|
||||
Resolver: &net.Resolver{},
|
||||
// SRV records must return hostname
|
||||
// not an IP address
|
||||
lookupSRVResults: map[string][]*net.SRV{
|
||||
"_vmselect._tcp.selectwithport.": {
|
||||
{
|
||||
Target: "vmselect.local",
|
||||
Port: 8481,
|
||||
},
|
||||
},
|
||||
"_vmselect._tcp.selectwoport.": {
|
||||
{
|
||||
Target: "vmselect.local",
|
||||
},
|
||||
},
|
||||
},
|
||||
lookupIPAddrResults: map[string][]net.IPAddr{
|
||||
"vminsert.local": {
|
||||
{
|
||||
IP: net.ParseIP("10.0.10.13"),
|
||||
},
|
||||
},
|
||||
"ipv6.vminsert.local": {
|
||||
{
|
||||
IP: net.ParseIP("2607:f8b0:400a:80b::200e"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
origResolver := netutil.Resolver
|
||||
netutil.Resolver = customResolver
|
||||
defer func() {
|
||||
netutil.Resolver = origResolver
|
||||
}()
|
||||
f("http://srv+_vmselect._tcp.selectwithport.:8080", "vmselect.local:8080")
|
||||
f("http://srv+_vmselect._tcp.selectwithport.:", "vmselect.local:8481")
|
||||
f("http://srv+_vmselect._tcp.selectwoport.:8080", "vmselect.local:8080")
|
||||
f("http://srv+_vmselect._tcp.selectwoport.", "vmselect.local:")
|
||||
|
||||
f("http://vminsert.local:8080", "10.0.10.13:8080")
|
||||
f("http://vminsert.local", "10.0.10.13:")
|
||||
f("http://ipv6.vminsert.local:8080", "[2607:f8b0:400a:80b::200e]:8080")
|
||||
f("http://ipv6.vminsert.local", "[2607:f8b0:400a:80b::200e]:")
|
||||
|
||||
}
|
||||
|
||||
func getRegexs(paths []string) []*Regex {
|
||||
var sps []*Regex
|
||||
for _, path := range paths {
|
||||
|
||||
@@ -22,26 +22,26 @@ users:
|
||||
# - or http://default2:8888/unsupported_url_handler?request_path=/non/existing/path
|
||||
#
|
||||
# Regular expressions are allowed in `src_paths` entries.
|
||||
- username: "foobar"
|
||||
url_map:
|
||||
- src_paths:
|
||||
- "/api/v1/query"
|
||||
- "/api/v1/query_range"
|
||||
- "/api/v1/label/[^/]+/values"
|
||||
url_prefix:
|
||||
- "http://vmselect1:8481/select/42/prometheus"
|
||||
- "http://vmselect2:8481/select/42/prometheus"
|
||||
- src_paths: ["/api/v1/write"]
|
||||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
headers:
|
||||
- "X-Scope-OrgID: abc"
|
||||
- username: "foobar"
|
||||
ip_filters:
|
||||
deny_list: [127.0.0.1]
|
||||
default_url:
|
||||
- "http://default1:8888/unsupported_url_handler"
|
||||
- "http://default2:8888/unsupported_url_handler"
|
||||
url_map:
|
||||
- src_paths:
|
||||
- "/api/v1/query"
|
||||
- "/api/v1/query_range"
|
||||
- "/api/v1/label/[^/]+/values"
|
||||
url_prefix:
|
||||
- "http://vmselect1:8481/select/42/prometheus"
|
||||
- "http://vmselect2:8481/select/42/prometheus"
|
||||
- src_paths: ["/api/v1/write"]
|
||||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
headers:
|
||||
- "X-Scope-OrgID: abc"
|
||||
default_url:
|
||||
- "http://default1:8888/unsupported_url_handler"
|
||||
- "http://default2:8888/unsupported_url_handler"
|
||||
|
||||
ip_filters:
|
||||
allow_list: ["1.2.3.0/24", "127.0.0.1"]
|
||||
deny_list:
|
||||
- 10.1.0.1
|
||||
- 10.1.0.1
|
||||
|
||||
@@ -31,7 +31,11 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")
|
||||
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. "+
|
||||
"By default, serves internal API and proxy requests. "+
|
||||
" See also -tls, -httpListenAddr.useProxyProtocol and -httpInternalListenAddr.")
|
||||
httpInternalListenAddr = flagutil.NewArrayString("httpInternalListenAddr", "TCP address to listen for incoming internal API http requests. Such as /health, /-/reload, /debug/pprof, etc. "+
|
||||
"If flag is set, vmauth no longer serves internal API at -httpListenAddr.")
|
||||
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
|
||||
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
|
||||
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
|
||||
@@ -61,6 +65,9 @@ var (
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
backendTLSServerName = flag.String("backend.TLSServerName", "", "Optional TLS ServerName, which must be sent to HTTPS backend. "+
|
||||
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmauth. The auth configuration file is validated. The -auth.config flag must be specified.")
|
||||
removeXFFHTTPHeaderValue = flag.Bool(`removeXFFHTTPHeaderValue`, false, "Whether to remove the X-Forwarded-For HTTP header value from client requests before forwarding them to the backend. "+
|
||||
"Recommended when vmauth is exposed to the internet.")
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -71,6 +78,16 @@ func main() {
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
|
||||
if *dryRun {
|
||||
if len(*authConfigPath) == 0 {
|
||||
logger.Fatalf("missing required `-auth.config` command-line flag")
|
||||
}
|
||||
if _, err := reloadAuthConfig(); err != nil {
|
||||
logger.Fatalf("failed to parse %q: %s", *authConfigPath, err)
|
||||
}
|
||||
return
|
||||
}
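// Usage note (hypothetical invocation; both flags appear in this diff):
//   ./vmauth -auth.config=/etc/vmauth/auth.yml -dryRun
// validates the auth configuration and exits without starting the proxy;
// a parse failure is reported via logger.Fatalf.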
|
||||
|
||||
listenAddrs := *httpListenAddrs
|
||||
if len(listenAddrs) == 0 {
|
||||
listenAddrs = []string{":8427"}
|
||||
@@ -78,7 +95,21 @@ func main() {
 	logger.Infof("starting vmauth at %q...", listenAddrs)
 	startTime := time.Now()
 	initAuthConfig()
-	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
+	disableInternalRoutes := len(*httpInternalListenAddr) > 0
+	rh := requestHandlerWithInternalRoutes
+	if disableInternalRoutes {
+		rh = requestHandler
+	}
+
+	serveOpts := httpserver.ServeOptions{
+		UseProxyProtocol:     useProxyProtocol,
+		DisableBuiltinRoutes: disableInternalRoutes,
+	}
+	go httpserver.ServeWithOpts(listenAddrs, rh, serveOpts)
+
+	if len(*httpInternalListenAddr) > 0 {
+		go httpserver.Serve(*httpInternalListenAddr, nil, internalRequestHandler)
+	}
 	logger.Infof("started vmauth in %.3f seconds", time.Since(startTime).Seconds())

 	pushmetrics.Init()
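The hunk above splits proxy traffic and internal API traffic: when -httpInternalListenAddr is set, endpoints such as /health and /-/reload are served only on that address, and the public listener keeps only the proxy handler. A rough standard-library sketch of the same idea, using plain net/http (the addresses and handlers below are assumptions for illustration, not vmauth's httpserver API):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Internal-only endpoints (health checks, config reload, etc.).
	internal := http.NewServeMux()
	internal.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "OK")
	})

	// Public proxy endpoint; it deliberately does not expose the internal routes.
	public := http.NewServeMux()
	public.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "proxying %s\n", r.URL.Path)
	})

	// Serve the internal API on a separate address, similar to -httpInternalListenAddr.
	go func() {
		_ = http.ListenAndServe("127.0.0.1:8426", internal)
	}()
	_ = http.ListenAndServe(":8427", public)
}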
@@ -96,7 +127,7 @@ func main() {
 	logger.Infof("successfully stopped vmauth in %.3f seconds", time.Since(startTime).Seconds())
 }

-func requestHandler(w http.ResponseWriter, r *http.Request) bool {
+func internalRequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	switch r.URL.Path {
 	case "/-/reload":
 		if !httpserver.CheckAuthFlag(w, r, reloadAuthKey) {
@@ -107,6 +138,17 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 		w.WriteHeader(http.StatusOK)
 		return true
 	}
+	return false
+}
+
+func requestHandlerWithInternalRoutes(w http.ResponseWriter, r *http.Request) bool {
+	if internalRequestHandler(w, r) {
+		return true
+	}
+	return requestHandler(w, r)
+}
+
+func requestHandler(w http.ResponseWriter, r *http.Request) bool {

 	ats := getAuthTokensFromRequest(r)
 	if len(ats) == 0 {
@@ -123,6 +165,12 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {

 	ui := getUserInfoByAuthTokens(ats)
 	if ui == nil {
+		uu := authConfig.Load().UnauthorizedUser
+		if uu != nil {
+			processUserRequest(w, r, uu)
+			return true
+		}
+
 		invalidAuthTokenRequests.Inc()
 		if *logInvalidAuthTokens {
 			err := fmt.Errorf("cannot authorize request with auth tokens %q", ats)
@@ -180,7 +228,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {

 func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
 	u := normalizeURL(r.URL)
-	up, hc := ui.getURLPrefixAndHeaders(u, r.Header)
+	up, hc := ui.getURLPrefixAndHeaders(u, r.Host, r.Header)
 	isDefault := false
 	if up == nil {
 		if ui.DefaultURL == nil {
@@ -192,15 +240,18 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
 				return
 			}
 			missingRouteRequests.Inc()
-			httpserver.Errorf(w, r, "missing route for %s", u.String())
+			var di string
+			if ui.DumpRequestOnErrors {
+				di = debugInfo(u, r)
+			}
+			httpserver.Errorf(w, r, "missing route for %q%s", u.String(), di)
 			return
 		}
 		up, hc = ui.DefaultURL, ui.HeadersConf
 		isDefault = true
 	}

-	rtb := getReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
-	defer putReadTrackingBody(rtb)
+	rtb := newReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
 	r.Body = rtb

 	maxAttempts := up.getBackendsCount()
@@ -371,7 +422,7 @@ func sanitizeRequestHeaders(r *http.Request) *http.Request {
 		// X-Forwarded-For information as a comma+space
 		// separated list and fold multiple headers into one.
 		prior := req.Header["X-Forwarded-For"]
-		if len(prior) > 0 {
+		if len(prior) > 0 && !*removeXFFHTTPHeaderValue {
 			clientIP = strings.Join(prior, ", ") + ", " + clientIP
 		}
 		req.Header.Set("X-Forwarded-For", clientIP)
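The new flag changes how the proxy rewrites X-Forwarded-For: by default the client-supplied value is kept and the observed client IP is appended, while with -removeXFFHTTPHeaderValue the client-supplied value is dropped and only the observed IP is forwarded. A small, self-contained sketch of both behaviours (setXFF is an illustrative helper, not the vmauth function):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// setXFF updates the X-Forwarded-For header before proxying.
// If dropClientValue is true, any value supplied by the client is discarded,
// so the backend only ever sees the IP observed by the proxy itself.
func setXFF(h http.Header, clientIP string, dropClientValue bool) {
	prior := h["X-Forwarded-For"]
	if len(prior) > 0 && !dropClientValue {
		clientIP = strings.Join(prior, ", ") + ", " + clientIP
	}
	h.Set("X-Forwarded-For", clientIP)
}

func main() {
	h := http.Header{"X-Forwarded-For": []string{"12.34.56.78"}}
	setXFF(h, "42.2.3.84", false)
	fmt.Println(h.Get("X-Forwarded-For")) // 12.34.56.78, 42.2.3.84

	h = http.Header{"X-Forwarded-For": []string{"12.34.56.78"}}
	setXFF(h, "42.2.3.84", true)
	fmt.Println(h.Get("X-Forwarded-For")) // 42.2.3.84
}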
@@ -536,22 +587,11 @@ type readTrackingBody struct {
 	bufComplete bool
 }

-func (rtb *readTrackingBody) reset() {
-	rtb.maxBodySize = 0
-	rtb.r = nil
-	rtb.buf = rtb.buf[:0]
-	rtb.readBuf = nil
-	rtb.cannotRetry = false
-	rtb.bufComplete = false
-}
-
-func getReadTrackingBody(r io.ReadCloser, maxBodySize int) *readTrackingBody {
-	v := readTrackingBodyPool.Get()
-	if v == nil {
-		v = &readTrackingBody{}
-	}
-	rtb := v.(*readTrackingBody)
-
+func newReadTrackingBody(r io.ReadCloser, maxBodySize int) *readTrackingBody {
+	// do not use sync.Pool there
+	// since http.RoundTrip may still use request body after return
+	// See this issue for details https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051
+	rtb := &readTrackingBody{}
 	if maxBodySize < 0 {
 		maxBodySize = 0
 	}
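The comment in newReadTrackingBody states why the sync.Pool was dropped: http.RoundTrip may still read the request body after it returns, so a pooled object could be reset and reused while still referenced. Independent of that fix, the buffering idea behind a retryable body can be illustrated with a tiny sketch (replayBody is a made-up type, much simpler than the real readTrackingBody):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// replayBody tees everything read from r into an in-memory buffer,
// so the request body can be re-sent if the first attempt fails.
// Each value is allocated fresh and never pooled.
type replayBody struct {
	r   io.Reader
	buf bytes.Buffer
}

func (b *replayBody) Read(p []byte) (int, error) {
	n, err := b.r.Read(p)
	b.buf.Write(p[:n])
	return n, err
}

// replay returns a fresh reader over the bytes captured so far.
func (b *replayBody) replay() io.Reader {
	return bytes.NewReader(b.buf.Bytes())
}

func main() {
	b := &replayBody{r: strings.NewReader("payload")}
	first, _ := io.ReadAll(b)           // first attempt consumes the body
	second, _ := io.ReadAll(b.replay()) // a retry re-reads the captured bytes
	fmt.Println(string(first), string(second))
}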
@@ -574,13 +614,6 @@ func (r *zeroReader) Close() error {
 	return nil
 }

-func putReadTrackingBody(rtb *readTrackingBody) {
-	rtb.reset()
-	readTrackingBodyPool.Put(rtb)
-}
-
-var readTrackingBodyPool sync.Pool
-
 // Read implements io.Reader interface.
 func (rtb *readTrackingBody) Read(p []byte) (int, error) {
 	if len(rtb.readBuf) > 0 {
@@ -644,3 +677,14 @@ func (rtb *readTrackingBody) Close() error {

 	return nil
 }
+
+func debugInfo(u *url.URL, r *http.Request) string {
+	s := &strings.Builder{}
+	fmt.Fprintf(s, " (host: %q; ", r.Host)
+	fmt.Fprintf(s, "path: %q; ", u.Path)
+	fmt.Fprintf(s, "args: %q; ", u.Query().Encode())
+	fmt.Fprint(s, "headers:")
+	_ = r.Header.WriteSubset(s, nil)
+	fmt.Fprint(s, ")")
+	return s.String()
+}
@@ -52,7 +52,7 @@ func TestRequestHandler(t *testing.T) {
 		r.Header.Set("Pass-Header", "abc")

 		w := &fakeResponseWriter{}
-		if !requestHandler(w, r) {
+		if !requestHandlerWithInternalRoutes(w, r) {
 			t.Fatalf("unexpected false is returned from requestHandler")
 		}

@@ -90,6 +90,20 @@ User-Agent: vmauth
 X-Forwarded-For: 12.34.56.78, 42.2.3.84`
 	f(cfgStr, requestURL, backendHandler, responseExpected)

+	// routing of all failed to authorize requests to unauthorized_user (issue #7543)
+	cfgStr = `
+unauthorized_user:
+  url_prefix: "{BACKEND}/foo"
+  keep_original_host: true`
+	requestURL = "http://foo:invalid-secret@some-host.com/abc/def"
+	backendHandler = func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "requested_url=http://%s%s", r.Host, r.URL)
+	}
+	responseExpected = `
+statusCode=200
+requested_url=http://some-host.com/foo/abc/def`
+	f(cfgStr, requestURL, backendHandler, responseExpected)
+
 	// keep_original_host
 	cfgStr = `
 unauthorized_user:
@@ -181,7 +195,7 @@ unauthorized_user:
 	}
 	responseExpected = `
 statusCode=401
-The provided authKey doesn't match -reloadAuthKey`
+Expected to receive non-empty authKey when -reloadAuthKey is set`
 	f(cfgStr, requestURL, backendHandler, responseExpected)
 	if err := reloadAuthKey.Set(origAuthKey); err != nil {
 		t.Fatalf("unexpected error: %s", err)
@@ -346,7 +360,27 @@ unauthorized_user:
 	}
 	responseExpected = `
 statusCode=400
-remoteAddr: "42.2.3.84:6789, X-Forwarded-For: 12.34.56.78"; requestURI: /abc?de=fg; missing route for http://some-host.com/abc?de=fg`
+remoteAddr: "42.2.3.84:6789, X-Forwarded-For: 12.34.56.78"; requestURI: /abc?de=fg; missing route for "http://some-host.com/abc?de=fg"`
 	f(cfgStr, requestURL, backendHandler, responseExpected)
+
+	// missing default_url and default url_prefix for unauthorized user with dump_request_on_errors enabled
+	cfgStr = `
+unauthorized_user:
+  dump_request_on_errors: true
+  url_map:
+  - src_paths: ["/foo/.+"]
+    url_prefix: {BACKEND}/x-foo/`
+	requestURL = "http://some-host.com/abc?de=fg"
+	backendHandler = func(_ http.ResponseWriter, _ *http.Request) {
+		panic(fmt.Errorf("backend handler shouldn't be called"))
+	}
+	responseExpected = `
+statusCode=400
+remoteAddr: "42.2.3.84:6789, X-Forwarded-For: 12.34.56.78"; requestURI: /abc?de=fg; missing route for "http://some-host.com/abc?de=fg" (host: "some-host.com"; path: "/abc"; args: "de=fg"; headers:Connection: Some-Header,Other-Header
+Pass-Header: abc
+Some-Header: foobar
+X-Forwarded-For: 12.34.56.78
+)`
+	f(cfgStr, requestURL, backendHandler, responseExpected)

 	// missing default_url and default url_prefix for unauthorized user when there are configs for authorized users
@@ -511,8 +545,7 @@ func TestReadTrackingBody_RetrySuccess(t *testing.T) {
 	f := func(s string, maxBodySize int) {
 		t.Helper()

-		rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
-		defer putReadTrackingBody(rtb)
+		rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)

 		if !rtb.canRetry() {
 			t.Fatalf("canRetry() must return true before reading anything")
@@ -547,8 +580,7 @@ func TestReadTrackingBody_RetrySuccessPartialRead(t *testing.T) {
 		t.Helper()

 		// Check the case with partial read
-		rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
-		defer putReadTrackingBody(rtb)
+		rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)

 		for i := 0; i < len(s); i++ {
 			buf := make([]byte, i)
@@ -597,8 +629,7 @@ func TestReadTrackingBody_RetryFailureTooBigBody(t *testing.T) {
 	f := func(s string, maxBodySize int) {
 		t.Helper()

-		rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
-		defer putReadTrackingBody(rtb)
+		rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)

 		if !rtb.canRetry() {
 			t.Fatalf("canRetry() must return true before reading anything")
@@ -647,8 +678,7 @@ func TestReadTrackingBody_RetryFailureZeroOrNegativeMaxBodySize(t *testing.T) {
 	f := func(s string, maxBodySize int) {
 		t.Helper()

-		rtb := getReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
-		defer putReadTrackingBody(rtb)
+		rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)

 		if !rtb.canRetry() {
 			t.Fatalf("canRetry() must return true before reading anything")
@@ -51,9 +51,9 @@ func dropPrefixParts(path string, parts int) string {
 	return path
 }

-func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL, h http.Header) (*URLPrefix, HeadersConf) {
+func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL, host string, h http.Header) (*URLPrefix, HeadersConf) {
 	for _, e := range ui.URLMaps {
-		if !matchAnyRegex(e.SrcHosts, u.Host) {
+		if !matchAnyRegex(e.SrcHosts, host) {
 			continue
 		}
 		if !matchAnyRegex(e.SrcPaths, u.Path) {
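Passing the host as a separate argument matters because for incoming server requests r.URL.Host is normally empty; the requested host is carried in r.Host, which is the value src_hosts rules should be matched against. A short standard-library demonstration of that difference (the host name used here is arbitrary):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	r := httptest.NewRequest(http.MethodGet, "/api/v1/query", nil)
	r.Host = "vmselect.example.com"

	// For incoming requests parsed by net/http, the URL usually contains
	// only the path and query, while the host is carried separately.
	fmt.Printf("r.URL.Host=%q\n", r.URL.Host) // ""
	fmt.Printf("r.Host=%q\n", r.Host)         // "vmselect.example.com"
}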
@@ -95,7 +95,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 			t.Fatalf("cannot parse %q: %s", requestURI, err)
 		}
 		u = normalizeURL(u)
-		up, hc := ui.getURLPrefixAndHeaders(u, nil)
+		up, hc := ui.getURLPrefixAndHeaders(u, u.Host, nil)
 		if up == nil {
 			t.Fatalf("cannot match available backend: %s", err)
 		}
@@ -187,6 +187,10 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 				RetryStatusCodes:       []int{},
 				DropSrcPathPrefixParts: intp(0),
 			},
+			{
+				SrcPaths:  getRegexs([]string{"/metrics"}),
+				URLPrefix: mustParseURL("http://metrics-server"),
+			},
 		},
 		URLPrefix: mustParseURL("http://default-server"),
 		HeadersConf: HeadersConf{
@@ -206,6 +210,35 @@ func TestCreateTargetURLSuccess(t *testing.T) {
 		"bb: aaa", "x: y", []int{502}, "least_loaded", 2)
 	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", []int{}, "least_loaded", 0)
 	f(ui, "https://foo-host/foo/bar/api/v1/query_range", "http://default-server/api/v1/query_range", "bb: aaa", "x: y", []int{502}, "least_loaded", 2)
+	f(ui, "https://foo-host/metrics", "http://metrics-server", "", "", []int{502}, "least_loaded", 2)
+
+	// Complex routing with `url_map` without global url_prefix
+	ui = &UserInfo{
+		URLMaps: []URLMap{
+			{
+				SrcPaths:               getRegexs([]string{"/api/v1/write"}),
+				URLPrefix:              mustParseURL("http://vminsert/0/prometheus"),
+				RetryStatusCodes:       []int{},
+				DropSrcPathPrefixParts: intp(0),
+			},
+			{
+				SrcPaths:  getRegexs([]string{"/metrics/a/b"}),
+				URLPrefix: mustParseURL("http://metrics-server"),
+			},
+		},
+		HeadersConf: HeadersConf{
+			RequestHeaders: []*Header{
+				mustNewHeader("'bb: aaa'"),
+			},
+			ResponseHeaders: []*Header{
+				mustNewHeader("'x: y'"),
+			},
+		},
+		RetryStatusCodes:       []int{502},
+		DropSrcPathPrefixParts: intp(2),
+	}
+	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", []int{}, "least_loaded", 0)
+	f(ui, "https://foo-host/metrics/a/b", "http://metrics-server/b", "", "", []int{502}, "least_loaded", 2)

 	// Complex routing regexp paths in `url_map`
 	ui = &UserInfo{
@@ -273,7 +306,7 @@ func TestUserInfoGetBackendURL_SRV(t *testing.T) {
 		t.Fatalf("cannot parse %q: %s", requestURI, err)
 	}
 	u = normalizeURL(u)
-	up, _ := ui.getURLPrefixAndHeaders(u, nil)
+	up, _ := ui.getURLPrefixAndHeaders(u, u.Host, nil)
 	if up == nil {
 		t.Fatalf("cannot match available backend: %s", err)
 	}
@@ -351,7 +384,7 @@ func TestUserInfoGetBackendURL_SRVZeroBackends(t *testing.T) {
 		t.Fatalf("cannot parse %q: %s", requestURI, err)
 	}
 	u = normalizeURL(u)
-	up, _ := ui.getURLPrefixAndHeaders(u, nil)
+	up, _ := ui.getURLPrefixAndHeaders(u, u.Host, nil)
 	if up == nil {
 		t.Fatalf("cannot match available backend: %s", err)
 	}
@@ -399,7 +432,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
 		t.Fatalf("cannot parse %q: %s", requestURI, err)
 	}
 	u = normalizeURL(u)
-	up, hc := ui.getURLPrefixAndHeaders(u, nil)
+	up, hc := ui.getURLPrefixAndHeaders(u, u.Host, nil)
 	if up != nil {
 		t.Fatalf("unexpected non-empty up=%#v", up)
 	}
Some files were not shown because too many files have changed in this diff.