Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2026-05-17 08:36:55 +03:00

Compare commits: logsql-ski...adds-idle- (233 commits)
Commits in this comparison (abbreviated SHA1s; author and date columns are not shown):

```
2360775917 0a71bad722 252f8f096d 9289c7512d b984f4672e 7e5a206057 14ba7b237d 1de187bcb7
bf33e7eda7 89bd0dca0a a6cc7098fe 0aafca29be 31e23c6f6f 99138e15c0 1e203f35f7 7ac529c235
0b629ce5a5 1882957585 dc55146752 e2590f0485 7a5000656e 1be1e9a7a4 49f13b12d9 69d244e6fb
0915d6d841 fa137bd6f1 4b458370c1 c96a98731a 6272ae21eb 3fdd4dad82 ab855e10e7 7667abc6cc
e2f62c5179 5d72690eb2 ce1e0610b0 a5d1013042 7da541360e 75bd1831bb 22107421eb 9dd9b4442f
ac836bcf6c c40f355496 d7b5062917 974b7783ee 23619a3adf c746ba154d cee4bfebd7 f14497f1cd
a6a599cbdc 7ce052b32d 7dc18bf67a bc4a0b8f37 7a6783e3df 8f6af6df9f b031774cf5 b36a35d696
6ad1eb9a3d ad505a7a9a 3661373cc2 ce3cfd720e fe5846211f be291c36f7 4ad577cc6f f153f54d11
b2765c45d0 4f0525852f 6fdba8599d 0aa19a2837 b617dc9c0b b0c1f3d819 6a6e34ab8e c90e6de13b
da3af090c6 cb35e62e04 cc2647d212 707f3a69db 4c80b17027 ce25d68b45 9c3d44c8c9 17283fab6c
680b8c25c8 37c22ee053 29bd120126 de98688489 bd75c0a898 cb19335a9f 8466ab109c 89c4dc1d8d
9dbd0f9085 e66465cb03 51de9f30fc 75398cd0f8 bac28f2e4d 590160ddbb f20d452196 92de6ea340
95608885ea de7fc743ca f4051dd1e0 87fd400dfc c87ce86d96 80f3644ee3 a8d0c1a62d 56531abd56
8a03e987cb a9283e06a3 02851d7800 17e3d019d2 c6c5a5a186 55c7dafb35 3d4988ecf6 134dcaef33
5a3abfa041 2561a132ee 879771808b c0050beadc 046a4a5ecf e3c226cf92 8bca4d2de4 e2590b339d
329c3cbdf0 e908effd22 d386a68b59 9e18724036 5917ac003e f0c4d372bd 9256df17fa 8606b48ce5
564463259a 6b493582da 5e8c087d42 3a669b4b87 5334f0c2ce 7fd9325e62 a62551f773 dd0d2c77c8
57b7d16259 6193fa3dcf 6aaf1768f4 07496d7d92 2d9bbe1934 679844feaf 029060af60 a14acd4fcc
592a9fe9f2 aeba3f39db 035de57e5e 22cfab1ea2 4251292708 301bd387d4 7958f38864 bde4693a90
5f487c7090 6dbb4c1671 9bedbcfa2f bae3874e6a c0e4ccb7b5 81b2fb88cc 3f8f7f2c41 0e75581dd9
bbdd7743a8 c52f73b3e1 d18968f8cb a402847eb6 4770294732 8942f290eb 1decbcf6eb c22da2f917
77c597738c 7531e9084a 3e728c41f6 cceb4c62a3 b11d4ccee3 34e253f9d6 249a467ea4 2529f3e8eb
ec17d4390b 8f535f9e76 54605f5dd1 a84491324d 316b19a5d1 565dcefc29 dadeb6620a 6b1cc9b946
d3635aae7f 453988e244 89374935f2 45f7e58d6c c13729f07c 21b2b1bf19 844a1eb9a3 b2ce0657b2
bfb69d346e 59d495d469 2e3580905f e6027e043a a196370837 e9642e99f2 8ee89c021f f4b1cbfef0
4dfc61821a d26d536c90 2b19a3472c e128ef7ace 828e78ceb4 4d2b9fe6b2 8eeb045d3f 6c14d08cb3
a4b120f9be f018cf6ca8 6e6bae3e8d 19b6fd490c 1e24b334f1 b4fac26360 4927e64700 c81a633b02
66630c7960 50ac22df78 b421f1ab80 184e4ef4d7 bd454f5063 8412219781 37b04bbb81 dc326f70b4
b426d10847 07ed958b82 2d5e5badcf 21a9b1f920 db85744e04 161404813c ab5fd386bc 8acadb5080
b155b20de4
```
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 4 changes):

```diff
@@ -8,7 +8,7 @@ body:
       Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
       to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
       and verify whether the bug is reproducible there.
-      It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html) first.
+      It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/troubleshooting/) first.
   - type: textarea
     id: describe-the-bug
     attributes:
@@ -65,7 +65,7 @@ body:

       See how to setup monitoring here:
       * [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)
-      * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring)
+      * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#monitoring)
     validations:
       required: false
   - type: textarea
```
.github/ISSUE_TEMPLATE/question.yml (vendored, 8 changes):

```diff
@@ -24,9 +24,9 @@ body:
       label: Troubleshooting docs
       description: I am familiar with the following troubleshooting docs
       options:
-        - label: General - https://docs.victoriametrics.com/Troubleshooting.html
+        - label: General - https://docs.victoriametrics.com/troubleshooting/
          required: false
-        - label: vmagent - https://docs.victoriametrics.com/vmagent.html#troubleshooting
+        - label: vmagent - https://docs.victoriametrics.com/vmagent/#troubleshooting
          required: false
-        - label: vmalert - https://docs.victoriametrics.com/vmalert.html#troubleshooting
+        - label: vmalert - https://docs.victoriametrics.com/vmalert/#troubleshooting
          required: false
```
Deleted file (the old pull-request checklist):

```diff
@@ -1,35 +0,0 @@
-### Describe Your Changes
-
-Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
-
-### Checklist
-
-The following checks are mandatory:
-
-- [ ] I have read the [Contributing Guidelines](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/CONTRIBUTING.md)
-- [ ] All commits are signed and include a `Signed-off-by` line. Use `git commit -s` to include `Signed-off-by` in your commits. See this [doc](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work) about how to sign your commits.
-- [ ] Tests are passing locally. Use `make test` to run all tests locally.
-- [ ] Linting is passing locally. Use `make check-all` to run all linters locally.
-
-Further checks are optional for External Contributions:
-
-- [ ] Include a link to the GitHub issue in the commit message, if the issue exists.
-- [ ] Mention the change in the [Changelog](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md). Explain what has changed and why. If there is a related issue or documentation change - link them as well.
-
-Tips for writing a good changelog message:
-
-* Write a human-readable changelog message that describes the problem and solution.
-* Include a link to the issue or pull request in your changelog message.
-* Use specific language identifying the fix, such as an error message, metric name, or flag name.
-* Provide a link to the relevant documentation for any new features you add or modify.
-
-- [ ] After your pull request is merged, please add a message to the issue with instructions for how to test the fix or try the feature you added. Here is an [example](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4048#issuecomment-1546453726)
-- [ ] Do not close the original issue before the change is released. Please note, in some cases GitHub can automatically close the issue once the PR is merged. Re-open the issue in such a case.
-- [ ] If the change somehow affects public interfaces (a new flag was added or updated, or some behavior has changed) - add the corresponding change to the documentation.
-
-
-Examples of good changelog messages:
-
-1. FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [VictoriaMetrics remote write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol) when [sending / receiving data to / from Kafka](https://docs.victoriametrics.com/vmagent.html#kafka-integration). This protocol allows saving egress network bandwidth costs when sending data from `vmagent` to `Kafka` located in another datacenter or availability zone. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1225).
-
-2. BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html): suppress `series after dedup` error message in logs when `-remoteWrite.streamAggr.dedupInterval` command-line flag is set at [vmagent](https://docs.victoriametrics.com/vmagent.html) or when `-streamAggr.dedupInterval` command-line flag is set at [single-node VictoriaMetrics](https://docs.victoriametrics.com/).
```
.github/pull_request_template.md (vendored, new file, 9 lines):

```diff
@@ -0,0 +1,9 @@
+### Describe Your Changes
+
+Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
+
+### Checklist
+
+The following checks are **mandatory**:
+
+- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
```
The contributing guidelines are trimmed to a pointer at the hosted docs:

```diff
@@ -1,21 +1 @@
-If you like VictoriaMetrics and want to contribute, then we need the following:
-
-- Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
-- Spreading a word about VictoriaMetrics: conference talks, articles, comments, experience sharing with colleagues.
-- Updating documentation.
-
-We are open to third-party pull requests provided they follow the [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):
-
-- Prefer simple code and architecture.
-- Avoid complex abstractions.
-- Avoid magic code and fancy algorithms.
-- Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
-- Minimize the number of moving parts in the distributed system.
-- Avoid automated decisions, which may hurt cluster availability, consistency or performance.
-
-Adhering to the `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
-
-Before sending a pull request please check the following:
-- [ ] All commits are signed and include a `Signed-off-by` line. Use `git commit -s` to include `Signed-off-by` in your commits. See this [doc](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work) about how to sign your commits.
-- [ ] Tests are passing locally. Use `make test` to run all tests locally.
-- [ ] Linting is passing locally. Use `make check-all` to run all linters locally.
+The document has been moved [here](https://docs.victoriametrics.com/contributing/).
```
Makefile (2 changes):

```diff
@@ -466,7 +466,7 @@ benchmark-pure:
 vendor-update:
 	go get -u -d ./lib/...
 	go get -u -d ./app/...
-	go mod tidy -compat=1.21
+	go mod tidy -compat=1.22
 	go mod vendor

 app-local:
```
```diff
@@ -6,7 +6,7 @@ The following versions of VictoriaMetrics receive regular security fixes:

 | Version | Supported |
 |---------|--------------------|
-| [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
+| [latest release](https://docs.victoriametrics.com/changelog/) | :white_check_mark: |
 | v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
 | v1.93.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
 | other releases | :x: |
```
```diff
@@ -101,3 +101,10 @@ victoria-logs-windows-amd64:

 victoria-logs-pure:
 	APP_NAME=victoria-logs $(MAKE) app-local-pure
+
+run-victoria-logs:
+	mkdir -p victoria-logs-data
+	DOCKER_OPTS='-v $(shell pwd)/victoria-logs-data:/victoria-logs-data' \
+	APP_NAME=victoria-logs \
+	ARGS='' \
+	$(MAKE) run-via-docker
```
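Assuming Docker is available, the new target can be exercised from the repository root; it mounts `./victoria-logs-data` into the container as the data directory:

```
make run-victoria-logs
```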
```diff
@@ -47,7 +47,7 @@ func main() {
 	vlinsert.Init()

 	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
-	logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/VictoriaLogs/", time.Since(startTime).Seconds())
+	logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/victorialogs/", time.Since(startTime).Seconds())

 	pushmetrics.Init()
 	sig := procutil.WaitForSigterm()
@@ -77,7 +77,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 	}
 	w.Header().Add("Content-Type", "text/html; charset=utf-8")
 	fmt.Fprintf(w, "<h2>Single-node VictoriaLogs</h2></br>")
-	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/VictoriaLogs/'>https://docs.victoriametrics.com/VictoriaLogs/</a></br>")
+	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/victorialogs/'>https://docs.victoriametrics.com/victorialogs/</a></br>")
 	fmt.Fprintf(w, "Useful endpoints:</br>")
 	httpserver.WriteAPIHelp(w, [][2]string{
 		{"select/vmui", "Web UI for VictoriaLogs"},
@@ -99,7 +99,7 @@ func usage() {
 	const s = `
 victoria-logs is a log management and analytics service.

-See the docs at https://docs.victoriametrics.com/VictoriaLogs/
+See the docs at https://docs.victoriametrics.com/victorialogs/
 `
 	flagutil.Usage(s)
 }
```
```diff
@@ -88,6 +88,9 @@ victoria-metrics-linux-ppc64le:
 victoria-metrics-linux-s390x:
 	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

+victoria-metrics-linux-loong64:
+	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
+
 victoria-metrics-linux-386:
 	APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
```
```diff
@@ -20,7 +20,6 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
@@ -210,7 +209,7 @@ func readBulkLine(sc *bufio.Scanner, timeField, msgField string,
 		return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
 	}
 	line = sc.Bytes()
-	p := logjson.GetParser()
+	p := logstorage.GetJSONParser()
 	if err := p.ParseLogMessage(line); err != nil {
 		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
 	}
@@ -224,7 +223,7 @@ func readBulkLine(sc *bufio.Scanner, timeField, msgField string,
 	}
 	p.RenameField(msgField, "_msg")
 	processLogMessage(ts, p.Fields)
-	logjson.PutParser(p)
+	logstorage.PutJSONParser(p)

 	return true, nil
 }
```
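Both ingestion paths touched by this change (the Elasticsearch bulk reader above and the jsonline reader below) now follow the same acquire/parse/release pattern against the parser pool exported by `logstorage`. A minimal sketch of that lifecycle, using only the API names visible in the diff:

```go
package ingest

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// parseOneLine sketches the pooled-parser lifecycle shared by the handlers.
func parseOneLine(line []byte, msgField string) error {
	p := logstorage.GetJSONParser()
	defer logstorage.PutJSONParser(p) // return the parser to the pool for reuse

	if err := p.ParseLogMessage(line); err != nil {
		return fmt.Errorf("cannot parse json-encoded log entry: %w", err)
	}
	// Normalize the user-configured message field to the canonical "_msg" name;
	// p.Fields then holds the parsed field list for further processing.
	p.RenameField(msgField, "_msg")
	return nil
}
```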
```diff
@@ -14,7 +14,7 @@ import (

 // CommonParams contains common HTTP parameters used by log ingestion APIs.
 //
-// See https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/#http-parameters
+// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
 type CommonParams struct {
 	TenantID  logstorage.TenantID
 	TimeField string
```
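For context, these parameters are passed as query args on the ingestion endpoints; the `vlogsgenerator` added later in this diff sets `_stream_fields` the same way. A hedged example against a local instance, with arg names per the linked data-ingestion docs:

```
curl -X POST 'http://localhost:9428/insert/jsonline?_time_field=_time&_msg_field=_msg&_stream_fields=host' \
  -d '{"_time":"2024-05-08T14:34:00Z","_msg":"hello","host":"host_1"}'
```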
```diff
@@ -12,7 +12,6 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
@@ -105,7 +104,7 @@ func readLine(sc *bufio.Scanner, timeField, msgField string, processLogMessage f
 		line = sc.Bytes()
 	}

-	p := logjson.GetParser()
+	p := logstorage.GetJSONParser()
 	if err := p.ParseLogMessage(line); err != nil {
 		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
 	}
@@ -118,7 +117,7 @@ func readLine(sc *bufio.Scanner, timeField, msgField string, processLogMessage f
 	}
 	p.RenameField(msgField, "_msg")
 	processLogMessage(ts, p.Fields)
-	logjson.PutParser(p)
+	logstorage.PutJSONParser(p)

 	return true, nil
 }
```
app/vlogsgenerator/Makefile (new file, 7 lines):

```makefile
# All these commands must run from repository root.

vlogsgenerator:
	APP_NAME=vlogsgenerator $(MAKE) app-local

vlogsgenerator-race:
	APP_NAME=vlogsgenerator RACE=-race $(MAKE) app-local
```
app/vlogsgenerator/README.md (new file, 158 lines):

# vlogsgenerator

Logs generator for [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/).

## How to build vlogsgenerator?

Run `make vlogsgenerator` from the repository root. This builds the `bin/vlogsgenerator` binary.

## How to run vlogsgenerator?

`vlogsgenerator` generates logs in [JSON line format](https://jsonlines.org/) suitable for ingestion
via the [`/insert/jsonline` endpoint at VictoriaLogs](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api).

By default it writes the generated logs to `stdout`. For example, the following command writes generated logs to `stdout`:

```
bin/vlogsgenerator
```

It is possible to redirect the generated logs to a file. For example, the following command writes the generated logs to the `logs.json` file:

```
bin/vlogsgenerator > logs.json
```

The generated logs in `logs.json` can be inspected with the following command:

```
head logs.json | jq .
```

Below is an example output:

```json
{
  "_time": "2024-05-08T14:34:00.854Z",
  "_msg": "message for the stream 8 and worker 0; ip=185.69.136.129; uuid=b4fe8f1a-c93c-dea3-ba11-5b9f0509291e; u64=8996587920687045253",
  "host": "host_8",
  "worker_id": "0",
  "run_id": "f9b3deee-e6b6-7f56-5deb-1586e4e81725",
  "const_0": "some value 0 8",
  "const_1": "some value 1 8",
  "const_2": "some value 2 8",
  "var_0": "some value 0 12752539384823438260",
  "dict_0": "warn",
  "dict_1": "info",
  "u8_0": "6",
  "u16_0": "35202",
  "u32_0": "1964973739",
  "u64_0": "4810489083243239145",
  "float_0": "1.868",
  "ip_0": "250.34.75.125",
  "timestamp_0": "1799-03-16T01:34:18.311Z",
  "json_0": "{\"foo\":\"bar_3\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":32}"
}
{
  "_time": "2024-05-08T14:34:00.854Z",
  "_msg": "message for the stream 9 and worker 0; ip=164.244.254.194; uuid=7e8373b1-ce0d-1ce7-8e96-4bcab8955598; u64=13949903463741076522",
  "host": "host_9",
  "worker_id": "0",
  "run_id": "f9b3deee-e6b6-7f56-5deb-1586e4e81725",
  "const_0": "some value 0 9",
  "const_1": "some value 1 9",
  "const_2": "some value 2 9",
  "var_0": "some value 0 5371555382075206134",
  "dict_0": "INFO",
  "dict_1": "FATAL",
  "u8_0": "219",
  "u16_0": "31459",
  "u32_0": "3918836777",
  "u64_0": "6593354256620219850",
  "float_0": "1.085",
  "ip_0": "253.151.88.158",
  "timestamp_0": "2042-10-05T16:42:57.082Z",
  "json_0": "{\"foo\":\"bar_5\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":27}"
}
```

The `run_id` field uniquely identifies every `vlogsgenerator` invocation.
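Because `run_id` is unique per invocation, it is handy for verifying how many entries from a given run were actually ingested. A hedged sketch (exact LogsQL spelling may differ; the run id is taken from the sample output above):

```
curl http://localhost:9428/select/logsql/query \
  -d 'query=run_id:"f9b3deee-e6b6-7f56-5deb-1586e4e81725" | stats count() rows'
```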
### How to write logs to VictoriaLogs?

The generated logs can be written directly to VictoriaLogs by passing the address of the [`/insert/jsonline` endpoint](https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api)
to the `-addr` command-line flag. For example, the following command writes the generated logs to VictoriaLogs running at `localhost`:

```
bin/vlogsgenerator -addr=http://localhost:9428/insert/jsonline
```

### Configuration

`vlogsgenerator` accepts various command-line flags, which can be used for configuring the number and the shape of the generated logs.
These flags can be inspected by running `vlogsgenerator -help`. Below are the most interesting flags:

* `-start` - the starting timestamp for generating logs. Logs are evenly generated on the [`-start` ... `-end`] interval.
* `-end` - the ending timestamp for generating logs. Logs are evenly generated on the [`-start` ... `-end`] interval.
* `-activeStreams` - the number of active [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) to generate.
* `-logsPerStream` - the number of log entries to generate per log stream. Log entries are evenly distributed on the [`-start` ... `-end`] interval.

The total number of generated logs can be calculated as `-activeStreams` * `-logsPerStream`.

For example, the following command generates `1_000_000` log entries on the time range `[2024-01-01 - 2024-02-01]` across `100`
[log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields), where every log stream contains `10_000` log entries,
and writes them to `http://localhost:9428/insert/jsonline`:

```
bin/vlogsgenerator \
  -start=2024-01-01 -end=2024-02-01 \
  -activeStreams=100 \
  -logsPerStream=10_000 \
  -addr=http://localhost:9428/insert/jsonline
```

### Churn rate

It is possible to generate churn rate for active [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields)
by specifying a `-totalStreams` command-line flag bigger than `-activeStreams`. For example, the following command generates
logs for `1000` total streams, while the number of active streams equals `100`. This means that at any given time there are logs for `100` streams,
but these streams change over the given [`-start` ... `-end`] time range, so the total number of streams on the given time range becomes `1000`:

```
bin/vlogsgenerator \
  -start=2024-01-01 -end=2024-02-01 \
  -activeStreams=100 \
  -totalStreams=1_000 \
  -logsPerStream=10_000 \
  -addr=http://localhost:9428/insert/jsonline
```

In this case the total number of generated logs equals `-totalStreams` * `-logsPerStream` = `10_000_000`.

### Benchmark tuning

By default `vlogsgenerator` generates and writes logs with a single worker. This may limit the maximum data ingestion rate during benchmarks.
The number of workers can be changed via the `-workers` command-line flag. For example, the following command generates and writes logs with `16` workers:

```
bin/vlogsgenerator \
  -start=2024-01-01 -end=2024-02-01 \
  -activeStreams=100 \
  -logsPerStream=10_000 \
  -addr=http://localhost:9428/insert/jsonline \
  -workers=16
```

### Output statistics

Every 10 seconds `vlogsgenerator` writes statistics about the generated logs to `stderr`. The frequency of these statistics can be adjusted via the `-statInterval` command-line flag.
For example, the following command writes statistics every 2 seconds:

```
bin/vlogsgenerator \
  -start=2024-01-01 -end=2024-02-01 \
  -activeStreams=100 \
  -logsPerStream=10_000 \
  -addr=http://localhost:9428/insert/jsonline \
  -statInterval=2s
```
app/vlogsgenerator/main.go (new file, 344 lines):

```go
package main

import (
	"bufio"
	"flag"
	"fmt"
	"io"
	"math"
	"math/rand"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

var (
	addr    = flag.String("addr", "stdout", "HTTP address to push the generated logs to; if it is set to stdout, then logs are generated to stdout")
	workers = flag.Int("workers", 1, "The number of workers to use to push logs to -addr")

	start         = newTimeFlag("start", "-1d", "Generated logs start from this time; see https://docs.victoriametrics.com/#timestamp-formats")
	end           = newTimeFlag("end", "0s", "Generated logs end at this time; see https://docs.victoriametrics.com/#timestamp-formats")
	activeStreams = flag.Int("activeStreams", 100, "The number of active log streams to generate; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields")
	totalStreams  = flag.Int("totalStreams", 0, "The number of total log streams; if -totalStreams > -activeStreams, then some active streams are substituted with new streams "+
		"during data generation")
	logsPerStream     = flag.Int64("logsPerStream", 1_000, "The number of log entries to generate per each log stream. Log entries are evenly distributed between -start and -end")
	constFieldsPerLog = flag.Int("constFieldsPerLog", 3, "The number of fields with constant values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	varFieldsPerLog = flag.Int("varFieldsPerLog", 1, "The number of fields with variable values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	dictFieldsPerLog = flag.Int("dictFieldsPerLog", 2, "The number of fields with up to 8 different values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	u8FieldsPerLog = flag.Int("u8FieldsPerLog", 1, "The number of fields with uint8 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	u16FieldsPerLog = flag.Int("u16FieldsPerLog", 1, "The number of fields with uint16 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	u32FieldsPerLog = flag.Int("u32FieldsPerLog", 1, "The number of fields with uint32 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	u64FieldsPerLog = flag.Int("u64FieldsPerLog", 1, "The number of fields with uint64 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	floatFieldsPerLog = flag.Int("floatFieldsPerLog", 1, "The number of fields with float64 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	ipFieldsPerLog = flag.Int("ipFieldsPerLog", 1, "The number of fields with IPv4 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	timestampFieldsPerLog = flag.Int("timestampFieldsPerLog", 1, "The number of fields with ISO8601 timestamps per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	jsonFieldsPerLog = flag.Int("jsonFieldsPerLog", 1, "The number of JSON fields to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")

	statInterval = flag.Duration("statInterval", 10*time.Second, "The interval between publishing the stats")
)

func main() {
	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)
	envflag.Parse()
	buildinfo.Init()
	logger.Init()

	var remoteWriteURL *url.URL
	if *addr != "stdout" {
		urlParsed, err := url.Parse(*addr)
		if err != nil {
			logger.Fatalf("cannot parse -addr=%q: %s", *addr, err)
		}
		qs, err := url.ParseQuery(urlParsed.RawQuery)
		if err != nil {
			logger.Fatalf("cannot parse query string in -addr=%q: %s", *addr, err)
		}
		qs.Set("_stream_fields", "host,worker_id")
		urlParsed.RawQuery = qs.Encode()
		remoteWriteURL = urlParsed
	}

	if start.nsec >= end.nsec {
		logger.Fatalf("-start=%s must be smaller than -end=%s", start, end)
	}
	if *activeStreams <= 0 {
		logger.Fatalf("-activeStreams must be bigger than 0; got %d", *activeStreams)
	}
	if *logsPerStream <= 0 {
		logger.Fatalf("-logsPerStream must be bigger than 0; got %d", *logsPerStream)
	}
	if *totalStreams < *activeStreams {
		*totalStreams = *activeStreams
	}

	cfg := &workerConfig{
		url:           remoteWriteURL,
		activeStreams: *activeStreams,
		totalStreams:  *totalStreams,
	}

	// divide total and active streams among workers
	if *workers <= 0 {
		logger.Fatalf("-workers must be bigger than 0; got %d", *workers)
	}
	if *workers > *activeStreams {
		logger.Fatalf("-workers=%d cannot exceed -activeStreams=%d", *workers, *activeStreams)
	}
	cfg.activeStreams /= *workers
	cfg.totalStreams /= *workers

	logger.Infof("start -workers=%d workers for ingesting -logsPerStream=%d log entries per each -totalStreams=%d (-activeStreams=%d) on a time range -start=%s, -end=%s to -addr=%s",
		*workers, *logsPerStream, *totalStreams, *activeStreams, toRFC3339(start.nsec), toRFC3339(end.nsec), *addr)

	startTime := time.Now()
	var wg sync.WaitGroup
	for i := 0; i < *workers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()
			generateAndPushLogs(cfg, workerID)
		}(i)
	}

	go func() {
		prevEntries := uint64(0)
		prevBytes := uint64(0)
		ticker := time.NewTicker(*statInterval)
		for range ticker.C {
			currEntries := logEntriesCount.Load()
			deltaEntries := currEntries - prevEntries
			rateEntries := float64(deltaEntries) / statInterval.Seconds()

			currBytes := bytesGenerated.Load()
			deltaBytes := currBytes - prevBytes
			rateBytes := float64(deltaBytes) / statInterval.Seconds()
			logger.Infof("generated %dK log entries (%dK total) at %.0fK entries/sec, %dMB (%dMB total) at %.0fMB/sec",
				deltaEntries/1e3, currEntries/1e3, rateEntries/1e3, deltaBytes/1e6, currBytes/1e6, rateBytes/1e6)

			prevEntries = currEntries
			prevBytes = currBytes
		}
	}()

	wg.Wait()

	dSecs := time.Since(startTime).Seconds()
	currEntries := logEntriesCount.Load()
	currBytes := bytesGenerated.Load()
	rateEntries := float64(currEntries) / dSecs
	rateBytes := float64(currBytes) / dSecs
	logger.Infof("ingested %dK log entries (%dMB) in %.3f seconds; avg ingestion rate: %.0fK entries/sec, %.0fMB/sec", currEntries/1e3, currBytes/1e6, dSecs, rateEntries/1e3, rateBytes/1e6)
}

var logEntriesCount atomic.Uint64

var bytesGenerated atomic.Uint64

type workerConfig struct {
	url           *url.URL
	activeStreams int
	totalStreams  int
}

// statWriter counts the bytes passing through it before forwarding them to w.
type statWriter struct {
	w io.Writer
}

func (sw *statWriter) Write(p []byte) (int, error) {
	bytesGenerated.Add(uint64(len(p)))
	return sw.w.Write(p)
}

func generateAndPushLogs(cfg *workerConfig, workerID int) {
	pr, pw := io.Pipe()
	sw := &statWriter{
		w: pw,
	}
	bw := bufio.NewWriter(sw)
	doneCh := make(chan struct{})
	go func() {
		generateLogs(bw, workerID, cfg.activeStreams, cfg.totalStreams)
		_ = bw.Flush()
		_ = pw.Close()
		close(doneCh)
	}()

	if cfg.url == nil {
		_, err := io.Copy(os.Stdout, pr)
		if err != nil {
			logger.Fatalf("unexpected error when writing logs to stdout: %s", err)
		}
		return
	}

	req, err := http.NewRequest("POST", cfg.url.String(), pr)
	if err != nil {
		logger.Fatalf("cannot create request to %q: %s", cfg.url, err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		logger.Fatalf("cannot perform request to %q: %s", cfg.url, err)
	}
	if resp.StatusCode/100 != 2 {
		logger.Fatalf("unexpected status code received from %q: %d; want 2xx", cfg.url, resp.StatusCode)
	}

	// Wait until the generateLogs goroutine finishes.
	<-doneCh
}
```
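`generateAndPushLogs` never materializes the whole payload: the producer goroutine writes JSON lines into an `io.Pipe` while `http.DefaultClient.Do` streams the read end as the request body, so memory stays flat regardless of how many entries are generated. A stripped-down, standalone sketch of the same pattern (endpoint and payload are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		// Producer side: write the request body incrementally.
		for i := 0; i < 3; i++ {
			fmt.Fprintf(pw, `{"n":%d}`+"\n", i)
		}
		pw.Close() // signals EOF to the reading HTTP client
	}()
	// Consumer side: the body is streamed from the pipe as it is produced.
	resp, err := http.Post("http://localhost:9428/insert/jsonline", "application/stream+json", pr)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```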
The per-worker scheduling logic that spreads log entries and stream churn across the time range:

```go
func generateLogs(bw *bufio.Writer, workerID, activeStreams, totalStreams int) {
	streamLifetime := int64(float64(end.nsec-start.nsec) * (float64(activeStreams) / float64(totalStreams)))
	streamStep := int64(float64(end.nsec-start.nsec) / float64(totalStreams-activeStreams+1))
	step := streamLifetime / (*logsPerStream - 1)

	currNsec := start.nsec
	for currNsec < end.nsec {
		firstStreamID := int((currNsec - start.nsec) / streamStep)
		generateLogsAtTimestamp(bw, workerID, currNsec, firstStreamID, activeStreams)
		currNsec += step
	}
}
```
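To make the scheduling arithmetic concrete, plug in the churn-rate example from the README (`-activeStreams=100 -totalStreams=1_000 -logsPerStream=10_000` over the 31-day January range). Each stream then lives for 100/1000 = 10% of the range, a new stream is phased in roughly every 50 minutes, and each stream emits one entry about every 27 seconds. A standalone check of those numbers:

```go
package main

import "fmt"

func main() {
	const day = 24 * 3600.0
	rangeSecs := 31 * day // -start=2024-01-01 -end=2024-02-01
	active, total, logsPerStream := 100.0, 1000.0, 10_000.0

	streamLifetime := rangeSecs * active / total   // how long one stream keeps emitting
	streamStep := rangeSecs / (total - active + 1) // how often a new stream replaces an old one
	step := streamLifetime / (logsPerStream - 1)   // interval between entries of one stream

	fmt.Printf("lifetime=%.1fd churn-in=%.0fs per-entry=%.1fs\n",
		streamLifetime/day, streamStep, step)
	// Output: lifetime=3.1d churn-in=2973s per-entry=26.8s
}
```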
The remainder of the generator: per-timestamp log rendering, the custom time flag, and formatting helpers:

```go
var runID = toUUID(rand.Uint64(), rand.Uint64())

func generateLogsAtTimestamp(bw *bufio.Writer, workerID int, ts int64, firstStreamID, activeStreams int) {
	streamID := firstStreamID
	timeStr := toRFC3339(ts)
	for i := 0; i < activeStreams; i++ {
		ip := toIPv4(rand.Uint32())
		uuid := toUUID(rand.Uint64(), rand.Uint64())
		fmt.Fprintf(bw, `{"_time":%q,"_msg":"message for the stream %d and worker %d; ip=%s; uuid=%s; u64=%d","host":"host_%d","worker_id":"%d"`,
			timeStr, streamID, workerID, ip, uuid, rand.Uint64(), streamID, workerID)
		fmt.Fprintf(bw, `,"run_id":"%s"`, runID)
		for j := 0; j < *constFieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"const_%d":"some value %d %d"`, j, j, streamID)
		}
		for j := 0; j < *varFieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"var_%d":"some value %d %d"`, j, j, rand.Uint64())
		}
		for j := 0; j < *dictFieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"dict_%d":"%s"`, j, dictValues[rand.Intn(len(dictValues))])
		}
		for j := 0; j < *u8FieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"u8_%d":"%d"`, j, uint8(rand.Uint32()))
		}
		for j := 0; j < *u16FieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"u16_%d":"%d"`, j, uint16(rand.Uint32()))
		}
		for j := 0; j < *u32FieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"u32_%d":"%d"`, j, rand.Uint32())
		}
		for j := 0; j < *u64FieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"u64_%d":"%d"`, j, rand.Uint64())
		}
		for j := 0; j < *floatFieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"float_%d":"%v"`, j, math.Round(10_000*rand.Float64())/1000)
		}
		for j := 0; j < *ipFieldsPerLog; j++ {
			ip := toIPv4(rand.Uint32())
			fmt.Fprintf(bw, `,"ip_%d":"%s"`, j, ip)
		}
		for j := 0; j < *timestampFieldsPerLog; j++ {
			timestamp := toISO8601(int64(rand.Uint64()))
			fmt.Fprintf(bw, `,"timestamp_%d":"%s"`, j, timestamp)
		}
		for j := 0; j < *jsonFieldsPerLog; j++ {
			fmt.Fprintf(bw, `,"json_%d":"{\"foo\":\"bar_%d\",\"baz\":{\"a\":[\"x\",\"y\"]},\"f3\":NaN,\"f4\":%d}"`, j, rand.Intn(10), rand.Intn(100))
		}
		fmt.Fprintf(bw, "}\n")

		logEntriesCount.Add(1)
		streamID++
	}
}

var dictValues = []string{
	"debug",
	"info",
	"warn",
	"error",
	"fatal",
	"ERROR",
	"FATAL",
	"INFO",
}

func newTimeFlag(name, defaultValue, description string) *timeFlag {
	var tf timeFlag
	if err := tf.Set(defaultValue); err != nil {
		logger.Panicf("invalid defaultValue=%q for flag %q: %s", defaultValue, name, err)
	}
	flag.Var(&tf, name, description)
	return &tf
}

type timeFlag struct {
	s    string
	nsec int64
}

func (tf *timeFlag) Set(s string) error {
	msec, err := promutils.ParseTimeMsec(s)
	if err != nil {
		return fmt.Errorf("cannot parse time from %q: %w", s, err)
	}
	tf.s = s
	tf.nsec = msec * 1e6
	return nil
}

func (tf *timeFlag) String() string {
	return tf.s
}

func toRFC3339(nsec int64) string {
	return time.Unix(0, nsec).UTC().Format(time.RFC3339Nano)
}

func toISO8601(nsec int64) string {
	return time.Unix(0, nsec).UTC().Format("2006-01-02T15:04:05.000Z")
}

func toIPv4(n uint32) string {
	dst := make([]byte, 0, len("255.255.255.255"))
	dst = marshalUint64(dst, uint64(n>>24))
	dst = append(dst, '.')
	dst = marshalUint64(dst, uint64((n>>16)&0xff))
	dst = append(dst, '.')
	dst = marshalUint64(dst, uint64((n>>8)&0xff))
	dst = append(dst, '.')
	dst = marshalUint64(dst, uint64(n&0xff))
	return string(dst)
}

func toUUID(a, b uint64) string {
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", a&(1<<32-1), (a>>32)&(1<<16-1), (a >> 48), b&(1<<16-1), b>>16)
}

// marshalUint64 appends string representation of n to dst and returns the result.
func marshalUint64(dst []byte, n uint64) []byte {
	return strconv.AppendUint(dst, n, 10)
}
```
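The formatting helpers above avoid `fmt` on the hot IPv4 path and pack two random uint64s into a UUID-shaped string. A quick sanity check of their output, assuming they are in scope (the expected values follow directly from the code above):

```go
func ExampleHelpers() {
	fmt.Println(toIPv4(0x01020304)) // 1.2.3.4
	fmt.Println(toRFC3339(0))       // 1970-01-01T00:00:00Z
	fmt.Println(toUUID(0, 0))       // 00000000-0000-0000-0000-000000000000
}
```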
app/vlselect/logsql/buffered_writer.go (new file, 47 lines):

```go
package logsql

import (
	"bufio"
	"io"
	"sync"
)

func getBufferedWriter(w io.Writer) *bufferedWriter {
	v := bufferedWriterPool.Get()
	if v == nil {
		return &bufferedWriter{
			bw: bufio.NewWriter(w),
		}
	}
	bw := v.(*bufferedWriter)
	bw.bw.Reset(w)
	return bw
}

func putBufferedWriter(bw *bufferedWriter) {
	bw.reset()
	bufferedWriterPool.Put(bw)
}

var bufferedWriterPool sync.Pool

// bufferedWriter wraps a bufio.Writer, serializing concurrent writes with a
// mutex and deliberately discarding write/flush errors.
type bufferedWriter struct {
	mu sync.Mutex
	bw *bufio.Writer
}

func (bw *bufferedWriter) reset() {
	// nothing to do
}

func (bw *bufferedWriter) WriteIgnoreErrors(p []byte) {
	bw.mu.Lock()
	_, _ = bw.bw.Write(p)
	bw.mu.Unlock()
}

func (bw *bufferedWriter) FlushIgnoreErrors() {
	bw.mu.Lock()
	_ = bw.bw.Flush()
	bw.mu.Unlock()
}
```
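A sketch of the intended lifecycle, assuming the pool helpers above: several goroutines may share one instance, since the mutex serializes writes, and errors are swallowed — presumably because an HTTP response writer has nowhere useful to report them mid-stream.

```go
func writeRows(w io.Writer, rows [][]byte) {
	bw := getBufferedWriter(w)
	defer putBufferedWriter(bw) // return the writer to the pool for reuse

	var wg sync.WaitGroup
	for _, row := range rows {
		wg.Add(1)
		go func(p []byte) {
			defer wg.Done()
			bw.WriteIgnoreErrors(p) // safe under concurrency: serialized by bw.mu
		}(row)
	}
	wg.Wait()
	bw.FlushIgnoreErrors()
}
```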
app/vlselect/logsql/hits_response.qtpl (new file, 69 lines):

```
{% import (
	"slices"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
) %}

{% stripspace %}

// FieldsForHits formats labels for /select/logsql/hits response
{% func FieldsForHits(columns []logstorage.BlockColumn, rowIdx int) %}
{
	{% if len(columns) > 0 %}
		{%q= columns[0].Name %}:{%q= columns[0].Values[rowIdx] %}
		{% for _, c := range columns[1:] %}
			,{%q= c.Name %}:{%q= c.Values[rowIdx] %}
		{% endfor %}
	{% endif %}
}
{% endfunc %}

{% func HitsSeries(m map[string]*hitsSeries) %}
{
	{% code
	sortedKeys := make([]string, 0, len(m))
	for k := range m {
		sortedKeys = append(sortedKeys, k)
	}
	slices.Sort(sortedKeys)
	%}
	"hits":[
		{% if len(sortedKeys) > 0 %}
			{%= hitsSeriesLine(m, sortedKeys[0]) %}
			{% for _, k := range sortedKeys[1:] %}
				,{%= hitsSeriesLine(m, k) %}
			{% endfor %}
		{% endif %}
	]
}
{% endfunc %}

{% func hitsSeriesLine(m map[string]*hitsSeries, k string) %}
{
	{% code
	hs := m[k]
	hs.sort()
	timestamps := hs.timestamps
	values := hs.values
	%}
	"fields":{%s= k %},
	"timestamps":[
		{% if len(timestamps) > 0 %}
			{%q= timestamps[0] %}
			{% for _, ts := range timestamps[1:] %}
				,{%q= ts %}
			{% endfor %}
		{% endif %}
	],
	"values":[
		{% if len(values) > 0 %}
			{%s= values[0] %}
			{% for _, v := range values[1:] %}
				,{%s= v %}
			{% endfor %}
		{% endif %}
	]
}
{% endfunc %}

{% endstripspace %}
```
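Rendered end to end, these templates emit a response of roughly the following shape: the `fields` object is the serialized group key built by `FieldsForHits`, timestamps are JSON-quoted via `{%q= %}`, and hit counts are written raw via `{%s= %}`, so they appear as numbers. The values here are illustrative:

```json
{
  "hits": [
    {
      "fields": {"level": "error"},
      "timestamps": ["2024-01-01T00:00:00Z", "2024-01-02T00:00:00Z"],
      "values": [10, 7]
    }
  ]
}
```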
app/vlselect/logsql/hits_response.qtpl.go (new file, 219 lines):

```go
// Code generated by qtc from "hits_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vlselect/logsql/hits_response.qtpl:1
package logsql

//line app/vlselect/logsql/hits_response.qtpl:1
import (
	"slices"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// FieldsForHits formats labels for /select/logsql/hits response

//line app/vlselect/logsql/hits_response.qtpl:10
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vlselect/logsql/hits_response.qtpl:10
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vlselect/logsql/hits_response.qtpl:10
func StreamFieldsForHits(qw422016 *qt422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/hits_response.qtpl:10
	qw422016.N().S(`{`)
//line app/vlselect/logsql/hits_response.qtpl:12
	if len(columns) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:13
		qw422016.N().Q(columns[0].Name)
//line app/vlselect/logsql/hits_response.qtpl:13
		qw422016.N().S(`:`)
//line app/vlselect/logsql/hits_response.qtpl:13
		qw422016.N().Q(columns[0].Values[rowIdx])
//line app/vlselect/logsql/hits_response.qtpl:14
		for _, c := range columns[1:] {
//line app/vlselect/logsql/hits_response.qtpl:14
			qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:15
			qw422016.N().Q(c.Name)
//line app/vlselect/logsql/hits_response.qtpl:15
			qw422016.N().S(`:`)
//line app/vlselect/logsql/hits_response.qtpl:15
			qw422016.N().Q(c.Values[rowIdx])
//line app/vlselect/logsql/hits_response.qtpl:16
		}
//line app/vlselect/logsql/hits_response.qtpl:17
	}
//line app/vlselect/logsql/hits_response.qtpl:17
	qw422016.N().S(`}`)
//line app/vlselect/logsql/hits_response.qtpl:19
}

//line app/vlselect/logsql/hits_response.qtpl:19
func WriteFieldsForHits(qq422016 qtio422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/hits_response.qtpl:19
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/hits_response.qtpl:19
	StreamFieldsForHits(qw422016, columns, rowIdx)
//line app/vlselect/logsql/hits_response.qtpl:19
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/hits_response.qtpl:19
}

//line app/vlselect/logsql/hits_response.qtpl:19
func FieldsForHits(columns []logstorage.BlockColumn, rowIdx int) string {
//line app/vlselect/logsql/hits_response.qtpl:19
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/hits_response.qtpl:19
	WriteFieldsForHits(qb422016, columns, rowIdx)
//line app/vlselect/logsql/hits_response.qtpl:19
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/hits_response.qtpl:19
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/hits_response.qtpl:19
	return qs422016
//line app/vlselect/logsql/hits_response.qtpl:19
}

//line app/vlselect/logsql/hits_response.qtpl:21
func StreamHitsSeries(qw422016 *qt422016.Writer, m map[string]*hitsSeries) {
//line app/vlselect/logsql/hits_response.qtpl:21
	qw422016.N().S(`{`)
//line app/vlselect/logsql/hits_response.qtpl:24
	sortedKeys := make([]string, 0, len(m))
	for k := range m {
		sortedKeys = append(sortedKeys, k)
	}
	slices.Sort(sortedKeys)

//line app/vlselect/logsql/hits_response.qtpl:29
	qw422016.N().S(`"hits":[`)
//line app/vlselect/logsql/hits_response.qtpl:31
	if len(sortedKeys) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:32
		streamhitsSeriesLine(qw422016, m, sortedKeys[0])
//line app/vlselect/logsql/hits_response.qtpl:33
		for _, k := range sortedKeys[1:] {
//line app/vlselect/logsql/hits_response.qtpl:33
			qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:34
			streamhitsSeriesLine(qw422016, m, k)
//line app/vlselect/logsql/hits_response.qtpl:35
		}
//line app/vlselect/logsql/hits_response.qtpl:36
	}
//line app/vlselect/logsql/hits_response.qtpl:36
	qw422016.N().S(`]}`)
//line app/vlselect/logsql/hits_response.qtpl:39
}

//line app/vlselect/logsql/hits_response.qtpl:39
func WriteHitsSeries(qq422016 qtio422016.Writer, m map[string]*hitsSeries) {
//line app/vlselect/logsql/hits_response.qtpl:39
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/hits_response.qtpl:39
	StreamHitsSeries(qw422016, m)
//line app/vlselect/logsql/hits_response.qtpl:39
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/hits_response.qtpl:39
}

//line app/vlselect/logsql/hits_response.qtpl:39
func HitsSeries(m map[string]*hitsSeries) string {
//line app/vlselect/logsql/hits_response.qtpl:39
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/hits_response.qtpl:39
	WriteHitsSeries(qb422016, m)
//line app/vlselect/logsql/hits_response.qtpl:39
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/hits_response.qtpl:39
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/hits_response.qtpl:39
	return qs422016
//line app/vlselect/logsql/hits_response.qtpl:39
}

//line app/vlselect/logsql/hits_response.qtpl:41
func streamhitsSeriesLine(qw422016 *qt422016.Writer, m map[string]*hitsSeries, k string) {
//line app/vlselect/logsql/hits_response.qtpl:41
	qw422016.N().S(`{`)
//line app/vlselect/logsql/hits_response.qtpl:44
	hs := m[k]
	hs.sort()
	timestamps := hs.timestamps
	values := hs.values

//line app/vlselect/logsql/hits_response.qtpl:48
	qw422016.N().S(`"fields":`)
//line app/vlselect/logsql/hits_response.qtpl:49
	qw422016.N().S(k)
//line app/vlselect/logsql/hits_response.qtpl:49
	qw422016.N().S(`,"timestamps":[`)
//line app/vlselect/logsql/hits_response.qtpl:51
	if len(timestamps) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:52
		qw422016.N().Q(timestamps[0])
//line app/vlselect/logsql/hits_response.qtpl:53
		for _, ts := range timestamps[1:] {
//line app/vlselect/logsql/hits_response.qtpl:53
			qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:54
			qw422016.N().Q(ts)
//line app/vlselect/logsql/hits_response.qtpl:55
		}
//line app/vlselect/logsql/hits_response.qtpl:56
	}
//line app/vlselect/logsql/hits_response.qtpl:56
	qw422016.N().S(`],"values":[`)
//line app/vlselect/logsql/hits_response.qtpl:59
	if len(values) > 0 {
//line app/vlselect/logsql/hits_response.qtpl:60
		qw422016.N().S(values[0])
//line app/vlselect/logsql/hits_response.qtpl:61
		for _, v := range values[1:] {
//line app/vlselect/logsql/hits_response.qtpl:61
			qw422016.N().S(`,`)
//line app/vlselect/logsql/hits_response.qtpl:62
			qw422016.N().S(v)
//line app/vlselect/logsql/hits_response.qtpl:63
		}
//line app/vlselect/logsql/hits_response.qtpl:64
	}
//line app/vlselect/logsql/hits_response.qtpl:64
	qw422016.N().S(`]}`)
//line app/vlselect/logsql/hits_response.qtpl:67
}

//line app/vlselect/logsql/hits_response.qtpl:67
func writehitsSeriesLine(qq422016 qtio422016.Writer, m map[string]*hitsSeries, k string) {
//line app/vlselect/logsql/hits_response.qtpl:67
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/hits_response.qtpl:67
	streamhitsSeriesLine(qw422016, m, k)
//line app/vlselect/logsql/hits_response.qtpl:67
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/hits_response.qtpl:67
}

//line app/vlselect/logsql/hits_response.qtpl:67
func hitsSeriesLine(m map[string]*hitsSeries, k string) string {
//line app/vlselect/logsql/hits_response.qtpl:67
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/hits_response.qtpl:67
	writehitsSeriesLine(qb422016, m, k)
//line app/vlselect/logsql/hits_response.qtpl:67
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/hits_response.qtpl:67
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/hits_response.qtpl:67
	return qs422016
//line app/vlselect/logsql/hits_response.qtpl:67
}
```
```diff
@@ -1,66 +1,382 @@
 package logsql

 import (
+	"context"
 	"fmt"
+	"math"
 	"net/http"
+	"sort"
+	"strings"
+	"sync"
+	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )

 var (
 	maxSortBufferSize = flagutil.NewBytes("select.maxSortBufferSize", 1024*1024, "Query results from /select/logsql/query are automatically sorted by _time "+
 		"if their summary size doesn't exceed this value; otherwise, query results are streamed in the response without sorting; "+
 		"too big value for this flag may result in high memory usage since the sorting is performed in memory")
 )

-// ProcessQueryRequest handles /select/logsql/query request
-func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}, cancel func()) {
-	// Extract tenantID
-	tenantID, err := logstorage.GetTenantIDFromRequest(r)
+// ProcessHitsRequest handles /select/logsql/hits request.
+//
+// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
+func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+	q, tenantIDs, err := parseCommonArgs(r)
 	if err != nil {
 		httpserver.Errorf(w, r, "%s", err)
 		return
 	}
+
+	// Obtain step
+	stepStr := r.FormValue("step")
+	if stepStr == "" {
+		stepStr = "1d"
+	}
+	step, err := promutils.ParseDuration(stepStr)
+	if err != nil {
+		httpserver.Errorf(w, r, "cannot parse 'step' arg: %s", err)
+		return
+	}
+	if step <= 0 {
+		httpserver.Errorf(w, r, "'step' must be bigger than zero")
+		return
+	}
+
+	// Obtain offset
+	offsetStr := r.FormValue("offset")
+	if offsetStr == "" {
+		offsetStr = "0s"
+	}
+	offset, err := promutils.ParseDuration(offsetStr)
+	if err != nil {
+		httpserver.Errorf(w, r, "cannot parse 'offset' arg: %s", err)
+		return
+	}
+
+	// Obtain field entries
+	fields := r.Form["field"]
+
+	// Prepare the query
+	q.AddCountByTimePipe(int64(step), int64(offset), fields)
+	q.Optimize()
+
+	var mLock sync.Mutex
+	m := make(map[string]*hitsSeries)
+	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
+		if len(columns) == 0 || len(columns[0].Values) == 0 {
+			return
+		}
+
+		timestampValues := columns[0].Values
+		hitsValues := columns[len(columns)-1].Values
+		columns = columns[1 : len(columns)-1]
+
+		bb := blockResultPool.Get()
+		for i := range timestamps {
+			timestampStr := strings.Clone(timestampValues[i])
+			hitsStr := strings.Clone(hitsValues[i])
+
+			bb.Reset()
+			WriteFieldsForHits(bb, columns, i)
+
+			mLock.Lock()
+			hs, ok := m[string(bb.B)]
+			if !ok {
+				k := string(bb.B)
+				hs = &hitsSeries{}
+				m[k] = hs
+			}
+			hs.timestamps = append(hs.timestamps, timestampStr)
+			hs.values = append(hs.values, hitsStr)
+			mLock.Unlock()
+		}
+		blockResultPool.Put(bb)
+	}
+
+	// Execute the query
+	if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
+		httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
+		return
+	}
+
+	// Write response
+	w.Header().Set("Content-Type", "application/json")
+	WriteHitsSeries(w, m)
+}
```
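The handler above buckets matching logs into `step`-wide time windows, optionally grouped by one or more `field` args, and returns one series per group. A hedged example request against a local instance, using the args parsed above:

```
curl http://localhost:9428/select/logsql/hits \
  -d 'query=error' -d 'step=1h' -d 'field=level'
```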
|
||||
type hitsSeries struct {
|
||||
timestamps []string
|
||||
values []string
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) sort() {
|
||||
sort.Sort(hs)
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) Len() int {
|
||||
return len(hs.timestamps)
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) Swap(i, j int) {
|
||||
hs.timestamps[i], hs.timestamps[j] = hs.timestamps[j], hs.timestamps[i]
|
||||
hs.values[i], hs.values[j] = hs.values[j], hs.values[i]
|
||||
}
|
||||
|
||||
func (hs *hitsSeries) Less(i, j int) bool {
|
||||
return hs.timestamps[i] < hs.timestamps[j]
|
||||
}
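
A small illustrative check of what hitsSeries.Less relies on: lexicographic comparison of RFC3339 timestamp strings matches chronological order, and values travel together with their timestamps through Swap. The sample values below are made up.

	hs := &hitsSeries{
		timestamps: []string{"2024-05-02T00:00:00Z", "2024-05-01T00:00:00Z"},
		values:     []string{"7", "3"},
	}
	hs.sort()
	// hs.timestamps is now ["2024-05-01T00:00:00Z", "2024-05-02T00:00:00Z"]
	// and hs.values follows along as ["3", "7"].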

// ProcessFieldNamesRequest handles /select/logsql/field_names request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-names
func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Obtain field names for the given query
	q.Optimize()
	fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain field names: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, fieldNames)
}

// ProcessFieldValuesRequest handles /select/logsql/field_values request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-values
func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse fieldName query arg
	fieldName := r.FormValue("field")
	if fieldName == "" {
		httpserver.Errorf(w, r, "missing 'field' query arg")
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit < 0 {
		limit = 0
	}

	// Obtain unique values for the given field
	q.Optimize()
	values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain values for field %q: %s", fieldName, err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, values)
}

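A quick illustrative request against this handler. The address is an assumption (default VictoriaLogs port), and note that a negative limit has just been normalized to 0 above, which is passed through to the storage layer unchanged.

	// Illustrative only: fetch up to 5 values of the "level" field
	// across logs matching "error" on a local instance.
	resp, err := http.Get("http://localhost:9428/select/logsql/field_values?query=error&field=level&limit=5")
	if err != nil {
		return err // hypothetical error handling in the caller
	}
	defer resp.Body.Close()
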
// ProcessStreamFieldNamesRequest processes /select/logsql/stream_field_names request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-field-names
func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Obtain stream field names for the given query
	q.Optimize()
	names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain stream field names: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, names)
}

// ProcessStreamFieldValuesRequest processes /select/logsql/stream_field_values request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-field-values
func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse fieldName query arg
	fieldName := r.FormValue("field")
	if fieldName == "" {
		httpserver.Errorf(w, r, "missing 'field' query arg")
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit < 0 {
		limit = 0
	}

	// Obtain stream field values for the given query and the given fieldName
	q.Optimize()
	values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain stream field values: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, values)
}

// ProcessStreamsRequest processes /select/logsql/streams request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-streams
func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit < 0 {
		limit = 0
	}

	// Obtain streams for the given query
	q.Optimize()
	streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain streams: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, streams)
}

// ProcessQueryRequest handles /select/logsql/query request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#http-api
func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit > 0 {
		q.AddPipeLimit(uint64(limit))
	}

	bw := getBufferedWriter(w)

	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}

		bb := blockResultPool.Get()
		for i := range timestamps {
			WriteJSONRow(bb, columns, i)
		}
		bw.WriteIgnoreErrors(bb.B)
		blockResultPool.Put(bb)
	}

	w.Header().Set("Content-Type", "application/stream+json")
	q.Optimize()
	err = vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock)

	bw.FlushIgnoreErrors()
	putBufferedWriter(bw)

	if err != nil {
		httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
	}
}
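
A small sketch of what AddPipeLimit does to the parsed query, assuming LogsQL's `limit` pipe semantics; the names come from this diff, the query string is made up.

	q, err := logstorage.ParseQuery(`error`)
	if err != nil {
		panic(err)
	}
	q.AddPipeLimit(10)
	// q now behaves like the LogsQL query `error | limit 10`,
	// so at most 10 matching rows are streamed back to the client.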

var blockResultPool bytesutil.ByteBufferPool

func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot obtain tenantID: %w", err)
	}
	tenantIDs := []logstorage.TenantID{tenantID}

	// Parse query
	qStr := r.FormValue("query")
	q, err := logstorage.ParseQuery(qStr)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot parse query [%s]: %s", qStr, err)
	}

	// Parse optional start and end args
	start, okStart, err := getTimeNsec(r, "start")
	if err != nil {
		return nil, nil, err
	}
	end, okEnd, err := getTimeNsec(r, "end")
	if err != nil {
		return nil, nil, err
	}
	if okStart || okEnd {
		if !okStart {
			start = math.MinInt64
		}
		if !okEnd {
			end = math.MaxInt64
		}
		q.AddTimeFilter(start, end)
	}

	return q, tenantIDs, nil
}
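
An illustrative trace of the time-range handling above, using net/http/httptest. The nanosecond value is worked out by hand (2024-05-01T00:00:00Z is 1714521600 Unix seconds), and the default-tenant behavior of GetTenantIDFromRequest when no tenant headers are set is an assumption.

	r := httptest.NewRequest("GET", "/select/logsql/query?query=error&start=2024-05-01T00:00:00Z", nil)
	q, tenantIDs, err := parseCommonArgs(r)
	// err == nil (assuming the default tenant), tenantIDs holds one entry,
	// and since only "start" is set, the time filter becomes
	// [1714521600000000000 .. math.MaxInt64] nanoseconds,
	// i.e. "everything since 2024-05-01T00:00:00Z".
	_, _, _ = q, tenantIDs, err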

func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
	s := r.FormValue(argName)
	if s == "" {
		return 0, false, nil
	}
	currentTimestamp := float64(time.Now().UnixNano()) / 1e9
	secs, err := promutils.ParseTimeAt(s, currentTimestamp)
	if err != nil {
		return 0, false, fmt.Errorf("cannot parse %s=%s: %w", argName, s, err)
	}
	return int64(secs * 1e9), true, nil
}

32	app/vlselect/logsql/logsql.qtpl	Normal file
@@ -0,0 +1,32 @@
{% import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
) %}

{% stripspace %}

// ValuesWithHitsJSON generates JSON from the given values.
{% func ValuesWithHitsJSON(values []logstorage.ValueWithHits) %}
{
	"values":{%= valuesWithHitsJSONArray(values) %}
}
{% endfunc %}

{% func valuesWithHitsJSONArray(values []logstorage.ValueWithHits) %}
[
	{% if len(values) > 0 %}
		{%= valueWithHitsJSON(values[0]) %}
		{% for _, v := range values[1:] %}
			,{%= valueWithHitsJSON(v) %}
		{% endfor %}
	{% endif %}
]
{% endfunc %}

{% func valueWithHitsJSON(v logstorage.ValueWithHits) %}
{
	"value":{%q= v.Value %},
	"hits":{%dul= v.Hits %}
}
{% endfunc %}

{% endstripspace %}
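
To make the template's output concrete, a small illustrative call to the generated function (the two values are made up):

	s := ValuesWithHitsJSON([]logstorage.ValueWithHits{
		{Value: "info", Hits: 1023},
		{Value: "error", Hits: 7},
	})
	fmt.Println(s)
	// {"values":[{"value":"info","hits":1023},{"value":"error","hits":7}]}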

152	app/vlselect/logsql/logsql.qtpl.go	Normal file
@@ -0,0 +1,152 @@
// Code generated by qtc from "logsql.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vlselect/logsql/logsql.qtpl:1
package logsql

//line app/vlselect/logsql/logsql.qtpl:1
import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// ValuesWithHitsJSON generates JSON from the given values.

//line app/vlselect/logsql/logsql.qtpl:8
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vlselect/logsql/logsql.qtpl:8
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vlselect/logsql/logsql.qtpl:8
func StreamValuesWithHitsJSON(qw422016 *qt422016.Writer, values []logstorage.ValueWithHits) {
//line app/vlselect/logsql/logsql.qtpl:8
	qw422016.N().S(`{"values":`)
//line app/vlselect/logsql/logsql.qtpl:10
	streamvaluesWithHitsJSONArray(qw422016, values)
//line app/vlselect/logsql/logsql.qtpl:10
	qw422016.N().S(`}`)
//line app/vlselect/logsql/logsql.qtpl:12
}

//line app/vlselect/logsql/logsql.qtpl:12
func WriteValuesWithHitsJSON(qq422016 qtio422016.Writer, values []logstorage.ValueWithHits) {
//line app/vlselect/logsql/logsql.qtpl:12
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/logsql.qtpl:12
	StreamValuesWithHitsJSON(qw422016, values)
//line app/vlselect/logsql/logsql.qtpl:12
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/logsql.qtpl:12
}

//line app/vlselect/logsql/logsql.qtpl:12
func ValuesWithHitsJSON(values []logstorage.ValueWithHits) string {
//line app/vlselect/logsql/logsql.qtpl:12
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/logsql.qtpl:12
	WriteValuesWithHitsJSON(qb422016, values)
//line app/vlselect/logsql/logsql.qtpl:12
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/logsql.qtpl:12
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/logsql.qtpl:12
	return qs422016
//line app/vlselect/logsql/logsql.qtpl:12
}

//line app/vlselect/logsql/logsql.qtpl:14
func streamvaluesWithHitsJSONArray(qw422016 *qt422016.Writer, values []logstorage.ValueWithHits) {
//line app/vlselect/logsql/logsql.qtpl:14
	qw422016.N().S(`[`)
//line app/vlselect/logsql/logsql.qtpl:16
	if len(values) > 0 {
//line app/vlselect/logsql/logsql.qtpl:17
		streamvalueWithHitsJSON(qw422016, values[0])
//line app/vlselect/logsql/logsql.qtpl:18
		for _, v := range values[1:] {
//line app/vlselect/logsql/logsql.qtpl:18
			qw422016.N().S(`,`)
//line app/vlselect/logsql/logsql.qtpl:19
			streamvalueWithHitsJSON(qw422016, v)
//line app/vlselect/logsql/logsql.qtpl:20
		}
//line app/vlselect/logsql/logsql.qtpl:21
	}
//line app/vlselect/logsql/logsql.qtpl:21
	qw422016.N().S(`]`)
//line app/vlselect/logsql/logsql.qtpl:23
}

//line app/vlselect/logsql/logsql.qtpl:23
func writevaluesWithHitsJSONArray(qq422016 qtio422016.Writer, values []logstorage.ValueWithHits) {
//line app/vlselect/logsql/logsql.qtpl:23
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/logsql.qtpl:23
	streamvaluesWithHitsJSONArray(qw422016, values)
//line app/vlselect/logsql/logsql.qtpl:23
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/logsql.qtpl:23
}

//line app/vlselect/logsql/logsql.qtpl:23
func valuesWithHitsJSONArray(values []logstorage.ValueWithHits) string {
//line app/vlselect/logsql/logsql.qtpl:23
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/logsql.qtpl:23
	writevaluesWithHitsJSONArray(qb422016, values)
//line app/vlselect/logsql/logsql.qtpl:23
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/logsql.qtpl:23
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/logsql.qtpl:23
	return qs422016
//line app/vlselect/logsql/logsql.qtpl:23
}

//line app/vlselect/logsql/logsql.qtpl:25
func streamvalueWithHitsJSON(qw422016 *qt422016.Writer, v logstorage.ValueWithHits) {
//line app/vlselect/logsql/logsql.qtpl:25
	qw422016.N().S(`{"value":`)
//line app/vlselect/logsql/logsql.qtpl:27
	qw422016.N().Q(v.Value)
//line app/vlselect/logsql/logsql.qtpl:27
	qw422016.N().S(`,"hits":`)
//line app/vlselect/logsql/logsql.qtpl:28
	qw422016.N().DUL(v.Hits)
//line app/vlselect/logsql/logsql.qtpl:28
	qw422016.N().S(`}`)
//line app/vlselect/logsql/logsql.qtpl:30
}

//line app/vlselect/logsql/logsql.qtpl:30
func writevalueWithHitsJSON(qq422016 qtio422016.Writer, v logstorage.ValueWithHits) {
//line app/vlselect/logsql/logsql.qtpl:30
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/logsql.qtpl:30
	streamvalueWithHitsJSON(qw422016, v)
//line app/vlselect/logsql/logsql.qtpl:30
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/logsql.qtpl:30
}

//line app/vlselect/logsql/logsql.qtpl:30
func valueWithHitsJSON(v logstorage.ValueWithHits) string {
//line app/vlselect/logsql/logsql.qtpl:30
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/logsql.qtpl:30
	writevalueWithHitsJSON(qb422016, v)
//line app/vlselect/logsql/logsql.qtpl:30
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/logsql.qtpl:30
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/logsql.qtpl:30
	return qs422016
//line app/vlselect/logsql/logsql.qtpl:30
}
@@ -1,290 +0,0 @@
package logsql

import (
	"bytes"
	"io"
	"sort"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func getSortWriter() *sortWriter {
	v := sortWriterPool.Get()
	if v == nil {
		return &sortWriter{}
	}
	return v.(*sortWriter)
}

func putSortWriter(sw *sortWriter) {
	sw.reset()
	sortWriterPool.Put(sw)
}

var sortWriterPool sync.Pool

// sortWriter expects JSON line stream to be written to it.
//
// It buffers the incoming data until its size reaches maxBufLen.
// Then it streams the buffered data and all the incoming data to w.
//
// The FinalFlush() must be called when all the data is written.
// If the buf isn't empty at FinalFlush() call, then the buffered data
// is sorted by _time field.
type sortWriter struct {
	mu sync.Mutex
	w  io.Writer

	maxLines     int
	linesWritten int

	maxBufLen  int
	buf        []byte
	bufFlushed bool

	hasErr bool
}

func (sw *sortWriter) reset() {
	sw.w = nil

	sw.maxLines = 0
	sw.linesWritten = 0

	sw.maxBufLen = 0
	sw.buf = sw.buf[:0]
	sw.bufFlushed = false
	sw.hasErr = false
}

// Init initializes sw.
//
// If maxLines is set to positive value, then sw accepts up to maxLines
// and then rejects all the other lines by returning false from TryWrite.
func (sw *sortWriter) Init(w io.Writer, maxBufLen, maxLines int) {
	sw.reset()

	sw.w = w
	sw.maxBufLen = maxBufLen
	sw.maxLines = maxLines
}

// TryWrite writes p to sw.
//
// True is returned on successful write, false otherwise.
//
// Unsuccessful write may occur on underlying write error or when maxLines lines are already written to sw.
func (sw *sortWriter) TryWrite(p []byte) bool {
	sw.mu.Lock()
	defer sw.mu.Unlock()

	if sw.hasErr {
		return false
	}

	if sw.bufFlushed {
		if !sw.writeToUnderlyingWriterLocked(p) {
			sw.hasErr = true
			return false
		}
		return true
	}

	if len(sw.buf)+len(p) < sw.maxBufLen {
		sw.buf = append(sw.buf, p...)
		return true
	}

	sw.bufFlushed = true
	if !sw.writeToUnderlyingWriterLocked(sw.buf) {
		sw.hasErr = true
		return false
	}
	sw.buf = sw.buf[:0]

	if !sw.writeToUnderlyingWriterLocked(p) {
		sw.hasErr = true
		return false
	}
	return true
}
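
An illustrative trace of the buffering logic above, mirroring the TestSortWriter cases further down: with a tiny buffer the very first write overflows it, flips bufFlushed, and everything streams through unsorted; with a large buffer the writes stay buffered and get sorted at FinalFlush.

	var bb bytes.Buffer
	sw := getSortWriter()
	sw.Init(&bb, 10, 0)                                       // 10-byte buffer, no line limit
	sw.TryWrite([]byte(`{"_time":"def","_msg":"xxx"}` + "\n")) // overflows: streamed as-is
	sw.TryWrite([]byte(`{"_time":"abc","_msg":"foo"}` + "\n")) // streamed as-is, no sorting
	sw.FinalFlush() // nothing buffered, so nothing to sort
	putSortWriter(sw)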

func (sw *sortWriter) writeToUnderlyingWriterLocked(p []byte) bool {
	if len(p) == 0 {
		return true
	}
	if sw.maxLines > 0 {
		if sw.linesWritten >= sw.maxLines {
			return false
		}
		var linesLeft int
		p, linesLeft = trimLines(p, sw.maxLines-sw.linesWritten)
		sw.linesWritten += linesLeft
	}
	if _, err := sw.w.Write(p); err != nil {
		return false
	}
	return true
}

func trimLines(p []byte, maxLines int) ([]byte, int) {
	if maxLines <= 0 {
		return nil, 0
	}
	n := bytes.Count(p, newline)
	if n < maxLines {
		return p, n
	}
	for n >= maxLines {
		idx := bytes.LastIndexByte(p, '\n')
		p = p[:idx]
		n--
	}
	return p[:len(p)+1], maxLines
}
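
A worked example of trimLines (illustrative): three complete lines trimmed down to two. The `n >= maxLines` loop cuts one newline past the last kept line, and `p[:len(p)+1]` then restores that trailing `\n`.

	p := []byte("a\nb\nc\n")
	out, n := trimLines(p, 2)
	// out == []byte("a\nb\n"), n == 2: the third line is dropped,
	// and the trailing newline of the second line is restored.
	fmt.Printf("%q %d\n", out, n)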

var newline = []byte("\n")

func (sw *sortWriter) FinalFlush() {
	if sw.hasErr || sw.bufFlushed {
		return
	}

	rs := getRowsSorter()
	rs.parseRows(sw.buf)
	rs.sort()

	rows := rs.rows
	if sw.maxLines > 0 && len(rows) > sw.maxLines {
		rows = rows[:sw.maxLines]
	}
	WriteJSONRows(sw.w, rows)

	putRowsSorter(rs)
}

func getRowsSorter() *rowsSorter {
	v := rowsSorterPool.Get()
	if v == nil {
		return &rowsSorter{}
	}
	return v.(*rowsSorter)
}

func putRowsSorter(rs *rowsSorter) {
	rs.reset()
	rowsSorterPool.Put(rs)
}

var rowsSorterPool sync.Pool

type rowsSorter struct {
	buf       []byte
	fieldsBuf []logstorage.Field
	rows      [][]logstorage.Field
	times     []string
}

func (rs *rowsSorter) reset() {
	rs.buf = rs.buf[:0]

	fieldsBuf := rs.fieldsBuf
	for i := range fieldsBuf {
		fieldsBuf[i].Reset()
	}
	rs.fieldsBuf = fieldsBuf[:0]

	rows := rs.rows
	for i := range rows {
		rows[i] = nil
	}
	rs.rows = rows[:0]

	times := rs.times
	for i := range times {
		times[i] = ""
	}
	rs.times = times[:0]
}

func (rs *rowsSorter) parseRows(src []byte) {
	rs.reset()

	buf := rs.buf
	fieldsBuf := rs.fieldsBuf
	rows := rs.rows
	times := rs.times

	p := logjson.GetParser()
	for len(src) > 0 {
		var line []byte
		n := bytes.IndexByte(src, '\n')
		if n < 0 {
			line = src
			src = nil
		} else {
			line = src[:n]
			src = src[n+1:]
		}
		if len(line) == 0 {
			continue
		}

		if err := p.ParseLogMessage(line); err != nil {
			logger.Panicf("BUG: unexpected invalid JSON line: %s", err)
		}

		timeValue := ""
		fieldsBufLen := len(fieldsBuf)
		for _, f := range p.Fields {
			bufLen := len(buf)
			buf = append(buf, f.Name...)
			name := bytesutil.ToUnsafeString(buf[bufLen:])

			bufLen = len(buf)
			buf = append(buf, f.Value...)
			value := bytesutil.ToUnsafeString(buf[bufLen:])

			fieldsBuf = append(fieldsBuf, logstorage.Field{
				Name:  name,
				Value: value,
			})

			if name == "_time" {
				timeValue = value
			}
		}
		rows = append(rows, fieldsBuf[fieldsBufLen:])
		times = append(times, timeValue)
	}
	logjson.PutParser(p)

	rs.buf = buf
	rs.fieldsBuf = fieldsBuf
	rs.rows = rows
	rs.times = times
}

func (rs *rowsSorter) Len() int {
	return len(rs.rows)
}

func (rs *rowsSorter) Less(i, j int) bool {
	times := rs.times
	return times[i] < times[j]
}

func (rs *rowsSorter) Swap(i, j int) {
	times := rs.times
	rows := rs.rows
	times[i], times[j] = times[j], times[i]
	rows[i], rows[j] = rows[j], rows[i]
}

func (rs *rowsSorter) sort() {
	sort.Sort(rs)
}

@@ -1,46 +0,0 @@
package logsql

import (
	"bytes"
	"strings"
	"testing"
)

func TestSortWriter(t *testing.T) {
	f := func(maxBufLen, maxLines int, data string, expectedResult string) {
		t.Helper()

		var bb bytes.Buffer
		sw := getSortWriter()
		sw.Init(&bb, maxBufLen, maxLines)
		for _, s := range strings.Split(data, "\n") {
			if !sw.TryWrite([]byte(s + "\n")) {
				break
			}
		}
		sw.FinalFlush()
		putSortWriter(sw)

		result := bb.String()
		if result != expectedResult {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, expectedResult)
		}
	}

	f(100, 0, "", "")
	f(100, 0, "{}", "{}\n")

	data := `{"_time":"def","_msg":"xxx"}
{"_time":"abc","_msg":"foo"}`
	resultExpected := `{"_time":"abc","_msg":"foo"}
{"_time":"def","_msg":"xxx"}
`
	f(100, 0, data, resultExpected)
	f(10, 0, data, data+"\n")

	// Test with the maxLines
	f(100, 1, data, `{"_time":"abc","_msg":"foo"}`+"\n")
	f(10, 1, data, `{"_time":"def","_msg":"xxx"}`+"\n")
	f(10, 2, data, data+"\n")
	f(100, 2, data, resultExpected)
}

@@ -1,7 +1,6 @@
package vlselect

import (
	"context"
	"embed"
	"flag"
	"fmt"
@@ -76,10 +75,9 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		// Skip requests, which do not start with /select/, since these aren't our requests.
		return false
	}
	path = strings.TrimPrefix(path, "/select")
	path = strings.ReplaceAll(path, "//", "/")

	if path == "/vmui" {
	if path == "/select/vmui" {
		// VMUI access via incomplete url without `/` in the end. Redirect to complete url.
		// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
		// is hidden behind vmauth or similar proxy.
@@ -88,14 +86,14 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		httpserver.Redirect(w, newURL)
		return true
	}
	if strings.HasPrefix(path, "/vmui/") {
		if strings.HasPrefix(path, "/vmui/static/") {
	if strings.HasPrefix(path, "/select/vmui/") {
		if strings.HasPrefix(path, "/select/vmui/static/") {
			// Allow clients caching static contents for long period of time, since it shouldn't change over time.
			// Path to static contents (such as js and css) must be changed whenever its contents is changed.
			// See https://developer.chrome.com/docs/lighthouse/performance/uses-long-cache-ttl/
			w.Header().Set("Cache-Control", "max-age=31536000")
		}
		r.URL.Path = path
		r.URL.Path = strings.TrimPrefix(path, "/select")
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}
@@ -141,15 +139,35 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		}
	}

	ctxWithCancel, cancel := context.WithCancel(ctx)
	defer cancel()
	stopCh = ctxWithCancel.Done()

	switch {
	case path == "/logsql/query":
		httpserver.EnableCORS(w, r)
	switch path {
	case "/select/logsql/field_names":
		logsqlFieldNamesRequests.Inc()
		logsql.ProcessFieldNamesRequest(ctx, w, r)
		return true
	case "/select/logsql/field_values":
		logsqlFieldValuesRequests.Inc()
		logsql.ProcessFieldValuesRequest(ctx, w, r)
		return true
	case "/select/logsql/hits":
		logsqlHitsRequests.Inc()
		logsql.ProcessHitsRequest(ctx, w, r)
		return true
	case "/select/logsql/query":
		logsqlQueryRequests.Inc()
		httpserver.EnableCORS(w, r)
		logsql.ProcessQueryRequest(w, r, stopCh, cancel)
		logsql.ProcessQueryRequest(ctx, w, r)
		return true
	case "/select/logsql/stream_field_names":
		logsqlStreamFieldNamesRequests.Inc()
		logsql.ProcessStreamFieldNamesRequest(ctx, w, r)
		return true
	case "/select/logsql/stream_field_values":
		logsqlStreamFieldValuesRequests.Inc()
		logsql.ProcessStreamFieldValuesRequest(ctx, w, r)
		return true
	case "/select/logsql/streams":
		logsqlStreamsRequests.Inc()
		logsql.ProcessStreamsRequest(ctx, w, r)
		return true
	default:
		return false
@@ -170,5 +188,11 @@ func getMaxQueryDuration(r *http.Request) time.Duration {
}

var (
	logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
	logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
	logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
	logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)
	logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
	logsqlStreamFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_names"}`)
	logsqlStreamFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_values"}`)
	logsqlStreamsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/streams"}`)
)

@@ -1,13 +1,13 @@
{
	"files": {
		"main.css": "./static/css/main.bc07cc78.css",
		"main.js": "./static/js/main.034044a7.js",
		"main.css": "./static/css/main.1c2f4a93.css",
		"main.js": "./static/js/main.c3285306.js",
		"static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
		"static/media/MetricsQL.md": "./static/media/MetricsQL.10add6e7bdf0f1d98cf7.md",
		"static/media/MetricsQL.md": "./static/media/MetricsQL.df7574389d8f8bbcf0c7.md",
		"index.html": "./index.html"
	},
	"entrypoints": [
		"static/css/main.bc07cc78.css",
		"static/js/main.034044a7.js"
		"static/css/main.1c2f4a93.css",
		"static/js/main.c3285306.js"
	]
}
@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.034044a7.js"></script><link href="./static/css/main.bc07cc78.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.c3285306.js"></script><link href="./static/css/main.1c2f4a93.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
1	app/vlselect/vmui/static/css/main.1c2f4a93.css	Normal file
File diff suppressed because one or more lines are too long
2	app/vlselect/vmui/static/js/main.c3285306.js	Normal file
File diff suppressed because one or more lines are too long
@@ -22,7 +22,7 @@ However, there are some [intentional differences](https://medium.com/@romanhavro
|
||||
[Standalone MetricsQL package](https://godoc.org/github.com/VictoriaMetrics/metricsql) can be used for parsing MetricsQL in external apps.
|
||||
|
||||
If you are unfamiliar with PromQL, then it is suggested reading [this tutorial for beginners](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085)
|
||||
and introduction into [basic querying via MetricsQL](https://docs.victoriametrics.com/keyConcepts.html#metricsql).
|
||||
and introduction into [basic querying via MetricsQL](https://docs.victoriametrics.com/keyconcepts/#metricsql).
|
||||
|
||||
The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:
|
||||
|
||||
@@ -70,15 +70,17 @@ The list of MetricsQL features on top of PromQL:
|
||||
VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
|
||||
See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
||||
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
|
||||
depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
|
||||
depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query)
|
||||
and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
|
||||
For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
|
||||
It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
||||
The difference is documented in [rate() docs](#rate).
|
||||
* Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
|
||||
* [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
|
||||
* [Series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
|
||||
selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
|
||||
See [these docs](https://docs.victoriametrics.com/keyConcepts.html#filtering-by-multiple-or-filters) for details.
|
||||
See [these docs](https://docs.victoriametrics.com/keyconcepts/#filtering-by-multiple-or-filters) for details.
|
||||
* Support for matching against multiple numeric constants via `q == (C1, ..., CN)` and `q != (C1, ..., CN)` syntax. For example, `status_code == (300, 301, 304)`
|
||||
returns `status_code` metrics with one of `300`, `301` or `304` values.
|
||||
* Support for `group_left(*)` and `group_right(*)` for copying all the labels from time series on the `one` side
|
||||
of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches).
|
||||
The copied label names may clash with the existing label names, so MetricsQL provides an ability to add prefix to the copied metric names
|
||||
@@ -152,27 +154,27 @@ MetricsQL provides the following functions:
|
||||
|
||||
### Rollup functions
|
||||
|
||||
**Rollup functions** (aka range functions or window functions) calculate rollups over **raw samples**
|
||||
on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
For example, `avg_over_time(temperature[24h])` calculates the average temperature over raw samples for the last 24 hours.
|
||||
**Rollup functions** (aka range functions or window functions) calculate rollups over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window for the [selected time series](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
For example, `avg_over_time(temperature[24h])` calculates the average temperature over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the last 24 hours.
|
||||
|
||||
Additional details:
|
||||
|
||||
* If rollup functions are used for building graphs in Grafana, then the rollup is calculated independently per each point on the graph.
|
||||
For example, every point for `avg_over_time(temperature[24h])` graph shows the average temperature for the last 24 hours ending at this point.
|
||||
The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
* If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
|
||||
The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
* If the given [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) returns multiple time series,
|
||||
then rollups are calculated individually per each returned series.
|
||||
* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
|
||||
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
|
||||
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
|
||||
* Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
|
||||
* Every [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) in MetricsQL must be wrapped into a rollup function.
|
||||
Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
|
||||
is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) is passed to rollup function,
|
||||
then the inner arg is automatically converted to a [subquery](#subqueries).
|
||||
* All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
|
||||
See [these docs](#keep_metric_names).
|
||||
@@ -184,7 +186,7 @@ The list of supported rollup functions:
|
||||
#### absent_over_time
|
||||
|
||||
`absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
|
||||
if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.
|
||||
if the given lookbehind window `d` doesn't contain [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples). Otherwise, it returns an empty result.
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
@@ -193,9 +195,9 @@ See also [present_over_time](#present_over_time).
|
||||
#### aggr_over_time
|
||||
|
||||
`aggr_over_time(("rollup_func1", "rollup_func2", ...), series_selector[d])` is a [rollup function](#rollup-functions),
|
||||
which calculates all the listed `rollup_func*` for raw samples on the given lookbehind window `d`.
|
||||
which calculates all the listed `rollup_func*` for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`.
|
||||
The calculations are performed individually per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
`rollup_func*` can contain any rollup function. For instance, `aggr_over_time(("min_over_time", "max_over_time", "rate"), m[d])`
|
||||
would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time) and [rate](#rate) for `m[d]`.
|
||||
@@ -203,8 +205,8 @@ would calculate [min_over_time](#min_over_time), [max_over_time](#max_over_time)
|
||||
#### ascent_over_time
|
||||
|
||||
`ascent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
|
||||
ascent of raw sample values on the given lookbehind window `d`. The calculations are performed individually
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
ascent of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) values on the given lookbehind window `d`. The calculations are performed individually
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is useful for tracking height gains in GPS tracking. Metric names are stripped from the resulting rollups.
|
||||
|
||||
@@ -215,8 +217,8 @@ See also [descent_over_time](#descent_over_time).
|
||||
#### avg_over_time
|
||||
|
||||
`avg_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average value
|
||||
over raw samples on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
This function is supported by PromQL.
|
||||
|
||||
@@ -225,8 +227,8 @@ See also [median_over_time](#median_over_time).
|
||||
#### changes
|
||||
|
||||
`changes(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
|
||||
the raw samples changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Unlike `changes()` in Prometheus it takes into account the change from the last sample before the given lookbehind window `d`.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
@@ -240,8 +242,8 @@ See also [changes_prometheus](#changes_prometheus).
|
||||
#### changes_prometheus
|
||||
|
||||
`changes_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of times
|
||||
the raw samples changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
the [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) changed on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
It doesn't take into account the change from the last sample before the given lookbehind window `d` in the same way as Prometheus does.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
@@ -254,9 +256,9 @@ See also [changes](#changes).
|
||||
|
||||
#### count_eq_over_time
|
||||
|
||||
`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -264,9 +266,9 @@ See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_ove
|
||||
|
||||
#### count_gt_over_time
|
||||
|
||||
`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -274,9 +276,9 @@ See also [count_over_time](#count_over_time) and [share_gt_over_time](#share_gt_
|
||||
|
||||
#### count_le_over_time
|
||||
|
||||
`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which don't exceed `le`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -284,9 +286,9 @@ See also [count_over_time](#count_over_time) and [share_le_over_time](#share_le_
|
||||
|
||||
#### count_ne_over_time
|
||||
|
||||
`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
`count_ne_over_time(series_selector[d], ne)` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d`, which aren't equal to `ne`. It is calculated independently per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -294,8 +296,8 @@ See also [count_over_time](#count_over_time).
|
||||
|
||||
#### count_over_time
|
||||
|
||||
`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw samples
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`count_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -305,9 +307,9 @@ See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_
|
||||
|
||||
#### count_values_over_time
|
||||
|
||||
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
|
||||
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
|
||||
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -315,8 +317,8 @@ See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values
|
||||
|
||||
#### decreases_over_time
|
||||
|
||||
`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of raw sample value decreases
|
||||
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`decreases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value decreases over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
@@ -324,8 +326,8 @@ See also [increases_over_time](#increases_over_time).
|
||||
|
||||
#### default_rollup
|
||||
|
||||
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
|
||||
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
|
||||
@@ -336,7 +338,7 @@ This allows avoiding unexpected gaps on the graph when `step` is smaller than th
|
||||
|
||||
`delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
|
||||
the last sample before the given lookbehind window `d` and the last sample at the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
The behaviour of `delta()` function in MetricsQL is slightly different to the behaviour of `delta()` function in Prometheus.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
@@ -351,7 +353,7 @@ See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
|
||||
|
||||
`delta_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
|
||||
the first and the last samples at the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
|
||||
The behaviour of `delta_prometheus()` is close to the behaviour of `delta()` function in Prometheus.
|
||||
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
|
||||
@@ -363,7 +365,7 @@ See also [delta](#delta).
|
||||
#### deriv
|
||||
|
||||
`deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative over the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
|
||||
The derivative is calculated using linear regression.
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||

See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).

#### deriv_fast

`deriv_fast(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivative
using the first and the last [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [deriv](#deriv) and [ideriv](#ideriv).

#### descent_over_time

`descent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates descent of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values on the given lookbehind window `d`. The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is useful for tracking height loss in GPS tracking.

See also [ascent_over_time](#ascent_over_time).

#### distinct_over_time

`distinct_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number of unique [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_values_over_time](#count_values_over_time).

#### duration_over_time

`duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds
when time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering) were present
over the given lookbehind window `d`. It is expected that intervals between adjacent samples per each series don't exceed the `max_interval`.
Otherwise, such intervals are considered as gaps and aren't counted.
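
For example, `duration_over_time(up[24h], 5m)` returns the number of seconds during the last day when `up` samples arrived no more than 5 minutes apart, which approximates the time the target was actually scraped.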

See also [lifetime](#lifetime) and [lag](#lag).

#### first_over_time

`first_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the first [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

See also [last_over_time](#last_over_time) and [tfirst_over_time](#tfirst_over_time).

#### geomean_over_time

`geomean_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [geometric mean](https://en.wikipedia.org/wiki/Geometric_mean)
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### histogram_over_time

`histogram_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates
[VictoriaMetrics histogram](https://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`. It is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The resulting histograms are useful to pass to [histogram_quantile](#histogram_quantile) for calculating quantiles
over multiple [gauges](https://docs.victoriametrics.com/keyconcepts/#gauge).
For example, the following query calculates median temperature by country over the last 24 hours:

`histogram_quantile(0.5, sum(histogram_over_time(temperature[24h])) by (vmrange,country))`.

#### holt_winters

`holt_winters(series_selector[d], sf, tf)` is a [rollup function](#rollup-functions), which calculates Holt-Winters value
(aka [double exponential smoothing](https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing)) for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
over the given lookbehind window `d` using the given smoothing factor `sf` and the given trend factor `tf`.
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
returns time series of [gauge type](https://docs.victoriametrics.com/keyconcepts/#gauge).

This function is supported by PromQL.
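
For example, `holt_winters(temperature[1h], 0.5, 0.5)` returns a smoothed estimate of the illustrative `temperature` gauge over the last hour; the `0.5` smoothing and trend factors are arbitrary values to tune per use case.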

See also [range_linear_regression](#range_linear_regression).

#### idelta

`idelta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [delta](#delta).

#### ideriv

`ideriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the per-second derivative based
on the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
over the given lookbehind window `d`. The derivative is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [deriv](#deriv).

#### increase

`increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the increase over the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

Unlike Prometheus, it takes into account the last sample before the given lookbehind window `d` when calculating the result.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
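
For example, `increase(http_requests_total[1h])` returns the number of requests served during the last hour per each matching series, assuming the conventional `http_requests_total` counter.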

See also [increase_pure](#increase_pure) and [increase_prometheus](#increase_prometheus).

#### increase_prometheus

`increase_prometheus(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the increase
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).
It doesn't take into account the last sample before the given lookbehind window `d` when calculating the result in the same way as Prometheus does.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

See also [increase_pure](#increase_pure) and [increase](#increase).

#### increase_pure

`increase_pure(series_selector[d])` is a [rollup function](#rollup-functions), which works the same as [increase](#increase) except
for the following corner case: it assumes that [counters](https://docs.victoriametrics.com/keyconcepts/#counter) always start from 0,
while [increase](#increase) ignores the first value in a series if it is too big.

#### increases_over_time

`increases_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value increases over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [decreases_over_time](#decreases_over_time).

#### integrate

`integrate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the integral over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
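
For example, `integrate(cpu_usage_cores[1h])` returns the approximate number of core-seconds consumed during the last hour, since the integral weighs each sample value by the time interval it covers (the metric name is illustrative).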

#### irate

`irate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the "instant" per-second increase rate over
the last two [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
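
For example, `irate(node_network_receive_bytes_total[5m])` returns the receive bandwidth in bytes per second calculated from the two most recent samples (the metric name is illustrative).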

See also [rate](#rate) and [rollup_rate](#rollup_rate).

#### lag

`lag(series_selector[d])` is a [rollup function](#rollup-functions), which returns the duration in seconds between the last sample
on the given lookbehind window `d` and the timestamp of the current point. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
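
For example, `lag(up[1h]) > 300` can be used for detecting targets which stopped reporting the `up` series more than 300 seconds (5 minutes) ago.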

See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).

#### last_over_time

`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
value on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).

#### lifetime

`lifetime(series_selector[d])` is a [rollup function](#rollup-functions), which returns the duration in seconds between the last and the first sample
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [duration_over_time](#duration_over_time) and [lag](#lag).

#### mad_over_time

`mad_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outlier_iqr_over_time).

#### max_over_time

`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

See also [tmax_over_time](#tmax_over_time).

#### median_over_time

`median_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates median value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

See also [avg_over_time](#avg_over_time).

#### min_over_time

`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.

See also [tmin_over_time](#tmin_over_time).

#### mode_over_time

`mode_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [mode](https://en.wikipedia.org/wiki/Mode_(statistics))
for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`. It is calculated individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). It is expected that [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values are discrete.

#### outlier_iqr_over_time

`outlier_iqr_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last sample on the given lookbehind window `d`
if its value is either smaller than the `q25-1.5*iqr` or bigger than `q75+1.5*iqr` where:

- `iqr` is an [Interquartile range](https://en.wikipedia.org/wiki/Interquartile_range) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the lookbehind window `d`
- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the lookbehind window `d`.

The `outlier_iqr_over_time()` is useful for detecting anomalies in gauge values based on the previous history of values.
For example, `outlier_iqr_over_time(memory_usage_bytes[1h])` triggers when `memory_usage_bytes` suddenly goes outside the usual value range for the last hour.

See also [outliers_iqr](#outliers_iqr).

#### predict_linear

`predict_linear(series_selector[d], t)` is a [rollup function](#rollup-functions), which calculates the value `t` seconds in the future using
linear interpolation over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`.
The predicted value is calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is supported by PromQL.
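
For example, `predict_linear(node_filesystem_free_bytes[6h], 3600)` estimates how much free disk space will remain in one hour, based on the trend of the last 6 hours of samples (the metric name is illustrative).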

See also [range_linear_regression](#range_linear_regression).

#### present_over_time

`present_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1 if there is at least a single [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`. Otherwise, an empty result is returned.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

#### quantile_over_time

`quantile_over_time(phi, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi`-quantile over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The `phi` value must be in the range `[0...1]`.

This function is supported by PromQL.
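
For example, `quantile_over_time(0.9, request_duration_seconds[10m])` returns the 90th percentile over the last 10 minutes of samples of the illustrative `request_duration_seconds` gauge.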

See also [quantiles_over_time](#quantiles_over_time).

#### quantiles_over_time

`quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` is a [rollup function](#rollup-functions), which calculates `phi*`-quantiles
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
The function returns individual series per each `phi*` with `{phiLabel="phi*"}` label. `phi*` values must be in the range `[0...1]`.
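
For example, `quantiles_over_time("quantile", 0.5, 0.9, 0.99, request_duration_seconds[10m])` returns three series per each input series, marked with `quantile="0.5"`, `quantile="0.9"` and `quantile="0.99"` labels (the metric name is illustrative).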

See also [quantile_over_time](#quantile_over_time).

#### range_over_time

`range_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates value range over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
E.g. it calculates `max_over_time(series_selector[d]) - min_over_time(series_selector[d])`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rate

`rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average per-second increase rate
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
and `scrape_interval` is the interval between raw samples for the selected time series.
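
For example, `rate(http_requests_total[5m])` returns the average number of requests per second over the last 5 minutes, assuming the conventional `http_requests_total` counter.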

See also [irate](#irate) and [rollup_rate](#rollup_rate).

#### rate_over_sum

`rate_over_sum(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second rate over the sum of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`. The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### resets

`resets(series_selector[d])` is a [rollup function](#rollup-functions), which returns the number
of [counter](https://docs.victoriametrics.com/keyconcepts/#counter) resets over the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyconcepts/#counter).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
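
For example, `resets(http_requests_total[1d])` returns how many times the counter dropped towards zero during the last day, which usually corresponds to the number of service restarts (the metric name is illustrative).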

This function is supported by PromQL.

#### rollup

`rollup(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `min`, `max` and `avg` values for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).
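
For example, `rollup(temperature[1h])` returns three series per each input series, carrying the hourly minimum, maximum and average of the illustrative `temperature` gauge and distinguished by the `rollup` label.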

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

#### rollup_candlestick

`rollup_candlestick(series_selector[d])` is a [rollup function](#rollup-functions), which calculates `open`, `high`, `low` and `close` values (aka OHLC)
over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns them in time series
with `rollup="open"`, `rollup="high"`, `rollup="low"` and `rollup="close"` additional labels.
The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering). This function is useful for financial applications.
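
For example, `rollup_candlestick(stock_price[5m])` returns OHLC series for the hypothetical `stock_price` gauge at every 5-minute step.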

Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

#### rollup_delta

`rollup_delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates differences between adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated differences
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

See also [rollup_increase](#rollup_increase).

#### rollup_deriv

`rollup_deriv(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second derivatives
for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns `min`, `max` and `avg` values
for the calculated per-second derivatives and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rollup_increase

`rollup_increase(series_selector[d])` is a [rollup function](#rollup-functions), which calculates increases for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated increases
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rollup_rate

`rollup_rate(series_selector[d])` is a [rollup function](#rollup-functions), which calculates per-second change rates
for adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated per-second change rates
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rollup_scrape_interval

`rollup_scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the interval in seconds between
adjacent [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d` and returns `min`, `max` and `avg` values for the calculated interval
and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup="avg"` additional labels.
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result and without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### scrape_interval

`scrape_interval(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the average interval in seconds
between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
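
For example, `scrape_interval(up[1h])` returns the average spacing in seconds between `up` samples over the last hour, which normally matches the configured scrape interval of the target.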

See also [rollup_scrape_interval](#rollup_scrape_interval).

#### share_gt_over_time

`share_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are bigger than `gt`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is useful for calculating SLI and SLO. Example: `share_gt_over_time(up[24h], 0)` - returns service availability for the last 24 hours.

See also [share_le_over_time](#share_le_over_time) and [count_gt_over_time](#count_gt_over_time).

#### share_le_over_time

`share_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are smaller or equal to `le`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

This function is useful for calculating SLI and SLO. Example: `share_le_over_time(memory_usage_bytes[24h], 100*1024*1024)` returns
the share of time series values for the last 24 hours when memory usage was below or equal to 100MB.

See also [share_gt_over_time](#share_gt_over_time) and [count_le_over_time](#count_le_over_time).

#### share_eq_over_time

`share_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which returns share (in the range `[0...1]`)
of [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d`, which are equal to `eq`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_eq_over_time](#count_eq_over_time).

#### stale_samples_over_time

`stale_samples_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number
of [staleness markers](https://docs.victoriametrics.com/vmagent/#prometheus-staleness-markers) on the given lookbehind window `d`
per each time series matching the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### stddev_over_time

`stddev_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard deviation over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [stdvar_over_time](#stdvar_over_time).

#### stdvar_over_time

`stdvar_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates standard variance over [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [stddev_over_time](#stddev_over_time).

#### sum_eq_over_time

`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values equal to `eq` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_eq_over_time](#count_eq_over_time).

#### sum_gt_over_time

`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values bigger than `gt` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_gt_over_time](#count_gt_over_time).

#### sum_le_over_time

`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values smaller or equal to `le` on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_le_over_time](#count_le_over_time).

#### sum_over_time

`sum_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) values
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

#### sum2_over_time

`sum2_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the sum of squares for [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
values on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### timestamp

`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
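
For example, `time() - timestamp(up[1h])` returns the number of seconds elapsed since the last received `up` sample, which is a common way to spot silently stalled targets.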

See also [time](#time) and [now](#now).

#### timestamp_with_name

`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are preserved in the resulting rollups.

See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) modifier.

#### tfirst_over_time

`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the first [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [first_over_time](#first_over_time).

#### tlast_change_over_time

`tlast_change_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last change
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering) on the given lookbehind window `d`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### tmax_over_time

`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
with the maximum value on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
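
For example, `tmax_over_time(temperature[24h])` returns the unix timestamp at which the illustrative `temperature` gauge reached its daily maximum.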
@@ -967,9 +983,10 @@ See also [max_over_time](#max_over_time).

#### tmin_over_time

`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision
for the [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
with the minimum value on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

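As an illustrative sketch (the `temperature` metric is assumed), the result can be combined with [time](#time) to estimate how many seconds ago each series hit its daily minimum:

```metricsql
time() - tmin_over_time(temperature[24h])
```
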
@@ -978,8 +995,8 @@ See also [min_over_time](#min_over_time).
#### zscore_over_time

`zscore_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns [z-score](https://en.wikipedia.org/wiki/Standard_score)
for raw samples on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

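A hedged sketch (assuming a `temperature` metric) for flagging series whose z-score over the last hour exceeds three standard deviations:

```metricsql
zscore_over_time(temperature[1h]) > 3
```
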
@@ -994,7 +1011,7 @@ returned from the rollup `delta(temperature[24h])`.

Additional details:

* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
* All the transform functions accept optional `keep_metric_names` modifier. If it is set,
@@ -1230,7 +1247,7 @@ by replacing all the values bigger or equal to 30 with 40.
#### end

`end()` is a [transform function](#transform-functions), which returns the unix timestamp in seconds for the last point.
It is known as `end` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
It is known as `end` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).

See also [start](#start), [time](#time) and [now](#now).

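For example, a minimal sketch (assuming a `temperature` metric) that selects series whose last raw sample is older than five minutes before the end of the selected time range:

```metricsql
timestamp(temperature) < (end() - 5*60)
```
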
@@ -1653,14 +1670,14 @@ This function is supported by PromQL.

`start()` is a [transform function](#transform-functions), which returns unix timestamp in seconds for the first point.

It is known as `start` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
It is known as `start` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).

See also [end](#end), [time](#time) and [now](#now).

#### step

`step()` is a [transform function](#transform-functions), which returns the step in seconds (aka interval) between the returned points.
It is known as `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
It is known as `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).

See also [start](#start) and [end](#end).

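As a sketch of how these fit together, the number of points returned per series on the selected time range can be estimated with simple arithmetic over these functions:

```metricsql
(end() - start()) / step() + 1
```
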
@@ -1717,7 +1734,7 @@ This function is supported by PromQL.

Additional details:

* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.

@@ -1894,7 +1911,7 @@ Additional details:
and calculate the [count](#count) aggregate function independently per each group, while `count(up) without (instance)`
would group [rollup results](#rollup-functions) by all the labels except `instance` before calculating [count](#count) aggregate function independently per each group.
Multiple labels can be put in `by` and `without` modifiers.
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyconcepts/#filtering),
then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
* Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
@@ -2104,7 +2121,7 @@ See also [quantile](#quantile).
`share(q) by (group_labels)` is an [aggregate function](#aggregate-functions), which returns shares in the range `[0..1]`
for every non-negative point returned by `q` per each timestamp, so the sum of shares per each `group_labels` equals 1.

This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyConcepts.html#histogram) shares
This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyconcepts/#histogram) shares
into `[0..1]` range:

```metricsql
@@ -2208,10 +2225,11 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_
## Subqueries

MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
Any [rollup function](#rollup-functions) for something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) forms a subquery.
Any [rollup function](#rollup-functions) for something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering) forms a subquery.
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
This behavior can be disabled or logged via cmd-line flags `-search.disableImplicitConversion` and `-search.logImplicitConversion` since v1.101.0.

VictoriaMetrics performs subqueries in the following way:

@@ -2219,19 +2237,19 @@ VictoriaMetrics performs subqueries in the following way:
For example, for expression `max_over_time(rate(http_requests_total[5m])[1h:30s])` the inner function `rate(http_requests_total[5m])`
is calculated with `step=30s`. The resulting data points are aligned by the `step`.
* It calculates the outer rollup function over the results of the inner rollup function using the `step` value
passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).

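For example, the explicit subquery below (assuming a standard `http_requests_total` counter) computes the inner `rate` over 5-minute windows at `step=30s`, then takes the peak per-second rate seen during the last hour:

```metricsql
max_over_time(rate(http_requests_total[5m])[1h:30s])
```
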
## Implicit query conversions

VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations:

* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
    for all the [rollup functions](#rollup-functions) except for [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
    For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
  - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
    for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
* All the [series selectors](https://docs.victoriametrics.com/keyconcepts/#filtering),
which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
Examples:
* `foo` is transformed to `default_rollup(foo)`
@@ -2242,6 +2260,7 @@ VictoriaMetrics performs the following implicit conversions for incoming queries
it is [transform function](#transform-functions)
* If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
* If something other than [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
This behavior can be disabled or logged via cmd-line flags `-search.disableImplicitConversion` and `-search.logImplicitConversion` since v1.101.0.
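
Putting the rules above together, a query such as the sketch below is expanded in several implicit steps before evaluation (assuming the standard `up` metric):

```metricsql
# as written:
rate(sum(up))
# after implicit conversions:
rate((sum(default_rollup(up)))[1i:1i])
```
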
@@ -1,10 +1,11 @@
package vlstorage

import (
	"context"
	"flag"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"
@@ -19,19 +20,19 @@ import (
var (
	retentionPeriod = flagutil.NewDuration("retentionPeriod", "7d", "Log entries with timestamps older than now-retentionPeriod are automatically deleted; "+
		"log entries with timestamps outside the retention are also rejected during data ingestion; the minimum supported retention is 1d (one day); "+
		"see https://docs.victoriametrics.com/VictoriaLogs/#retention")
		"see https://docs.victoriametrics.com/victorialogs/#retention")
	futureRetention = flagutil.NewDuration("futureRetention", "2d", "Log entries with timestamps bigger than now+futureRetention are rejected during data ingestion; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/#retention")
		"see https://docs.victoriametrics.com/victorialogs/#retention")
	storageDataPath = flag.String("storageDataPath", "victoria-logs-data", "Path to directory with the VictoriaLogs data; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/#storage")
		"see https://docs.victoriametrics.com/victorialogs/#storage")
	inmemoryDataFlushInterval = flag.Duration("inmemoryDataFlushInterval", 5*time.Second, "The interval for guaranteed saving of in-memory data to disk. "+
		"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
		"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
		"Smaller intervals increase disk IO load. Minimum supported value is 1s")
	logNewStreams = flag.Bool("logNewStreams", false, "Whether to log creation of new streams; this can be useful for debugging of high cardinality issues with log streams; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#stream-fields ; see also -logIngestedRows")
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields ; see also -logIngestedRows")
	logIngestedRows = flag.Bool("logIngestedRows", false, "Whether to log all the ingested log entries; this can be useful for debugging of data ingestion; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/ ; see also -logNewStreams")
		"see https://docs.victoriametrics.com/victorialogs/data-ingestion/ ; see also -logNewStreams")
	minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which "+
		"the storage stops accepting new data")
)
@@ -61,10 +62,16 @@ func Init() {

	var ss logstorage.StorageStats
	strg.UpdateStats(&ss)
	logger.Infof("successfully opened storage in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
		time.Since(startTime).Seconds(), ss.FileParts, ss.FileBlocks, ss.FileRowsCount, ss.CompressedFileSize)
	storageMetrics = initStorageMetrics(strg)
	logger.Infof("successfully opened storage in %.3f seconds; smallParts: %d; bigParts: %d; smallPartBlocks: %d; bigPartBlocks: %d; smallPartRows: %d; bigPartRows: %d; "+
		"smallPartSize: %d bytes; bigPartSize: %d bytes",
		time.Since(startTime).Seconds(), ss.SmallParts, ss.BigParts, ss.SmallPartBlocks, ss.BigPartBlocks, ss.SmallPartRowsCount, ss.BigPartRowsCount,
		ss.CompressedSmallPartSize, ss.CompressedBigPartSize)

	// register storage metrics
	storageMetrics = metrics.NewSet()
	storageMetrics.RegisterMetricsWriter(func(w io.Writer) {
		writeStorageMetrics(w, strg)
	})
	metrics.RegisterSet(storageMetrics)
}

@@ -99,117 +106,92 @@ func MustAddRows(lr *logstorage.LogRows) {
	strg.MustAddRows(lr)
}

// RunQuery runs the given q and calls processBlock for the returned data blocks
func RunQuery(tenantIDs []logstorage.TenantID, q *logstorage.Query, stopCh <-chan struct{}, processBlock func(columns []logstorage.BlockColumn)) {
	strg.RunQuery(tenantIDs, q, stopCh, processBlock)
// RunQuery runs the given q and calls writeBlock for the returned data blocks
func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteBlockFunc) error {
	return strg.RunQuery(ctx, tenantIDs, q, writeBlock)
}

func initStorageMetrics(strg *logstorage.Storage) *metrics.Set {
	ssCache := &logstorage.StorageStats{}
	var ssCacheLock sync.Mutex
	var lastUpdateTime time.Time
// GetFieldNames executes q and returns field names seen in results.
func GetFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	return strg.GetFieldNames(ctx, tenantIDs, q)
}

	m := func() *logstorage.StorageStats {
		ssCacheLock.Lock()
		defer ssCacheLock.Unlock()
		if time.Since(lastUpdateTime) < time.Second {
			return ssCache
		}
		var ss logstorage.StorageStats
		strg.UpdateStats(&ss)
		ssCache = &ss
		lastUpdateTime = time.Now()
		return ssCache
// GetFieldValues executes q and returns unique values for the fieldName seen in results.
//
// If limit > 0, then up to limit unique values are returned.
func GetFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	return strg.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
}

// GetStreamFieldNames executes q and returns stream field names seen in results.
func GetStreamFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	return strg.GetStreamFieldNames(ctx, tenantIDs, q)
}

// GetStreamFieldValues executes q and returns stream field values for the given fieldName seen in results.
//
// If limit > 0, then up to limit unique stream field values are returned.
func GetStreamFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	return strg.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
}

// GetStreams executes q and returns streams seen in query results.
//
// If limit > 0, then up to limit unique streams are returned.
func GetStreams(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	return strg.GetStreams(ctx, tenantIDs, q, limit)
}

func writeStorageMetrics(w io.Writer, strg *logstorage.Storage) {
	var ss logstorage.StorageStats
	strg.UpdateStats(&ss)

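	// ss now holds a point-in-time snapshot of the storage stats; the gauges and counters below are rendered from it on every metrics request.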
	metrics.WriteGaugeUint64(w, fmt.Sprintf(`vl_free_disk_space_bytes{path=%q}`, *storageDataPath), fs.MustGetFreeSpace(*storageDataPath))

	isReadOnly := uint64(0)
	if ss.IsReadOnly {
		isReadOnly = 1
	}
	metrics.WriteGaugeUint64(w, fmt.Sprintf(`vl_storage_is_read_only{path=%q}`, *storageDataPath), isReadOnly)

	ms := metrics.NewSet()
	metrics.WriteGaugeUint64(w, `vl_active_merges{type="storage/inmemory"}`, ss.InmemoryActiveMerges)
	metrics.WriteGaugeUint64(w, `vl_active_merges{type="storage/small"}`, ss.SmallPartActiveMerges)
	metrics.WriteGaugeUint64(w, `vl_active_merges{type="storage/big"}`, ss.BigPartActiveMerges)

	ms.NewGauge(fmt.Sprintf(`vl_free_disk_space_bytes{path=%q}`, *storageDataPath), func() float64 {
		return float64(fs.MustGetFreeSpace(*storageDataPath))
	})
	ms.NewGauge(fmt.Sprintf(`vl_storage_is_read_only{path=%q}`, *storageDataPath), func() float64 {
		if m().IsReadOnly {
			return 1
		}
		return 0
	})
	metrics.WriteCounterUint64(w, `vl_merges_total{type="storage/inmemory"}`, ss.InmemoryMergesTotal)
	metrics.WriteCounterUint64(w, `vl_merges_total{type="storage/small"}`, ss.SmallPartMergesTotal)
	metrics.WriteCounterUint64(w, `vl_merges_total{type="storage/big"}`, ss.BigPartMergesTotal)

	ms.NewGauge(`vl_active_merges{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryActiveMerges)
	})
	ms.NewGauge(`vl_merges_total{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryMergesTotal)
	})
	ms.NewGauge(`vl_active_merges{type="file"}`, func() float64 {
		return float64(m().FileActiveMerges)
	})
	ms.NewGauge(`vl_merges_total{type="file"}`, func() float64 {
		return float64(m().FileMergesTotal)
	})
	metrics.WriteGaugeUint64(w, `vl_storage_rows{type="storage/inmemory"}`, ss.InmemoryRowsCount)
	metrics.WriteGaugeUint64(w, `vl_storage_rows{type="storage/small"}`, ss.SmallPartRowsCount)
	metrics.WriteGaugeUint64(w, `vl_storage_rows{type="storage/big"}`, ss.BigPartRowsCount)

	ms.NewGauge(`vl_storage_rows{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryRowsCount)
	})
	ms.NewGauge(`vl_storage_rows{type="file"}`, func() float64 {
		return float64(m().FileRowsCount)
	})
	ms.NewGauge(`vl_storage_parts{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryParts)
	})
	ms.NewGauge(`vl_storage_parts{type="file"}`, func() float64 {
		return float64(m().FileParts)
	})
	ms.NewGauge(`vl_storage_blocks{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryBlocks)
	})
	ms.NewGauge(`vl_storage_blocks{type="file"}`, func() float64 {
		return float64(m().FileBlocks)
	})
	metrics.WriteGaugeUint64(w, `vl_storage_parts{type="storage/inmemory"}`, ss.InmemoryParts)
	metrics.WriteGaugeUint64(w, `vl_storage_parts{type="storage/small"}`, ss.SmallParts)
	metrics.WriteGaugeUint64(w, `vl_storage_parts{type="storage/big"}`, ss.BigParts)

	ms.NewGauge(`vl_partitions`, func() float64 {
		return float64(m().PartitionsCount)
	})
	ms.NewGauge(`vl_streams_created_total`, func() float64 {
		return float64(m().StreamsCreatedTotal)
	})
	metrics.WriteGaugeUint64(w, `vl_storage_blocks{type="storage/inmemory"}`, ss.InmemoryBlocks)
	metrics.WriteGaugeUint64(w, `vl_storage_blocks{type="storage/small"}`, ss.SmallPartBlocks)
	metrics.WriteGaugeUint64(w, `vl_storage_blocks{type="storage/big"}`, ss.BigPartBlocks)

	ms.NewGauge(`vl_indexdb_rows`, func() float64 {
		return float64(m().IndexdbItemsCount)
	})
	ms.NewGauge(`vl_indexdb_parts`, func() float64 {
		return float64(m().IndexdbPartsCount)
	})
	ms.NewGauge(`vl_indexdb_blocks`, func() float64 {
		return float64(m().IndexdbBlocksCount)
	})
	metrics.WriteGaugeUint64(w, `vl_partitions`, ss.PartitionsCount)
	metrics.WriteCounterUint64(w, `vl_streams_created_total`, ss.StreamsCreatedTotal)

	ms.NewGauge(`vl_data_size_bytes{type="indexdb"}`, func() float64 {
		return float64(m().IndexdbSizeBytes)
	})
	ms.NewGauge(`vl_data_size_bytes{type="storage"}`, func() float64 {
		dm := m()
		return float64(dm.CompressedInmemorySize + dm.CompressedFileSize)
	})
	metrics.WriteGaugeUint64(w, `vl_indexdb_rows`, ss.IndexdbItemsCount)
	metrics.WriteGaugeUint64(w, `vl_indexdb_parts`, ss.IndexdbPartsCount)
	metrics.WriteGaugeUint64(w, `vl_indexdb_blocks`, ss.IndexdbBlocksCount)

	ms.NewGauge(`vl_compressed_data_size_bytes{type="inmemory"}`, func() float64 {
		return float64(m().CompressedInmemorySize)
	})
	ms.NewGauge(`vl_compressed_data_size_bytes{type="file"}`, func() float64 {
		return float64(m().CompressedFileSize)
	})
	ms.NewGauge(`vl_uncompressed_data_size_bytes{type="inmemory"}`, func() float64 {
		return float64(m().UncompressedInmemorySize)
	})
	ms.NewGauge(`vl_uncompressed_data_size_bytes{type="file"}`, func() float64 {
		return float64(m().UncompressedFileSize)
	})
	metrics.WriteGaugeUint64(w, `vl_data_size_bytes{type="indexdb"}`, ss.IndexdbSizeBytes)
	metrics.WriteGaugeUint64(w, `vl_data_size_bytes{type="storage"}`, ss.CompressedInmemorySize+ss.CompressedSmallPartSize+ss.CompressedBigPartSize)

	ms.NewGauge(`vl_rows_dropped_total{reason="too_big_timestamp"}`, func() float64 {
		return float64(m().RowsDroppedTooBigTimestamp)
	})
	ms.NewGauge(`vl_rows_dropped_total{reason="too_small_timestamp"}`, func() float64 {
		return float64(m().RowsDroppedTooSmallTimestamp)
	})
	metrics.WriteGaugeUint64(w, `vl_compressed_data_size_bytes{type="storage/inmemory"}`, ss.CompressedInmemorySize)
	metrics.WriteGaugeUint64(w, `vl_compressed_data_size_bytes{type="storage/small"}`, ss.CompressedSmallPartSize)
	metrics.WriteGaugeUint64(w, `vl_compressed_data_size_bytes{type="storage/big"}`, ss.CompressedBigPartSize)

	return ms
	metrics.WriteGaugeUint64(w, `vl_uncompressed_data_size_bytes{type="storage/inmemory"}`, ss.UncompressedInmemorySize)
	metrics.WriteGaugeUint64(w, `vl_uncompressed_data_size_bytes{type="storage/small"}`, ss.UncompressedSmallPartSize)
	metrics.WriteGaugeUint64(w, `vl_uncompressed_data_size_bytes{type="storage/big"}`, ss.UncompressedBigPartSize)

	metrics.WriteCounterUint64(w, `vl_rows_dropped_total{reason="too_big_timestamp"}`, ss.RowsDroppedTooBigTimestamp)
	metrics.WriteCounterUint64(w, `vl_rows_dropped_total{reason="too_small_timestamp"}`, ss.RowsDroppedTooSmallTimestamp)
}

@@ -88,6 +88,9 @@ vmagent-linux-ppc64le:
vmagent-linux-s390x:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmagent-linux-loong64:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmagent-linux-386:
	APP_NAME=vmagent CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch


@@ -1,3 +1,3 @@
See vmagent docs [here](https://docs.victoriametrics.com/vmagent.html).
See vmagent docs [here](https://docs.victoriametrics.com/vmagent/).

vmagent docs can be edited at [docs/vmagent.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmagent.md).

@@ -3,13 +3,15 @@ package common
import (
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)

// PushCtx is a context used for populating WriteRequest.
type PushCtx struct {
	// WriteRequest contains the WriteRequest, which must be pushed later to remote storage.
	//
	// The actual labels and samples for the time series are stored in Labels and Samples fields.
	WriteRequest prompbmarshal.WriteRequest

	// Labels contains flat list of all the labels used in WriteRequest.
@@ -21,13 +23,7 @@ type PushCtx struct {

// Reset resets ctx.
func (ctx *PushCtx) Reset() {
	tss := ctx.WriteRequest.Timeseries
	for i := range tss {
		ts := &tss[i]
		ts.Labels = nil
		ts.Samples = nil
	}
	ctx.WriteRequest.Timeseries = ctx.WriteRequest.Timeseries[:0]
	ctx.WriteRequest.Reset()

	promrelabel.CleanLabels(ctx.Labels)
	ctx.Labels = ctx.Labels[:0]
@@ -39,15 +35,10 @@ func (ctx *PushCtx) Reset() {
//
// Call PutPushCtx when the ctx is no longer needed.
func GetPushCtx() *PushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			return v.(*PushCtx)
		}
		return &PushCtx{}
	if v := pushCtxPool.Get(); v != nil {
		return v.(*PushCtx)
	}
	return &PushCtx{}
}

// PutPushCtx returns ctx to the pool.
@@ -55,12 +46,7 @@ func GetPushCtx() *PushCtx {
// ctx mustn't be used after returning to the pool.
func PutPushCtx(ctx *PushCtx) {
	ctx.Reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
	pushCtxPool.Put(ctx)
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *PushCtx, cgroup.AvailableCPUs())

@@ -10,7 +10,6 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
@@ -160,25 +159,15 @@ func (ctx *pushCtx) reset() {
}

func getPushCtx() *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			return v.(*pushCtx)
		}
		return &pushCtx{}
	if v := pushCtxPool.Get(); v != nil {
		return v.(*pushCtx)
	}
	return &pushCtx{}
}

func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
	pushCtxPool.Put(ctx)
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())

@@ -24,6 +24,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/prometheusimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/promremotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/statsd"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/vmimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
@@ -36,6 +37,7 @@ import (
	influxserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/influx"
	opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb"
	opentsdbhttpserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp"
	statsdserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/statsd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
@@ -61,6 +63,12 @@ var (
		"See also -graphiteListenAddr.useProxyProtocol")
	graphiteUseProxyProtocol = flag.Bool("graphiteListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -graphiteListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
	statsdListenAddr = flag.String("statsdListenAddr", "", "TCP and UDP address to listen for Statsd plaintext data. Usually :8125 must be set. Doesn't work if empty. "+
		"See also -statsdListenAddr.useProxyProtocol")
	statsdUseProxyProtocol = flag.Bool("statsdListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -statsdListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
	statsdDisableAggregationEnforcemenet = flag.Bool(`statsd.disableAggregationEnforcement`, false, "Whether to disable streaming aggregation requirement check. "+
		"It's recommended to run statsdServer with pre-configured streaming aggregation to decrease load at database.")
	opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpenTSDB metrics. "+
		"Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+
		"Usually :4242 must be set. Doesn't work if empty. See also -opentsdbListenAddr.useProxyProtocol")
@@ -80,6 +88,7 @@ var (
var (
	influxServer       *influxserver.Server
	graphiteServer     *graphiteserver.Server
	statsdServer       *statsdserver.Server
	opentsdbServer     *opentsdbserver.Server
	opentsdbhttpServer *opentsdbhttpserver.Server
)
@@ -137,6 +146,12 @@ func main() {
	if len(*graphiteListenAddr) > 0 {
		graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, *graphiteUseProxyProtocol, graphite.InsertHandler)
	}
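	// statsd ingestion requires stream aggregation to be configured, unless the check is explicitly disabled via -statsd.disableAggregationEnforcement.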
	if len(*statsdListenAddr) > 0 {
		if !remotewrite.HasAnyStreamAggrConfigured() && !*statsdDisableAggregationEnforcemenet {
			logger.Fatalf("streaming aggregation must be configured with enabled statsd server. It's recommended to aggregate metrics received at statsd listener. This check could be disabled with flag -statsd.disableAggregationEnforcement")
		}
		statsdServer = statsdserver.MustStart(*statsdListenAddr, *statsdUseProxyProtocol, statsd.InsertHandler)
	}
	if len(*opentsdbListenAddr) > 0 {
		httpInsertHandler := getOpenTSDBHTTPInsertHandler()
		opentsdbServer = opentsdbserver.MustStart(*opentsdbListenAddr, *opentsdbUseProxyProtocol, opentsdb.InsertHandler, httpInsertHandler)
@@ -172,6 +187,9 @@ func main() {
	if len(*graphiteListenAddr) > 0 {
		graphiteServer.MustStop()
	}
	if len(*statsdListenAddr) > 0 {
		statsdServer.MustStop()
	}
	if len(*opentsdbListenAddr) > 0 {
		opentsdbServer.MustStop()
	}
@@ -225,7 +243,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	}
	w.Header().Add("Content-Type", "text/html; charset=utf-8")
	fmt.Fprintf(w, "<h2>vmagent</h2>")
	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/vmagent.html'>https://docs.victoriametrics.com/vmagent.html</a></br>")
	fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/vmagent/'>https://docs.victoriametrics.com/vmagent/</a></br>")
	fmt.Fprintf(w, "Useful endpoints:</br>")
	httpserver.WriteAPIHelp(w, [][2]string{
		{"targets", "status for discovered active targets"},
@@ -718,7 +736,7 @@ func usage() {
	const s = `
vmagent collects metrics data via popular data ingestion protocols and routes it to VictoriaMetrics.

See the docs at https://docs.victoriametrics.com/vmagent.html .
See the docs at https://docs.victoriametrics.com/vmagent/ .
`
	flagutil.Usage(s)
}

@@ -11,8 +11,11 @@ import (
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
@@ -20,14 +23,13 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
	"github.com/VictoriaMetrics/metrics"
)

var (
	forcePromProto = flagutil.NewArrayBool("remoteWrite.forcePromProto", "Whether to force Prometheus remote write protocol for sending data "+
		"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
		"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")
	forceVMProto = flagutil.NewArrayBool("remoteWrite.forceVMProto", "Whether to force VictoriaMetrics remote write protocol for sending data "+
		"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
		"to the corresponding -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")

	rateLimit = flagutil.NewArrayInt("remoteWrite.rateLimit", 0, "Optional rate limit in bytes per second for data sent to the corresponding -remoteWrite.url. "+
		"By default, the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
@@ -118,7 +120,7 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
		logger.Fatalf("cannot initialize AWS Config for -remoteWrite.url=%q: %s", remoteWriteURL, err)
	}
	tr := &http.Transport{
		DialContext:         statDial,
		DialContext:         httputils.GetStatDialFunc("vmagent_remotewrite"),
		TLSHandshakeTimeout: tlsHandshakeTimeout.GetOptionalArg(argIdx),
		MaxConnsPerHost:     2 * concurrency,
		MaxIdleConnsPerHost: 2 * concurrency,
@@ -164,7 +166,7 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
	useVMProto = common.HandleVMProtoClientHandshake(c.remoteWriteURL, doRequest)
	if !useVMProto {
		logger.Infof("the remote storage at %q doesn't support VictoriaMetrics remote write protocol. Switching to Prometheus remote write protocol. "+
			"See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol", sanitizedURL)
			"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol", sanitizedURL)
	}
}
c.useVMProto = useVMProto
@@ -298,6 +300,11 @@ func (c *client) runWorker() {
	if !ok {
		return
	}
	if len(block) == 0 {
		// skip empty data blocks from sending
		// see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6241
		continue
	}
	go func() {
		startTime := time.Now()
		ch <- c.sendBlock(block)

@@ -28,7 +28,7 @@ var (
	maxRowsPerBlock = flag.Int("remoteWrite.maxRowsPerBlock", 10000, "The maximum number of samples to send in each block to remote storage. Higher number may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxBlockSize")
	vmProtoCompressLevel = flag.Int("remoteWrite.vmProtoCompressLevel", 0, "The compression level for VictoriaMetrics remote write protocol. "+
		"Higher values reduce network traffic at the cost of higher CPU usage. Negative values reduce CPU usage at the cost of increased network traffic. "+
		"See https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol")
		"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")
)

type pendingSeries struct {
@@ -109,9 +109,10 @@ type writeRequest struct {

	wr prompbmarshal.WriteRequest

	tss []prompbmarshal.TimeSeries
	labels []prompbmarshal.Label
	samples []prompbmarshal.Sample
	tss       []prompbmarshal.TimeSeries
	labels    []prompbmarshal.Label
	samples   []prompbmarshal.Sample
	exemplars []prompbmarshal.Exemplar

	// buf holds labels data
	buf []byte
@@ -122,17 +123,14 @@ func (wr *writeRequest) reset() {

	wr.wr.Timeseries = nil

	for i := range wr.tss {
		ts := &wr.tss[i]
		ts.Labels = nil
		ts.Samples = nil
	}
	clear(wr.tss)
	wr.tss = wr.tss[:0]

	promrelabel.CleanLabels(wr.labels)
	wr.labels = wr.labels[:0]

	wr.samples = wr.samples[:0]
	wr.exemplars = wr.exemplars[:0]
	wr.buf = wr.buf[:0]
}

@@ -204,6 +202,7 @@ func (wr *writeRequest) copyTimeSeries(dst, src *prompbmarshal.TimeSeries) {
	labelsDst := wr.labels
	labelsLen := len(wr.labels)
	samplesDst := wr.samples
	exemplarsDst := wr.exemplars
	buf := wr.buf
	for i := range src.Labels {
		labelsDst = append(labelsDst, prompbmarshal.Label{})
@@ -220,8 +219,12 @@ func (wr *writeRequest) copyTimeSeries(dst, src *prompbmarshal.TimeSeries) {
	samplesDst = append(samplesDst, src.Samples...)
	dst.Samples = samplesDst[len(samplesDst)-len(src.Samples):]

	exemplarsDst = append(exemplarsDst, src.Exemplars...)
	dst.Exemplars = exemplarsDst[len(exemplarsDst)-len(src.Exemplars):]

	wr.samples = samplesDst
	wr.labels = labelsDst
	wr.exemplars = exemplarsDst
	wr.buf = buf
}

@@ -233,7 +236,6 @@ func tryPushWriteRequest(wr *prompbmarshal.WriteRequest, tryPushBlock func(block
		// Nothing to push
		return true
	}

	marshalConcurrencyCh <- struct{}{}

	bb := writeRequestBufPool.Get()
@@ -270,6 +272,8 @@ func tryPushWriteRequest(wr *prompbmarshal.WriteRequest, tryPushBlock func(block
	if len(wr.Timeseries) == 1 {
		// A single time series left. Recursively split its samples into smaller parts if possible.
		samples := wr.Timeseries[0].Samples
		exemplars := wr.Timeseries[0].Exemplars

		if len(samples) == 1 {
			logger.Warnf("dropping a sample for metric with too long labels exceeding -remoteWrite.maxBlockSize=%d bytes", maxUnpackedBlockSize.N)
			return true
@@ -281,11 +285,16 @@ func tryPushWriteRequest(wr *prompbmarshal.WriteRequest, tryPushBlock func(block
			return false
		}
		wr.Timeseries[0].Samples = samples[n:]
		// We do not want to send exemplars twice
		wr.Timeseries[0].Exemplars = nil

		if !tryPushWriteRequest(wr, tryPushBlock, isVMRemoteWrite) {
			wr.Timeseries[0].Samples = samples
			wr.Timeseries[0].Exemplars = exemplars
			return false
		}
		wr.Timeseries[0].Samples = samples
		wr.Timeseries[0].Exemplars = exemplars
		return true
	}
	timeseries := wr.Timeseries

@@ -10,8 +10,8 @@ import (

func TestPushWriteRequest(t *testing.T) {
	rowsCounts := []int{1, 10, 100, 1e3, 1e4}
	expectedBlockLensProm := []int{216, 1848, 16424, 169882, 1757876}
	expectedBlockLensVM := []int{138, 492, 3927, 34995, 288476}
	expectedBlockLensProm := []int{248, 1952, 17433, 180381, 1861994}
	expectedBlockLensVM := []int{170, 575, 4748, 44936, 367096}
	for i, rowsCount := range rowsCounts {
		expectedBlockLenProm := expectedBlockLensProm[i]
		expectedBlockLenVM := expectedBlockLensVM[i]
@@ -34,7 +34,7 @@ func testPushWriteRequest(t *testing.T, rowsCount, expectedBlockLenProm, expecte
		return true
	}
	if !tryPushWriteRequest(wr, pushBlock, isVMRemoteWrite) {
		t.Fatalf("cannot push data to to remote storage")
		t.Fatalf("cannot push data to remote storage")
	}
	if math.Abs(float64(pushBlockLen-expectedBlockLen)/float64(expectedBlockLen)*100) > tolerancePrc {
		t.Fatalf("unexpected block len for rowsCount=%d, isVMRemoteWrite=%v; got %d bytes; expecting %d bytes +- %.0f%%",
@@ -59,6 +59,20 @@ func newTestWriteRequest(seriesCount, labelsCount int) *prompbmarshal.WriteReque
				Value: fmt.Sprintf("value_%d_%d", i, j),
			})
		}
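		// A fixed synthetic exemplar is attached to every generated series below, so the expected block sizes above account for exemplar payloads.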
		exemplar := prompbmarshal.Exemplar{
			Labels: []prompbmarshal.Label{
				{
					Name:  "trace_id",
					Value: "123456",
				},
				{
					Name:  "log_id",
					Value: "987654",
				},
			},
			Value:     float64(i),
			Timestamp: 1000 * int64(i),
		}
		wr.Timeseries = append(wr.Timeseries, prompbmarshal.TimeSeries{
			Labels: labels,
			Samples: []prompbmarshal.Sample{
@@ -67,6 +81,10 @@ func newTestWriteRequest(seriesCount, labelsCount int) *prompbmarshal.WriteReque
					Timestamp: 1000 * int64(i),
				},
			},

			Exemplars: []prompbmarshal.Exemplar{
				exemplar,
			},
		})
	}
	return &wr

@@ -19,10 +19,10 @@ var (
	relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabeling configs, which are applied "+
		"to all the metrics before sending them to -remoteWrite.url. See also -remoteWrite.urlRelabelConfig. "+
		"The path can point either to local file or to http url. "+
		"See https://docs.victoriametrics.com/vmagent.html#relabeling")
		"See https://docs.victoriametrics.com/vmagent/#relabeling")
	relabelConfigPaths = flagutil.NewArrayString("remoteWrite.urlRelabelConfig", "Optional path to relabel configs for the corresponding -remoteWrite.url. "+
		"See also -remoteWrite.relabelConfig. The path can point either to local file or to http url. "+
		"See https://docs.victoriametrics.com/vmagent.html#relabeling")
		"See https://docs.victoriametrics.com/vmagent/#relabeling")

	usePromCompatibleNaming = flag.Bool("usePromCompatibleNaming", false, "Whether to replace characters unsupported by Prometheus with underscores "+
		"in the ingested metric names and label names. For example, foo.bar{a.b='c'} is transformed into foo_bar{a_b='c'} during data ingestion if this flag is set. "+
@@ -46,11 +46,11 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
		}
		rcs.global = global
	}
	if len(*relabelConfigPaths) > (len(*remoteWriteURLs) + len(*remoteWriteMultitenantURLs)) {
		return nil, fmt.Errorf("too many -remoteWrite.urlRelabelConfig args: %d; it mustn't exceed the number of -remoteWrite.url or -remoteWrite.multitenantURL args: %d",
			len(*relabelConfigPaths), (len(*remoteWriteURLs) + len(*remoteWriteMultitenantURLs)))
	if len(*relabelConfigPaths) > len(*remoteWriteURLs) {
		return nil, fmt.Errorf("too many -remoteWrite.urlRelabelConfig args: %d; it mustn't exceed the number of -remoteWrite.url args: %d",
			len(*relabelConfigPaths), (len(*remoteWriteURLs)))
	}
	rcs.perURL = make([]*promrelabel.ParsedConfigs, (len(*remoteWriteURLs) + len(*remoteWriteMultitenantURLs)))
	rcs.perURL = make([]*promrelabel.ParsedConfigs, len(*remoteWriteURLs))
	for i, path := range *relabelConfigPaths {
		if len(path) == 0 {
			// Skip empty relabel config.

@@ -29,7 +29,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
@@ -39,22 +38,23 @@ var (
|
||||
"or Prometheus remote_write protocol. Example url: http://<victoriametrics-host>:8428/api/v1/write . "+
|
||||
"Pass multiple -remoteWrite.url options in order to replicate the collected data to multiple remote storage systems. "+
|
||||
"The data can be sharded among the configured remote storage systems if -remoteWrite.shardByURL flag is set")
|
||||
remoteWriteMultitenantURLs = flagutil.NewArrayString("remoteWrite.multitenantURL", "Base path for multitenant remote storage URL to write data to. "+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#multitenancy for details. Example url: http://<vminsert>:8480 . "+
|
||||
"Pass multiple -remoteWrite.multitenantURL flags in order to replicate data to multiple remote storage systems. "+
|
||||
"This flag is deprecated in favor of -enableMultitenantHandlers . See https://docs.victoriametrics.com/vmagent.html#multitenancy")
|
||||
enableMultitenantHandlers = flag.Bool("enableMultitenantHandlers", false, "Whether to process incoming data via multitenant insert handlers according to "+
|
||||
"https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format . By default incoming data is processed via single-node insert handlers "+
|
||||
"https://docs.victoriametrics.com/cluster-victoriametrics/#url-format . By default incoming data is processed via single-node insert handlers "+
|
||||
"according to https://docs.victoriametrics.com/#how-to-import-time-series-data ."+
|
||||
"See https://docs.victoriametrics.com/vmagent.html#multitenancy for details")
|
||||
"See https://docs.victoriametrics.com/vmagent/#multitenancy for details")
|
||||
|
||||
shardByURL = flag.Bool("remoteWrite.shardByURL", false, "Whether to shard outgoing series across all the remote storage systems enumerated via -remoteWrite.url . "+
|
||||
"By default the data is replicated across all the -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages")
|
||||
"By default the data is replicated across all the -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#sharding-among-remote-storages . "+
|
||||
"See also -remoteWrite.shardByURLReplicas")
|
||||
shardByURLReplicas = flag.Int("remoteWrite.shardByURLReplicas", 1, "How many copies of data to make among remote storage systems enumerated via -remoteWrite.url "+
|
||||
"when -remoteWrite.shardByURL is set. See https://docs.victoriametrics.com/vmagent/#sharding-among-remote-storages")
|
||||
shardByURLLabels = flagutil.NewArrayString("remoteWrite.shardByURL.labels", "Optional list of labels, which must be used for sharding outgoing samples "+
|
||||
"among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
|
||||
"even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.ignoreLabels")
|
||||
shardByURLIgnoreLabels = flagutil.NewArrayString("remoteWrite.shardByURL.ignoreLabels", "Optional list of labels, which must be ignored when sharding outgoing samples "+
|
||||
"among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
|
||||
"even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.labels")
|
||||
|
||||
tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory for storing pending data, which isn't sent to the configured -remoteWrite.url . "+
|
||||
"See also -remoteWrite.maxDiskUsagePerURL and -remoteWrite.disableOnDiskQueue")
|
||||
keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
|
||||
@@ -81,63 +81,52 @@ var (
|
||||
`For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}`+
|
||||
`Enabled sorting for labels can slow down ingestion performance a bit`)
|
||||
maxHourlySeries = flag.Int("remoteWrite.maxHourlySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last hour. "+
|
||||
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter")
|
||||
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent/#cardinality-limiter")
|
||||
maxDailySeries = flag.Int("remoteWrite.maxDailySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. "+
|
||||
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter")
|
||||
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent/#cardinality-limiter")
|
||||
maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmagent can receive per second. Data ingestion is paused when the limit is exceeded. "+
|
||||
"By default there are no limits on samples ingestion rate. See also -remoteWrite.rateLimit")
|
||||
|
||||
streamAggrConfig = flagutil.NewArrayString("remoteWrite.streamAggr.config", "Optional path to file with stream aggregation config. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation.html . "+
|
||||
"See also -remoteWrite.streamAggr.keepInput, -remoteWrite.streamAggr.dropInput and -remoteWrite.streamAggr.dedupInterval")
|
||||
streamAggrKeepInput = flagutil.NewArrayBool("remoteWrite.streamAggr.keepInput", "Whether to keep all the input samples after the aggregation "+
|
||||
"with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
|
||||
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop all the input samples after the aggregation "+
|
||||
"with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
|
||||
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation "+
|
||||
"with -remoteWrite.streamAggr.config . See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
|
||||
streamAggrIgnoreOldSamples = flagutil.NewArrayBool("remoteWrite.streamAggr.ignoreOldSamples", "Whether to ignore input samples with old timestamps outside the current aggregation interval "+
|
||||
"for the corresponding -remoteWrite.streamAggr.config . See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples")
|
||||
streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
|
||||
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
|
||||
|
||||
disableOnDiskQueue = flag.Bool("remoteWrite.disableOnDiskQueue", false, "Whether to disable storing pending data to -remoteWrite.tmpDataPath "+
|
||||
"when the configured remote storage systems cannot keep up with the data ingestion rate. See https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence ."+
|
||||
disableOnDiskQueue = flagutil.NewArrayBool("remoteWrite.disableOnDiskQueue", "Whether to disable storing pending data to -remoteWrite.tmpDataPath "+
|
||||
"when the configured remote storage systems cannot keep up with the data ingestion rate. See https://docs.victoriametrics.com/vmagent#disabling-on-disk-persistence ."+
|
||||
"See also -remoteWrite.dropSamplesOnOverload")
|
||||
dropSamplesOnOverload = flag.Bool("remoteWrite.dropSamplesOnOverload", false, "Whether to drop samples when -remoteWrite.disableOnDiskQueue is set and if the samples "+
|
||||
"cannot be pushed into the configured remote storage systems in a timely manner. See https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence")
|
||||
dropSamplesOnOverload = flagutil.NewArrayBool("remoteWrite.dropSamplesOnOverload", "Whether to drop samples when -remoteWrite.disableOnDiskQueue is set and if the samples "+
|
||||
"cannot be pushed into the configured remote storage systems in a timely manner. See https://docs.victoriametrics.com/vmagent#disabling-on-disk-persistence")
|
||||
)
|
||||
|
||||
var (
// rwctxsDefault contains statically populated entries when -remoteWrite.url is specified.
rwctxsDefault []*remoteWriteCtx
// rwctxs contains statically populated entries when -remoteWrite.url is specified.
rwctxs []*remoteWriteCtx

// rwctxsMap contains dynamically populated entries when -remoteWrite.multitenantURL is specified.
rwctxsMap = make(map[tenantmetrics.TenantID][]*remoteWriteCtx)
rwctxsMapLock sync.Mutex

// Data without tenant id is written to defaultAuthToken if -remoteWrite.multitenantURL is specified.
// Data without tenant id is written to defaultAuthToken if -enableMultitenantHandlers is specified.
defaultAuthToken = &auth.Token{}

// ErrQueueFullHTTPRetry must be returned when TryPush() returns false.
ErrQueueFullHTTPRetry = &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("remote storage systems cannot keep up with the data ingestion rate; retry the request later " +
"or remove -remoteWrite.disableOnDiskQueue from vmagent command-line flags, so it could save pending data to -remoteWrite.tmpDataPath; " +
"see https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence"),
"see https://docs.victoriametrics.com/vmagent/#disabling-on-disk-persistence"),
StatusCode: http.StatusTooManyRequests,
}

// disableOnDiskQueueAll is set to true if all remoteWrite.urls were configured to disable persistent queue via disableOnDiskQueue
disableOnDiskQueueAll bool
)
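A minimal sketch of the caller contract for ErrQueueFullHTTPRetry (the wrapper name is invented; the pattern matches the statsd ingestion handler added later in this diff):

func insertHandlerSketch(wr *prompbmarshal.WriteRequest) error {
	// When TryPush cannot buffer the data (persistent queue disabled and
	// remote storage is slow), reply with HTTP 429 so the client retries later.
	if !TryPush(nil, wr) {
		return ErrQueueFullHTTPRetry
	}
	return nil
}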

// MultitenancyEnabled returns true if -enableMultitenantHandlers or -remoteWrite.multitenantURL is specified.
// MultitenancyEnabled returns true if -enableMultitenantHandlers is specified.
func MultitenancyEnabled() bool {
return *enableMultitenantHandlers || len(*remoteWriteMultitenantURLs) > 0
return *enableMultitenantHandlers
}

// Contains the current relabelConfigs.
var allRelabelConfigs atomic.Pointer[relabelConfigs]

// Contains the current global stream aggregators.
var sasGlobal atomic.Pointer[streamaggr.Aggregators]

// Contains the current global deduplicator.
var deduplicatorGlobal *streamaggr.Deduplicator

// maxQueues limits the maximum value for `-remoteWrite.queues`. There is no sense in setting it too high,
// since it may lead to high memory usage due to a big number of buffers.
var maxQueues = cgroup.AvailableCPUs() * 16
@@ -163,11 +152,8 @@ var (
//
// Stop must be called for graceful shutdown.
func Init() {
if len(*remoteWriteURLs) == 0 && len(*remoteWriteMultitenantURLs) == 0 {
logger.Fatalf("at least one `-remoteWrite.url` or `-remoteWrite.multitenantURL` command-line flag must be set")
}
if len(*remoteWriteURLs) > 0 && len(*remoteWriteMultitenantURLs) > 0 {
logger.Fatalf("cannot set both `-remoteWrite.url` and `-remoteWrite.multitenantURL` command-line flags")
if len(*remoteWriteURLs) == 0 {
logger.Fatalf("at least one `-remoteWrite.url` command-line flag must be set")
}
if *maxHourlySeries > 0 {
hourlySeriesLimiter = bloomfilter.NewLimiter(*maxHourlySeries, time.Hour)
@@ -217,9 +203,29 @@ func Init() {
relabelConfigSuccess.Set(1)
relabelConfigTimestamp.Set(fasttime.UnixTimestamp())

if len(*remoteWriteURLs) > 0 {
rwctxsDefault = newRemoteWriteCtxs(nil, *remoteWriteURLs)
sasFile, sasOpts := getStreamAggrOpts(-1)
if sasFile != "" {
sas, err := newStreamAggrConfig(-1, pushToRemoteStoragesDropFailed)
if err != nil {
logger.Fatalf("cannot initialize stream aggregators from -streamAggr.config=%q: %s", sasFile, err)
}
sasGlobal.Store(sas)
} else if sasOpts.DedupInterval > 0 {
deduplicatorGlobal = streamaggr.NewDeduplicator(pushToRemoteStoragesDropFailed, sasOpts.DedupInterval, sasOpts.DropInputLabels)
}

if len(*remoteWriteURLs) > 0 {
rwctxs = newRemoteWriteCtxs(nil, *remoteWriteURLs)
}

disableOnDiskQueueAll = true
for _, v := range *disableOnDiskQueue {
if !v {
disableOnDiskQueueAll = false
break
}
}

dropDanglingQueues()

// Start config reloader.
@@ -242,18 +248,15 @@ func dropDanglingQueues() {
if *keepDanglingQueues {
return
}
if len(*remoteWriteMultitenantURLs) > 0 {
// Do not drop dangling queues for *remoteWriteMultitenantURLs, since it is impossible to determine
// unused queues for multitenant urls - they are created on demand when a new sample for the given
// tenant is pushed to remote storage.
return
}
// Remove dangling persistent queues, if any.
// This is required for the case when the number of queues or the URLs have been changed.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4014
//
existingQueues := make(map[string]struct{}, len(rwctxsDefault))
for _, rwctx := range rwctxsDefault {
// If there were many persistent queues with identical *remoteWriteURLs,
// the queue with the last index will be dropped.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6140
existingQueues := make(map[string]struct{}, len(rwctxs))
for _, rwctx := range rwctxs {
existingQueues[rwctx.fq.Dirname()] = struct{}{}
}

@@ -270,7 +273,7 @@ func dropDanglingQueues() {
}
}
if removed > 0 {
logger.Infof("removed %d dangling queues from %q, active queues: %d", removed, *tmpDataPath, len(rwctxsDefault))
logger.Infof("removed %d dangling queues from %q, active queues: %d", removed, *tmpDataPath, len(rwctxs))
}
}

@@ -297,24 +300,6 @@ var (
relabelConfigTimestamp = metrics.NewCounter(`vmagent_relabel_config_last_reload_success_timestamp_seconds`)
)

func reloadStreamAggrConfigs() {
if len(*remoteWriteMultitenantURLs) > 0 {
rwctxsMapLock.Lock()
for _, rwctxs := range rwctxsMap {
reinitStreamAggr(rwctxs)
}
rwctxsMapLock.Unlock()
} else {
reinitStreamAggr(rwctxsDefault)
}
}

func reinitStreamAggr(rwctxs []*remoteWriteCtx) {
for _, rwctx := range rwctxs {
rwctx.reinitStreamAggr()
}
}

func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
if len(urls) == 0 {
logger.Panicf("BUG: urls must be non-empty")
@@ -338,7 +323,7 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
}
sanitizedURL := fmt.Sprintf("%d:secret-url", i+1)
if at != nil {
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
remoteWriteURL.Path = fmt.Sprintf("%s/insert/%d:%d/prometheus/api/v1/write", remoteWriteURL.Path, at.AccountID, at.ProjectID)
sanitizedURL = fmt.Sprintf("%s:%d:%d", sanitizedURL, at.AccountID, at.ProjectID)
}
@@ -350,8 +335,10 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
return rwctxs
}

var configReloaderStopCh = make(chan struct{})
var configReloaderWG sync.WaitGroup
var (
configReloaderStopCh = make(chan struct{})
configReloaderWG sync.WaitGroup
)

// StartIngestionRateLimiter starts ingestion rate limiter.
//
@@ -389,18 +376,16 @@ func Stop() {
close(configReloaderStopCh)
configReloaderWG.Wait()

for _, rwctx := range rwctxsDefault {
sasGlobal.Load().MustStop()
if deduplicatorGlobal != nil {
deduplicatorGlobal.MustStop()
deduplicatorGlobal = nil
}

for _, rwctx := range rwctxs {
rwctx.MustStop()
}
rwctxsDefault = nil

// There is no need in locking rwctxsMapLock here, since nobody should call TryPush during the Stop call.
for _, rwctxs := range rwctxsMap {
for _, rwctx := range rwctxs {
rwctx.MustStop()
}
}
rwctxsMap = nil
rwctxs = nil

if sl := hourlySeriesLimiter; sl != nil {
sl.MustStop()
@@ -410,30 +395,24 @@
}
}

// PushDropSamplesOnFailure pushes wr to the configured remote storage systems set via -remoteWrite.url and -remoteWrite.multitenantURL
//
// If at is nil, then the data is pushed to the configured -remoteWrite.url.
// If at isn't nil, the data is pushed to the configured -remoteWrite.multitenantURL.
// PushDropSamplesOnFailure pushes wr to the configured remote storage systems set via -remoteWrite.url
//
// PushDropSamplesOnFailure can modify wr contents.
func PushDropSamplesOnFailure(at *auth.Token, wr *prompbmarshal.WriteRequest) {
_ = tryPush(at, wr, true)
}

// TryPush tries sending wr to the configured remote storage systems set via -remoteWrite.url and -remoteWrite.multitenantURL
//
// If at is nil, then the data is pushed to the configured -remoteWrite.url.
// If at isn't nil, the data is pushed to the configured -remoteWrite.multitenantURL.
// TryPush tries sending wr to the configured remote storage systems set via -remoteWrite.url
//
// TryPush can modify wr contents, so the caller must re-initialize wr before calling TryPush() after an unsuccessful attempt.
// TryPush may send partial data from wr on an unsuccessful attempt, so a repeated call for the same wr may send the data multiple times.
//
// The caller must return ErrQueueFullHTTPRetry to the client, which sends wr, if TryPush returns false.
func TryPush(at *auth.Token, wr *prompbmarshal.WriteRequest) bool {
return tryPush(at, wr, *dropSamplesOnOverload)
return tryPush(at, wr, false)
}

func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, dropSamplesOnFailure bool) bool {
func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnFailure bool) bool {
tss := wr.Timeseries

if at == nil && MultitenancyEnabled() {
@@ -442,41 +421,25 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, dropSamplesOnFailur
}

var tenantRctx *relabelCtx
var rwctxs []*remoteWriteCtx
if at == nil {
rwctxs = rwctxsDefault
} else if len(*remoteWriteMultitenantURLs) == 0 {
if at != nil {
// Convert at to (vm_account_id, vm_project_id) labels.
tenantRctx = getRelabelCtx()
defer putRelabelCtx(tenantRctx)
rwctxs = rwctxsDefault
} else {
rwctxsMapLock.Lock()
tenantID := tenantmetrics.TenantID{
AccountID: at.AccountID,
ProjectID: at.ProjectID,
}
rwctxs = rwctxsMap[tenantID]
if rwctxs == nil {
rwctxs = newRemoteWriteCtxs(at, *remoteWriteMultitenantURLs)
rwctxsMap[tenantID] = rwctxs
}
rwctxsMapLock.Unlock()
}

rowsCount := getRowsCount(tss)

if *disableOnDiskQueue {
// Quick check whether writes to configured remote storage systems are blocked.
// This allows saving CPU time spent on relabeling and block compression
// if some of remote storage systems cannot keep up with the data ingestion rate.
// Quick check whether writes to configured remote storage systems are blocked.
// This allows saving CPU time spent on relabeling and block compression
// if some of remote storage systems cannot keep up with the data ingestion rate.
// This shortcut is only applicable if all remote writes have disableOnDiskQueue = true.
if disableOnDiskQueueAll {
for _, rwctx := range rwctxs {
if rwctx.fq.IsWriteBlocked() {
pushFailures.Inc()
if dropSamplesOnFailure {
rwctx.pushFailures.Inc()
if forceDropSamplesOnFailure || rwctx.dropSamplesOnOverload {
// Just drop samples
samplesDropped.Add(rowsCount)
return true
rwctx.rowsDroppedOnPushFailure.Add(rowsCount)
continue
}
return false
}
@@ -495,6 +458,8 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, dropSamplesOnFailur
// Allow up to 10x of labels per each block on average.
maxLabelsPerBlock := 10 * maxSamplesPerBlock

sas := sasGlobal.Load()

for len(tss) > 0 {
// Process big tss in smaller blocks in order to reduce the maximum memory usage
samplesCount := 0
@@ -529,27 +494,31 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, dropSamplesOnFailur
}
sortLabelsIfNeeded(tssBlock)
tssBlock = limitSeriesCardinality(tssBlock)
if !tryPushBlockToRemoteStorages(rwctxs, tssBlock) {
if !*disableOnDiskQueue {
logger.Panicf("BUG: tryPushBlockToRemoteStorages must return true if -remoteWrite.disableOnDiskQueue isn't set")
}
pushFailures.Inc()
if dropSamplesOnFailure {
samplesDropped.Add(rowsCount)
return true
if sas.IsEnabled() {
matchIdxs := matchIdxsPool.Get()
matchIdxs.B = sas.Push(tssBlock, matchIdxs.B)
if !*streamAggrGlobalKeepInput {
tssBlock = dropAggregatedSeries(tssBlock, matchIdxs.B, *streamAggrGlobalDropInput)
}
matchIdxsPool.Put(matchIdxs)
} else if deduplicatorGlobal != nil {
deduplicatorGlobal.Push(tssBlock)
tssBlock = tssBlock[:0]
}
if !tryPushBlockToRemoteStorages(tssBlock, forceDropSamplesOnFailure) {
return false
}
}
return true
}

var (
samplesDropped = metrics.NewCounter(`vmagent_remotewrite_samples_dropped_total`)
pushFailures = metrics.NewCounter(`vmagent_remotewrite_push_failures_total`)
)
func pushToRemoteStoragesDropFailed(tss []prompbmarshal.TimeSeries) {
if tryPushBlockToRemoteStorages(tss, true) {
return
}
}

func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmarshal.TimeSeries) bool {
func tryPushBlockToRemoteStorages(tssBlock []prompbmarshal.TimeSeries, forceDropSamplesOnFailure bool) bool {
if len(tssBlock) == 0 {
// Nothing to push
return true
@@ -557,63 +526,22 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar

if len(rwctxs) == 1 {
// Fast path - just push data to the configured single remote storage
return rwctxs[0].TryPush(tssBlock)
return rwctxs[0].TryPush(tssBlock, forceDropSamplesOnFailure)
}

// We need to push tssBlock to multiple remote storages.
// This is either sharding or replication depending on -remoteWrite.shardByURL command-line flag value.
if *shardByURL {
// Shard the data among rwctxs
tssByURL := make([][]prompbmarshal.TimeSeries, len(rwctxs))
tmpLabels := promutils.GetLabels()
for _, ts := range tssBlock {
hashLabels := ts.Labels
if len(shardByURLLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLLabelsMap[label.Name]; ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
} else if len(shardByURLIgnoreLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
}
h := getLabelsHash(hashLabels)
idx := h % uint64(len(tssByURL))
tssByURL[idx] = append(tssByURL[idx], ts)
if *shardByURL && *shardByURLReplicas < len(rwctxs) {
// Shard tssBlock samples among rwctxs.
replicas := *shardByURLReplicas
if replicas <= 0 {
replicas = 1
}
promutils.PutLabels(tmpLabels)

// Push sharded data to remote storages in parallel in order to reduce
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
var anyPushFailed atomic.Bool
for i, rwctx := range rwctxs {
tssShard := tssByURL[i]
if len(tssShard) == 0 {
continue
}
wg.Add(1)
go func(rwctx *remoteWriteCtx, tss []prompbmarshal.TimeSeries) {
defer wg.Done()
if !rwctx.TryPush(tss) {
anyPushFailed.Store(true)
}
}(rwctx, tssShard)
}
wg.Wait()
return !anyPushFailed.Load()
return tryShardingBlockAmongRemoteStorages(tssBlock, replicas, forceDropSamplesOnFailure)
}

// Replicate data among rwctxs.
// Push block to remote storages in parallel in order to reduce
// Replicate tssBlock samples among rwctxs.
// Push tssBlock to remote storage systems in parallel in order to reduce
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
wg.Add(len(rwctxs))
@@ -621,7 +549,7 @@
for _, rwctx := range rwctxs {
go func(rwctx *remoteWriteCtx) {
defer wg.Done()
if !rwctx.TryPush(tssBlock) {
if !rwctx.TryPush(tssBlock, forceDropSamplesOnFailure) {
anyPushFailed.Store(true)
}
}(rwctx)
@@ -630,6 +558,97 @@
return !anyPushFailed.Load()
}

func tryShardingBlockAmongRemoteStorages(tssBlock []prompbmarshal.TimeSeries, replicas int, forceDropSamplesOnFailure bool) bool {
x := getTSSShards(len(rwctxs))
defer putTSSShards(x)

shards := x.shards
tmpLabels := promutils.GetLabels()
for _, ts := range tssBlock {
hashLabels := ts.Labels
if len(shardByURLLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLLabelsMap[label.Name]; ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
} else if len(shardByURLIgnoreLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
}
h := getLabelsHash(hashLabels)
idx := h % uint64(len(shards))
i := 0
for {
shards[idx] = append(shards[idx], ts)
i++
if i >= replicas {
break
}
idx++
if idx >= uint64(len(shards)) {
idx = 0
}
}
}
promutils.PutLabels(tmpLabels)

// Push sharded samples to remote storage systems in parallel in order to reduce
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
var anyPushFailed atomic.Bool
for i, rwctx := range rwctxs {
shard := shards[i]
if len(shard) == 0 {
continue
}
wg.Add(1)
go func(rwctx *remoteWriteCtx, tss []prompbmarshal.TimeSeries) {
defer wg.Done()
if !rwctx.TryPush(tss, forceDropSamplesOnFailure) {
anyPushFailed.Store(true)
}
}(rwctx, shard)
}
wg.Wait()
return !anyPushFailed.Load()
}

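For intuition, a self-contained sketch (helper and names invented, not from this diff) of the wrap-around replica placement implemented by the inner loop above: each series is appended to `replicas` consecutive shards starting at hash % N.

package main

import "fmt"

// placeReplicas mirrors the placement loop in tryShardingBlockAmongRemoteStorages:
// start at h%n and fill the next `replicas` shard indexes, wrapping at n.
func placeReplicas(h uint64, n, replicas int) []int {
	idx := h % uint64(n)
	out := make([]int, 0, replicas)
	for i := 0; i < replicas; i++ {
		out = append(out, int(idx))
		idx++
		if idx >= uint64(n) {
			idx = 0
		}
	}
	return out
}

func main() {
	// With 3 remote storages and 2 replicas, a series hashing to 2
	// is sent to shards 2 and 0.
	fmt.Println(placeReplicas(2, 3, 2)) // [2 0]
}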
type tssShards struct {
shards [][]prompbmarshal.TimeSeries
}

func getTSSShards(n int) *tssShards {
v := tssShardsPool.Get()
if v == nil {
v = &tssShards{}
}
x := v.(*tssShards)
if cap(x.shards) < n {
x.shards = make([][]prompbmarshal.TimeSeries, n)
}
x.shards = x.shards[:n]
return x
}

func putTSSShards(x *tssShards) {
shards := x.shards
for i := range shards {
clear(shards[i])
shards[i] = shards[i][:0]
}
tssShardsPool.Put(x)
}

var tssShardsPool sync.Pool

// sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set.
func sortLabelsIfNeeded(tss []prompbmarshal.TimeSeries) {
if !*sortLabels {
@@ -726,14 +745,19 @@ type remoteWriteCtx struct {
sas atomic.Pointer[streamaggr.Aggregators]
deduplicator *streamaggr.Deduplicator

streamAggrKeepInput bool
streamAggrDropInput bool
streamAggrKeepInput bool
streamAggrDropInput bool
disableOnDiskQueue bool
dropSamplesOnOverload bool

pss []*pendingSeries
pssNextIdx atomic.Uint64

rowsPushedAfterRelabel *metrics.Counter
rowsDroppedByRelabel *metrics.Counter

pushFailures *metrics.Counter
rowsDroppedOnPushFailure *metrics.Counter
}

func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks int, sanitizedURL string) *remoteWriteCtx {
@@ -749,7 +773,8 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
logger.Warnf("rounding the -remoteWrite.maxDiskUsagePerURL=%d to the minimum supported value: %d", maxPendingBytes, persistentqueue.DefaultChunkFileSize)
maxPendingBytes = persistentqueue.DefaultChunkFileSize
}
fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes, *disableOnDiskQueue)
isPQDisabled := disableOnDiskQueue.GetOptionalArg(argIdx)
fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes, isPQDisabled)
_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{path=%q, url=%q}`, queuePath, sanitizedURL), func() float64 {
return float64(fq.GetPendingBytes())
})
@@ -792,21 +817,20 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
c: c,
pss: pss,

dropSamplesOnOverload: dropSamplesOnOverload.GetOptionalArg(argIdx),
disableOnDiskQueue: isPQDisabled,

rowsPushedAfterRelabel: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_rows_pushed_after_relabel_total{path=%q, url=%q}`, queuePath, sanitizedURL)),
rowsDroppedByRelabel: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_relabel_metrics_dropped_total{path=%q, url=%q}`, queuePath, sanitizedURL)),

pushFailures: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_push_failures_total{path=%q, url=%q}`, queuePath, sanitizedURL)),
rowsDroppedOnPushFailure: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_samples_dropped_total{path=%q, url=%q}`, queuePath, sanitizedURL)),
}

// Initialize sas
sasFile := streamAggrConfig.GetOptionalArg(argIdx)
dedupInterval := streamAggrDedupInterval.GetOptionalArg(argIdx)
ignoreOldSamples := streamAggrIgnoreOldSamples.GetOptionalArg(argIdx)
sasFile, sasOpts := getStreamAggrOpts(argIdx)
if sasFile != "" {
opts := &streamaggr.Options{
DedupInterval: dedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: ignoreOldSamples,
}
sas, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
sas, err := newStreamAggrConfig(argIdx, rwctx.pushInternalTrackDropped)
if err != nil {
logger.Fatalf("cannot initialize stream aggregators from -remoteWrite.streamAggr.config=%q: %s", sasFile, err)
}
@@ -815,8 +839,8 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
rwctx.streamAggrDropInput = streamAggrDropInput.GetOptionalArg(argIdx)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(1)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, sasFile)).Set(fasttime.UnixTimestamp())
} else if dedupInterval > 0 {
rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, *streamAggrDropInputLabels)
} else if sasOpts.DedupInterval > 0 {
rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, sasOpts.DedupInterval, sasOpts.DropInputLabels)
}

return rwctx
@@ -849,7 +873,11 @@ func (rwctx *remoteWriteCtx) MustStop() {
rwctx.rowsDroppedByRelabel = nil
}

func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
// TryPush sends tss series to the configured remote write endpoint
//
// TryPush can be called concurrently for multiple remoteWriteCtx,
// so it shouldn't modify tss entries.
func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries, forceDropSamplesOnFailure bool) bool {
// Apply relabeling
var rctx *relabelCtx
var v *[]prompbmarshal.TimeSeries
@@ -873,7 +901,7 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {

// Apply stream aggregation or deduplication if they are configured
sas := rwctx.sas.Load()
if sas != nil {
if sas.IsEnabled() {
matchIdxs := matchIdxsPool.Get()
matchIdxs.B = sas.Push(tss, matchIdxs.B)
if !rwctx.streamAggrKeepInput {
@@ -888,7 +916,6 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
matchIdxsPool.Put(matchIdxs)
} else if rwctx.deduplicator != nil {
rwctx.deduplicator.Push(tss)
clear(tss)
tss = tss[:0]
}

@@ -902,6 +929,14 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
putRelabelCtx(rctx)
}

if !ok {
rwctx.pushFailures.Inc()
if forceDropSamplesOnFailure || rwctx.dropSamplesOnOverload {
rwctx.rowsDroppedOnPushFailure.Add(len(tss))
return true
}
}

return ok
}

@@ -926,13 +961,13 @@ func (rwctx *remoteWriteCtx) pushInternalTrackDropped(tss []prompbmarshal.TimeSe
if rwctx.tryPushInternal(tss) {
return
}
if !*disableOnDiskQueue {
if !rwctx.disableOnDiskQueue {
logger.Panicf("BUG: tryPushInternal must return true if -remoteWrite.disableOnDiskQueue isn't set")
}
pushFailures.Inc()
if *dropSamplesOnOverload {
rwctx.pushFailures.Inc()
if dropSamplesOnOverload.GetOptionalArg(rwctx.idx) {
rowsCount := getRowsCount(tss)
samplesDropped.Add(rowsCount)
rwctx.rowsDroppedOnPushFailure.Add(rowsCount)
}
}

@@ -962,40 +997,6 @@ func (rwctx *remoteWriteCtx) tryPushInternal(tss []prompbmarshal.TimeSeries) boo
return ok
}

func (rwctx *remoteWriteCtx) reinitStreamAggr() {
sasFile := streamAggrConfig.GetOptionalArg(rwctx.idx)
if sasFile == "" {
// There is no stream aggregation for rwctx
return
}

logger.Infof("reloading stream aggregation configs pointed by -remoteWrite.streamAggr.config=%q", sasFile)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_total{path=%q}`, sasFile)).Inc()
opts := &streamaggr.Options{
DedupInterval: streamAggrDedupInterval.GetOptionalArg(rwctx.idx),
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: streamAggrIgnoreOldSamples.GetOptionalArg(rwctx.idx),
}
sasNew, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
if err != nil {
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_errors_total{path=%q}`, sasFile)).Inc()
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(0)
logger.Errorf("cannot reload stream aggregation config from -remoteWrite.streamAggr.config=%q; continue using the previously loaded config; error: %s", sasFile, err)
return
}
sas := rwctx.sas.Load()
if !sasNew.Equal(sas) {
sasOld := rwctx.sas.Swap(sasNew)
sasOld.MustStop()
logger.Infof("successfully reloaded stream aggregation configs at -remoteWrite.streamAggr.config=%q", sasFile)
} else {
sasNew.MustStop()
logger.Infof("the config at -remoteWrite.streamAggr.config=%q wasn't changed", sasFile)
}
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(1)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, sasFile)).Set(fasttime.UnixTimestamp())
}

var tssPool = &sync.Pool{
New: func() interface{} {
a := []prompbmarshal.TimeSeries{}
@@ -1011,27 +1012,6 @@ func getRowsCount(tss []prompbmarshal.TimeSeries) int {
return rowsCount
}

// CheckStreamAggrConfigs checks configs pointed by -remoteWrite.streamAggr.config
func CheckStreamAggrConfigs() error {
pushNoop := func(_ []prompbmarshal.TimeSeries) {}
for idx, sasFile := range *streamAggrConfig {
if sasFile == "" {
continue
}
opts := &streamaggr.Options{
DedupInterval: streamAggrDedupInterval.GetOptionalArg(idx),
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: streamAggrIgnoreOldSamples.GetOptionalArg(idx),
}
sas, err := streamaggr.LoadFromFile(sasFile, pushNoop, opts)
if err != nil {
return fmt.Errorf("cannot load -remoteWrite.streamAggr.config=%q: %w", sasFile, err)
}
sas.MustStop()
}
return nil
}

func newMapFromStrings(a []string) map[string]struct{} {
m := make(map[string]struct{}, len(a))
for _, s := range a {

215
app/vmagent/remotewrite/remotewrite_test.go
Normal file
@@ -0,0 +1,215 @@
package remotewrite

import (
"fmt"
"math"
"os"
"reflect"
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/metrics"
)

func TestGetLabelsHash_Distribution(t *testing.T) {
f := func(bucketsCount int) {
t.Helper()

// Distribute itemsCount hashes returned by getLabelsHash() across bucketsCount buckets.
itemsCount := 1_000 * bucketsCount
m := make([]int, bucketsCount)
var labels []prompbmarshal.Label
for i := 0; i < itemsCount; i++ {
labels = append(labels[:0], prompbmarshal.Label{
Name: "__name__",
Value: fmt.Sprintf("some_name_%d", i),
})
for j := 0; j < 10; j++ {
labels = append(labels, prompbmarshal.Label{
Name: fmt.Sprintf("label_%d", j),
Value: fmt.Sprintf("value_%d_%d", i, j),
})
}
h := getLabelsHash(labels)
m[h%uint64(bucketsCount)]++
}

// Verify that the distribution is even
expectedItemsPerBucket := itemsCount / bucketsCount
for _, n := range m {
if math.Abs(1-float64(n)/float64(expectedItemsPerBucket)) > 0.04 {
t.Fatalf("unexpected items in the bucket for %d buckets; got %d; want around %d", bucketsCount, n, expectedItemsPerBucket)
}
}
}

f(2)
f(3)
f(4)
f(5)
f(10)
}

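In other words: each bucket is expected to receive about 1,000 items (itemsCount / bucketsCount), and the check allows at most 4% skew, so any bucket count outside the range [960, 1040] fails the distribution test.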
func TestRemoteWriteContext_TryPush_ImmutableTimeseries(t *testing.T) {
f := func(streamAggrConfig, relabelConfig string, dedupInterval time.Duration, keepInput, dropInput bool, input string) {
t.Helper()
perURLRelabel, err := promrelabel.ParseRelabelConfigsData([]byte(relabelConfig))
if err != nil {
t.Fatalf("cannot load relabel configs: %s", err)
}
rcs := &relabelConfigs{
perURL: []*promrelabel.ParsedConfigs{
perURLRelabel,
},
}
allRelabelConfigs.Store(rcs)

pss := make([]*pendingSeries, 1)
pss[0] = newPendingSeries(nil, true, 0, 100)
rwctx := &remoteWriteCtx{
idx: 0,
streamAggrKeepInput: keepInput,
streamAggrDropInput: dropInput,
pss: pss,
rowsPushedAfterRelabel: metrics.GetOrCreateCounter(`foo`),
rowsDroppedByRelabel: metrics.GetOrCreateCounter(`bar`),
}
if dedupInterval > 0 {
rwctx.deduplicator = streamaggr.NewDeduplicator(nil, dedupInterval, nil)
}

if len(streamAggrConfig) > 0 {
f := createFile(t, []byte(streamAggrConfig))
sas, err := streamaggr.LoadFromFile(f.Name(), nil, nil)
if err != nil {
t.Fatalf("cannot load streamaggr configs: %s", err)
}
rwctx.sas.Store(sas)
}

inputTss := mustParsePromMetrics(input)
expectedTss := make([]prompbmarshal.TimeSeries, len(inputTss))

// copy inputTss to make sure it is not mutated during TryPush call
copy(expectedTss, inputTss)
rwctx.TryPush(inputTss, false)

if !reflect.DeepEqual(expectedTss, inputTss) {
t.Fatalf("unexpected samples;\ngot\n%v\nwant\n%v", inputTss, expectedTss)
}
}

f(`
- interval: 1m
outputs: [sum_samples]
- interval: 2m
outputs: [count_series]
`, `
- action: keep
source_labels: [env]
regex: "dev"
`, 0, false, false, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`)
f(``, ``, time.Hour, false, false, `
metric{env="dev"} 10
metric{env="foo"} 20
metric{env="dev"} 15
metric{env="foo"} 25
`)
f(``, `
- action: keep
source_labels: [env]
regex: "dev"
`, time.Hour, false, false, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`)
f(``, `
- action: keep
source_labels: [env]
regex: "dev"
`, time.Hour, true, false, `
metric{env="test"} 10
metric{env="dev"} 20
metric{env="foo"} 15
metric{env="dev"} 25
`)
f(``, `
- action: keep
source_labels: [env]
regex: "dev"
`, time.Hour, false, true, `
metric{env="foo"} 10
metric{env="dev"} 20
metric{env="foo"} 15
metric{env="dev"} 25
`)
f(``, `
- action: keep
source_labels: [env]
regex: "dev"
`, time.Hour, true, true, `
metric{env="dev"} 10
metric{env="test"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`)
}

func mustParsePromMetrics(s string) []prompbmarshal.TimeSeries {
var rows prometheus.Rows
errLogger := func(s string) {
panic(fmt.Errorf("unexpected error when parsing Prometheus metrics: %s", s))
}
rows.UnmarshalWithErrLogger(s, errLogger)
var tss []prompbmarshal.TimeSeries
samples := make([]prompbmarshal.Sample, 0, len(rows.Rows))
for _, row := range rows.Rows {
labels := make([]prompbmarshal.Label, 0, len(row.Tags)+1)
labels = append(labels, prompbmarshal.Label{
Name: "__name__",
Value: row.Metric,
})
for _, tag := range row.Tags {
labels = append(labels, prompbmarshal.Label{
Name: tag.Key,
Value: tag.Value,
})
}
samples = append(samples, prompbmarshal.Sample{
Value: row.Value,
Timestamp: row.Timestamp,
})
ts := prompbmarshal.TimeSeries{
Labels: labels,
Samples: samples[len(samples)-1:],
}
tss = append(tss, ts)
}
return tss
}

func createFile(t *testing.T, data []byte) *os.File {
t.Helper()
f, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
if err := os.WriteFile(f.Name(), data, 0644); err != nil {
t.Fatal(err)
}
if err := f.Sync(); err != nil {
t.Fatal(err)
}
return f
}
@@ -1,92 +0,0 @@
package remotewrite

import (
"context"
"net"
"sync"
"sync/atomic"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/metrics"
)

func getStdDialer() *net.Dialer {
stdDialerOnce.Do(func() {
stdDialer = &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: netutil.TCP6Enabled(),
}
})
return stdDialer
}

var (
stdDialer *net.Dialer
stdDialerOnce sync.Once
)

func statDial(ctx context.Context, _, addr string) (conn net.Conn, err error) {
network := netutil.GetTCPNetwork()
d := getStdDialer()
conn, err = d.DialContext(ctx, network, addr)
dialsTotal.Inc()
if err != nil {
dialErrors.Inc()
return nil, err
}
conns.Inc()
sc := &statConn{
Conn: conn,
}
return sc, nil
}

var (
dialsTotal = metrics.NewCounter(`vmagent_remotewrite_dials_total`)
dialErrors = metrics.NewCounter(`vmagent_remotewrite_dial_errors_total`)
conns = metrics.NewCounter(`vmagent_remotewrite_conns`)
)

type statConn struct {
closed atomic.Int32
net.Conn
}

func (sc *statConn) Read(p []byte) (int, error) {
n, err := sc.Conn.Read(p)
connReadsTotal.Inc()
if err != nil {
connReadErrors.Inc()
}
connBytesRead.Add(n)
return n, err
}

func (sc *statConn) Write(p []byte) (int, error) {
n, err := sc.Conn.Write(p)
connWritesTotal.Inc()
if err != nil {
connWriteErrors.Inc()
}
connBytesWritten.Add(n)
return n, err
}

func (sc *statConn) Close() error {
err := sc.Conn.Close()
if sc.closed.Add(1) == 1 {
conns.Dec()
}
return err
}

var (
connReadsTotal = metrics.NewCounter(`vmagent_remotewrite_conn_reads_total`)
connWritesTotal = metrics.NewCounter(`vmagent_remotewrite_conn_writes_total`)
connReadErrors = metrics.NewCounter(`vmagent_remotewrite_conn_read_errors_total`)
connWriteErrors = metrics.NewCounter(`vmagent_remotewrite_conn_write_errors_total`)
connBytesRead = metrics.NewCounter(`vmagent_remotewrite_conn_bytes_read_total`)
connBytesWritten = metrics.NewCounter(`vmagent_remotewrite_conn_bytes_written_total`)
)
160
app/vmagent/remotewrite/streamaggr.go
Normal file
@@ -0,0 +1,160 @@
package remotewrite

import (
"flag"
"fmt"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/metrics"
)

var (
// Global config
streamAggrGlobalConfig = flag.String("streamAggr.config", "", "Optional path to file with stream aggregation config. "+
"See https://docs.victoriametrics.com/stream-aggregation/ . "+
"See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval")
streamAggrGlobalKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep all the input samples after the aggregation "+
"with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples "+
"are written to remote storages. See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrGlobalDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation "+
"with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples "+
"are written to remote storages. See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrGlobalDedupInterval = flagutil.NewDuration("streamAggr.dedupInterval", "0s", "Input samples are de-duplicated with this interval on "+
"aggregator before optional aggregation with -streamAggr.config . "+
"See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation/#deduplication")
streamAggrGlobalIgnoreOldSamples = flag.Bool("streamAggr.ignoreOldSamples", false, "Whether to ignore input samples with old timestamps outside the "+
"current aggregation interval for aggregator. "+
"See https://docs.victoriametrics.com/stream-aggregation/#ignoring-old-samples")
streamAggrGlobalIgnoreFirstIntervals = flag.Int("streamAggr.ignoreFirstIntervals", 0, "Number of aggregation intervals to skip after the start for "+
"aggregator. Increase this value if you observe incorrect aggregation results after vmagent restarts. It could be caused by receiving unordered delayed data from "+
"clients pushing data into the vmagent. See https://docs.victoriametrics.com/stream-aggregation/#ignore-aggregation-intervals-on-start")
streamAggrGlobalDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples for aggregator "+
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation/#dropping-unneeded-labels")

// Per URL config
streamAggrConfig = flagutil.NewArrayString("remoteWrite.streamAggr.config", "Optional path to file with stream aggregation config. "+
"See https://docs.victoriametrics.com/stream-aggregation/ . "+
"See also -remoteWrite.streamAggr.keepInput, -remoteWrite.streamAggr.dropInput and -remoteWrite.streamAggr.dedupInterval")
streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop all the input samples after the aggregation "+
"with -remoteWrite.streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples "+
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrKeepInput = flagutil.NewArrayBool("remoteWrite.streamAggr.keepInput", "Whether to keep all the input samples after the aggregation "+
"with -remoteWrite.streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples "+
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation "+
"with -remoteWrite.streamAggr.config . See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation/#deduplication")
streamAggrIgnoreOldSamples = flagutil.NewArrayBool("remoteWrite.streamAggr.ignoreOldSamples", "Whether to ignore input samples with old timestamps outside the current "+
"aggregation interval for the corresponding -remoteWrite.streamAggr.config . "+
"See https://docs.victoriametrics.com/stream-aggregation/#ignoring-old-samples")
streamAggrIgnoreFirstIntervals = flag.Int("remoteWrite.streamAggr.ignoreFirstIntervals", 0, "Number of aggregation intervals to skip after the start. Increase this value if "+
"you observe incorrect aggregation results after vmagent restarts. It could be caused by receiving unordered delayed data from clients pushing data into the vmagent. "+
"See https://docs.victoriametrics.com/stream-aggregation/#ignore-aggregation-intervals-on-start")
streamAggrDropInputLabels = flagutil.NewArrayString("remoteWrite.streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation/#dropping-unneeded-labels")
)

// CheckStreamAggrConfigs checks -remoteWrite.streamAggr.config and -streamAggr.config.
func CheckStreamAggrConfigs() error {
pushNoop := func(_ []prompbmarshal.TimeSeries) {}

if _, err := newStreamAggrConfig(-1, pushNoop); err != nil {
return fmt.Errorf("could not load -streamAggr.config stream aggregation config: %w", err)
}
if len(*streamAggrConfig) > len(*remoteWriteURLs) {
return fmt.Errorf("too many -remoteWrite.streamAggr.config args: %d; it mustn't exceed the number of -remoteWrite.url args: %d",
len(*streamAggrConfig), len(*remoteWriteURLs))
}
for idx := range *streamAggrConfig {
if _, err := newStreamAggrConfig(idx, pushNoop); err != nil {
return err
}
}
return nil
}

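A hedged usage sketch (the wrapper function is hypothetical; a startup or dry-run code path would call the check like this) to fail fast on invalid configs instead of discovering the problem at reload time:

func validateStreamAggrConfigsSketch() {
	// Validate both -streamAggr.config and every -remoteWrite.streamAggr.config
	// before ingestion starts.
	if err := CheckStreamAggrConfigs(); err != nil {
		logger.Fatalf("invalid stream aggregation config: %s", err)
	}
}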
// HasAnyStreamAggrConfigured checks if any streaming aggregation config is provided
func HasAnyStreamAggrConfigured() bool {
return len(*streamAggrConfig) > 0 || *streamAggrGlobalConfig != ""
}

func reloadStreamAggrConfigs() {
reloadStreamAggrConfig(-1, pushToRemoteStoragesDropFailed)
for idx, rwctx := range rwctxs {
reloadStreamAggrConfig(idx, rwctx.pushInternalTrackDropped)
}
}

func reloadStreamAggrConfig(idx int, pushFunc streamaggr.PushFunc) {
path, opts := getStreamAggrOpts(idx)
logger.Infof("reloading stream aggregation configs pointed by -remoteWrite.streamAggr.config=%q", path)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_total{path=%q}`, path)).Inc()

sasNew, err := newStreamAggrConfigWithOpts(pushFunc, path, opts)
if err != nil {
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_errors_total{path=%q}`, path)).Inc()
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, path)).Set(0)
logger.Errorf("cannot reload stream aggregation config at %q; continue using the previously loaded config; error: %s", path, err)
return
}

var sas *streamaggr.Aggregators
if idx < 0 {
sas = sasGlobal.Load()
} else {
sas = rwctxs[idx].sas.Load()
}

if !sasNew.Equal(sas) {
var sasOld *streamaggr.Aggregators
if idx < 0 {
sasOld = sasGlobal.Swap(sasNew)
} else {
sasOld = rwctxs[idx].sas.Swap(sasNew)
}
sasOld.MustStop()
logger.Infof("successfully reloaded stream aggregation configs at %q", path)
} else {
sasNew.MustStop()
logger.Infof("the stream aggregation configs at %q were not changed", path)
}
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, path)).Set(1)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, path)).Set(fasttime.UnixTimestamp())
}

func getStreamAggrOpts(idx int) (string, *streamaggr.Options) {
if idx < 0 {
return *streamAggrGlobalConfig, &streamaggr.Options{
DedupInterval: streamAggrGlobalDedupInterval.Duration(),
DropInputLabels: *streamAggrGlobalDropInputLabels,
IgnoreOldSamples: *streamAggrGlobalIgnoreOldSamples,
IgnoreFirstIntervals: *streamAggrGlobalIgnoreFirstIntervals,
}
}
opts := streamaggr.Options{
DedupInterval: streamAggrDedupInterval.GetOptionalArg(idx),
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: streamAggrIgnoreOldSamples.GetOptionalArg(idx),
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
}
if len(*streamAggrConfig) == 0 {
return "", &opts
}
return streamAggrConfig.GetOptionalArg(idx), &opts
}

func newStreamAggrConfigWithOpts(pushFunc streamaggr.PushFunc, path string, opts *streamaggr.Options) (*streamaggr.Aggregators, error) {
if len(path) == 0 {
// Skip empty stream aggregation config.
return nil, nil
}
return streamaggr.LoadFromFile(path, pushFunc, opts)
}

func newStreamAggrConfig(idx int, pushFunc streamaggr.PushFunc) (*streamaggr.Aggregators, error) {
path, opts := getStreamAggrOpts(idx)
return newStreamAggrConfigWithOpts(pushFunc, path, opts)
}
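To make the idx convention concrete, a sketch (illustrative wrapper only, mirroring what Init() does in this diff): idx == -1 selects the global -streamAggr.* flags, while idx >= 0 selects the per-URL -remoteWrite.streamAggr.* values.

func initGlobalAggrSketch() {
	sasFile, opts := getStreamAggrOpts(-1) // -1 => global -streamAggr.* flags
	if sasFile != "" {
		sas, err := newStreamAggrConfig(-1, pushToRemoteStoragesDropFailed)
		if err != nil {
			logger.Fatalf("cannot initialize stream aggregators from -streamAggr.config=%q: %s", sasFile, err)
		}
		sasGlobal.Store(sas)
	} else if opts.DedupInterval > 0 {
		// Deduplicate without aggregating when only a dedup interval is set.
		deduplicatorGlobal = streamaggr.NewDeduplicator(pushToRemoteStoragesDropFailed, opts.DedupInterval, opts.DropInputLabels)
	}
}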
72
app/vmagent/statsd/request_handler.go
Normal file
@@ -0,0 +1,72 @@
package statsd

import (
"io"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/statsd"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/statsd/stream"
"github.com/VictoriaMetrics/metrics"
)

var (
rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="statsd"}`)
rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="statsd"}`)
)

// InsertHandler processes remote write for statsd plaintext protocol.
//
// See https://github.com/statsd/statsd/blob/master/docs/metric_types.md
func InsertHandler(r io.Reader) error {
return stream.Parse(r, false, func(rows []parser.Row) error {
return insertRows(nil, rows)
})
}

func insertRows(at *auth.Token, rows []parser.Row) error {
ctx := common.GetPushCtx()
defer common.PutPushCtx(ctx)

tssDst := ctx.WriteRequest.Timeseries[:0]
labels := ctx.Labels[:0]
samples := ctx.Samples[:0]
for i := range rows {
r := &rows[i]
labelsLen := len(labels)
labels = append(labels, prompbmarshal.Label{
Name: "__name__",
Value: r.Metric,
})
for j := range r.Tags {
tag := &r.Tags[j]
labels = append(labels, prompbmarshal.Label{
Name: tag.Key,
Value: tag.Value,
})
}
samplesLen := len(samples)
for _, v := range r.Values {
samples = append(samples, prompbmarshal.Sample{
Value: v,
Timestamp: r.Timestamp,
})
}

tssDst = append(tssDst, prompbmarshal.TimeSeries{
Labels: labels[labelsLen:],
Samples: samples[samplesLen:],
})
}
ctx.WriteRequest.Timeseries = tssDst
ctx.Labels = labels
ctx.Samples = samples
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
return remotewrite.ErrQueueFullHTTPRetry
}
rowsInserted.Add(len(rows))
rowsPerInsert.Update(float64(len(rows)))
return nil
}
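For context, a minimal sketch of feeding statsd plaintext into this handler. The `name:value|type` line format is the common statsd convention and is assumed here, since the parser itself is not shown in this diff; remotewrite must already be initialized before pushing.

package main

import (
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/statsd"
)

func main() {
	// Two counter samples in statsd plaintext form (assumed syntax).
	input := "http_requests:1|c\nhttp_requests:1|c\n"
	if err := statsd.InsertHandler(strings.NewReader(input)); err != nil {
		panic(err)
	}
}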
@@ -18,6 +18,8 @@ import (
"github.com/VictoriaMetrics/metricsql"
)

var numReg = regexp.MustCompile(`\D?\d*\D?`)

// series holds input_series defined in the test file
type series struct {
Series string `yaml:"series"`
@@ -86,13 +88,12 @@ func writeInputSeries(input []series, interval *promutils.Duration, startStamp t
func parseInputValue(input string, origin bool) ([]sequenceValue, error) {
var res []sequenceValue
items := strings.Split(input, " ")
reg := regexp.MustCompile(`\D?\d*\D?`)
for _, item := range items {
if item == "stale" {
res = append(res, sequenceValue{Value: decimal.StaleNaN})
continue
}
vals := reg.FindAllString(item, -1)
vals := numReg.FindAllString(item, -1)
switch len(vals) {
case 1:
if vals[0] == "_" {

@@ -119,6 +119,9 @@ vmalert-linux-ppc64le:
vmalert-linux-s390x:
APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmalert-linux-loong64:
APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmalert-linux-386:
APP_NAME=vmalert CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

@@ -1,3 +1,3 @@
See vmalert docs [here](https://docs.victoriametrics.com/vmalert.html).
See vmalert docs [here](https://docs.victoriametrics.com/vmalert/).

vmalert docs can be edited at [docs/vmalert.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmalert.md).

@@ -15,8 +15,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("datasource.url", "", "Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. "+
|
||||
"E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend and -datasource.showURL")
|
||||
addr = flag.String("datasource.url", "", "Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect endpoint. Required parameter. "+
|
||||
"Supports address in the form of IP address with a port (e.g., 127.0.0.1:8428) or DNS SRV record. "+
|
||||
"See also -remoteRead.disablePathAppend and -datasource.showURL")
|
||||
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
|
||||
showDatasourceURL = flag.Bool("datasource.showURL", false, "Whether to avoid stripping sensitive information such as auth headers or passwords from URLs in log messages or UI and exported metrics. "+
|
||||
"It is hidden by default, since it can contain sensitive info such as auth key")
|
||||
@@ -56,8 +57,9 @@ var (
|
||||
`Whether to align "time" parameter with evaluation interval. `+
|
||||
"Alignment supposed to produce deterministic results despite number of vmalert replicas or time they were started. "+
|
||||
"See more details at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
|
||||
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
|
||||
disableKeepAlive = flag.Bool("datasource.disableKeepAlive", false, `Whether to disable long-lived connections to the datasource. `+
|
||||
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
|
||||
idleConnectionTimeout = flag.Duration("datasource.idleConnTimeout", 50*time.Second, `Defines a duration for idle (keep-alive connection) to exist. Consider settings this value less to the value of "-http.idleConnTimeout". It must prevent possible "write: broken pipe" and "read: connection reset by peer" errors.`)
|
||||
disableKeepAlive = flag.Bool("datasource.disableKeepAlive", false, `Whether to disable long-lived connections to the datasource. `+
|
||||
`If true, disables HTTP keep-alives and will only use the connection to the server for a single HTTP request.`)
|
||||
roundDigits = flag.Int("datasource.roundDigits", 0, `Adds "round_digits" GET param to datasource requests. `+
|
||||
`In VM "round_digits" limits the number of digits after the decimal point in response values.`)
|
||||
@@ -98,11 +100,13 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||
}
|
||||
tr.DialContext = httputils.GetStatDialFunc("vmalert_datasource")
|
||||
tr.DisableKeepAlives = *disableKeepAlive
|
||||
tr.MaxIdleConnsPerHost = *maxIdleConnections
|
||||
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
|
||||
tr.MaxIdleConns = tr.MaxIdleConnsPerHost
|
||||
}
|
||||
tr.IdleConnTimeout = *idleConnectionTimeout
|
||||
|
||||
if extraParams == nil {
|
||||
extraParams = url.Values{}
|
||||
|
||||
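The hunk above introduces `-datasource.idleConnTimeout` and wires it, together with `-datasource.maxIdleConnections`, into the shared HTTP transport. A minimal standalone sketch of the same tuning on a plain `net/http` transport (flag names shortened for illustration; not the vmalert API):

```go
package main

import (
	"flag"
	"net/http"
	"time"
)

var (
	maxIdleConns    = flag.Int("maxIdleConnections", 100, "idle keep-alive connections per backend host")
	idleConnTimeout = flag.Duration("idleConnTimeout", 50*time.Second, "how long an idle connection may live")
)

// newTunedTransport mirrors the Init logic above: the global idle-connection
// cap must not be lower than the per-host cap, and idle connections should be
// closed before the server side closes them to avoid "broken pipe" errors.
func newTunedTransport() *http.Transport {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.MaxIdleConnsPerHost = *maxIdleConns
	if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
		tr.MaxIdleConns = tr.MaxIdleConnsPerHost
	}
	tr.IdleConnTimeout = *idleConnTimeout
	return tr
}

func main() {
	flag.Parse()
	_ = newTunedTransport()
}
```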
1 app/vmalert/datasource/testdata/instant_response.json vendored Normal file
File diff suppressed because one or more lines are too long
@@ -7,6 +7,10 @@ import (
"net/http"
"strconv"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"

"github.com/valyala/fastjson"
)

var (
@@ -31,27 +35,85 @@ type promResponse struct {
} `json:"stats,omitempty"`
}

// see https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
type promInstant struct {
Result []struct {
Labels map[string]string `json:"metric"`
TV [2]interface{} `json:"value"`
} `json:"result"`
// ms is populated after Unmarshal call
ms []Metric
}

func (r promInstant) metrics() ([]Metric, error) {
result := make([]Metric, len(r.Result))
for i, res := range r.Result {
f, err := strconv.ParseFloat(res.TV[1].(string), 64)
if err != nil {
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
}
var m Metric
m.SetLabels(res.Labels)
m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64)))
m.Values = append(m.Values, f)
result[i] = m
// metrics returns the parsed Metric slice.
// Must be called only after Unmarshal.
func (pi *promInstant) metrics() ([]Metric, error) {
return pi.ms, nil
}

var jsonParserPool fastjson.ParserPool

// Unmarshal unmarshals the given byte slice into promInstant.
// It uses fastjson to reduce the number of allocations compared to
// the standard json.Unmarshal function.
// Response example:
//
// [{"metric":{"__name__":"up","job":"prometheus"},"value": [ 1435781451.781,"1"]},
// {"metric":{"__name__":"up","job":"node"},"value": [ 1435781451.781,"0"]}]
func (pi *promInstant) Unmarshal(b []byte) error {
p := jsonParserPool.Get()
defer jsonParserPool.Put(p)

v, err := p.ParseBytes(b)
if err != nil {
return err
}
return result, nil

rows, err := v.Array()
if err != nil {
return fmt.Errorf("cannot find the top-level array of result objects: %w", err)
}
pi.ms = make([]Metric, len(rows))
for i, row := range rows {
metric := row.Get("metric")
if metric == nil {
return fmt.Errorf("can't find `metric` object in %q", row)
}
labels := metric.GetObject()

r := &pi.ms[i]
r.Labels = make([]Label, 0, labels.Len())
labels.Visit(func(key []byte, v *fastjson.Value) {
lv, errLocal := v.StringBytes()
if errLocal != nil {
err = fmt.Errorf("error when parsing label value %q: %s", v, errLocal)
return
}
r.Labels = append(r.Labels, Label{
Name: string(key),
Value: string(lv),
})
})
if err != nil {
return fmt.Errorf("error when parsing `metric` object in %q: %w", row, err)
}

value := row.Get("value")
if value == nil {
return fmt.Errorf("can't find `value` object in %q", row)
}
sample := value.GetArray()
if len(sample) != 2 {
return fmt.Errorf("object `value` in %q should contain 2 values, but contains %d instead", row, len(sample))
}
r.Timestamps = []int64{sample[0].GetInt64()}
val, err := sample[1].StringBytes()
if err != nil {
return fmt.Errorf("error when parsing `value` object %q: %s", sample[1], err)
}
f, err := strconv.ParseFloat(bytesutil.ToUnsafeString(val), 64)
if err != nil {
return fmt.Errorf("error when parsing float64 from %s in %q: %w", sample[1], row, err)
}
r.Values = []float64{f}
}
return nil
}

type promRange struct {
@@ -118,7 +180,7 @@ func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result
switch r.Data.ResultType {
case rtVector:
var pi promInstant
if err := json.Unmarshal(r.Data.Result, &pi.Result); err != nil {
if err := pi.Unmarshal(r.Data.Result); err != nil {
return res, fmt.Errorf("unmarshal err %w; \n %#v", err, string(r.Data.Result))
}
parseFn = pi.metrics
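For reference, a minimal usage sketch of the new parser (it lives in the same `datasource` package and relies on the `promInstant` and `Metric` types from the hunks above; the payload is illustrative):

```go
func exampleParseInstant() ([]Metric, error) {
	payload := []byte(`[{"metric":{"__name__":"up","job":"prometheus"},"value":[1583780000,"1"]}]`)
	var pi promInstant
	// Unmarshal does all the parsing work using pooled fastjson parsers.
	if err := pi.Unmarshal(payload); err != nil {
		return nil, err
	}
	// metrics() is now a cheap accessor over the slice populated by Unmarshal.
	return pi.metrics()
}
```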
@@ -1,20 +1,73 @@
package datasource

import (
"encoding/json"
"reflect"
"testing"
)

func BenchmarkMetrics(b *testing.B) {
payload := []byte(`[{"metric":{"__name__":"vm_rows"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests", "foo":"bar", "baz": "qux"},"value":[1583786140,"2000"]}]`)

var pi promInstant
if err := json.Unmarshal(payload, &pi.Result); err != nil {
b.Fatalf(err.Error())
}
b.Run("Instant", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = pi.metrics()
func TestPromInstant_UnmarshalPositive(t *testing.T) {
f := func(data string, exp []Metric) {
t.Helper()
var pi promInstant
err := pi.Unmarshal([]byte(data))
if err != nil {
t.Fatalf("unexpected unmarshal err %v; \n %v", err, string(data))
}
got, _ := pi.metrics()
if !reflect.DeepEqual(got, exp) {
t.Fatalf("expected to get:\n%v\ngot instead:\n%v", exp, got)
}
}

f(`[{"metric":{"__name__":"up"},"value":[1583780000,"42"]}]`, []Metric{
{
Labels: []Label{{Name: "__name__", Value: "up"}},
Timestamps: []int64{1583780000},
Values: []float64{42},
},
})
f(`[
{"metric":{"__name__":"up"},"value":[1583780000,"42"]},
{"metric":{"__name__":"foo"},"value":[1583780001,"7"]},
{"metric":{"__name__":"baz", "instance":"bar"},"value":[1583780002,"8"]}]`, []Metric{
{
Labels: []Label{{Name: "__name__", Value: "up"}},
Timestamps: []int64{1583780000},
Values: []float64{42},
},
{
Labels: []Label{{Name: "__name__", Value: "foo"}},
Timestamps: []int64{1583780001},
Values: []float64{7},
},
{
Labels: []Label{{Name: "__name__", Value: "baz"}, {Name: "instance", Value: "bar"}},
Timestamps: []int64{1583780002},
Values: []float64{8},
},
})
}

func TestPromInstant_UnmarshalNegative(t *testing.T) {
f := func(data string) {
t.Helper()
var pi promInstant
err := pi.Unmarshal([]byte(data))
if err == nil {
t.Fatalf("expected to get an error; got nil instead")
}
}
f(``)
f(`foo`)
f(`[{"metric":{"__name__":"up"},"value":[1583780000,"42"]},`)
f(`[{"metric":{"__name__"},"value":[1583780000,"42"]},`)
// no `metric` object
f(`[{"value":[1583780000,"42"]}]`)
// no `value` object
f(`[{"metric":{"__name__":"up"}}]`)
// less than 2 values in `value` object
f(`[{"metric":{"__name__":"up"},"value":["42"]}]`)
f(`[{"metric":{"__name__":"up"},"value":[1583780000]}]`)
// non-numeric sample value
f(`[{"metric":{"__name__":"up"},"value":[1583780000,"foo"]}]`)
}
43 app/vmalert/datasource/vm_prom_api_timing_test.go Normal file
@@ -0,0 +1,43 @@
package datasource

import (
"bytes"
"io"
"net/http"
"os"
"testing"
)

func BenchmarkMetrics(b *testing.B) {
payload := []byte(`[{"metric":{"__name__":"vm_rows"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests", "foo":"bar", "baz": "qux"},"value":[1583786140,"2000"]}]`)

var pi promInstant
if err := pi.Unmarshal(payload); err != nil {
b.Fatalf(err.Error())
}
b.Run("Instant", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, _ = pi.metrics()
}
})
}

func BenchmarkParsePrometheusResponse(b *testing.B) {
req, _ := http.NewRequest("GET", "", nil)
resp := &http.Response{StatusCode: http.StatusOK}
data, err := os.ReadFile("testdata/instant_response.json")
if err != nil {
b.Fatalf("error while reading file: %s", err)
}
resp.Body = io.NopCloser(bytes.NewReader(data))

b.Run("Instant", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := parsePrometheusResponse(req, resp)
if err != nil {
b.Fatalf("unexpected parse err: %s", err)
}
resp.Body = io.NopCloser(bytes.NewReader(data))
}
})
}
@@ -44,7 +44,7 @@ Enterprise version of vmalert supports S3 and GCS paths to rules.
For example: gs://bucket/path/to/rules, s3://bucket/path/to/rules
S3 and GCS paths support only matching by prefix, e.g. s3://bucket/dir/rule_ matches
all files with prefix rule_ in folder dir.
See https://docs.victoriametrics.com/vmalert.html#reading-rules-from-object-storage
See https://docs.victoriametrics.com/vmalert/#reading-rules-from-object-storage
`)

ruleTemplatesPath = flagutil.NewArrayString("rule.templates", `Path or glob pattern to location with go template definitions `+
@@ -71,7 +71,7 @@ absolute path to all .tpl files in root.
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.")
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
`Supports templating - see https://docs.victoriametrics.com/vmalert/#templating . `+
`For example, link to Grafana: -external.alert.source='explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'. `+
`Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. `+
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
@@ -319,7 +319,7 @@ func usage() {
const s = `
vmalert processes alerts and recording rules.

See the docs at https://docs.victoriametrics.com/vmalert.html .
See the docs at https://docs.victoriametrics.com/vmalert/ .
`
flagutil.Usage(s)
}

@@ -15,7 +15,8 @@ var (
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect."+
"Remote read is used to restore alerts state."+
"This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has successfully persisted its state. "+
"E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.")
"Supports address in the form of IP address with a port (e.g., 127.0.0.1:8428) or DNS SRV record. "+
"See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.")

showRemoteReadURL = flag.Bool("remoteRead.showURL", false, "Whether to show -remoteRead.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")
@@ -65,6 +66,7 @@ func Init() (datasource.QuerierBuilder, error) {
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}
tr.DialContext = httputils.GetStatDialFunc("vmalert_remoteread")

endpointParams, err := flagutil.ParseJSONMap(*oauth2EndpointParams)
if err != nil {

@@ -13,7 +13,9 @@ import (

var (
addr = flag.String("remoteWrite.url", "", "Optional URL to VictoriaMetrics or vminsert where to persist alerts state "+
"and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, "+
"and recording rules results in form of timeseries. "+
"Supports address in the form of IP address with a port (e.g., 127.0.0.1:8428) or DNS SRV record. "+
"For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, "+
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.")
showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")
@@ -69,6 +71,7 @@ func Init(ctx context.Context) (*Client, error) {
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}
t.DialContext = httputils.GetStatDialFunc("vmalert_remotewrite")

endpointParams, err := flagutil.ParseJSONMap(*oauth2EndpointParams)
if err != nil {
@@ -314,23 +314,20 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
return nil, fmt.Errorf("`query` template isn't supported in replay mode")
}
for _, s := range res.Data {
ls, err := ar.toLabels(s, qFn)
ls, as, err := ar.expandTemplates(s, qFn, time.Time{})
if err != nil {
return nil, fmt.Errorf("failed to expand labels: %s", err)
}
h := hash(ls.processed)
a, err := ar.newAlert(s, nil, time.Time{}, qFn) // initial alert
if err != nil {
return nil, fmt.Errorf("failed to create alert: %w", err)
return nil, fmt.Errorf("failed to expand templates: %s", err)
}
alertID := hash(ls.processed)
a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert

prevT := time.Time{}
for i := range s.Values {
at := time.Unix(s.Timestamps[i], 0)
// try to restore alert's state on the first iteration
if at.Equal(start) {
if _, ok := ar.alerts[h]; ok {
a = ar.alerts[h]
if _, ok := ar.alerts[alertID]; ok {
a = ar.alerts[alertID]
prevT = at
}
}
@@ -352,7 +349,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]

// save alert's state on last iteration, so it can be used on the next execRange call
if at.Equal(end) {
holdAlertState[h] = a
holdAlertState[alertID] = a
}
}
}
@@ -386,15 +383,34 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
}
}()

ar.alertsMu.Lock()
defer ar.alertsMu.Unlock()

if err != nil {
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
}

ar.logDebugf(ts, nil, "query returned %d samples (elapsed: %s)", curState.Samples, curState.Duration)

qFn := func(query string) ([]datasource.Metric, error) {
res, _, err := ar.q.Query(ctx, query, ts)
return res.Data, err
}

// template labels and annotations before updating ar.alerts,
// since they could use `query` function which takes a while to execute,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6079.
expandedLabels := make([]*labelSet, len(res.Data))
expandedAnnotations := make([]map[string]string, len(res.Data))
for i, m := range res.Data {
ls, as, err := ar.expandTemplates(m, qFn, ts)
if err != nil {
curState.Err = fmt.Errorf("failed to expand templates: %w", err)
return nil, curState.Err
}
expandedLabels[i] = ls
expandedAnnotations[i] = as
}

ar.alertsMu.Lock()
defer ar.alertsMu.Unlock()

for h, a := range ar.alerts {
// cleanup inactive alerts from previous Exec
if a.State == notifier.StateInactive && ts.Sub(a.ResolvedAt) > resolvedRetention {
@@ -403,26 +419,18 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
}
}

qFn := func(query string) ([]datasource.Metric, error) {
res, _, err := ar.q.Query(ctx, query, ts)
return res.Data, err
}
updated := make(map[uint64]struct{})
// update list of active alerts
for _, m := range res.Data {
ls, err := ar.toLabels(m, qFn)
if err != nil {
curState.Err = fmt.Errorf("failed to expand labels: %w", err)
return nil, curState.Err
}
h := hash(ls.processed)
if _, ok := updated[h]; ok {
for i, m := range res.Data {
labels, annotations := expandedLabels[i], expandedAnnotations[i]
alertID := hash(labels.processed)
if _, ok := updated[alertID]; ok {
// duplicate may be caused by the removal of `__name__` label
curState.Err = fmt.Errorf("labels %v: %w", ls.processed, errDuplicate)
curState.Err = fmt.Errorf("labels %v: %w", labels.processed, errDuplicate)
return nil, curState.Err
}
updated[h] = struct{}{}
if a, ok := ar.alerts[h]; ok {
updated[alertID] = struct{}{}
if a, ok := ar.alerts[alertID]; ok {
if a.State == notifier.StateInactive {
// alert could be in inactive state for resolvedRetention
// so when we again receive metrics for it - we switch it
@@ -432,22 +440,17 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
ar.logDebugf(ts, a, "INACTIVE => PENDING")
}
a.Value = m.Values[0]
// re-exec template since Value or query can be used in annotations
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
a.Annotations = annotations
if err != nil {
return nil, err
}
a.KeepFiringSince = time.Time{}
continue
}
a, err := ar.newAlert(m, ls, ts, qFn)
if err != nil {
curState.Err = fmt.Errorf("failed to create alert: %w", err)
return nil, curState.Err
}
a.ID = h
a := ar.newAlert(m, ts, labels.processed, annotations)
a.ID = alertID
a.State = notifier.StatePending
ar.alerts[h] = a
ar.alerts[alertID] = a
ar.logDebugf(ts, a, "created in state PENDING")
}
var numActivePending int
@@ -497,6 +500,28 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
return ar.toTimeSeries(ts.Unix()), nil
}

func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.QueryFn, ts time.Time) (*labelSet, map[string]string, error) {
ls, err := ar.toLabels(m, qFn)
if err != nil {
return nil, nil, fmt.Errorf("failed to expand labels: %w", err)
}

tplData := notifier.AlertTplData{
Value: m.Values[0],
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: ts,
For: ar.For,
}
as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
if err != nil {
return nil, nil, fmt.Errorf("failed to template annotations: %w", err)
}
return ls, as, nil
}
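The exec rewrite above moves template expansion in front of the alertsMu critical section, since templates may call `query` and block for a while (see issue 6079 referenced in the hunk). The general shape of that change, as a hedged standalone sketch (illustrative names, not the vmalert API):

```go
package main

import "sync"

// expandThenLock precomputes expensive per-item results outside the lock,
// then applies them to shared state in one short critical section.
func expandThenLock(items []string, expand func(string) (string, error),
	mu *sync.Mutex, state map[string]string) error {
	expanded := make([]string, len(items))
	for i, it := range items {
		v, err := expand(it) // may be slow, e.g. a `query` template call
		if err != nil {
			return err
		}
		expanded[i] = v
	}
	mu.Lock() // the lock is now held only for the cheap map updates
	defer mu.Unlock()
	for i, it := range items {
		state[it] = expanded[i]
	}
	return nil
}

func main() {
	state := map[string]string{}
	var mu sync.Mutex
	_ = expandThenLock([]string{"a", "b"},
		func(s string) (string, error) { return s + "!", nil }, &mu, state)
}
```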
func (ar *AlertingRule) toTimeSeries(timestamp int64) []prompbmarshal.TimeSeries {
var tss []prompbmarshal.TimeSeries
for _, a := range ar.alerts {
@@ -530,25 +555,25 @@ func hash(labels map[string]string) uint64 {
return hash.Sum64()
}

func (ar *AlertingRule) newAlert(m datasource.Metric, ls *labelSet, start time.Time, qFn templates.QueryFn) (*notifier.Alert, error) {
var err error
if ls == nil {
ls, err = ar.toLabels(m, qFn)
if err != nil {
return nil, fmt.Errorf("failed to expand labels: %w", err)
}
func (ar *AlertingRule) newAlert(m datasource.Metric, start time.Time, labels, annotations map[string]string) *notifier.Alert {
as := make(map[string]string)
if annotations != nil {
as = annotations
}
a := &notifier.Alert{
GroupID: ar.GroupID,
Name: ar.Name,
Labels: ls.processed,
Value: m.Values[0],
ActiveAt: start,
Expr: ar.Expr,
For: ar.For,
ls := make(map[string]string)
if labels != nil {
ls = labels
}
return &notifier.Alert{
GroupID: ar.GroupID,
Name: ar.Name,
Expr: ar.Expr,
For: ar.For,
ActiveAt: start,
Value: m.Values[0],
Labels: ls,
Annotations: as,
}
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
return a, err
}

const (
@@ -604,9 +629,6 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
return nil
}

ar.alertsMu.Lock()
defer ar.alertsMu.Unlock()

if len(ar.alerts) < 1 {
return nil
}
@@ -631,6 +653,10 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
ar.logDebugf(ts, nil, "no response was received from restore query")
return nil
}

ar.alertsMu.Lock()
defer ar.alertsMu.Unlock()

for _, series := range res.Data {
series.DelLabel("__name__")
labelSet := make(map[string]string, len(series.Labels))

@@ -9,10 +9,11 @@ import (
"hash/fnv"
"net/url"
"strconv"
"strings"
"sync"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"

"github.com/cheggaaa/pb/v3"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
@@ -724,13 +725,19 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
return errGr.Err()
}

var bbPool bytesutil.ByteBufferPool

// getStaleSeries checks whether there are stale series from previously sent ones.
func (e *executor) getStaleSeries(r Rule, tss []prompbmarshal.TimeSeries, timestamp time.Time) []prompbmarshal.TimeSeries {
bb := bbPool.Get()
defer bbPool.Put(bb)

ruleLabels := make(map[string][]prompbmarshal.Label, len(tss))
for _, ts := range tss {
// convert labels to strings so we can compare with previously sent series
key := labelsToString(ts.Labels)
ruleLabels[key] = ts.Labels
// convert labels to strings, so we can compare with previously sent series
bb.B = labelsToString(bb.B, ts.Labels)
ruleLabels[string(bb.B)] = ts.Labels
bb.Reset()
}

rID := r.ID()
@@ -776,21 +783,20 @@ func (e *executor) purgeStaleSeries(activeRules []Rule) {
e.previouslySentSeriesToRWMu.Unlock()
}

func labelsToString(labels []prompbmarshal.Label) string {
var b strings.Builder
b.WriteRune('{')
func labelsToString(dst []byte, labels []prompbmarshal.Label) []byte {
dst = append(dst, '{')
for i, label := range labels {
if len(label.Name) == 0 {
b.WriteString("__name__")
dst = append(dst, "__name__"...)
} else {
b.WriteString(label.Name)
dst = append(dst, label.Name...)
}
b.WriteRune('=')
b.WriteString(strconv.Quote(label.Value))
dst = append(dst, '=')
dst = strconv.AppendQuote(dst, label.Value)
if i < len(labels)-1 {
b.WriteRune(',')
dst = append(dst, ',')
}
}
b.WriteRune('}')
return b.String()
dst = append(dst, '}')
return dst
}
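The rewritten labelsToString appends into a caller-supplied buffer instead of building a fresh string per series; combined with the ByteBufferPool above, the per-call allocation shrinks to the one unavoidable map-key copy. A hedged standalone sketch of the same pattern (simplified Label type, not the prompbmarshal one):

```go
package main

import (
	"fmt"
	"strconv"
)

// Label is a simplified stand-in for prompbmarshal.Label.
type Label struct {
	Name, Value string
}

// appendLabelsKey writes a canonical `{name="value",...}` form into dst.
// Reusing dst across calls avoids a strings.Builder allocation per series.
func appendLabelsKey(dst []byte, labels []Label) []byte {
	dst = append(dst, '{')
	for i, l := range labels {
		dst = append(dst, l.Name...)
		dst = append(dst, '=')
		dst = strconv.AppendQuote(dst, l.Value)
		if i < len(labels)-1 {
			dst = append(dst, ',')
		}
	}
	return append(dst, '}')
}

func main() {
	var buf []byte
	seen := map[string][]Label{}
	for _, ls := range [][]Label{{{"job", "vmalert"}}, {{"job", "vmagent"}}} {
		buf = appendLabelsKey(buf[:0], ls)
		seen[string(buf)] = ls // string(buf) makes the one necessary copy
	}
	fmt.Println(len(seen))
}
```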
@@ -217,20 +217,21 @@ func TestGroupStart(t *testing.T) {

const evalInterval = time.Millisecond
g := NewGroup(groups[0], fs, evalInterval, map[string]string{"cluster": "east-1"})
g.Concurrency = 2

const inst1, inst2, job = "foo", "bar", "baz"
m1 := metricWithLabels(t, "instance", inst1, "job", job)
m2 := metricWithLabels(t, "instance", inst2, "job", job)

r := g.Rules[0].(*AlertingRule)
alert1, err := r.newAlert(m1, nil, time.Now(), nil)
if err != nil {
t.Fatalf("faield to create alert: %s", err)
}
alert1 := r.newAlert(m1, time.Now(), nil, nil)
alert1.State = notifier.StateFiring
// add annotations
alert1.Annotations["summary"] = "1"
// add external label
alert1.Labels["cluster"] = "east-1"
// add labels from response
alert1.Labels["job"] = job
alert1.Labels["instance"] = inst1
// add rule labels
alert1.Labels["label"] = "bar"
alert1.Labels["host"] = inst1
@@ -239,13 +240,15 @@ func TestGroupStart(t *testing.T) {
alert1.Labels[alertGroupNameLabel] = g.Name
alert1.ID = hash(alert1.Labels)

alert2, err := r.newAlert(m2, nil, time.Now(), nil)
if err != nil {
t.Fatalf("faield to create alert: %s", err)
}
alert2 := r.newAlert(m2, time.Now(), nil, nil)
alert2.State = notifier.StateFiring
// add annotations
alert2.Annotations["summary"] = "1"
// add external label
alert2.Labels["cluster"] = "east-1"
// add labels from response
alert2.Labels["job"] = job
alert2.Labels["instance"] = inst2
// add rule labels
alert2.Labels["label"] = "bar"
alert2.Labels["host"] = inst2
@@ -262,8 +265,25 @@ func TestGroupStart(t *testing.T) {
close(finished)
}()

// wait for multiple evals
time.Sleep(20 * evalInterval)
waitForIterations := func(n int, interval time.Duration) {
t.Helper()

var cur uint64
prev := g.metrics.iterationTotal.Get()
for i := 0; ; i++ {
if i > 40 {
t.Fatalf("group wasn't able to perform %d evaluations during %d eval intervals", n, i)
}
cur = g.metrics.iterationTotal.Get()
if int(cur-prev) >= n {
return
}
time.Sleep(interval)
}
}

// wait for multiple evaluation iterations
waitForIterations(4, evalInterval)

gotAlerts := fn.GetAlerts()
expectedAlerts := []notifier.Alert{*alert1, *alert2}
@@ -280,8 +300,8 @@ func TestGroupStart(t *testing.T) {
// and set only one datapoint for response
fs.Add(m1)

// wait for multiple evals
time.Sleep(20 * evalInterval)
// wait for multiple evaluation iterations
waitForIterations(4, evalInterval)

gotAlerts = fn.GetAlerts()
alert2.State = notifier.StateInactive
36 app/vmalert/rule/group_timing_test.go Normal file
@@ -0,0 +1,36 @@
package rule

import (
"fmt"
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

func BenchmarkGetStaleSeries(b *testing.B) {
ts := time.Now()
n := 100
payload := make([]prompbmarshal.TimeSeries, 0, n)
for i := 0; i < n; i++ {
s := fmt.Sprintf("%d", i)
labels := toPromLabels(b,
"__name__", "foo",
"instance", s,
"job", s,
"state", s,
)
payload = append(payload, newTimeSeriesPB([]float64{1}, []int64{ts.Unix()}, labels))
}

e := &executor{
previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
}
ar := &AlertingRule{RuleID: 1}

b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
e.getStaleSeries(ar, payload, ts)
}
}
@@ -32,7 +32,7 @@ type Rule interface {
close()
}

var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels. See https://docs.victoriametrics.com/vmalert.html#series-with-the-same-labelset for details")
var errDuplicate = errors.New("result contains metrics with the same labelset during evaluation. See https://docs.victoriametrics.com/vmalert/#series-with-the-same-labelset for details")

type ruleState struct {
sync.RWMutex

@@ -95,7 +95,7 @@ func metricWithLabels(t *testing.T, labels ...string) datasource.Metric {
return m
}

func toPromLabels(t *testing.T, labels ...string) []prompbmarshal.Label {
func toPromLabels(t testing.TB, labels ...string) []prompbmarshal.Label {
t.Helper()
if len(labels) == 0 || len(labels)%2 != 0 {
t.Fatalf("expected to get even number of labels")

@@ -13,6 +13,20 @@ function collapseAll() {
$('.collapse').removeClass('show');
}

function showByID(id) {
if (!id) {
return
}
let parent = $("#" + id).parent();
if (!parent) {
return
}
let target = $("#" + parent.attr("data-bs-target"));
if (target.length > 0) {
target.addClass('show');
}
}

function toggleByID(id) {
if (id) {
let el = $("#" + id);
@@ -61,7 +75,7 @@ function search() {
function setParamURL(key, value) {
let url = new URL(location.href)
url.searchParams.set(key, value);
window.history.replaceState(null, null, `?${url.searchParams.toString()}`);
window.history.replaceState(null, null, `?${url.searchParams.toString()}${url.hash}`);
}

function getParamURL(key) {
@@ -141,7 +155,7 @@ $(document).ready(function () {
search()

let hash = window.location.hash.substr(1);
toggleByID(hash);
showByID(hash);
});

$(document).ready(function () {

@@ -39,7 +39,7 @@ var (
{Name: "Groups", Url: "groups"},
{Name: "Alerts", Url: "alerts"},
{Name: "Notifiers", Url: "notifiers"},
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert/"},
}
)

@@ -87,6 +87,9 @@ vmauth-linux-ppc64le:
vmauth-linux-s390x:
APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmauth-linux-loong64:
APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmauth-linux-386:
APP_NAME=vmauth CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

@@ -1,3 +1,3 @@
See vmauth docs [here](https://docs.victoriametrics.com/vmauth.html).
See vmauth docs [here](https://docs.victoriametrics.com/vmauth/).

vmauth docs can be edited at [docs/vmauth.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmauth.md).

@@ -7,7 +7,6 @@ import (
"flag"
"fmt"
"math"
"net"
"net/http"
"net/url"
"os"
@@ -27,20 +26,21 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
)

var (
authConfigPath = flag.String("auth.config", "", "Path to auth config. It can point either to local file or to http url. "+
"See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config")
"See https://docs.victoriametrics.com/vmauth/ for details on the format of this auth config")
configCheckInterval = flag.Duration("configCheckInterval", 0, "interval for config file re-read. "+
"Zero value disables config re-reading. By default, refreshing is disabled, send SIGHUP for config refresh.")
defaultRetryStatusCodes = flagutil.NewArrayInt("retryStatusCodes", 0, "Comma-separated list of default HTTP response status codes when vmauth re-tries the request on other backends. "+
"See https://docs.victoriametrics.com/vmauth.html#load-balancing for details")
"See https://docs.victoriametrics.com/vmauth/#load-balancing for details")
defaultLoadBalancingPolicy = flag.String("loadBalancingPolicy", "least_loaded", "The default load balancing policy to use for backend urls specified inside url_prefix section. "+
"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth.html#load-balancing")
"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth/#load-balancing")
discoverBackendIPsGlobal = flag.Bool("discoverBackendIPs", false, "Whether to discover backend IPs via periodic DNS queries to hostnames specified in url_prefix. "+
"This may be useful when url_prefix points to a hostname with dynamically scaled instances behind it. See https://docs.victoriametrics.com/vmauth.html#discovering-backend-ips")
"This may be useful when url_prefix points to a hostname with dynamically scaled instances behind it. See https://docs.victoriametrics.com/vmauth/#discovering-backend-ips")
discoverBackendIPsInterval = flag.Duration("discoverBackendIPsInterval", 10*time.Second, "The interval for re-discovering backend IPs if -discoverBackendIPs command-line flag is set. "+
"Too low value may lead to DNS errors")
httpAuthHeader = flagutil.NewArrayString("httpAuthHeader", "HTTP request header to use for obtaining authorization tokens. By default auth tokens are read from Authorization request header")
@@ -73,15 +73,18 @@ type UserInfo struct {
RetryStatusCodes []int `yaml:"retry_status_codes,omitempty"`
LoadBalancingPolicy string `yaml:"load_balancing_policy,omitempty"`
DropSrcPathPrefixParts *int `yaml:"drop_src_path_prefix_parts,omitempty"`
TLSInsecureSkipVerify *bool `yaml:"tls_insecure_skip_verify,omitempty"`
TLSCAFile string `yaml:"tls_ca_file,omitempty"`
TLSCertFile string `yaml:"tls_cert_file,omitempty"`
TLSKeyFile string `yaml:"tls_key_file,omitempty"`
TLSServerName string `yaml:"tls_server_name,omitempty"`
TLSInsecureSkipVerify *bool `yaml:"tls_insecure_skip_verify,omitempty"`

MetricLabels map[string]string `yaml:"metric_labels,omitempty"`

concurrencyLimitCh chan struct{}
concurrencyLimitReached *metrics.Counter

httpTransport *http.Transport
rt http.RoundTripper

requests *metrics.Counter
backendErrors *metrics.Counter
@@ -90,8 +93,8 @@ type UserInfo struct {

// HeadersConf represents config for request and response headers.
type HeadersConf struct {
RequestHeaders []Header `yaml:"headers,omitempty"`
ResponseHeaders []Header `yaml:"response_headers,omitempty"`
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
}

func (ui *UserInfo) beginConcurrencyLimit() error {
@@ -155,10 +158,10 @@ type URLMap struct {
SrcHosts []*Regex `yaml:"src_hosts,omitempty"`

// SrcQueryArgs is an optional list of query args, which must match request URL query args.
SrcQueryArgs []QueryArg `yaml:"src_query_args,omitempty"`
SrcQueryArgs []*QueryArg `yaml:"src_query_args,omitempty"`

// SrcHeaders is an optional list of headers, which must match request headers.
SrcHeaders []Header `yaml:"src_headers,omitempty"`
SrcHeaders []*Header `yaml:"src_headers,omitempty"`

// UrlPrefix contains backend url prefixes for the proxied request url.
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
@@ -179,22 +182,15 @@ type URLMap struct {
DropSrcPathPrefixParts *int `yaml:"drop_src_path_prefix_parts,omitempty"`
}

// Regex represents a regex
type Regex struct {
re *regexp.Regexp

sOriginal string
}

// QueryArg represents HTTP query arg
type QueryArg struct {
Name string
Value string
Value *Regex

sOriginal string
}

// UnmarshalYAML unmarshals up from yaml.
// UnmarshalYAML unmarshals qa from yaml.
func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
var s string
if err := f(&s); err != nil {
@@ -203,14 +199,27 @@ func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
qa.sOriginal = s

n := strings.IndexByte(s, '=')
if n >= 0 {
qa.Name = s[:n]
qa.Value = s[n+1:]
if n < 0 {
return nil
}

qa.Name = s[:n]
expr := s[n+1:]
if !strings.HasPrefix(expr, "~") {
expr = regexp.QuoteMeta(expr)
} else {
expr = expr[1:]
}

var re Regex
if err := yaml.Unmarshal([]byte(expr), &re); err != nil {
return fmt.Errorf("cannot unmarshal regex for %q query arg: %w", qa.Name, err)
}
qa.Value = &re
return nil
}

// MarshalYAML marshals up to yaml.
// MarshalYAML marshals qa to yaml.
func (qa *QueryArg) MarshalYAML() (interface{}, error) {
return qa.sOriginal, nil
}
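Under the new parsing, a src_query_args entry is split on the first '='; a value starting with '~' is treated as a regular expression, and anything else is quoted literally. A minimal sketch of that decision (assumes the Regex wrapper anchors the expression the same way getRegexs does in the tests below):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// compileArgValue mirrors the new QueryArg rule: "foo=bar" matches the literal
// value "bar", while "foo=~ba.+" compiles "ba.+" as a regular expression.
func compileArgValue(s string) (name string, re *regexp.Regexp, err error) {
	n := strings.IndexByte(s, '=')
	if n < 0 {
		return s, nil, nil // name-only filter: any value matches
	}
	name, expr := s[:n], s[n+1:]
	if strings.HasPrefix(expr, "~") {
		expr = expr[1:]
	} else {
		expr = regexp.QuoteMeta(expr)
	}
	re, err = regexp.Compile("^(?:" + expr + ")$")
	return name, re, err
}

func main() {
	name, re, _ := compileArgValue("baz=~.*x=y.+")
	fmt.Println(name, re.MatchString("abcx=yz")) // baz true
}
```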
@@ -293,7 +302,7 @@ func (up *URLPrefix) getBackendsCount() int {
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func (up *URLPrefix) getBackendURL() *backendURL {
up.discoverBackendIPsIfNeeded()
up.discoverBackendAddrsIfNeeded()

pbus := up.bus.Load()
bus := *pbus
@@ -303,7 +312,7 @@ func (up *URLPrefix) getBackendURL() *backendURL {
return getLeastLoadedBackendURL(bus, &up.n)
}

func (up *URLPrefix) discoverBackendIPsIfNeeded() {
func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
if !up.discoverBackendIPs {
// The discovery is disabled.
return
@@ -328,27 +337,42 @@

// Discover ips for all the backendURLs
ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(intervalSec))
hostToIPs := make(map[string][]string)
hostToAddrs := make(map[string][]string)
for _, bu := range up.busOriginal {
host := bu.Hostname()
if hostToIPs[host] != nil {
if hostToAddrs[host] != nil {
// ips for the given host have been already discovered
continue
}
addrs, err := resolver.LookupIPAddr(ctx, host)
var ips []string
if err != nil {
logger.Warnf("cannot discover backend IPs for %s: %s; use it literally", bu, err)
ips = []string{host}
} else {
ips = make([]string, len(addrs))
for i, addr := range addrs {
ips[i] = addr.String()
var resolvedAddrs []string
if strings.HasPrefix(host, "srv+") {
// The host has the format 'srv+realhost'. Strip 'srv+' prefix before performing the lookup.
host = strings.TrimPrefix(host, "srv+")
_, addrs, err := netutil.Resolver.LookupSRV(ctx, "", "", host)
if err != nil {
logger.Warnf("cannot discover backend SRV records for %s: %s; use it literally", bu, err)
resolvedAddrs = []string{host}
} else {
resolvedAddrs = make([]string, len(addrs))
for i, addr := range addrs {
resolvedAddrs[i] = fmt.Sprintf("%s:%d", addr.Target, addr.Port)
}
}
} else {
addrs, err := netutil.Resolver.LookupIPAddr(ctx, host)
if err != nil {
logger.Warnf("cannot discover backend IPs for %s: %s; use it literally", bu, err)
resolvedAddrs = []string{host}
} else {
resolvedAddrs = make([]string, len(addrs))
for i, addr := range addrs {
resolvedAddrs[i] = addr.String()
}
}
// sort ips, so they could be compared below in areEqualBackendURLs()
sort.Strings(ips)
}
hostToIPs[host] = ips
// sort resolvedAddrs, so they could be compared below in areEqualBackendURLs()
sort.Strings(resolvedAddrs)
hostToAddrs[host] = resolvedAddrs
}
cancel()

@@ -357,10 +381,14 @@
for _, bu := range up.busOriginal {
host := bu.Hostname()
port := bu.Port()
for _, ip := range hostToIPs[host] {
for _, addr := range hostToAddrs[host] {
buCopy := *bu
buCopy.Host = ip
buCopy.Host = addr
if port != "" {
if n := strings.IndexByte(buCopy.Host, ':'); n >= 0 {
// Drop the discovered port and substitute it with the port specified in bu.
buCopy.Host = buCopy.Host[:n]
}
buCopy.Host += ":" + port
}
busNew = append(busNew, &backendURL{
@@ -391,11 +419,6 @@ func areEqualBackendURLs(a, b []*backendURL) bool {
return true
}

var resolver = &net.Resolver{
PreferGo: true,
StrictErrors: true,
}

// getFirstAvailableBackendURL returns the first available backendURL, which isn't broken.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
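The srv+ branch above resolves a DNS SRV record into concrete host:port pairs. A standalone sketch of the lookup itself, using only the standard resolver (the SRV name is a hypothetical example):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func lookupSRVTargets(host string) ([]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Empty service/proto means host is already the full SRV name,
	// e.g. "_vmselect._tcp.example.com".
	_, addrs, err := net.DefaultResolver.LookupSRV(ctx, "", "", host)
	if err != nil {
		return nil, err
	}
	targets := make([]string, len(addrs))
	for i, a := range addrs {
		targets[i] = fmt.Sprintf("%s:%d", a.Target, a.Port)
	}
	return targets, nil
}

func main() {
	ts, err := lookupSRVTargets("_vmselect._tcp.example.com")
	fmt.Println(ts, err)
}
```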
@@ -508,6 +531,13 @@ func (up *URLPrefix) MarshalYAML() (interface{}, error) {
return up.vOriginal, nil
}

// Regex represents a regex
type Regex struct {
re *regexp.Regexp

sOriginal string
}

func (r *Regex) match(s string) bool {
prefix, ok := r.re.LiteralPrefix()
if ok {
@@ -663,6 +693,14 @@ func loadAuthConfig() (bool, error) {
authConfig.Store(ac)
authConfigData.Store(&data)
authUsers.Store(&m)
if prevAc != nil {
// explicitly unregister metrics, since all summary type metrics
// are registered at global state of metrics package
// and must be removed from it to release memory.
// Metrics must be unregistered only after the atomic.Value.Store calls above.
// Otherwise it may lead to metric gaps, since UnregisterAllMetrics is a slow operation.
prevAc.ms.UnregisterAllMetrics()
}

return true, nil
}
@@ -716,11 +754,11 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
return float64(len(ui.concurrencyLimitCh))
})

tr, err := getTransport(ui.TLSInsecureSkipVerify, ui.TLSCAFile)
rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("cannot initialize HTTP transport: %w", err)
return nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
}
ui.httpTransport = tr
ui.rt = rt
}
return ac, nil
}
@@ -764,11 +802,11 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
return float64(len(ui.concurrencyLimitCh))
})

tr, err := getTransport(ui.TLSInsecureSkipVerify, ui.TLSCAFile)
rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("cannot initialize HTTP transport: %w", err)
return nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
}
ui.httpTransport = tr
ui.rt = rt

for _, at := range ats {
byAuthToken[at] = ui

@@ -4,10 +4,11 @@ import (
"bytes"
"fmt"
"net/url"
"regexp"
"testing"

"gopkg.in/yaml.v2"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

func TestParseAuthConfigFailure(t *testing.T) {
@@ -315,43 +316,54 @@ users:
f(`
users:
- auth_token: foo
url_prefix: http://aaa:343/bbb
url_prefix: https://aaa:343/bbb
max_concurrent_requests: 5
tls_insecure_skip_verify: true
tls_server_name: "foo.bar"
tls_ca_file: "foo/bar"
tls_cert_file: "foo/baz"
tls_key_file: "foo/foo"
`, map[string]*UserInfo{
getHTTPAuthToken("foo"): {
AuthToken: "foo",
URLPrefix: mustParseURL("http://aaa:343/bbb"),
URLPrefix: mustParseURL("https://aaa:343/bbb"),
MaxConcurrentRequests: 5,
TLSInsecureSkipVerify: &insecureSkipVerifyTrue,
TLSServerName: "foo.bar",
TLSCAFile: "foo/bar",
TLSCertFile: "foo/baz",
TLSKeyFile: "foo/foo",
},
})

// Multiple url_prefix entries
insecureSkipVerifyFalse := false
discoverBackendIPsTrue := true
f(`
users:
- username: foo
password: bar
url_prefix:
- http://node1:343/bbb
- http://node2:343/bbb
- http://srv+node2:343/bbb
tls_insecure_skip_verify: false
retry_status_codes: [500, 501]
load_balancing_policy: first_available
drop_src_path_prefix_parts: 1
discover_backend_ips: true
`, map[string]*UserInfo{
getHTTPAuthBasicToken("foo", "bar"): {
Username: "foo",
Password: "bar",
URLPrefix: mustParseURLs([]string{
"http://node1:343/bbb",
"http://node2:343/bbb",
"http://srv+node2:343/bbb",
}),
TLSInsecureSkipVerify: &insecureSkipVerifyFalse,
RetryStatusCodes: []int{500, 501},
LoadBalancingPolicy: "first_available",
DropSrcPathPrefixParts: intp(1),
DiscoverBackendIPs: &discoverBackendIPsTrue,
},
})

@@ -384,32 +396,21 @@ users:
{
SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
SrcPaths: getRegexs([]string{"/api/v1/write"}),
SrcQueryArgs: []QueryArg{
{
Name: "foo",
Value: "bar",
},
SrcQueryArgs: []*QueryArg{
mustNewQueryArg("foo=b.+ar"),
mustNewQueryArg("baz=~.*x=y.+"),
},
SrcHeaders: []Header{
{
Name: "TenantID",
Value: "345",
},
SrcHeaders: []*Header{
mustNewHeader("'TenantID: 345'"),
},
URLPrefix: mustParseURLs([]string{
"http://vminsert1/insert/0/prometheus",
"http://vminsert2/insert/0/prometheus",
}),
HeadersConf: HeadersConf{
RequestHeaders: []Header{
{
Name: "foo",
Value: "bar",
},
{
Name: "xxx",
Value: "y",
},
RequestHeaders: []*Header{
mustNewHeader("'foo: bar'"),
mustNewHeader("'xxx: y'"),
},
},
},
@@ -423,7 +424,7 @@ users:
url_prefix: http://vmselect/select/0/prometheus
- src_paths: ["/api/v1/write"]
src_hosts: ["foo\\.bar", "baz:1234"]
src_query_args: ['foo=bar']
src_query_args: ['foo=b.+ar', 'baz=~.*x=y.+']
src_headers: ['TenantID: 345']
url_prefix: ["http://vminsert1/insert/0/prometheus","http://vminsert2/insert/0/prometheus"]
headers:
@@ -486,15 +487,9 @@ users:
"http://vminsert2/insert/0/prometheus",
}),
HeadersConf: HeadersConf{
RequestHeaders: []Header{
{
Name: "foo",
Value: "bar",
},
{
Name: "xxx",
Value: "y",
},
RequestHeaders: []*Header{
mustNewHeader("'foo: bar'"),
mustNewHeader("'xxx: y'"),
},
},
},
@@ -518,15 +513,9 @@ users:
"http://vminsert2/insert/0/prometheus",
}),
HeadersConf: HeadersConf{
RequestHeaders: []Header{
{
Name: "foo",
Value: "bar",
},
{
Name: "xxx",
Value: "y",
},
RequestHeaders: []*Header{
mustNewHeader("'foo: bar'"),
mustNewHeader("'xxx: y'"),
},
},
},
@@ -600,11 +589,11 @@ unauthorized_user:
}

ui := m[getHTTPAuthBasicToken("foo", "bar")]
if !isSetBool(ui.TLSInsecureSkipVerify, true) || !ui.httpTransport.TLSClientConfig.InsecureSkipVerify {
if !isSetBool(ui.TLSInsecureSkipVerify, true) {
t.Fatalf("unexpected TLSInsecureSkipVerify value for user foo")
}

if !isSetBool(ac.UnauthorizedUser.TLSInsecureSkipVerify, false) || ac.UnauthorizedUser.httpTransport.TLSClientConfig.InsecureSkipVerify {
if !isSetBool(ac.UnauthorizedUser.TLSInsecureSkipVerify, false) {
t.Fatalf("unexpected TLSInsecureSkipVerify value for unauthorized_user")
}
}
@@ -696,13 +685,74 @@ func isSetBool(boolP *bool, expectedValue bool) bool {
return *boolP == expectedValue
}

func TestGetLeastLoadedBackendURL(t *testing.T) {
up := mustParseURLs([]string{
"http://node1:343",
"http://node2:343",
"http://node3:343",
})
up.loadBalancingPolicy = "least_loaded"

fn := func(ns ...int) {
t.Helper()
bus := up.bus.Load()
pbus := *bus
for i, b := range pbus {
got := int(b.concurrentRequests.Load())
exp := ns[i]
if got != exp {
t.Fatalf("expected %q to have %d concurrent requests; got %d instead", b.url, exp, got)
}
}
}

up.getBackendURL()
fn(0, 1, 0)
up.getBackendURL()
fn(0, 1, 1)
up.getBackendURL()
fn(1, 1, 1)

up.getBackendURL()
up.getBackendURL()
fn(1, 2, 2)

bus := up.bus.Load()
pbus := *bus
pbus[0].concurrentRequests.Add(2)
pbus[2].concurrentRequests.Add(5)
fn(3, 2, 7)

up.getBackendURL()
fn(3, 3, 7)

up.getBackendURL()
fn(3, 4, 7)

up.getBackendURL()
fn(4, 4, 7)

up.getBackendURL()
fn(5, 4, 7)

up.getBackendURL()
fn(5, 5, 7)

up.getBackendURL()
fn(6, 5, 7)

up.getBackendURL()
fn(6, 6, 7)

up.getBackendURL()
up.getBackendURL()
fn(7, 7, 7)
}

func getRegexs(paths []string) []*Regex {
var sps []*Regex
for _, path := range paths {
sps = append(sps, &Regex{
sOriginal: path,
re: regexp.MustCompile("^(?:" + path + ")$"),
})
sps = append(sps, mustNewRegex(path))
}
return sps
}
@@ -759,3 +809,27 @@ func mustParseURLs(us []string) *URLPrefix {
func intp(n int) *int {
return &n
}

func mustNewRegex(s string) *Regex {
var re Regex
if err := yaml.Unmarshal([]byte(s), &re); err != nil {
logger.Panicf("cannot unmarshal regex %q: %s", s, err)
}
return &re
}

func mustNewQueryArg(s string) *QueryArg {
var qa QueryArg
if err := yaml.Unmarshal([]byte(s), &qa); err != nil {
logger.Panicf("cannot unmarshal query arg filter %q: %s", s, err)
}
return &qa
}

func mustNewHeader(s string) *Header {
var h Header
if err := yaml.Unmarshal([]byte(s), &h); err != nil {
logger.Panicf("cannot unmarshal header filter %q: %s", s, err)
}
return &h
}

@@ -2,8 +2,6 @@ package main

import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
@@ -22,14 +20,13 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
)

@@ -40,6 +37,7 @@ var (
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host. "+
"See also -maxConcurrentRequests")
idleConnTimeout = flag.Duration("idleConnTimeout", 50*time.Second, `Defines a duration for idle (keep-alive connection) to exist. Consider setting this value less than the value of "-http.idleConnTimeout". It must prevent possible "write: broken pipe" and "read: connection reset by peer" errors.`)
responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")
maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process. Other requests are rejected with "+
"'429 Too Many Requests' http status code. See also -maxConcurrentPerUserRequests and -maxIdleConnsPerBackend command-line options")
@@ -53,9 +51,15 @@ var (
maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size, which can be cached and re-tried at other backends. "+
"Bigger values may require more memory")
backendTLSInsecureSkipVerify = flag.Bool("backend.tlsInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to backends over HTTPS. "+
"See https://docs.victoriametrics.com/vmauth.html#backend-tls-setup")
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
backendTLSCAFile = flag.String("backend.TLSCAFile", "", "Optional path to TLS root CA file, which is used for TLS verification when connecting to backends over HTTPS. "+
"See https://docs.victoriametrics.com/vmauth.html#backend-tls-setup")
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
backendTLSCertFile = flag.String("backend.TLSCertFile", "", "Optional path to TLS client certificate file, which must be sent to HTTPS backend. "+
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
backendTLSKeyFile = flag.String("backend.TLSKeyFile", "", "Optional path to TLS client key file, which must be sent to HTTPS backend. "+
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
backendTLSServerName = flag.String("backend.TLSServerName", "", "Optional TLS ServerName, which must be sent to HTTPS backend. "+
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
)
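The new -backend.TLS* flags above act as global defaults; per-user TLS options from the auth config override them inside newRoundTripper (shown further below). The precedence rule is simply "the per-user value wins when set", as in this hedged sketch (illustrative helper, not part of vmauth):

```go
// pickTLSOption returns the per-user override when it is set,
// falling back to the global -backend.TLS* flag value otherwise.
func pickTLSOption(perUser, globalFlag string) string {
	if perUser != "" {
		return perUser
	}
	return globalFlag
}
```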
func main() {
@@ -240,7 +244,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
req.Host = targetURL.Host
}
updateHeadersByConfig(req.Header, hc.RequestHeaders)
res, err := ui.httpTransport.RoundTrip(req)
res, err := ui.rt.RoundTrip(req)
rtb, rtbOK := req.Body.(*readTrackingBody)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
@@ -323,7 +327,7 @@ func copyHeader(dst, src http.Header) {
}
}

func updateHeadersByConfig(headers http.Header, config []Header) {
func updateHeadersByConfig(headers http.Header, config []*Header) {
for _, h := range config {
if h.Value == "" {
headers.Del(h.Name)
@@ -392,50 +396,41 @@ var (
missingRouteRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
)

func getTransport(insecureSkipVerifyP *bool, caFile string) (*http.Transport, error) {
if insecureSkipVerifyP == nil {
insecureSkipVerifyP = backendTLSInsecureSkipVerify
func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, insecureSkipVerifyP *bool) (http.RoundTripper, error) {
caFile := *backendTLSCAFile
if caFileOpt != "" {
caFile = caFileOpt
}
insecureSkipVerify := *insecureSkipVerifyP
if caFile == "" {
caFile = *backendTLSCAFile
certFile := *backendTLSCertFile
if certFileOpt != "" {
certFile = certFileOpt
}
keyFile := *backendTLSKeyFile
if keyFileOpt != "" {
keyFile = keyFileOpt
}
serverName := *backendTLSServerName
if serverNameOpt != "" {
serverName = serverNameOpt
}
insecureSkipVerify := *backendTLSInsecureSkipVerify
if p := insecureSkipVerifyP; p != nil {
insecureSkipVerify = *p
}
opts := &promauth.Options{
TLSConfig: &promauth.TLSConfig{
CAFile: caFile,
CertFile: certFile,
KeyFile: keyFile,
ServerName: serverName,
InsecureSkipVerify: insecureSkipVerify,
},
}
cfg, err := opts.NewConfig()
if err != nil {
return nil, fmt.Errorf("cannot initialize promauth.Config: %w", err)
}

bb := bbPool.Get()
defer bbPool.Put(bb)

bb.B = appendTransportKey(bb.B[:0], insecureSkipVerify, caFile)

transportMapLock.Lock()
defer transportMapLock.Unlock()

tr := transportMap[string(bb.B)]
if tr == nil {
trLocal, err := newTransport(insecureSkipVerify, caFile)
if err != nil {
return nil, err
}
transportMap[string(bb.B)] = trLocal
tr = trLocal
}

return tr, nil
}

var (
transportMap = make(map[string]*http.Transport)
transportMapLock sync.Mutex
)

func appendTransportKey(dst []byte, insecureSkipVerify bool, caFile string) []byte {
dst = encoding.MarshalBool(dst, insecureSkipVerify)
dst = encoding.MarshalBytes(dst, bytesutil.ToUnsafeBytes(caFile))
return dst
}

var bbPool bytesutil.ByteBufferPool

func newTransport(insecureSkipVerify bool, caFile string) (*http.Transport, error) {
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.ResponseHeaderTimeout = *responseTimeout
// Automatic compression must be disabled in order to fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/535
@@ -444,27 +439,11 @@ func newTransport(insecureSkipVerify bool, caFile string) (*http.Transport, erro
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
tr.MaxIdleConns = tr.MaxIdleConnsPerHost
}
tlsCfg := tr.TLSClientConfig
if tlsCfg == nil {
tlsCfg = &tls.Config{}
tr.TLSClientConfig = tlsCfg
}
if insecureSkipVerify || caFile != "" {
tlsCfg.ClientSessionCache = tls.NewLRUClientSessionCache(0)
tlsCfg.InsecureSkipVerify = insecureSkipVerify
if caFile != "" {
data, err := fscore.ReadFileOrHTTP(caFile)
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read tls_ca_file: %w", err)
|
||||
}
|
||||
rootCA := x509.NewCertPool()
|
||||
if !rootCA.AppendCertsFromPEM(data) {
|
||||
return nil, fmt.Errorf("cannot parse data read from tls_ca_file %q", caFile)
|
||||
}
|
||||
tlsCfg.RootCAs = rootCA
|
||||
}
|
||||
}
|
||||
return tr, nil
|
||||
tr.IdleConnTimeout = *idleConnTimeout
|
||||
tr.DialContext = netutil.DialMaybeSRV
|
||||
|
||||
rt := cfg.NewRoundTripper(tr)
|
||||
return rt, nil
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -488,7 +467,7 @@ func usage() {
|
||||
const s = `
|
||||
vmauth authenticates and authorizes incoming requests and proxies them to VictoriaMetrics.
|
||||
|
||||
See the docs at https://docs.victoriametrics.com/vmauth.html .
|
||||
See the docs at https://docs.victoriametrics.com/vmauth/ .
|
||||
`
|
||||
flagutil.Usage(s)
|
||||
}
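
The newRoundTripper rewrite above replaces the cached-transport lookup with a promauth-backed round tripper, and each per-user TLS option falls back to the matching global backend.* flag when left empty. A standalone sketch of that fallback pattern (names are illustrative, not the vmauth API):

    package main

    import "fmt"

    // pick returns the per-user option if set, otherwise the global flag value.
    func pick(opt, global string) string {
        if opt != "" {
            return opt
        }
        return global
    }

    func main() {
        globalCAFile := "/etc/ssl/backend-ca.pem" // e.g. -backend.TLSCAFile
        perUserCAFile := ""                       // unset in the per-user config

        caFile := pick(perUserCAFile, globalCAFile)
        fmt.Println(caFile) // /etc/ssl/backend-ca.pem
    }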

@@ -86,19 +86,25 @@ func matchAnyRegex(rs []*Regex, s string) bool {
return false
}

func matchAnyQueryArg(qas []QueryArg, args url.Values) bool {
func matchAnyQueryArg(qas []*QueryArg, args url.Values) bool {
if len(qas) == 0 {
return true
}
for _, qa := range qas {
if slices.Contains(args[qa.Name], qa.Value) {
return true
vs, ok := args[qa.Name]
if !ok {
continue
}
for _, v := range vs {
if qa.Value.match(v) {
return true
}
}
}
return false
}

func matchAnyHeader(headers []Header, h http.Header) bool {
func matchAnyHeader(headers []*Header, h http.Header) bool {
if len(headers) == 0 {
return true
}
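
matchAnyQueryArg now compares each query-arg value through QueryArg.Value.match instead of an exact slices.Contains lookup, so configured values may be regexes, and an empty list still matches every request. A self-contained sketch of those semantics; the queryArg type and match method below are stand-ins, not the real implementation:

    package main

    import (
        "fmt"
        "net/url"
        "regexp"
    )

    // queryArg is a stand-in for the real *QueryArg: exact value or regex.
    type queryArg struct {
        name  string
        value string         // exact match when re is nil
        re    *regexp.Regexp // regex match when set
    }

    func (qa *queryArg) match(s string) bool {
        if qa.re != nil {
            return qa.re.MatchString(s)
        }
        return s == qa.value
    }

    func matchAnyQueryArg(qas []*queryArg, args url.Values) bool {
        if len(qas) == 0 {
            return true // no filters configured, so match everything
        }
        for _, qa := range qas {
            for _, v := range args[qa.name] {
                if qa.match(v) {
                    return true
                }
            }
        }
        return false
    }

    func main() {
        args, _ := url.ParseQuery(`query=up{env="dev"}`)
        qa := &queryArg{name: "query", re: regexp.MustCompile(`env="dev"`)}
        fmt.Println(matchAnyQueryArg([]*queryArg{qa}, args)) // true
    }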

@@ -97,8 +97,10 @@ func TestCreateTargetURLSuccess(t *testing.T) {
bu := up.getBackendURL()
target := mergeURLs(bu.url, u, up.dropSrcPathPrefixParts)
bu.put()
if target.String() != expectedTarget {
t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)

gotTarget := target.String()
if gotTarget != expectedTarget {
t.Fatalf("unexpected target; \ngot:\n%q;\nwant:\n%q", gotTarget, expectedTarget)
}
if s := headersToString(hc.RequestHeaders); s != expectedRequestHeaders {
t.Fatalf("unexpected request headers; got %q; want %q", s, expectedRequestHeaders)
@@ -123,17 +125,11 @@ func TestCreateTargetURLSuccess(t *testing.T) {
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
HeadersConf: HeadersConf{
RequestHeaders: []Header{
{
Name: "bb",
Value: "aaa",
},
RequestHeaders: []*Header{
mustNewHeader("'bb: aaa'"),
},
ResponseHeaders: []Header{
{
Name: "x",
Value: "y",
},
ResponseHeaders: []*Header{
mustNewHeader("'x: y'"),
},
},
RetryStatusCodes: []int{503, 501},
@@ -162,29 +158,17 @@ func TestCreateTargetURLSuccess(t *testing.T) {
{
SrcHosts: getRegexs([]string{"host42"}),
SrcPaths: getRegexs([]string{"/vmsingle/api/v1/query"}),
SrcQueryArgs: []QueryArg{
{
Name: "db",
Value: "foo",
},
SrcQueryArgs: []*QueryArg{
mustNewQueryArg("db=foo"),
},
URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
HeadersConf: HeadersConf{
RequestHeaders: []Header{
{
Name: "xx",
Value: "aa",
},
{
Name: "yy",
Value: "asdf",
},
RequestHeaders: []*Header{
mustNewHeader("'xx: aa'"),
mustNewHeader("'yy: asdf'"),
},
ResponseHeaders: []Header{
{
Name: "qwe",
Value: "rty",
},
ResponseHeaders: []*Header{
mustNewHeader("'qwe: rty'"),
},
},
RetryStatusCodes: []int{503, 500, 501},
@@ -200,14 +184,12 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
URLPrefix: mustParseURL("http://default-server"),
HeadersConf: HeadersConf{
RequestHeaders: []Header{{
Name: "bb",
Value: "aaa",
}},
ResponseHeaders: []Header{{
Name: "x",
Value: "y",
}},
RequestHeaders: []*Header{
mustNewHeader("'bb: aaa'"),
},
ResponseHeaders: []*Header{
mustNewHeader("'x: y'"),
},
},
RetryStatusCodes: []int{502},
DropSrcPathPrefixParts: intp(2),
@@ -250,6 +232,30 @@ func TestCreateTargetURLSuccess(t *testing.T) {
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "", "", nil, "least_loaded", 0)

// Complex routing regexp query args in `url_map`
ui = &UserInfo{
URLMaps: []URLMap{
{
SrcPaths: getRegexs([]string{"/api/v1/query"}),
SrcQueryArgs: []*QueryArg{
mustNewQueryArg(`query=~.*{.*env="dev".*}.*`),
},
URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
},
{
SrcPaths: getRegexs([]string{"/api/v1/query"}),
SrcQueryArgs: []*QueryArg{
mustNewQueryArg(`query=~.*{.*env="prod".*}.*`),
},
URLPrefix: mustParseURL("http://vmselect/1/prometheus"),
},
},
URLPrefix: mustParseURL("http://default-server"),
}
f(ui, `/api/v1/query?query=up{env="prod"}`, `http://vmselect/1/prometheus/api/v1/query?query=up%7Benv%3D%22prod%22%7D`, "", "", nil, "least_loaded", 0)
f(ui, `/api/v1/query?query=up{foo="bar",env="dev",pod!=""}`, `http://vmselect/0/prometheus/api/v1/query?query=up%7Bfoo%3D%22bar%22%2Cenv%3D%22dev%22%2Cpod%21%3D%22%22%7D`, "", "", nil, "least_loaded", 0)
f(ui, `/api/v1/query?query=up{foo="bar"}`, `http://default-server/api/v1/query?query=up%7Bfoo%3D%22bar%22%7D`, "", "", nil, "least_loaded", 0)
}
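
The mustNewHeader and mustNewQueryArg helpers used in the rewritten tests build headers and query args from single strings. Judging from the cases above, mustNewQueryArg appears to accept name=value for exact matching and name=~regex for regex matching; a hypothetical parser for that convention (not the actual helper) might look like:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    type queryArg struct {
        name  string
        value string         // used when re is nil
        re    *regexp.Regexp // set for the "name=~regex" form
    }

    // parseQueryArg is a hypothetical equivalent of mustNewQueryArg.
    func parseQueryArg(s string) (*queryArg, error) {
        name, expr, ok := strings.Cut(s, "=")
        if !ok {
            return nil, fmt.Errorf("missing '=' in %q", s)
        }
        if rest, isRe := strings.CutPrefix(expr, "~"); isRe {
            re, err := regexp.Compile(rest)
            if err != nil {
                return nil, err
            }
            return &queryArg{name: name, re: re}, nil
        }
        return &queryArg{name: name, value: expr}, nil
    }

    func main() {
        qa, _ := parseQueryArg(`query=~.*env="dev".*`)
        fmt.Println(qa.name, qa.re.MatchString(`up{env="dev"}`)) // query true
    }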

func TestCreateTargetURLFailure(t *testing.T) {
@@ -265,10 +271,10 @@ func TestCreateTargetURLFailure(t *testing.T) {
t.Fatalf("unexpected non-empty up=%#v", up)
}
if hc.RequestHeaders != nil {
t.Fatalf("unexpected non-empty request headers=%q", hc.RequestHeaders)
t.Fatalf("unexpected non-empty request headers: %s", headersToString(hc.RequestHeaders))
}
if hc.ResponseHeaders != nil {
t.Fatalf("unexpected non-empty response headers=%q", hc.ResponseHeaders)
t.Fatalf("unexpected non-empty response headers: %s", headersToString(hc.ResponseHeaders))
}
}
f(&UserInfo{}, "/foo/bar")
@@ -282,7 +288,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
}, "/api/v1/write")
}

func headersToString(hs []Header) string {
func headersToString(hs []*Header) string {
a := make([]string, len(hs))
for i, h := range hs {
a[i] = fmt.Sprintf("%s: %s", h.Name, h.Value)

@@ -81,6 +81,9 @@ vmbackup-linux-ppc64le:
vmbackup-linux-s390x:
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmbackup-linux-loong64:
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmbackup-linux-386:
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

@@ -1,3 +1,3 @@
See vmbackup docs [here](https://docs.victoriametrics.com/vmbackup.html).
See vmbackup docs [here](https://docs.victoriametrics.com/vmbackup/).

vmbackup docs can be edited at [docs/vmbackup.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmbackup.md).

@@ -26,7 +26,7 @@ import (
var (
httpListenAddr = flag.String("httpListenAddr", ":8420", "TCP address for exporting metrics at /metrics page")
storageDataPath = flag.String("storageDataPath", "victoria-metrics-data", "Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage")
snapshotName = flag.String("snapshotName", "", "Name for the snapshot to backup. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots. There is no need in setting -snapshotName if -snapshot.createURL is set")
snapshotName = flag.String("snapshotName", "", "Name for the snapshot to backup. See https://docs.victoriametrics.com/single-server-victoriametrics/#how-to-work-with-snapshots. There is no need in setting -snapshotName if -snapshot.createURL is set")
snapshotCreateURL = flag.String("snapshot.createURL", "", "VictoriaMetrics create snapshot url. When this is given a snapshot will automatically be created during backup. "+
"Example: http://victoriametrics:8428/snapshot/create . There is no need in setting -snapshotName if -snapshot.createURL is set")
snapshotDeleteURL = flag.String("snapshot.deleteURL", "", "VictoriaMetrics delete snapshot url. Optional. Will be generated from -snapshot.createURL if not provided. "+
@@ -164,7 +164,7 @@ func usage() {
vmbackup performs backups for VictoriaMetrics data from instant snapshots to gcs, s3, azblob
or local filesystem. Backed up data can be restored with vmrestore.

See the docs at https://docs.victoriametrics.com/vmbackup.html .
See the docs at https://docs.victoriametrics.com/vmbackup/ .
`
flagutil.Usage(s)
}

@@ -1,3 +1,3 @@
See vmbackupmanager docs [here](https://docs.victoriametrics.com/vmbackupmanager.html).
See vmbackupmanager docs [here](https://docs.victoriametrics.com/vmbackupmanager/).

vmbackupmanager docs can be edited at [docs/vmbackupmanager.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmbackupmanager.md).

@@ -81,6 +81,9 @@ vmctl-linux-ppc64le:
vmctl-linux-s390x:
APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmctl-linux-loong64:
APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmctl-linux-386:
APP_NAME=vmctl CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

@@ -1,3 +1,3 @@
See vmctl docs [here](https://docs.victoriametrics.com/vmctl.html).
See vmctl docs [here](https://docs.victoriametrics.com/vmctl/).

vmctl docs can be edited at [docs/vmctl.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmctl.md).

@@ -442,12 +442,12 @@ var (
},
&cli.StringFlag{
Name: vmNativeFilterTimeStart,
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/single-server-victoriametrics/#timestamp-formats",
Required: true,
},
&cli.StringFlag{
Name: vmNativeFilterTimeEnd,
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/single-server-victoriametrics/#timestamp-formats",
},
&cli.StringFlag{
Name: vmNativeStepInterval,
@@ -469,7 +469,7 @@ var (
Name: vmNativeSrcAddr,
Usage: "VictoriaMetrics address to perform export from. \n" +
" Should be the same as --httpListenAddr value for single-node version or vmselect component." +
" If exporting from cluster version see https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format",
" If exporting from cluster version see https://docs.victoriametrics.com/cluster-victoriametrics/#url-format",
Required: true,
},
&cli.StringFlag{
@@ -518,7 +518,7 @@ var (
Name: vmNativeDstAddr,
Usage: "VictoriaMetrics address to perform import to. \n" +
" Should be the same as --httpListenAddr value for single-node version or vminsert component." +
" If importing into cluster version see https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format",
" If importing into cluster version see https://docs.victoriametrics.com/cluster-victoriametrics/#url-format",
Required: true,
},
&cli.StringFlag{

@@ -26,7 +26,7 @@ type Retention struct {
FirstOrder string
SecondOrder string
AggTime string
// The actual ranges will will attempt to query (as offsets from now)
// The actual ranges will attempt to query (as offsets from now)
QueryRanges []TimeRange
}

@@ -123,7 +123,7 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
importPath := addr + "/api/v1/import"
if cfg.AccountID != "" {
// if cluster version
// see https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
// see https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
importPath = fmt.Sprintf("%s/insert/%s/prometheus/api/v1/import", addr, cfg.AccountID)
}
importPath, err := AddExtraLabelsToImportPath(importPath, cfg.ExtraLabels)

@@ -1,3 +1,3 @@
See vmgateway docs [here](https://docs.victoriametrics.com/vmgateway.html).
See vmgateway docs [here](https://docs.victoriametrics.com/vmgateway/).

vmgateway docs can be edited at [docs/vmgateway.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmgateway.md).

@@ -9,6 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

@@ -37,12 +38,9 @@ func (ctx *InsertCtx) Reset(rowsLen int) {
for i := range mrs {
cleanMetricRow(&mrs[i])
}
mrs = slicesutil.SetLength(mrs, rowsLen)
ctx.mrs = mrs[:0]

if n := rowsLen - cap(ctx.mrs); n > 0 {
ctx.mrs = append(ctx.mrs[:cap(ctx.mrs)], make([]storage.MetricRow, n)...)
}
ctx.mrs = ctx.mrs[:0]
ctx.metricNamesBuf = ctx.metricNamesBuf[:0]
ctx.relabelCtx.Reset()
ctx.streamAggrCtx.Reset()
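
The Reset rewrite above replaces the manual grow-with-append logic with slicesutil.SetLength. A sketch of what a SetLength-style helper typically does under these assumed semantics (re-slice when capacity suffices, otherwise grow via append); the real library code may differ:

    package main

    import "fmt"

    // setLength resizes a to n items, reusing capacity when possible
    // (assumed semantics of slicesutil.SetLength).
    func setLength[T any](a []T, n int) []T {
        if n <= cap(a) {
            return a[:n]
        }
        return append(a[:cap(a)], make([]T, n-cap(a))...)
    }

    func main() {
        a := make([]int, 0, 4)
        a = setLength(a, 8)
        fmt.Println(len(a), cap(a) >= 8) // 8 true
    }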
@@ -142,7 +140,7 @@ func (ctx *InsertCtx) ApplyRelabeling() {
// FlushBufs flushes buffered rows to the underlying storage.
func (ctx *InsertCtx) FlushBufs() error {
sas := sasGlobal.Load()
if (sas != nil || deduplicator != nil) && !ctx.skipStreamAggr {
if (sas.IsEnabled() || deduplicator != nil) && !ctx.skipStreamAggr {
matchIdxs := matchIdxsPool.Get()
matchIdxs.B = ctx.streamAggrCtx.push(ctx.mrs, matchIdxs.B)
if !*streamAggrKeepInput {

@@ -2,23 +2,16 @@ package common

import (
"sync"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
)

// GetInsertCtx returns InsertCtx from the pool.
//
// Call PutInsertCtx for returning it to the pool.
func GetInsertCtx() *InsertCtx {
select {
case ctx := <-insertCtxPoolCh:
return ctx
default:
if v := insertCtxPool.Get(); v != nil {
return v.(*InsertCtx)
}
return &InsertCtx{}
if v := insertCtxPool.Get(); v != nil {
return v.(*InsertCtx)
}
return &InsertCtx{}
}

// PutInsertCtx returns ctx to the pool.
@@ -26,12 +19,7 @@ func GetInsertCtx() *InsertCtx {
// ctx cannot be used after the call.
func PutInsertCtx(ctx *InsertCtx) {
ctx.Reset(0)
select {
case insertCtxPoolCh <- ctx:
default:
insertCtxPool.Put(ctx)
}
insertCtxPool.Put(ctx)
}

var insertCtxPool sync.Pool
var insertCtxPoolCh = make(chan *InsertCtx, cgroup.AvailableCPUs())
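
GetInsertCtx and PutInsertCtx (like the getPushCtx/putPushCtx pairs later in this diff) drop the CPU-sized channel that used to sit in front of the sync.Pool. A minimal sketch of the resulting pattern with a toy payload type:

    package main

    import (
        "fmt"
        "sync"
    )

    type insertCtx struct{ rows []int }

    var ctxPool sync.Pool

    func getInsertCtx() *insertCtx {
        if v := ctxPool.Get(); v != nil {
            return v.(*insertCtx)
        }
        return &insertCtx{}
    }

    func putInsertCtx(ctx *insertCtx) {
        ctx.rows = ctx.rows[:0] // reset before pooling
        ctxPool.Put(ctx)
    }

    func main() {
        ctx := getInsertCtx()
        ctx.rows = append(ctx.rows, 42)
        putInsertCtx(ctx)
        fmt.Println(len(getInsertCtx().rows)) // 0
    }

The channel presumably predates the per-P caches and the Go 1.13 victim cache in sync.Pool; with those in place it mostly added a second synchronization point.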

@@ -20,20 +20,22 @@ import (

var (
streamAggrConfig = flag.String("streamAggr.config", "", "Optional path to file with stream aggregation config. "+
"See https://docs.victoriametrics.com/stream-aggregation.html . "+
"See https://docs.victoriametrics.com/stream-aggregation/ . "+
"See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval")
streamAggrKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep all the input samples after the aggregation with -streamAggr.config. "+
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
"See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html")
"See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation with -streamAggr.config. "+
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . "+
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation/#deduplication")
streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation/#dropping-unneeded-labels")
streamAggrIgnoreFirstIntervals = flag.Int("streamAggr.ignoreFirstIntervals", 0, "Number of aggregation intervals to skip after the start. Increase this value if you observe incorrect aggregation results after restarts. It could be caused by receiving unordered delayed data from clients pushing data into the database. "+
"See https://docs.victoriametrics.com/stream-aggregation/#ignore-aggregation-intervals-on-start")
streamAggrIgnoreOldSamples = flag.Bool("streamAggr.ignoreOldSamples", false, "Whether to ignore input samples with old timestamps outside the current aggregation interval. "+
"See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples")
"See https://docs.victoriametrics.com/stream-aggregation/#ignoring-old-samples")
)

var (
@@ -56,9 +58,10 @@ func CheckStreamAggrConfig() error {
}
pushNoop := func(_ []prompbmarshal.TimeSeries) {}
opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushNoop, opts)
if err != nil {
@@ -68,6 +71,11 @@ func CheckStreamAggrConfig() error {
return nil
}

// HasStreamAggrConfigured checks whether a streamAggr config is provided
func HasStreamAggrConfigured() bool {
return *streamAggrConfig != ""
}

// InitStreamAggr must be called after flag.Parse and before using the common package.
//
// MustStopStreamAggr must be called when stream aggr is no longer needed.
@@ -84,9 +92,10 @@ func InitStreamAggr() {
sighupCh := procutil.NewSighupChan()

opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
if err != nil {
@@ -117,9 +126,10 @@ func reloadStreamAggrConfig() {
saCfgReloads.Inc()

opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
IgnoreFirstIntervals: *streamAggrIgnoreFirstIntervals,
}
sasNew, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
if err != nil {
@@ -232,7 +242,7 @@ func (ctx *streamAggrCtx) push(mrs []storage.MetricRow, matchIdxs []byte) []byte
tss = tss[tssLen:]

sas := sasGlobal.Load()
if sas != nil {
if sas.IsEnabled() {
matchIdxs = sas.Push(tss, matchIdxs)
} else if deduplicator != nil {
matchIdxs = bytesutil.ResizeNoCopyMayOverallocate(matchIdxs, len(tss))
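
The sas != nil checks become sas.IsEnabled(), which implies the method is safe to call on a nil *Aggregators. A guess at the shape of such a nil-tolerant method; the real implementation lives in lib/streamaggr and may differ:

    package main

    import "fmt"

    type aggregators struct {
        as []int // stand-in for the real per-rule aggregator list
    }

    // isEnabled reports whether any aggregation is configured. Calling a
    // method on a nil receiver is legal in Go as long as the method does
    // not dereference it, which is what makes the sas.IsEnabled() form
    // safe even when no config is loaded.
    func (a *aggregators) isEnabled() bool {
        return a != nil && len(a.as) > 0
    }

    func main() {
        var sas *aggregators
        fmt.Println(sas.isEnabled()) // false, no panic
    }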

@@ -9,7 +9,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
@@ -166,25 +165,15 @@ func (ctx *pushCtx) reset() {
}

func getPushCtx() *pushCtx {
select {
case ctx := <-pushCtxPoolCh:
return ctx
default:
if v := pushCtxPool.Get(); v != nil {
return v.(*pushCtx)
}
return &pushCtx{}
if v := pushCtxPool.Get(); v != nil {
return v.(*pushCtx)
}
return &pushCtx{}
}

func putPushCtx(ctx *pushCtx) {
ctx.reset()
select {
case pushCtxPoolCh <- ctx:
default:
pushCtxPool.Put(ctx)
}
pushCtxPool.Put(ctx)
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())

@@ -26,6 +26,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prompush"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/promremotewrite"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/statsd"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -36,6 +37,8 @@ import (
influxserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/influx"
opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb"
opentsdbhttpserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp"
statsdserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/statsd"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
@@ -49,6 +52,12 @@ var (
"See also -graphiteListenAddr.useProxyProtocol")
graphiteUseProxyProtocol = flag.Bool("graphiteListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -graphiteListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
statsdListenAddr = flag.String("statsdListenAddr", "", "TCP and UDP address to listen for Statsd plaintext data. Usually :8125 must be set. Doesn't work if empty. "+
"See also -statsdListenAddr.useProxyProtocol")
statsdUseProxyProtocol = flag.Bool("statsdListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -statsdListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
statsdDisableAggregationEnforcemenet = flag.Bool(`statsd.disableAggregationEnforcement`, false, "Whether to disable streaming aggregation requirement check. "+
"It's recommended to run statsdServer with pre-configured streaming aggregation to decrease load at database.")
influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for InfluxDB line protocol data. Usually :8089 must be set. Doesn't work if empty. "+
"This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write . "+
"See also -influxListenAddr.useProxyProtocol")
@@ -67,11 +76,12 @@ var (
configAuthKey = flagutil.NewPassword("configAuthKey", "Authorization key for accessing /config page. It must be passed via authKey query arg")
reloadAuthKey = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed as authKey=...")
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented")
maxLabelValueLen = flag.Int("maxLabelValueLen", 16*1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
maxLabelValueLen = flag.Int("maxLabelValueLen", 1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
)

var (
graphiteServer *graphiteserver.Server
statsdServer *statsdserver.Server
influxServer *influxserver.Server
opentsdbServer *opentsdbserver.Server
opentsdbhttpServer *opentsdbhttpserver.Server
@@ -92,6 +102,12 @@ func Init() {
if len(*graphiteListenAddr) > 0 {
graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, *graphiteUseProxyProtocol, graphite.InsertHandler)
}
if len(*statsdListenAddr) > 0 {
if !vminsertCommon.HasStreamAggrConfigured() && !*statsdDisableAggregationEnforcemenet {
logger.Fatalf("streaming aggregation must be configured with enabled statsd server. It's recommended to aggregate metrics received at statsd listener. This check could be disabled with flag -statsd.disableAggregationEnforcement")
}
statsdServer = statsdserver.MustStart(*statsdListenAddr, *statsdUseProxyProtocol, statsd.InsertHandler)
}
if len(*influxListenAddr) > 0 {
influxServer = influxserver.MustStart(*influxListenAddr, *influxUseProxyProtocol, influx.InsertHandlerForReader)
}
@@ -112,6 +128,9 @@ func Stop() {
if len(*graphiteListenAddr) > 0 {
graphiteServer.MustStop()
}
if len(*statsdListenAddr) > 0 {
statsdServer.MustStop()
}
if len(*influxListenAddr) > 0 {
influxServer.MustStop()
}

@@ -6,7 +6,6 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
@@ -90,25 +89,15 @@ func (ctx *pushCtx) reset() {
}

func getPushCtx() *pushCtx {
select {
case ctx := <-pushCtxPoolCh:
return ctx
default:
if v := pushCtxPool.Get(); v != nil {
return v.(*pushCtx)
}
return &pushCtx{}
if v := pushCtxPool.Get(); v != nil {
return v.(*pushCtx)
}
return &pushCtx{}
}

func putPushCtx(ctx *pushCtx) {
ctx.reset()
select {
case pushCtxPoolCh <- ctx:
default:
pushCtxPool.Put(ctx)
}
pushCtxPool.Put(ctx)
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())

app/vminsert/statsd/request_handler.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package statsd

import (
    "io"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
    parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/statsd"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/statsd/stream"
    "github.com/VictoriaMetrics/metrics"
)

var (
    rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="statsd"}`)
    rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="statsd"}`)
)

// InsertHandler processes remote write for statsd protocol with tags.
//
// https://github.com/statsd/statsd/blob/master/docs/metric_types.md
func InsertHandler(r io.Reader) error {
    return stream.Parse(r, false, insertRows)
}

func insertRows(rows []parser.Row) error {
    ctx := common.GetInsertCtx()
    defer common.PutInsertCtx(ctx)

    ctx.Reset(len(rows))
    hasRelabeling := relabel.HasRelabeling()
    for i := range rows {
        r := &rows[i]
        ctx.Labels = ctx.Labels[:0]
        ctx.AddLabel("", r.Metric)
        for j := range r.Tags {
            tag := &r.Tags[j]
            ctx.AddLabel(tag.Key, tag.Value)
        }
        if hasRelabeling {
            ctx.ApplyRelabeling()
        }
        if len(ctx.Labels) == 0 {
            // Skip metric without labels.
            continue
        }
        ctx.SortLabelsIfNeeded()
        var metricName []byte
        var err error
        for _, v := range r.Values {
            metricName, err = ctx.WriteDataPointExt(metricName, ctx.Labels, r.Timestamp, v)
            if err != nil {
                return err
            }
        }

    }
    rowsInserted.Add(len(rows))
    rowsPerInsert.Update(float64(len(rows)))
    return ctx.FlushBufs()
}
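
With the listener wired in (vminsert started with -statsdListenAddr=:8125 plus a -streamAggr.config, per the enforcement check earlier in this diff), a client only needs to write plaintext statsd lines to that port. A minimal sketch of a sender; the exact tag syntax accepted by the parser should be checked against lib/protoparser/statsd:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        conn, err := net.Dial("udp", "127.0.0.1:8125")
        if err != nil {
            panic(err)
        }
        defer conn.Close()
        // A plaintext statsd counter sample; one metric per line.
        fmt.Fprintf(conn, "deploys.myservice:1|c\n")
    }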
@@ -6,7 +6,6 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
@@ -96,25 +95,15 @@ func (ctx *pushCtx) reset() {
}

func getPushCtx() *pushCtx {
select {
case ctx := <-pushCtxPoolCh:
return ctx
default:
if v := pushCtxPool.Get(); v != nil {
return v.(*pushCtx)
}
return &pushCtx{}
if v := pushCtxPool.Get(); v != nil {
return v.(*pushCtx)
}
return &pushCtx{}
}

func putPushCtx(ctx *pushCtx) {
ctx.reset()
select {
case pushCtxPoolCh <- ctx:
default:
pushCtxPool.Put(ctx)
}
pushCtxPool.Put(ctx)
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())

@@ -81,6 +81,9 @@ vmrestore-linux-ppc64le:
vmrestore-linux-s390x:
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

vmrestore-linux-loong64:
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch

vmrestore-linux-386:
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

@@ -1,3 +1,3 @@
See vmrestore docs [here](https://docs.victoriametrics.com/vmrestore.html).
See vmrestore docs [here](https://docs.victoriametrics.com/vmrestore/).

vmrestore docs can be edited at [docs/vmrestore.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmrestore.md).

@@ -74,7 +74,7 @@ func usage() {
const s = `
vmrestore restores VictoriaMetrics data from backups made by vmbackup.

See the docs at https://docs.victoriametrics.com/vmrestore.html .
See the docs at https://docs.victoriametrics.com/vmrestore/ .
`
flagutil.Usage(s)
}

@@ -213,11 +213,11 @@ func (p *parser) parseMetricExprOrFuncCall() (Expr, error) {
// Metric expression or bool expression or None.
if isBool(ident) {
be := &BoolExpr{
B: strings.ToLower(ident) == "true",
B: strings.EqualFold(ident, "true"),
}
return be, nil
}
if strings.ToLower(ident) == "none" {
if strings.EqualFold(ident, "none") {
nne := &NoneExpr{}
return nne, nil
}
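
strings.EqualFold compares case-insensitively without allocating a lowercased copy, which is what the two replacements above rely on. A quick illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        ident := "TRUE"
        // Allocates a new string on every call:
        fmt.Println(strings.ToLower(ident) == "true") // true
        // Allocation-free case-insensitive comparison:
        fmt.Println(strings.EqualFold(ident, "true")) // true
    }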

@@ -99,7 +99,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {

// Strip /prometheus and /graphite prefixes in order to provide path compatibility with cluster version
//
// See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
switch {
case strings.HasPrefix(path, "/prometheus/"):
path = path[len("/prometheus"):]

@@ -21,13 +21,13 @@ textarea { margin: 1em }
<form method="get">
<div>
<p>
<a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query with optional WITH expressions:
<a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query with optional WITH expressions:
</p>
<textarea name="query" style="height: 15em; width: 90%">{%s q %}</textarea><br/>
<input type="submit" value="Expand" />

<p>
<a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:
<a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:
</p>
<textarea style="height: 5em; width: 90%" readonly="readonly">{%= expandWithExprs(q) %}</textarea>
</div>
@@ -79,7 +79,7 @@ textarea { margin: 1em }
{% endstripspace %}

{% func withExprsTutorial() %}
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a></h3>
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a></h3>

<p>
Let's look at the following real query from <a href="https://grafana.com/grafana/dashboards/1860">Node Exporter Full</a> dashboard:

@@ -28,11 +28,11 @@ var (
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
func StreamExpandWithExprsResponse(qw422016 *qt422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
qw422016.N().S(`<html><head><title>Expand WITH expressions</title><style>p { font-weight: bold }textarea { margin: 1em }</style></head><body><div><form method="get"><div><p><a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query with optional WITH expressions:</p><textarea name="query" style="height: 15em; width: 90%">`)
qw422016.N().S(`<html><head><title>Expand WITH expressions</title><style>p { font-weight: bold }textarea { margin: 1em }</style></head><body><div><form method="get"><div><p><a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query with optional WITH expressions:</p><textarea name="query" style="height: 15em; width: 90%">`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:26
qw422016.E().S(q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:26
qw422016.N().S(`</textarea><br/><input type="submit" value="Expand" /><p><a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:</p><textarea style="height: 5em; width: 90%" readonly="readonly">`)
qw422016.N().S(`</textarea><br/><input type="submit" value="Expand" /><p><a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:</p><textarea style="height: 5em; width: 90%" readonly="readonly">`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:32
streamexpandWithExprs(qw422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:32
@@ -192,7 +192,7 @@ func ExpandWithExprsJSONResponse(q string) string {
func streamwithExprsTutorial(qw422016 *qt422016.Writer) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:81
qw422016.N().S(`
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a></h3>
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/metricsql/">MetricsQL</a></h3>

<p>
Let's look at the following real query from <a href="https://grafana.com/grafana/dashboards/1860">Node Exporter Full</a> dashboard:

Some files were not shown because too many files have changed in this diff.