Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Synced 2026-05-17 16:59:40 +03:00

Compare commits: v1.97.4...logsql-ski (535 commits)
Commit SHA1s:

971aecd1ae b4d8837917 1cbaec73ad fff31aa8b0 e3a26c0db6 85d09e5a2d 6bcc6c938b 458338afa5 aaa18e565d 4f55aa29db
9064602d00 16eeb4eb33 9dd5db2b77 66c5fc3243 43835704b7 7308bad777 7db8ba41e7 7b20de4674 f06f55edb6 22497c2c98
cba2f6dce1 e39a1a98f5 2123821e0f b8ba9ea769 8f457c550d 267c28362b 14f3f72829 9ee51e34cc 7c0003d8a4 e6b1ea6740
db0c669cf4 c9aca0c3b6 8bcbdc106c 2205de2391 a4945c0bf0 b6f94cdda7 b02dda1440 3a9b34a67c 83216e956c fc8d9dd317
0dda3978a5 7bf090525d 79bc2288c0 b62496d997 8cd6f7ea3c 00e5e00a5a 69d4075945 c57f43d3f0 39924c8079 f924bf5432
2d4ce05895 28d4ad24a6 d0ab3b2b02 e9da0b1714 f8d10a7106 931dd3f320 7ccdb57ea4 619964c5fc 7baae7f42a c006db1798
81657729ce 6fd369afa1 d99b64d31f 706bac30ae 26e981ced2 5b1b9c2f7d d776c22592 b212c9d6f5 967d5496cf b958fb1e76
a8acf3767a 1de6cd4442 f80ac120f3 93c3be2530 a51a2bc692 b3b29ba6ac 6910e72c99 fb42380ef3 3de8656551 55bd43f28e
e4eccd7074 918cccaddf c3a72b6cdb be36ceb1cf 21bfb66650 9bd3cadce6 4487dac30b 904e95fc69 76b1fc6ac1 daa1326b98
c300ce659f c79bf3925c 49a6dca2d5 8f59ca423b 830b871baf f5848a5c8b f17248eb3f 4cb70ee9a3 4d71a33cb5 af3922b1df
131f357098 4001ca36b8 a05303eaa0 e79b05b4ab 2e843a8ed9 623d257faf b6bd9a97a3 47892b4a4c 166b97b8d0 ac9c2a796f
47e7ad2e01 d7224b2d1c 77eca6bb37 f937439657 d72b565c03 f8f4025dca 4a359d5f67 20d0183195 1263cab870 5ffece51bf
23ab865035 51f5ac1929 bc90f4aae6 509df44d03 cb23685681 bc79f7196d 70eaa06f08 b08cbd0400 b569fa0b2c 43b5d8bc7a
0c0ed61ce7 02bccd1eb9 db3709c87d 93a29fce4e 21d9393c9e 46fd0ed693 b399852742 7e3511ffbd 5f8b91186a e6dd52b04c
4553521f9a f95e9f13ae 76f00cea6b 729b263670 f45f39d80e 1cedaf61cb 6a465f6e29 cbd80efcc1 ab2b3f1785 fb502700f7
b577413d3b 5f9fb58dde a2ea8bc97b 9d5bf5ba5d f4d16919ee 75aa704ee6 2e91dd18c7 d7d685f2af a0937b01c1 b3d84489ec
15e33d56f1 e80b44f19d e8bb64bad5 45d8d41e1e 69dbfa7bc2 f5115c8f1b df5b73ed0d d1d2771bee 1ed6df7901 d46d87a9e0
869755b77d 2d31fd7855 ef2b8d1f17 cb1e618a16 0b7ce70df4 83a8c24281 b9f7c3169a 25eeb2b16c 90c7c67793 98b31b7f7c
0d8bec9c6c cb259116b4 c0a93cf183 7b2b980181 76ef84fcae d8688c9e82 8efe12d66e 97dd7e26ad b2efacb624 61d1af8050
9c1331a38a 5582a24ecf 96f913c83e 76a6f806ae 5b42f15ccc b4b38f782c b33b620af6 e036433b8b 3971ce0625 73f5fb0f0c
c2ff1cfd30 f781c42ea4 1c6230c977 da611ad628 ed523b5bbc 22d63ac7cd 32653db7d5 6319d029a8 074abd5bee e70177c5fb
b232968bb4 d42667fc41 f5bbffd45f a1e9af3abe eb40395a1c 946814afee 925f60841f aa5e7e268c 0ab1069363 86494518da
ac3cf3f357 2b8253185b 138a4d1c2b 0422ae01ba 3c06b3af92 9648c88b71 54a1c506e3 614d34e539 4e65636b44 643c51795c
97e02f2633 89ace61436 28a9e92b5e eb8e95516f 18db573b10 cf2e80a869 69ab55b6f7 5b33da5e19 c1a5f75bd3 44b721f201
a6cba91fd6 a7aa119f35 50ead3d32f 9cd4b0537a 5fe3f23aee 47d1ea1d3a db2320ee83 489eb88169 c8c2c5f8e5 d62b14685a
732e1427f9 c51031dd70 a304372d88 9ea69622a0 e130f29659 5aa3dfbd20 bf9cb84575 fdf0cc9f25 0b7a23a91d cfe774ab50
434a5803e7 812d17f588 ca8a4cfa0d e5c69262e2 146fccc22d 6a8dc74ee7 38e0397ebd e959f54351 c75bfd5b07 bb48d416fc
f8baf29b6e 7a04f99c72 a3cf3d7de1 8266b77d0e b9b4e859be dd7dd0b1db d62055ad33 a5795f533d f2266f40b7 e22836c636
3c74aa6b3d b74231a642 3328b09ae8 c6221c9046 04d13f6149 3723c809a1 67a55b89a4 1217c1f2da 62498a1e68 fab02faa3f
6f203ebc9f 44e15c866a 8c33ba537a 935a42d08c 0c88ba326f f0b4dd7426 aae71832e5 35f592a02c 4bdb3d9fd9 6697da73e5
7e1dd8ab9d d5ca67e667 906a35bdbb ece86cd314 55f1f24e62 b3d9d36fb3 4617dc8bbe f81b480905 275335c181 5c89150fc9
a204fd69f1 0f1ea36dc8 ea9e2b19a5 cf94522389 ea858b6e72 6390b54c4d c8d1d2ab72 d4c0615dcd 840ab60111 340638d4b0
9bad52b687 f79944532b f46eaf92eb 8995b04886 abf82c3657 9e44870d5c 348eec39ba 3997319f45 d68bb658ce df7d3c55ed
5934002b57 bace9a2501 3bec0549dc 07855de142 e8b3045062 73f0a805e2 463bc27312 34aa25d681 79b57f625c 8d9d7a8a12
cd34142a14 aec9cd4316 b7dfe9894c fa19daf3bd f7c3dee1c3 a6eacfdb11 ce3ec3ff2e 7332431b90 2c98e8712c c35c1b007c
0af5f88744 7ee131de8b 17cf031fa1 bb1279bfc4 4034d081f4 b2baf7d472 dc25c30fdc cc5a274e4d e2dad3a2ac 11b03d9fc8
b60dcbe11f 524c0a2e07 5b652bccad 7c2c987ff9 e525b98fbf 0514091948 451d2abf50 c42ddce159 5a092e161c 2e30842582
149d83e596 6a6ea89da5 dfbb6e0826 d03719e72d cee901cdf4 c68bcddd13 3ed7d62627 f8207e33a2 f973711e56 0c293a66ec
6a07cb1bdb 7cd1b7d047 ecccd2a1cc 172e196ac9 3170ad3f44 6b9bedd0f9 eb1505ba14 c0a9b87f46 cdac04997a 926854b0f3
39cba7e4fa 08e6100050 baaa88001e e4ad41b5ff 9c9021fd8b b09bd6c42a b564729d75 4954eee187 53643b620a 1c9f13d6c7
d6e22f2888 88329d84ca 3dddf700f1 4281be4c19 f120477231 582681ce58 dc7256b304 2f3091460f e963d6c789 cafdc7e21a
062cbb1130 94252e1794 f0a62de3a9 c5f9d9f0d6 b92c9a045d 95222b2079 a49a50701a 5d69ba630e 2eb967231b 7eb5c187b3
4a27bf41af 4258f2e261 e0890a84e0 8850c7431d a379b2c016 62e5e2a4c8 64723e591e 39e0007e14 ae8a867924 b161e889b5
d8c1db7953 ee745ab900 0511d5c3a0 b00a9132bb da4b30e8e5 c300a636d6 76a4351012 2a89a9e67b b718e555a6 e1926f286b
83e55456e2 b135504e46 8771e44d23 a354924b0d aea8feee1a 61d9df4c36 0f5176380b 5817e4c0c1 3c246cdf00 93ada2eaaf
077f84964a f7b68b466c e1bf8440eb 3380043424 4369bc1df2 0cf56c1ba5 5458a4bcfc 19c1066a25 b74006e2ca 7f7c118d26
bf9ea249a3 093798375e 07213f4e0c e78e5ccfaa 05f0b707d1 b431ccea5b d8f5be2290 6a7c7ae391 b8b0d22d2d 541b644d3d
4a31bd9661 eaa2125f2c a1d1ccd6f2 dcbdbc760e a81ccbd749 9e8e4cca6a 0bf7921721 65b8002aeb 61524ad87b e159cc30df
28f9fe5f65 28b737dc57 f4f1caea2a 7bc3af1224 c1e50848c5 1bed3a1da7 075d3fb4bf 6bdedc4443 3cab5a16a9 54ac16c883
f0e51dd01d 9dad983bb8 bbcea93ff8 209c96fc42 ee6586f443 de9a9546c2 fce313ae7f 5f836c8729 64027074f9 1684766152
6136c19fbc ba36472b6b 3ac49e5c38 487a94565b 29a9b31584 db11b94e30 7b3183bf71 a40fcc8aa6 0e3c532bf7 deed8ddfb8
87bf1900e4 507744ebb4 5b065441c8 31c53adbde bdfa4aee0d 8f9eddb1e4 88dc6cff70 0fc1f98d28 ff6f7142ec 49d5e7fef5
cafd6f08b3 333bda8702 9d17fc7004 8aaa828ba3 55b5c13839 49e3665d6d c91614b626 c8a96ac241 b7fd7ee0b6 fca3b14b7b
db4623efc2 02492bc1a4 ec0ca8e7eb 9922a486a6 9ce75ee11b
35 .github/PULL_REQUEST_TEMPLATE/pull_request_template.md vendored (new file)
@@ -0,0 +1,35 @@
+### Describe Your Changes
+
+Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
+
+### Checklist
+
+The following checks are mandatory:
+
+- [ ] I have read the [Contributing Guidelines](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/CONTRIBUTING.md)
+- [ ] All commits are signed and include a `Signed-off-by` line. Use `git commit -s` to include `Signed-off-by` in your commits. See this [doc](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work) about how to sign your commits.
+- [ ] Tests are passing locally. Use `make test` to run all tests locally.
+- [ ] Linting is passing locally. Use `make check-all` to run all linters locally.
+
+Further checks are optional for External Contributions:
+
+- [ ] Include a link to the GitHub issue in the commit message, if the issue exists.
+- [ ] Mention the change in the [Changelog](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md). Explain what has changed and why. If there is a related issue or documentation change - link them as well.
+
+Tips for writing a good changelog message:
+
+* Write a human-readable changelog message that describes the problem and solution.
+* Include a link to the issue or pull request in your changelog message.
+* Use specific language identifying the fix, such as an error message, metric name, or flag name.
+* Provide a link to the relevant documentation for any new features you add or modify.
+
+- [ ] After your pull request is merged, please add a message to the issue with instructions for how to test the fix or try the feature you added. Here is an [example](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4048#issuecomment-1546453726)
+- [ ] Do not close the original issue before the change is released. Please note, in some cases GitHub can automatically close the issue once the PR is merged. Re-open the issue in such a case.
+- [ ] If the change somehow affects public interfaces (a new flag was added or updated, or some behavior has changed) - add the corresponding change to the documentation.
+
+Examples of good changelog messages:
+
+1. FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [VictoriaMetrics remote write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol) when [sending / receiving data to / from Kafka](https://docs.victoriametrics.com/vmagent.html#kafka-integration). This protocol allows saving egress network bandwidth costs when sending data from `vmagent` to `Kafka` located in another datacenter or availability zone. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1225).
+
+2. BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html): suppress `series after dedup` error message in logs when `-remoteWrite.streamAggr.dedupInterval` command-line flag is set at [vmagent](https://docs.victoriametrics.com/vmagent.html) or when `-streamAggr.dedupInterval` command-line flag is set at [single-node VictoriaMetrics](https://docs.victoriametrics.com/).
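The mandatory checks in the template boil down to a handful of commands; a minimal pre-submit sketch (the commit message is a placeholder, while `git commit -s`, `make test` and `make check-all` are the commands named in the template):

```
# Sign off the commit, as required by the checklist.
git commit -s -m "docs: fix typo in README"

# Run all tests and all linters locally before opening the PR.
make test
make check-all
```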
2 .github/workflows/check-licenses.yml vendored
@@ -25,7 +25,7 @@ jobs:
         cache: false

     - name: Cache Go artifacts
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: |
           ~/.cache/go-build
2 .github/workflows/codeql-analysis.yml vendored
@@ -63,7 +63,7 @@ jobs:
       if: ${{ matrix.language == 'go' }}

     - name: Cache Go artifacts
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: |
           ~/.cache/go-build
8 .github/workflows/main.yml vendored
@@ -41,7 +41,7 @@ jobs:
         cache: false

     - name: Cache Go artifacts
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: |
           ~/.cache/go-build

@@ -71,7 +71,7 @@ jobs:
         cache: false

     - name: Cache Go artifacts
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: |
           ~/.cache/go-build

@@ -102,7 +102,7 @@ jobs:
         cache: false

     - name: Cache Go artifacts
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
         path: |
           ~/.cache/go-build

@@ -115,6 +115,6 @@ jobs:
       run: make ${{ matrix.scenario}}

     - name: Publish coverage
-      uses: codecov/codecov-action@v3
+      uses: codecov/codecov-action@v4
       with:
         file: ./coverage.txt
15 .github/workflows/sync-docs.yml vendored
@@ -6,9 +6,6 @@ on:
     paths:
       - 'docs/**'
   workflow_dispatch: {}
-env:
-  PAGEFIND_VERSION: "1.0.4"
-  HUGO_VERSION: "latest"
 permissions:
   contents: read # This is required for actions/checkout and to commit back image update
   deployments: write

@@ -27,16 +24,6 @@
           repository: VictoriaMetrics/vmdocs
           token: ${{ secrets.VM_BOT_GH_TOKEN }}
           path: docs
-      - uses: peaceiris/actions-hugo@v2
-        with:
-          hugo-version: ${{env.HUGO_VERSION}}
-          extended: true
-      - name: Install PageFind #install the static search engine for index build
-        uses: supplypike/setup-bin@v3
-        with:
-          uri: "https://github.com/CloudCannon/pagefind/releases/download/v${{env.PAGEFIND_VERSION}}/pagefind-v${{env.PAGEFIND_VERSION}}-x86_64-unknown-linux-musl.tar.gz"
-          name: "pagefind"
-          version: ${{env.PAGEFIND_VERSION}}
       - name: Import GPG key
         uses: crazy-max/ghaction-import-gpg@v5
         with:

@@ -51,13 +38,11 @@
           calculatedSha=$(git rev-parse --short ${{ github.sha }})
           echo "short_sha=$calculatedSha" >> $GITHUB_OUTPUT
         working-directory: main

      - name: update code and commit
        run: |
          rm -rf content
          cp -r ../main/docs content
-          make clean-after-copy
-          make build-search-index
          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
          git add .
33 .github/workflows/wiki.yml vendored (deleted)
@@ -1,33 +0,0 @@
-name: wiki
-on:
-  push:
-    paths:
-      - 'docs/*'
-    branches:
-      - master
-permissions:
-  contents: read
-
-jobs:
-  build:
-    permissions:
-      contents: write # for Git to git push
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@master
-      - name: publish
-        shell: bash
-        env:
-          TOKEN: ${{secrets.CI_TOKEN}}
-        run: |
-          git clone https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git wiki
-          cp -r docs/* wiki
-          cd wiki
-          git config --local user.email "info@victoriametrics.com"
-          git config --local user.name "Vika"
-          git add .
-          git commit -m "update wiki pages"
-          remote_repo="https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git"
-          git push "${remote_repo}"
-          cd ..
-          rm -rf wiki
CONTRIBUTING.md
@@ -14,3 +14,8 @@ We are open to third-party pull requests provided they follow [KISS design princ
 - Avoid automated decisions, which may hurt cluster availability, consistency or performance.

 Adhering to the `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
+
+Before sending a pull request please check the following:
+- [ ] All commits are signed and include a `Signed-off-by` line. Use `git commit -s` to include `Signed-off-by` in your commits. See this [doc](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work) about how to sign your commits.
+- [ ] Tests are passing locally. Use `make test` to run all tests locally.
+- [ ] Linting is passing locally. Use `make check-all` to run all linters locally.
7 Makefile
@@ -178,7 +178,8 @@ victoria-metrics-crossbuild: \
	victoria-metrics-darwin-amd64 \
	victoria-metrics-darwin-arm64 \
	victoria-metrics-freebsd-amd64 \
-	victoria-metrics-openbsd-amd64
+	victoria-metrics-openbsd-amd64 \
+	victoria-metrics-windows-amd64

 vmutils-crossbuild: \
	vmutils-linux-386 \

@@ -465,7 +466,7 @@ benchmark-pure:
 vendor-update:
	go get -u -d ./lib/...
	go get -u -d ./app/...
-	go mod tidy -compat=1.20
+	go mod tidy -compat=1.21
	go mod vendor

 app-local:

@@ -491,7 +492,7 @@ golangci-lint: install-golangci-lint
	golangci-lint run

 install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.55.1
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.57.1

 govulncheck: install-govulncheck
	govulncheck ./...
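The new crossbuild target is invoked like any other per-platform target; a short sketch from the repository root (the target names come straight from the Makefile hunks above):

```
# Build the newly added Windows binary on its own...
make victoria-metrics-windows-amd64

# ...or build every supported platform, Windows now included.
make victoria-metrics-crossbuild
```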
484 README.md
@@ -12,29 +12,34 @@
 <img src="docs/logo.webp" width="300" alt="VictoriaMetrics logo">

 VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
-See [case studies for VictoriaMetrics](https://docs.victoriametrics.com/CaseStudies.html).

 VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest),
 [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
 and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).

-The cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
+Documentation for the cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).

 Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
 [quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.

 There is also a user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).

-If you have questions about VictoriaMetrics, then feel free asking them in the [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).
+If you have questions about VictoriaMetrics, then feel free asking them in the [VictoriaMetrics community Slack chat](https://victoriametrics.slack.com/),
+you can join it via [Slack Inviter](https://slack.victoriametrics.com/).

 [Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
-See [features available in enterprise package](https://docs.victoriametrics.com/enterprise.html).
+See [features available in enterprise package](https://docs.victoriametrics.com/enterprise/).
 Enterprise binaries can be downloaded and evaluated for free
 from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
 You can also [request a free trial license](https://victoriametrics.com/products/enterprise/trial/).

-VictoriaMetrics is developed at a fast pace, so it is recommended to check the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) periodically, and to perform [regular upgrades](#how-to-upgrade-victoriametrics).
+VictoriaMetrics is developed at a fast pace, so it is recommended to check the [CHANGELOG](https://docs.victoriametrics.com/CHANGELOG.html) periodically,
+and to perform [regular upgrades](#how-to-upgrade-victoriametrics).

-VictoriaMetrics has achieved security certifications for Database Software Development and Software-Based Monitoring Services. We apply strict security measures in everything we do. See our [Security page](https://victoriametrics.com/security/) for more details.
+[VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise/) provides long-term support lines of releases (LTS releases) -
+see [these docs](https://docs.victoriametrics.com/lts-releases/).
+
+VictoriaMetrics has achieved security certifications for Database Software Development and Software-Based Monitoring Services.
+We apply strict security measures in everything we do. See [Security page](https://victoriametrics.com/security/) for more details.

 ## Prominent features
@@ -94,45 +99,29 @@ VictoriaMetrics has the following prominent features:
 * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
   [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
 * It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data
-  and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise.html).
+  and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise/).
 * It has an open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
 * It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/)
   and [Google Filestore](https://cloud.google.com/filestore).

-See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).
+See [case studies for VictoriaMetrics](https://docs.victoriametrics.com/CaseStudies.html) and [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).

-## Case studies and talks
+## Components

-Case studies:
+VictoriaMetrics ecosystem contains the following components additionally to [single-node VictoriaMetrics](https://docs.victoriametrics.com/):

-* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
-* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
-* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
-* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
-* [Brandwatch](https://docs.victoriametrics.com/CaseStudies.html#brandwatch)
-* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
-* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
-* [Criteo](https://docs.victoriametrics.com/CaseStudies.html#criteo)
-* [Dig Security](https://docs.victoriametrics.com/CaseStudies.html#dig-security)
-* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
-* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
-* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
-* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
-* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
-* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
-* [Naver](https://docs.victoriametrics.com/CaseStudies.html#naver)
-* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
-* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
-* [Roblox](https://docs.victoriametrics.com/CaseStudies.html#roblox)
-* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
-* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
-* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
-* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
-* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
-* [Zerodha](https://docs.victoriametrics.com/CaseStudies.html#zerodha)
-* [zhihu](https://docs.victoriametrics.com/CaseStudies.html#zhihu)
-
-See also [articles and slides about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics)
+- [vmagent](https://docs.victoriametrics.com/vmagent/) - lightweight agent for receiving metrics via [pull-based](https://docs.victoriametrics.com/vmagent/#how-to-collect-metrics-in-prometheus-format)
+  and [push-based](https://docs.victoriametrics.com/vmagent/#how-to-push-data-to-vmagent) protocols, transforming and sending them to the configured Prometheus-compatible
+  remote storage systems such as VictoriaMetrics.
+- [vmalert](https://docs.victoriametrics.com/vmalert/) - a service for processing Prometheus-compatible alerting and recording rules.
+- [vmalert-tool](https://docs.victoriametrics.com/vmalert-tool/) - a tool for validating alerting and recording rules.
+- [vmauth](https://docs.victoriametrics.com/vmauth/) - authorization proxy and load balancer optimized for VictoriaMetrics products.
+- [vmgateway](https://docs.victoriametrics.com/vmgateway/) - authorization proxy with per-[tenant](https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy) rate limiting capabilities.
+- [vmctl](https://docs.victoriametrics.com/vmctl/) - a tool for migrating and copying data between different storage systems for metrics.
+- [vmbackup](https://docs.victoriametrics.com/vmbackup/), [vmrestore](https://docs.victoriametrics.com/vmrestore/) and [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager/) -
+  tools for creating backups and restoring from backups for VictoriaMetrics data.
+- `vminsert`, `vmselect` and `vmstorage` - components of [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).
+- [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/) - user-friendly cost-efficient database for logs.

 ## Operation
@@ -376,7 +365,8 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](

 ## vmui

-VictoriaMetrics provides UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`.
+VictoriaMetrics provides UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`
+(or at `http://<vmselect>:8481/select/<accountID>/vmui/` in [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/cluster-victoriametrics/)).
 The UI allows exploring query results via graphs and tables. It also provides the following features:

 - Explore:

@@ -386,6 +376,7 @@ The UI allows exploring query results via graphs and tables. It also provides th
   - [Active queries](#active-queries) - shows currently executed queries;
 - Tools:
   - [Trace analyzer](#query-tracing) - playground for loading query traces in JSON format;
+  - [Query analyzer](#query-tracing) - playground for loading query results and traces in JSON format. See `Export query` button below;
   - [WITH expressions playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/expand-with-exprs) - test how WITH expressions work;
   - [Metric relabel debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/relabeling) - playground for [relabeling](#relabeling) configs.

@@ -421,6 +412,10 @@ Graphs for a particular query can be temporarily hidden by clicking the `eye` ic
 When the `eye` icon is clicked while holding the `ctrl` key, then query results for the rest of queries become hidden
 except of the current query results.

+VMUI allows sharing query and [trace](https://docs.victoriametrics.com/#query-tracing) results by clicking on
+`Export query` button in top right corner of the graph area. The query and trace will be exported as a file that later
+can be loaded in VMUI via `Query Analyzer` tool.
+
 See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).

 ## Top queries
@@ -497,11 +492,18 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](

 ## How to scrape Prometheus exporters such as [node-exporter](https://github.com/prometheus/node_exporter)

-VictoriaMetrics can be used as drop-in replacement for Prometheus for scraping targets configured in `prometheus.yml` config file according to [the specification](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file). Just set `-promscrape.config` command-line flag to the path to `prometheus.yml` config - and VictoriaMetrics should start scraping the configured targets. If the provided configuration file contains [unsupported options](https://docs.victoriametrics.com/vmagent.html#unsupported-prometheus-config-sections), then either delete them from the file or just pass `-promscrape.config.strictParse=false` command-line flag to VictoriaMetrics, so it will ignore unsupported options.
+VictoriaMetrics can be used as drop-in replacement for Prometheus for scraping targets configured in `prometheus.yml` config file
+according to [the specification](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file).
+Just set `-promscrape.config` command-line flag to the path to `prometheus.yml` config - and VictoriaMetrics should start scraping the configured targets.
+If the provided configuration file contains [unsupported options](https://docs.victoriametrics.com/vmagent.html#unsupported-prometheus-config-sections),
+then either delete them from the file or just pass `-promscrape.config.strictParse=false` command-line flag to VictoriaMetrics, so it will ignore unsupported options.

 The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.

-See [the list of supported service discovery types for Prometheus scrape targets](https://docs.victoriametrics.com/sd_configs.html).
+See also:
+
+- [scrape config examples](https://docs.victoriametrics.com/scrape_config_examples/)
+- [the list of supported service discovery types for Prometheus scrape targets](https://docs.victoriametrics.com/sd_configs.html)

 VictoriaMetrics also supports [importing data in Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
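A sketch of the invocation described above (the binary name and config path are placeholders; the two flags are the ones named in the text):

```
# Point VictoriaMetrics at an existing Prometheus config and
# ignore any scrape options it doesn't support.
./victoria-metrics-prod \
  -promscrape.config=/etc/prometheus/prometheus.yml \
  -promscrape.config.strictParse=false
```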
@@ -509,8 +511,9 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be

 ## How to send data from DataDog agent

-VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/)
-via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v2/series` path.
+VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/), [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) and
+[DataDog Lambda Extension](https://docs.datadoghq.com/serverless/libraries_integrations/extension/)
+via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v2/series` or via "sketches" API at `/datadog/api/beta/sketches`.

 ### Sending metrics to VictoriaMetrics

@@ -536,11 +539,11 @@ add the following line:
 dd_url: http://victoriametrics:8428/datadog
 ```

-[vmagent](https://docs.victoriametrics.com/vmagent.html) also can accept Datadog metrics format. Depending on where vmagent will forward data,
+[vmagent](https://docs.victoriametrics.com/vmagent.html) also can accept DataDog metrics format. Depending on where vmagent will forward data,
 pick [single-node or cluster URL](https://docs.victoriametrics.com/url-examples.html#datadog) formats.

-### Sending metrics to Datadog and VictoriaMetrics
+### Sending metrics to DataDog and VictoriaMetrics

 DataDog allows configuring [Dual Shipping](https://docs.datadoghq.com/agent/guide/dual-shipping/) for metrics
 sending via ENV variable `DD_ADDITIONAL_ENDPOINTS` or via configuration file `additional_endpoints`.

@@ -564,6 +567,19 @@ additional_endpoints:
   - apikey
 ```

+### Send metrics via Serverless DataDog plugin
+
+Disable logs (logs ingestion is not supported by VictoriaMetrics) and set a custom endpoint in `serverless.yaml`:
+
+```
+custom:
+  datadog:
+    enableDDLogs: false # Disabled not supported DD logs
+    apiKey: fakekey # Set any key, otherwise plugin fails
+provider:
+  environment:
+    DD_DD_URL: <<vm-url>>/datadog # VictoriaMetrics endpoint for DataDog
+```

 ### Send via cURL
@@ -901,9 +917,9 @@ VictoriaMetrics supports the following handlers from [Prometheus querying API](h

 * [/api/v1/query](https://docs.victoriametrics.com/keyConcepts.html#instant-query)
 * [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
-* [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers)
-* [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names)
-* [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values)
+* [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series)
+* [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels)
+* [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues)
 * [/api/v1/status/tsdb](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats). See [these docs](#tsdb-stats) for details.
 * [/api/v1/targets](https://prometheus.io/docs/prometheus/latest/querying/api/#targets) - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for more details.
 * [/federate](https://prometheus.io/docs/prometheus/latest/federation/) - see [these docs](#federation) for more details.
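For orientation, the first two handlers in the list above can be exercised with `curl`; a hedged sketch with placeholder host and the `up` metric:

```
# Instant query at the current time.
curl 'http://victoriametrics:8428/api/v1/query?query=up'

# Range query over the last hour with a 1-minute step.
start=$(date -d '1 hour ago' +%s)
curl "http://victoriametrics:8428/api/v1/query_range?query=up&start=${start}&step=1m"
```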
@@ -1046,7 +1062,7 @@ to your needs or when testing bugfixes.

 ### Development build

-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.22.
 1. Run `make victoria-metrics` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics` binary and puts it into the `bin` folder.

@@ -1062,7 +1078,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

 ### Development ARM build

-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.22.
 1. Run `make victoria-metrics-linux-arm` or `make victoria-metrics-linux-arm64` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics-linux-arm` or `victoria-metrics-linux-arm64` binary respectively and puts it into the `bin` folder.

@@ -1076,7 +1092,7 @@ ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://b

 `Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.

-1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.20.
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.22.
 1. Run `make victoria-metrics-pure` from the root folder of [the repository](https://github.com/VictoriaMetrics/VictoriaMetrics).
    It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
@@ -1119,17 +1135,16 @@ as a service for your OS. A [snap package](https://snapcraft.io/victoriametrics)

 ## How to work with snapshots

-VictoriaMetrics can create [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
-for all the data stored under `-storageDataPath` directory.
-Navigate to `http://<victoriametrics-addr>:8428/snapshot/create` in order to create an instant snapshot.
-The page will return the following JSON response:
+Send a request to `http://<victoriametrics-addr>:8428/snapshot/create` endpoint in order to create
+an [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
+The page returns the following JSON response on successful creation of snapshot:

 ```json
 {"status":"ok","snapshot":"<snapshot-name>"}
 ```

 Snapshots are created under `<-storageDataPath>/snapshots` directory, where `<-storageDataPath>`
-is the command-line flag value. Snapshots can be archived to backup storage at any time
+is the corresponding command-line flag value. Snapshots can be archived to backup storage at any time
 with [vmbackup](https://docs.victoriametrics.com/vmbackup.html).

 Snapshots consist of a mix of hard-links and soft-links to various files and directories inside `-storageDataPath`.
@@ -1141,20 +1156,32 @@ for more details. This adds some restrictions on what can be done with the conte
 - Do not copy subdirectories inside `<-storageDataPath>/snapshot` with `cp`, `rsync` or similar commands, since there are high chances
   that these commands won't copy some data stored in the snapshot. Prefer using [vmbackup](https://docs.victoriametrics.com/vmbackup.html) for making copies of snapshot data.

+See also [snapshot troubleshooting](#snapshot-troubleshooting).
+
-The `http://<victoriametrics-addr>:8428/snapshot/list` page contains the list of available snapshots.
+The `http://<victoriametrics-addr>:8428/snapshot/list` endpoint returns the list of available snapshots.

-Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>` in order
-to delete `<snapshot-name>` snapshot.
+Send a query to `http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>` in order
+to delete the snapshot with `<snapshot-name>` name.

 Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete_all` in order to delete all the snapshots.

-Steps for restoring from a snapshot:
+### How to restore from a snapshot

 1. Stop VictoriaMetrics with `kill -INT`.
 1. Restore snapshot contents from backup with [vmrestore](https://docs.victoriametrics.com/vmrestore.html)
    to the directory pointed by `-storageDataPath`.
 1. Start VictoriaMetrics.

+### Snapshot troubleshooting
+
+Snapshot doesn't occupy disk space just after its creation thanks to the [used approach](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
+Old snapshots may start occupying additional disk space if they refer to old parts, which were already deleted during [background merge](#storage).
+That's why it is recommended deleting old snapshots after they are no longer needed in order to free up disk space used by old snapshots.
+This can be done either manually or automatically if the `-snapshotsMaxAge` command-line flag is set. Make sure that the backup process has enough time to complete
+when setting `-snapshotsMaxAge` command-line flag.
+
+VictoriaMetrics exposes the current number of available snapshots via `vm_snapshots` metric at [`/metrics`](#monitoring) page.
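Since the snapshot API is plain HTTP, the lifecycle above can be exercised with `curl`; a sketch with placeholder host and snapshot name (the endpoints are the ones documented in the hunk above):

```
# Create an instant snapshot and capture its name from the JSON response.
curl http://victoriametrics:8428/snapshot/create

# List the available snapshots.
curl http://victoriametrics:8428/snapshot/list

# Delete a snapshot once it has been archived with vmbackup.
curl 'http://victoriametrics:8428/snapshot/delete?snapshot=<snapshot-name>'
```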
## How to delete time series

Send a request to `http://<victoriametrics-addr>:8428/api/v1/admin/tsdb/delete_series?match[]=<timeseries_selector_for_delete>`,
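A hedged `curl` sketch of the endpoint just named (the selector is a placeholder; `-g` disables curl's bracket globbing so the literal `match[]` and `{}` survive):

```
# Delete all series matching the given selector.
curl -g 'http://victoriametrics:8428/api/v1/admin/tsdb/delete_series?match[]=vm_http_requests_total{job="test"}'
```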
@@ -1270,7 +1297,7 @@ where:
 * `unix_s` - unix seconds
 * `unix_ms` - unix milliseconds
 * `unix_ns` - unix nanoseconds
-* `rfc3339` - [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) time
+* `rfc3339` - [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) time (in the timezone of the server)
 * `custom:<layout>` - custom layout for time that is supported by [time.Format](https://golang.org/pkg/time/#Time.Format) function from Go.
 * `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
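These timestamp format values belong to the CSV export API; a hedged sketch of how such a format string is typically passed (the `/api/v1/export/csv` endpoint and its column syntax are assumed from the surrounding docs, not shown in this hunk):

```
# Export one series as CSV with RFC3339 timestamps
# (endpoint and column syntax assumed, not taken from this hunk).
curl 'http://victoriametrics:8428/api/v1/export/csv?format=__name__,__value__,__timestamp__:rfc3339&match=vm_http_requests_total'
```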
@@ -1514,17 +1541,22 @@ VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-sc

 ### Sending data via OpenTelemetry

-VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at `/opentelemetry/api/v1/push` path.
+VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at `/opentelemetry/v1/metrics` path.

-VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
-Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`.
+VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/v1/metrics`.
+Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/v1/metrics`.
+
+VictoriaMetrics stores the ingested OpenTelemetry [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) as is without any transformations.
+Pass `-opentelemetry.usePrometheusNaming` command-line flag to VictoriaMetrics for automatic conversion of metric names and labels into Prometheus-compatible format.
+
+See [How to use OpenTelemetry metrics with VictoriaMetrics](https://docs.victoriametrics.com/guides/getting-started-with-opentelemetry/).
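A hedged sketch of pushing a pre-serialized OTLP protobuf payload to the new path (the payload file is a placeholder; producing it is up to your OpenTelemetry SDK or collector, and the `Content-Type` header value is an assumption based on OTLP/HTTP conventions):

```
# Push gzip-compressed, protobuf-encoded OTLP metrics.
curl -X POST \
  -H 'Content-Type: application/x-protobuf' \
  -H 'Content-Encoding: gzip' \
  --data-binary @metrics.pb.gz \
  http://victoriametrics:8428/opentelemetry/v1/metrics
```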
 ## JSON line format

 VictoriaMetrics accepts data in JSON line format at [/api/v1/import](#how-to-import-data-in-json-line-format)
 and exports data in this format at [/api/v1/export](#how-to-export-data-in-json-line-format).

-The format follows [JSON streaming concept](http://ndjson.org/), e.g. each line contains JSON object with metrics data in the following format:
+The format follows [JSON streaming concept](https://jsonlines.org/), e.g. each line contains JSON object with metrics data in the following format:

 ```json
 {
@@ -1642,21 +1674,70 @@ See also [resource usage limits docs](#resource-usage-limits).

 By default, VictoriaMetrics is tuned for an optimal resource usage under typical workloads. Some workloads may need fine-grained resource usage limits. In these cases the following command-line flags may be useful:

-- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics. Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
-- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected. Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
-- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory some metainformation about the time series located by each query and spends some CPU time for processing the found time series. This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
-- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled. This allows saving CPU and RAM when executing unexpected heavy queries.
-- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB` of additional memory. So it is better to limit the number of concurrent queries, while suspending additional incoming queries if the concurrency limit is reached. VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for suspended queries. See also `-search.maxMemoryPerQuery` command-line flag.
-- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
+- `-memory.allowedPercent` and `-memory.allowedBytes` limit the amounts of memory, which may be used for various internal caches at VictoriaMetrics.
+  Note that VictoriaMetrics may use more memory, since these flags don't limit additional memory, which may be needed on a per-query basis.
+- `-search.maxMemoryPerQuery` limits the amounts of memory, which can be used for processing a single query. Queries, which need more memory, are rejected.
+  Heavy queries, which select big number of time series, may exceed the per-query memory limit by a small percent. The total memory limit
+  for concurrently executed queries can be estimated as `-search.maxMemoryPerQuery` multiplied by `-search.maxConcurrentRequests`.
+- `-search.maxUniqueTimeseries` limits the number of unique time series a single query can find and process. VictoriaMetrics keeps in memory
+  some metainformation about the time series located by each query and spends some CPU time for processing the found time series.
+  This means that the maximum memory usage and CPU usage a single query can use is proportional to `-search.maxUniqueTimeseries`.
+- `-search.maxQueryDuration` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled.
+  This allows saving CPU and RAM when executing unexpected heavy queries.
+  The limit can be altered for each query by passing `timeout` GET parameter, but can't exceed the limit specified via `-search.maxQueryDuration` command-line flag.
+- `-search.maxConcurrentRequests` limits the number of concurrent requests VictoriaMetrics can process. Bigger number of concurrent requests usually means
+  bigger memory usage. For example, if a single query needs 100 MiB of additional memory during its execution, then 100 concurrent queries may need `100 * 100 MiB = 10 GiB`
+  of additional memory. So it is better to limit the number of concurrent queries, while pausing additional incoming queries if the concurrency limit is reached.
+  VictoriaMetrics provides `-search.maxQueueDuration` command-line flag for limiting the max wait time for paused queries. See also `-search.maxMemoryPerQuery` command-line flag.
+- `-search.maxQueueDuration` limits the maximum duration queries may wait for execution when `-search.maxConcurrentRequests` concurrent queries are executed.
+- `-search.ignoreExtraFiltersAtLabelsAPI` enables ignoring of `match[]`, [`extra_filters[]` and `extra_label`](https://docs.victoriametrics.com/#prometheus-querying-api-enhancements)
+  query args at [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels) and
+  [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues).
+  This may be useful for reducing the load on VictoriaMetrics if the provided extra filters match too many time series.
+  The downside is that the endpoints can return labels and series, which do not match the provided extra filters.
+- `-search.maxSamplesPerSeries` limits the number of raw samples the query can process per each time series. VictoriaMetrics sequentially processes
+  raw samples per each found time series during the query. It unpacks raw samples on the selected time range per each time series into memory
+  and then applies the given [rollup function](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). The `-search.maxSamplesPerSeries` command-line flag
+  allows limiting memory usage in the case when the query is executed on a time range, which contains hundreds of millions of raw samples per each located time series.
+- `-search.maxSamplesPerQuery` limits the number of raw samples a single query can process. This allows limiting CPU usage for heavy queries.
+- `-search.maxResponseSeries` limits the number of time series a single query can return from [`/api/v1/query`](https://docs.victoriametrics.com/keyConcepts.html#instant-query)
+  and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyConcepts.html#range-query).

-- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
-- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
-- `-search.maxSeriesPerAggrFunc` limits the number of time series, which can be generated by [MetricsQL aggregate functions](https://docs.victoriametrics.com/MetricsQL.html#aggregate-functions) in a single query.
-- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers). This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxSeries` to quite low value in order limit CPU and memory usage.
-- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names). This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
-- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values). This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate). In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
+- `-search.maxPointsPerTimeseries` limits the number of calculated points, which can be returned per each matching time series
+  from [range query](https://docs.victoriametrics.com/keyConcepts.html#range-query).
+- `-search.maxPointsSubqueryPerTimeseries` limits the number of calculated points, which can be generated per each matching time series
+  during [subquery](https://docs.victoriametrics.com/MetricsQL.html#subqueries) evaluation.
+- `-search.maxSeriesPerAggrFunc` limits the number of time series, which can be generated by [MetricsQL aggregate functions](https://docs.victoriametrics.com/MetricsQL.html#aggregate-functions)
+  in a single query.
+- `-search.maxSeries` limits the number of time series, which may be returned from [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series).
+  This endpoint is used mostly by Grafana for auto-completion of metric names, label names and label values. Queries to this endpoint may take big amounts
+  of CPU time and memory when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxSeries` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
+- `-search.maxTagKeys` limits the number of items, which may be returned from [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels).
+  This endpoint is used mostly by Grafana for auto-completion of label names. Queries to this endpoint may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxTagKeys` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
+- `-search.maxTagValues` limits the number of items, which may be returned from [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues).
+  This endpoint is used mostly by Grafana for auto-completion of label values. Queries to this endpoint may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxTagValues` to quite low value in order to limit CPU and memory usage.
+  See also `-search.maxLabelsAPIDuration` and `-search.maxLabelsAPISeries`.
+- `-search.maxLabelsAPISeries` limits the number of time series, which can be scanned when performing [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels),
+  [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues)
+  or [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series) requests.
+  These endpoints are used mostly by Grafana for auto-completion of label names and label values. Queries to these endpoints may take big amounts of CPU time and memory
+  when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
+  In this case it might be useful to set the `-search.maxLabelsAPISeries` to quite low value in order to limit CPU and memory usage.
|
||||
See also `-search.maxLabelsAPIDuration` and `-search.ignoreExtraFiltersAtLabelsAPI`.
|
||||
- `-search.maxLabelsAPIDuration` limits the duration for requests to [/api/v1/labels](https://docs.victoriametrics.com/url-examples/#apiv1labels),
|
||||
[/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples/#apiv1labelvalues)
|
||||
or [/api/v1/series](https://docs.victoriametrics.com/url-examples/#apiv1series).
|
||||
The limit can be altered for each query by passing `timeout` GET parameter, but can't exceed the limit specified via cmd-line flag.
|
||||
These endpoints are used mostly by Grafana for auto-completion of label names and label values. Queries to these endpoints may take big amounts of CPU time and memory
|
||||
when the database contains big number of unique time series because of [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
|
||||
In this case it might be useful to set the `-search.maxLabelsAPIDuration` to quite low value in order to limit CPU and memory usage.
|
||||
See also `-search.maxLabelsAPISeries` and `-search.ignoreExtraFiltersAtLabelsAPI`.
|
||||
- `-search.maxTagValueSuffixesPerSearch` limits the number of entries, which may be returned from `/metrics/find` endpoint. See [Graphite Metrics API usage docs](#graphite-metrics-api-usage).
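
For example, a minimal sketch of combining several of the limits above (the values are illustrative, not recommendations, and should be tuned per workload):

```
./victoria-metrics \
  -search.maxSamplesPerQuery=100000000 \
  -search.maxSeries=10000 \
  -search.maxTagKeys=10000 \
  -search.maxTagValues=10000 \
  -search.maxLabelsAPIDuration=5s
```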

See also [resource usage limits at VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#resource-usage-limits).

If multiple raw samples have **the same timestamp** on the given `-dedup.minScrapeInterval` discrete interval,
then the sample with **the biggest value** is kept.

[Prometheus staleness markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are processed as any other value during de-duplication.
If the raw sample with the biggest timestamp on the `-dedup.minScrapeInterval` interval contains a stale marker, then it is kept after the deduplication.
This allows properly preserving staleness markers during the de-duplication.

When identically configured `vmagent` instances run in HA pairs, the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples
from other `vmagent` instances.
See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.

VictoriaMetrics stores all the ingested samples to disk even if the `-dedup.minScrapeInterval` command-line flag is set.
The ingested samples are de-duplicated during [background merges](#storage) and during query execution.
VictoriaMetrics also supports de-duplication during data ingestion before the data is stored to disk, via the `-streamAggr.dedupInterval` command-line flag -
see [these docs](https://docs.victoriametrics.com/stream-aggregation/#deduplication).
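
For example, a minimal sketch of enabling de-duplication (the `30s` value is illustrative and should match the scrape interval of the deduplicated samples):

```
./victoria-metrics -dedup.minScrapeInterval=30s
```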

## Storage

VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`,

## Multiple retentions

Distinct retentions for distinct time series can be configured via [retention filters](#retention-filters)
in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise/).

Community version of VictoriaMetrics supports only a single retention, which can be configured via the [-retentionPeriod](#retention) command-line flag.
If you need multiple retentions in the community version of VictoriaMetrics, then you may start multiple VictoriaMetrics instances
with distinct values for the following flags: `-retentionPeriod`, `-httpListenAddr` and `-storageDataPath`.

## Retention filters

[Enterprise version of VictoriaMetrics](https://docs.victoriametrics.com/enterprise/) supports `retention filters`,
which allow configuring multiple retentions for distinct sets of time series matching the configured [series filters](https://docs.victoriametrics.com/keyConcepts.html#filtering)
via the `-retentionFilter` command-line flag. This flag accepts `filter:duration` options, where `filter` must be
a valid [series filter](https://docs.victoriametrics.com/keyConcepts.html#filtering), while the `duration` must be a valid [retention](#retention) value.
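
For example, a minimal sketch (the filter and durations are illustrative): keep `env="dev"` series for 3 days, while all the other series follow the global 12-month retention:

```
./victoria-metrics -retentionPeriod=12 -retentionFilter='{env="dev"}:3d'
```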

See [how to configure multiple retentions in VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#retention-filters).

See also [downsampling](#downsampling).

Retention filters can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
See [how to request a free trial license](https://victoriametrics.com/products/enterprise/trial/).

## Downsampling

[VictoriaMetrics Enterprise](https://docs.victoriametrics.com/enterprise/) supports multi-level downsampling via the `-downsampling.period=offset:interval` command-line flag.
This command-line flag instructs leaving the last sample per each `interval` per each [time series](https://docs.victoriametrics.com/keyconcepts/#time-series)
for [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) older than the `offset`. For example, `-downsampling.period=30d:5m` instructs leaving the last sample
per each 5-minute interval for samples older than 30 days, while the rest of samples are dropped.

The `-downsampling.period` command-line flag can be specified multiple times in order to apply different downsampling levels for different time ranges (aka multi-level downsampling).
For example, `-downsampling.period=30d:5m,180d:1h` instructs leaving the last sample per each 5-minute interval for samples older than 30 days,
while leaving the last sample per each 1-hour interval for samples older than 180 days.
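
For example, a minimal multi-level sketch (the periods are illustrative):

```
./victoria-metrics -downsampling.period=30d:5m -downsampling.period=180d:1h
```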

VictoriaMetrics supports configuring independent downsampling per different sets of [time series](https://docs.victoriametrics.com/keyconcepts/#time-series)
via the `-downsampling.period=filter:offset:interval` syntax. In this case the given `offset:interval` downsampling is applied only to time series matching the given `filter`.
The `filter` can contain an arbitrary [series filter](https://docs.victoriametrics.com/keyConcepts.html#filtering).
For example, `-downsampling.period='{__name__=~"(node|process)_.*"}:1d:1m'` instructs VictoriaMetrics to deduplicate samples older than one day with one minute interval
only for [time series](https://docs.victoriametrics.com/keyconcepts/#time-series) with names starting with `node_` or `process_` prefixes.
The de-duplication for other time series can be configured independently via additional `-downsampling.period` command-line flags.

If the time series doesn't match any `filter`, then it isn't downsampled. If the time series matches multiple filters, then the downsampling
for the first matching `filter` is applied. For example, `-downsampling.period='{env="prod"}:1d:30s,{__name__=~"node_.*"}:1d:5m'` de-duplicates
samples older than one day with 30 seconds interval across all the time series with the `env="prod"` [label](https://docs.victoriametrics.com/keyconcepts/#labels),
even if their names start with the `node_` prefix. All the other time series with names starting with the `node_` prefix are de-duplicated with 5 minutes interval.

If downsampling shouldn't be applied to some time series matching the given `filter`, then pass the `-downsampling.period=filter:0s:0s` command-line flag to VictoriaMetrics.
For example, if series with the `env="prod"` label shouldn't be downsampled, then pass the `-downsampling.period='{env="prod"}:0s:0s'` command-line flag in front of other `-downsampling.period` flags.
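
Putting the rules above together, a sketch that combines the filters from the examples above (the order matters, since the first matching filter wins):

```
./victoria-metrics \
  -downsampling.period='{env="prod"}:0s:0s' \
  -downsampling.period='{__name__=~"node_.*"}:1d:5m'
```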

Downsampling is applied independently per each time series and leaves a single [raw sample](https://docs.victoriametrics.com/keyConcepts.html#raw-samples)
with the biggest [timestamp](https://en.wikipedia.org/wiki/Unix_time) on the configured interval, in the same way as [deduplication](#deduplication) does.
It works the best for [counters](https://docs.victoriametrics.com/keyConcepts.html#counter) and [histograms](https://docs.victoriametrics.com/keyConcepts.html#histogram),
as their values are always increasing. Downsampling [gauges](https://docs.victoriametrics.com/keyConcepts.html#gauge)
and [summaries](https://docs.victoriametrics.com/keyConcepts.html#summary) loses some changes within the downsampling interval,
since only the last sample on the given interval is left and the rest of samples are dropped.

You can use [recording rules](https://docs.victoriametrics.com/vmalert.html#rules) or [streaming aggregation](https://docs.victoriametrics.com/stream-aggregation.html)
to apply custom aggregation functions, like min/max/avg etc., in order to make gauges more resilient to downsampling.

Downsampling can reduce disk space usage and improve query performance if it is applied to time series with a big number
of samples per each series. The downsampling doesn't improve query performance and doesn't reduce disk space if the database contains a big number
of time series with a small number of samples per each series, since downsampling doesn't reduce the number of time series.
So there is little sense in applying downsampling to time series with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate).
In this case the majority of query time is spent on searching for the matching time series instead of processing the found samples.
It is possible to use [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) in [vmagent](https://docs.victoriametrics.com/vmagent.html)
or [recording rules in vmalert](https://docs.victoriametrics.com/vmalert.html#rules) in order to
[reduce the number of time series](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).

Downsampling is performed during [background merges](https://docs.victoriametrics.com/#storage).
It cannot be performed if there is not enough free disk space or if vmstorage is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).

Please note that intervals of `-downsampling.period` must be multiples of each other.
In case [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled, the value of the `-dedup.minScrapeInterval` command-line flag must also
be a multiple of `-downsampling.period` intervals. This is required to ensure consistency of deduplication and downsampling results.

It is safe updating `-downsampling.period` during VictoriaMetrics restarts - the updated downsampling configuration will be
applied eventually to historical data during [background merges](https://docs.victoriametrics.com/#storage).

See [how to configure downsampling in VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#downsampling).

See also [retention filters](#retention-filters).

The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
See [how to request a free trial license](https://victoriametrics.com/products/enterprise/trial/).

## Multi-tenancy

Additionally, alerting can be set up with the following tools:

* With Promxy - see [the corresponding docs](https://github.com/jacksontj/promxy/blob/master/README.md#how-do-i-use-alertingrecording-rules-in-promxy).
* With Grafana - see [the corresponding docs](https://grafana.com/docs/alerting/rules/).

## mTLS protection

By default `VictoriaMetrics` accepts http requests at port `8428` (this port can be changed via the `-httpListenAddr` command-line flag).
[Enterprise version of VictoriaMetrics](https://docs.victoriametrics.com/enterprise/) supports the ability to accept [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication)
requests at this port, by specifying `-tls` and `-mtls` command-line flags. For example, the following command runs `VictoriaMetrics`, which accepts only mTLS requests at port `8428`:

```
./victoria-metrics -tls -mtls
```

By default the system-wide [TLS Root CA](https://en.wikipedia.org/wiki/Root_certificate) is used for verifying client certificates if the `-mtls` command-line flag is specified.
It is possible to specify a custom TLS Root CA via the `-mtlsCAFile` command-line flag.
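
A custom CA sketch (the certificate path is illustrative):

```
./victoria-metrics -tls -mtls -mtlsCAFile=/path/to/root-ca.crt
```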

## Security

General security recommendations:

- All the VictoriaMetrics components must run in protected private networks without direct access from untrusted networks such as the Internet.
  The exception is [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html),
  which are intended for serving public requests and performing authorization with [TLS termination](https://en.wikipedia.org/wiki/TLS_termination_proxy).
- All the requests from untrusted networks to VictoriaMetrics components must go through an auth proxy such as [vmauth](https://docs.victoriametrics.com/vmauth.html)
  or [vmgateway](https://docs.victoriametrics.com/vmgateway.html). The proxy must be set up with proper authentication and authorization.
- Prefer using lists of allowed API endpoints, while disallowing access to other endpoints when configuring [vmauth](https://docs.victoriametrics.com/vmauth.html)
  in front of VictoriaMetrics components.
- Set a reasonable [`Strict-Transport-Security`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) header value on all the components to mitigate [MitM attacks](https://en.wikipedia.org/wiki/Man-in-the-middle_attack), for example: `max-age=31536000; includeSubDomains`. See the `-http.header.hsts` flag.
- Set a reasonable [`Content-Security-Policy`](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP) header value to mitigate [XSS attacks](https://en.wikipedia.org/wiki/Cross-site_scripting). See the `-http.header.csp` flag.
- Set a reasonable [`X-Frame-Options`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options) header value to mitigate [clickjacking attacks](https://en.wikipedia.org/wiki/Clickjacking), for example `DENY`. See the `-http.header.frameOptions` flag.
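
For example, a sketch that applies the recommended header values mentioned above:

```
./victoria-metrics \
  -http.header.hsts='max-age=31536000; includeSubDomains' \
  -http.header.csp="default-src 'self'" \
  -http.header.frameOptions=DENY
```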

VictoriaMetrics provides the following security-related command-line flags:

* `-tls`, `-tlsCertFile` and `-tlsKeyFile` for switching from HTTP to HTTPS at `-httpListenAddr` (8428 by default).
* `-mtls` and `-mtlsCAFile` for enabling [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication) for requests to `-httpListenAddr`. See [these docs](#mtls-protection).
* `-httpAuth.username` and `-httpAuth.password` for protecting all the HTTP endpoints
  with [HTTP Basic Authentication](https://en.wikipedia.org/wiki/Basic_access_authentication).
* `-deleteAuthKey` for protecting the `/api/v1/admin/tsdb/delete_series` endpoint. See [how to delete time series](#how-to-delete-time-series).
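
For example, a minimal sketch of protecting all the HTTP endpoints with Basic Auth (the username and the password file path are illustrative):

```
./victoria-metrics -httpAuth.username=admin -httpAuth.password=file:///abs/path/to/password
```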

## Monitoring

VictoriaMetrics exports internal metrics in Prometheus exposition format at the `/metrics` page.
These metrics can be scraped via [vmagent](https://docs.victoriametrics.com/vmagent.html) or any other Prometheus-compatible scraper.

If you use Google Cloud Managed Prometheus for scraping metrics from VictoriaMetrics components, then pass the `-metrics.exposeMetadata`
command-line flag to them, so they add `TYPE` and `HELP` comments per each exposed metric at the `/metrics` page.
See [these docs](https://cloud.google.com/stackdriver/docs/managed-prometheus/troubleshooting#missing-metric-type) for details.

Alternatively, single-node VictoriaMetrics can self-scrape the metrics when the `-selfScrapeInterval` command-line flag is
set to a duration greater than 0. For example, `-selfScrapeInterval=10s` would enable self-scraping of the `/metrics` page
with 10 seconds interval.
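
A minimal self-scraping sketch:

```
./victoria-metrics -selfScrapeInterval=10s
```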

_Please note, never use a loadbalancer address for scraping metrics. All the monitored components should be scraped directly by their address._

Official Grafana dashboards are available for [single-node](https://grafana.com/grafana/dashboards/10229)
and [clustered](https://grafana.com/grafana/dashboards/11176) VictoriaMetrics.

* VictoriaMetrics ignores `NaN` values during data ingestion.

See also:

- [Snapshot troubleshooting](#snapshot-troubleshooting).
- [General troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html).

## Push metrics

Sometimes it is needed to remove such caches on the next startup. This can be done by creating the `reset_cache_on_startup` file inside the `<-storageDataPath>/cache` directory before the restart.
In this case VictoriaMetrics will automatically remove all the caches on the next start.
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details.

It is also possible to remove the [rollup result cache](#rollup-result-cache) on startup by passing the `-search.resetRollupResultCacheOnStartup` command-line flag to VictoriaMetrics.

## Rollup result cache

VictoriaMetrics caches query responses by default. This allows increasing performance for repeated queries
to [`/api/v1/query`](https://docs.victoriametrics.com/keyconcepts/#instant-query) and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query)
with the increasing `time`, `start` and `end` query args.

This cache may work incorrectly when ingesting historical data into VictoriaMetrics. See [these docs](#backfilling) for details.

The rollup cache can be disabled either globally by running VictoriaMetrics with the `-search.disableCache` command-line flag
or on a per-query basis by passing the `nocache=1` query arg to `/api/v1/query` and `/api/v1/query_range`.
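
For example, a sketch of bypassing the cache for a single query (the query and the address are illustrative; `8428` is the default `-httpListenAddr` port):

```
curl 'http://localhost:8428/api/v1/query?query=up&nocache=1'
```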

See also [cache removal docs](#cache-removal).

## Cache tuning

VictoriaMetrics uses various in-memory caches for faster data ingestion and query performance.

VictoriaMetrics accepts historical data in arbitrary order of time via any supported data ingestion method.
See [how to backfill data with recording rules in vmalert](https://docs.victoriametrics.com/vmalert.html#rules-backfilling).
Make sure that the configured `-retentionPeriod` covers timestamps for the backfilled data.

It is recommended disabling the [query cache](#rollup-result-cache) with the `-search.disableCache` command-line flag when writing
historical data with timestamps from the past, since the cache assumes that the data is written with
the current timestamps. The query cache can be enabled after the backfilling is complete.

An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache)
after the backfilling is complete. This will reset the [query cache](#rollup-result-cache), which could contain incomplete data cached during the backfilling.
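
A sketch of such a reset (assuming VictoriaMetrics listens on the default port and `-search.resetCacheAuthKey` isn't set):

```
curl http://localhost:8428/internal/resetRollupResultCache
```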

Yet another solution is to increase the `-search.cacheTimestampOffset` flag value in order to disable caching
for data with timestamps close to the current time. Single-node VictoriaMetrics automatically resets response

Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics.com](mailto:info@victoriametrics.com).

Feel free to ask any questions regarding VictoriaMetrics:

* [Slack Inviter](https://slack.victoriametrics.com/) and [Slack channel](https://victoriametrics.slack.com/)
* [Twitter](https://twitter.com/VictoriaMetrics/)
* [Linkedin](https://www.linkedin.com/company/victoriametrics/)
* [Reddit](https://www.reddit.com/r/VictoriaMetrics/)

Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their description:

```
-datadog.sanitizeMetricName
    Sanitize metric names for the ingested DataDog data to comply with DataDog behaviour described at https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics (default true)
-dedup.minScrapeInterval duration
    Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication
-deleteAuthKey value
    authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
    Flag value can be read from the given file when using -deleteAuthKey=file:///abs/path/to/file or -deleteAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -deleteAuthKey=http://host/path or -deleteAuthKey=https://host/path
-denyQueryTracing
    Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/#query-tracing
-downsampling.period array
    Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. When setting multiple downsampling periods, it is necessary for the periods to be multiples of each other. See https://docs.victoriametrics.com/#downsampling for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise/
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-dryRun
    Whether to check config files without running VictoriaMetrics. The following config files are checked: -promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
-enableTCP6
-envflag.prefix string
    Prefix for environment variables if -envflag.enable is set
-eula
    Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise/
-filestream.disableFadvise
    Whether to disable fadvise() syscall when reading large data files. The fadvise() syscall prevents from eviction of recently accessed data from OS page cache during background merges and backups. In some rare cases it is better to disable the syscall if it uses too much CPU
-finalMergeDelay duration
-graphiteTrimTimestamp duration
    Trim timestamps for Graphite data to this duration. Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data (default 1s)
-http.connTimeout duration
    Incoming connections to -httpListenAddr are closed after the configured timeout. This may help evenly spreading load among a cluster of services behind TCP-level load balancer. Zero value disables closing of incoming connections (default 2m0s)
-http.disableResponseCompression
    Disable compression of HTTP responses to save CPU resources. By default, compression is enabled to save network bandwidth
-http.header.csp string
    Value for 'Content-Security-Policy' header, recommended: "default-src 'self'"
-http.header.frameOptions string
    Value for 'X-Frame-Options' header
-http.header.hsts string
    Value for 'Strict-Transport-Security' header, recommended: 'max-age=31536000; includeSubDomains'
-http.idleConnTimeout duration
    Timeout for incoming idle http connections (default 1m0s)
-http.maxGracefulShutdownDuration duration
-httpAuth.password value
    Flag value can be read from the given file when using -httpAuth.password=file:///abs/path/to/file or -httpAuth.password=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -httpAuth.password=http://host/path or -httpAuth.password=https://host/path
-httpAuth.username string
    Username for HTTP server's Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
-httpListenAddr array
    TCP addresses to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-httpListenAddr.useProxyProtocol array
    Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
    Supports array of values separated by comma or specified via multiple flags.
    Empty values are set to false.
-import.maxLineLen size
    The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export
    Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 10485760)
-influx.databaseNames array
    Comma-separated list of database names to return from /query and /influx/query API. This can be needed for accepting data from Telegraf plugins such as https://github.com/fangli/fluent-plugin-influxdb
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-influx.maxLineSize size
    The maximum size in bytes for a single InfluxDB line during parsing
    Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 262144)
-loggerWarnsPerSecondLimit int
    Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
-maxConcurrentInserts int
    The maximum number of concurrent insert requests. Default value depends on the number of CPU cores and should work for most cases since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration
-maxInsertRequestSize size
    The maximum size in bytes of a single Prometheus remote_write API request
    Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
-metricsAuthKey value
    Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
    Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls array
    Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise/
    Supports array of values separated by comma or specified via multiple flags.
    Empty values are set to false.
-mtlsCAFile array
    Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise/
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-newrelic.maxInsertRequestSize size
    The maximum size in bytes of a single NewRelic request to /newrelic/infra/v2/metrics/events/bulk
    Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
-promscrape.cluster.memberNum string
    The number of vmagent instance in the cluster of scrapers. It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name. See also -promscrape.cluster.memberLabel . See https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets for more info (default "0")
-promscrape.cluster.memberURLTemplate string
    An optional template for URL to access vmagent instance with the given -promscrape.cluster.memberNum value. Every %d occurrence in the template is substituted with -promscrape.cluster.memberNum at urls to vmagent instances responsible for scraping the given target at /service-discovery page. For example -promscrape.cluster.memberURLTemplate='http://vmagent-%d:8429/targets'. See https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets for more details
-promscrape.cluster.membersCount int
    The number of members in a cluster of scrapers. Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . Each member then scrapes roughly 1/N of all the targets. By default, cluster scraping is disabled, i.e. a single scraper scrapes all the targets. See https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets for more info (default 1)
-promscrape.cluster.name string
-promscrape.fileSDCheckInterval duration
    Interval for checking for changes in 'file_sd_config'. See https://docs.victoriametrics.com/sd_configs.html#file_sd_configs for details (default 1m0s)
-promscrape.gceSDCheckInterval duration
    Interval for checking for changes in gce. This works only if gce_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#gce_sd_configs for details (default 1m0s)
-promscrape.hetznerSDCheckInterval duration
    Interval for checking for changes in Hetzner API. This works only if hetzner_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#hetzner_sd_configs for details (default 1m0s)
-promscrape.httpSDCheckInterval duration
    Interval for checking for changes in http endpoint service discovery. This works only if http_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#http_sd_configs for details (default 1m0s)
-promscrape.kubernetes.apiServerTimeout duration
-pushmetrics.extraLabel array
    Optional labels to add to metrics pushed to every -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to every -pushmetrics.url
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-pushmetrics.header array
    Optional HTTP request header to send to every -pushmetrics.url . For example, -pushmetrics.header='Authorization: Basic foobar' adds 'Authorization: Basic foobar' header to every request to every -pushmetrics.url
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-pushmetrics.interval duration
    Interval for pushing metrics to every -pushmetrics.url (default 10s)
-pushmetrics.url array
    Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-relabelConfig string
    Optional path to a file with relabeling rules, which are applied to all the ingested metrics. The path can point either to local file or to http url. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
-reloadAuthKey value
    Auth key for /-/reload http endpoint. It must be passed as authKey=...
    Flag value can be read from the given file when using -reloadAuthKey=file:///abs/path/to/file or -reloadAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -reloadAuthKey=http://host/path or -reloadAuthKey=https://host/path
-retentionFilter array
    Retention filter in the format 'filter:retention'. For example, '{env="dev"}:3d' configures the retention for time series with env="dev" label to 3 days. See https://docs.victoriametrics.com/#retention-filters for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise/
    Supports an array of values separated by comma or specified via multiple flags.
    Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-retentionPeriod value
    Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter
    The following optional suffixes are supported: s (second), m (minute), h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1)
-search.disableAutoCacheReset
    Whether to disable automatic response cache reset if a sample with timestamp outside -search.cacheTimestampOffset is inserted into VictoriaMetrics
-search.disableCache
    Whether to disable response caching. This may be useful when ingesting historical data. See https://docs.victoriametrics.com/#backfilling . See also -search.resetRollupResultCacheOnStartup
-search.graphiteMaxPointsPerSeries int
    The maximum number of points per series Graphite render API can return (default 1000000)
-search.graphiteStorageStep duration
    The interval between datapoints stored in the database. It is used at Graphite Render API handler for normalizing the interval between datapoints in case it isn't normalized. It can be overridden by sending 'storage_step' query arg to /render API or by sending the desired interval via 'Storage-Step' http header during querying /render API (default 10s)
-search.ignoreExtraFiltersAtLabelsAPI
    Whether to ignore match[], extra_filters[] and extra_label query args at /api/v1/labels and /api/v1/label/.../values . This may be useful for decreasing load on VictoriaMetrics when extra filters match too many time series. The downside is that superfluous labels or series could be returned, which do not match the extra filters. See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration
-search.latencyOffset duration
    The time when data points become visible in query results after the collection. It can be overridden on per-query basis via latency_offset arg. Too small value can result in incomplete last points for query results (default 30s)
-search.logQueryMemoryUsage size
-search.maxGraphiteTagKeys int
    The maximum number of tag keys returned from Graphite API, which returns tags. See https://docs.victoriametrics.com/#graphite-tags-api-usage (default 100000)
-search.maxGraphiteTagValues int
    The maximum number of tag values returned from Graphite API, which returns tag values. See https://docs.victoriametrics.com/#graphite-tags-api-usage (default 100000)
-search.maxLabelsAPIDuration duration
    The maximum duration for /api/v1/labels, /api/v1/label/.../values and /api/v1/series requests. See also -search.maxLabelsAPISeries and -search.ignoreExtraFiltersAtLabelsAPI (default 5s)
-search.maxLabelsAPISeries int
    The maximum number of time series, which could be scanned when searching for the matching time series at /api/v1/labels and /api/v1/label/.../values. This option allows limiting memory usage and CPU usage. See also -search.maxLabelsAPIDuration, -search.maxTagKeys, -search.maxTagValues and -search.ignoreExtraFiltersAtLabelsAPI (default 1000000)
-search.maxLookback duration
    Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
-search.maxMemoryPerQuery size
-search.maxTSDBStatusSeries int
    The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage (default 10000000)
-search.maxTagKeys int
    The maximum number of tag keys returned from /api/v1/labels . See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration (default 100000)
-search.maxTagValueSuffixesPerSearch int
    The maximum number of tag value suffixes returned from /metrics/find (default 100000)
-search.maxTagValues int
    The maximum number of tag values returned from /api/v1/label/<label_name>/values . See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration (default 100000)
-search.maxUniqueTimeseries int
    The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage (default 300000)
-search.maxWorkersPerQuery int
-search.resetCacheAuthKey value
    Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
    Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
    Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache
-search.setLookbackToStep
    Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
-search.treatDotsAsIsInRegexps
@@ -2931,7 +3118,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||
authKey, which must be passed in query string to /snapshot* pages
|
||||
Flag value can be read from the given file when using -snapshotAuthKey=file:///abs/path/to/file or -snapshotAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -snapshotAuthKey=http://host/path or -snapshotAuthKey=https://host/path
|
||||
-snapshotCreateTimeout duration
|
||||
The timeout for creating new snapshot. If set, make sure that timeout is lower than backup period
|
||||
Deprecated: this flag does nothing
|
||||
-snapshotsMaxAge value
|
||||
Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted
|
||||
The following optional suffixes are supported: s (second), m (minute), h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 0)
|
||||
@@ -2961,22 +3148,37 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||
-streamAggr.config string
|
||||
Optional path to file with stream aggregation config. See https://docs.victoriametrics.com/stream-aggregation.html . See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval
|
||||
-streamAggr.dedupInterval duration
|
||||
Input samples are de-duplicated with this interval before being aggregated. Only the last sample per each time series per each interval is aggregated if the interval is greater than zero
|
||||
Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication
|
||||
-streamAggr.dropInput
|
||||
Whether to drop all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html
|
||||
-streamAggr.dropInputLabels array
|
||||
An optional list of labels to drop from samples before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
|
||||
-streamAggr.ignoreOldSamples
|
||||
Whether to ignore input samples with old timestamps outside the current aggregation interval. See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples
|
||||
-streamAggr.keepInput
|
||||
Whether to keep all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html
|
||||
-tls
|
||||
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
|
||||
-tlsCertFile string
|
||||
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
|
||||
-tls array
|
||||
Whether to enable TLS for incoming HTTP requests at the given -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
|
||||
Supports array of values separated by comma or specified via multiple flags.
|
||||
Empty values are set to false.
|
||||
-tlsCertFile array
|
||||
Path to file with TLS certificate for the corresponding -httpListenAddr if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
|
||||
-tlsCipherSuites array
|
||||
Optional list of TLS cipher suites for incoming requests over HTTPS if -tls is set. See the list of supported cipher suites at https://pkg.go.dev/crypto/tls#pkg-constants
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-tlsKeyFile string
|
||||
Path to file with TLS key if -tls is set. The provided key file is automatically re-read every second, so it can be dynamically updated
|
||||
-tlsMinVersion string
|
||||
Optional minimum TLS version to use for incoming requests over HTTPS if -tls is set. Supported values: TLS10, TLS11, TLS12, TLS13
|
||||
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
|
||||
-tlsKeyFile array
|
||||
Path to file with TLS key for the corresponding -httpListenAddr if -tls is set. The provided key file is automatically re-read every second, so it can be dynamically updated
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
|
||||
-tlsMinVersion array
|
||||
Optional minimum TLS version to use for the corresponding -httpListenAddr if -tls is set. Supported values: TLS10, TLS11, TLS12, TLS13
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
|
||||
-usePromCompatibleNaming
|
||||
Whether to replace characters unsupported by Prometheus with underscores in the ingested metric names and label names. For example, foo.bar{a.b='c'} is transformed into foo_bar{a_b='c'} during data ingestion if this flag is set. See https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
|
||||
-version
|
||||
@@ -2986,5 +3188,5 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
|
||||
-vmui.customDashboardsPath string
|
||||
Optional path to vmui dashboards. See https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui/packages/vmui/public/dashboards
|
||||
-vmui.defaultTimezone string
|
||||
The default timezone to be used in vmui. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local. See https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui#timezone-configuration
|
||||
The default timezone to be used in vmui. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local
|
||||
```
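The `-tls`, `-tlsCertFile`, `-tlsKeyFile` and `-tlsMinVersion` flags above became per-listener arrays: the N-th value applies to the N-th `-httpListenAddr`, and missing `-tls` entries default to false per the "Empty values are set to false" note. Below is a minimal sketch of that pairing logic under those documented rules; `pairTLS` and the sample addresses are illustrative, not VictoriaMetrics code:

```go
package main

import "fmt"

// pairTLS pairs each -httpListenAddr entry with the corresponding -tls entry.
// Entries missing from the -tls array default to false, mirroring the
// "Empty values are set to false" rule documented above.
func pairTLS(addrs, tlsValues []string) map[string]bool {
	m := make(map[string]bool, len(addrs))
	for i, addr := range addrs {
		v := ""
		if i < len(tlsValues) {
			v = tlsValues[i]
		}
		// Real flag parsing may accept other boolean spellings;
		// plain "true" is enough for this sketch.
		m[addr] = v == "true"
	}
	return m
}

func main() {
	// Hypothetical invocation: -httpListenAddr=:8428 -httpListenAddr=:8443 -tls=,true
	addrs := []string{":8428", ":8443"}
	tlsValues := []string{"", "true"}
	fmt.Println(pairTLS(addrs, tlsValues)) // map[:8428:false :8443:true]
}
```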
|
||||
|
||||
@@ -2,13 +2,17 @@
|
||||
|
||||
## Supported Versions
|
||||
|
||||
The following versions of VictoriaMetrics receive regular security fixes:
|
||||
|
||||
| Version | Supported |
|
||||
|---------|--------------------|
|
||||
| [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
|
||||
| v1.93.x LTS release | :white_check_mark: |
|
||||
| v1.87.x LTS release | :white_check_mark: |
|
||||
| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
|
||||
| v1.93.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
|
||||
| other releases | :x: |
|
||||
|
||||
See [this page](https://victoriametrics.com/security/) for more details.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Please report any security issues to security@victoriametrics.com
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
ARG base_image
|
||||
FROM $base_image
|
||||
|
||||
EXPOSE 8428
|
||||
EXPOSE 9428
|
||||
|
||||
ENTRYPOINT ["/victoria-logs-prod"]
|
||||
ARG src_binary
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
@@ -22,11 +21,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
httpListenAddr = flag.String("httpListenAddr", ":9428", "TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
|
||||
useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
|
||||
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. See also -httpListenAddr.useProxyProtocol")
|
||||
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the given -httpListenAddr . "+
|
||||
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
|
||||
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
|
||||
gogc = flag.Int("gogc", 100, "GOGC to use. See https://tip.golang.org/doc/gc-guide")
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -34,18 +32,21 @@ func main() {
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
flag.Usage = usage
|
||||
envflag.Parse()
|
||||
cgroup.SetGOGC(*gogc)
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
|
||||
logger.Infof("starting VictoriaLogs at %q...", *httpListenAddr)
|
||||
listenAddrs := *httpListenAddrs
|
||||
if len(listenAddrs) == 0 {
|
||||
listenAddrs = []string{":9428"}
|
||||
}
|
||||
logger.Infof("starting VictoriaLogs at %q...", listenAddrs)
|
||||
startTime := time.Now()
|
||||
|
||||
vlstorage.Init()
|
||||
vlselect.Init()
|
||||
vlinsert.Init()
|
||||
|
||||
go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
|
||||
go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
|
||||
logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/VictoriaLogs/", time.Since(startTime).Seconds())
|
||||
|
||||
pushmetrics.Init()
|
||||
@@ -53,9 +54,9 @@ func main() {
|
||||
logger.Infof("received signal %s", sig)
|
||||
pushmetrics.Stop()
|
||||
|
||||
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
|
||||
logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
|
||||
startTime = time.Now()
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
if err := httpserver.Stop(listenAddrs); err != nil {
|
||||
logger.Fatalf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
|
||||
|
||||
@@ -6,7 +6,7 @@ RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
|
||||
|
||||
FROM $root_image
|
||||
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
|
||||
EXPOSE 8428
|
||||
EXPOSE 9428
|
||||
ENTRYPOINT ["/victoria-logs-prod"]
|
||||
ARG TARGETARCH
|
||||
COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod
|
||||
|
||||
@@ -26,12 +26,12 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
httpListenAddr = flag.String("httpListenAddr", ":8428", "TCP address to listen for http connections. See also -tls and -httpListenAddr.useProxyProtocol")
|
||||
useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
|
||||
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP addresses to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")
|
||||
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
|
||||
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
|
||||
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
|
||||
minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
|
||||
"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
|
||||
"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication")
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running VictoriaMetrics. The following config files are checked: "+
|
||||
"-promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. "+
|
||||
"This can be changed with -promscrape.config.strictParse=false command-line flag")
|
||||
@@ -66,7 +66,11 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr)
|
||||
listenAddrs := *httpListenAddrs
|
||||
if len(listenAddrs) == 0 {
|
||||
listenAddrs = []string{":8428"}
|
||||
}
|
||||
logger.Infof("starting VictoriaMetrics at %q...", listenAddrs)
|
||||
startTime := time.Now()
|
||||
storage.SetDedupInterval(*minScrapeInterval)
|
||||
storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
|
||||
@@ -76,7 +80,7 @@ func main() {
|
||||
|
||||
startSelfScraper()
|
||||
|
||||
go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
|
||||
go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
|
||||
logger.Infof("started VictoriaMetrics in %.3f seconds", time.Since(startTime).Seconds())
|
||||
|
||||
pushmetrics.Init()
|
||||
@@ -86,9 +90,9 @@ func main() {
|
||||
|
||||
stopSelfScraper()
|
||||
|
||||
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
|
||||
logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
|
||||
startTime = time.Now()
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
if err := httpserver.Stop(listenAddrs); err != nil {
|
||||
logger.Fatalf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
|
||||
@@ -124,6 +128,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
{"api/v1/status/tsdb", "tsdb status page"},
|
||||
{"api/v1/status/top_queries", "top queries"},
|
||||
{"api/v1/status/active_queries", "active queries"},
|
||||
{"-/reload", "reload configuration"},
|
||||
})
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -38,11 +39,13 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
|
||||
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
|
||||
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
|
||||
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
|
||||
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
|
||||
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
|
||||
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
|
||||
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
|
||||
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
|
||||
testImportCSVWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/import/csv"
|
||||
|
||||
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -55,14 +58,15 @@ var (
|
||||
)
|
||||
|
||||
type test struct {
|
||||
Name string `json:"name"`
|
||||
Data []string `json:"data"`
|
||||
InsertQuery string `json:"insert_query"`
|
||||
Query []string `json:"query"`
|
||||
ResultMetrics []Metric `json:"result_metrics"`
|
||||
ResultSeries Series `json:"result_series"`
|
||||
ResultQuery Query `json:"result_query"`
|
||||
Issue string `json:"issue"`
|
||||
Name string `json:"name"`
|
||||
Data []string `json:"data"`
|
||||
InsertQuery string `json:"insert_query"`
|
||||
Query []string `json:"query"`
|
||||
ResultMetrics []Metric `json:"result_metrics"`
|
||||
ResultSeries Series `json:"result_series"`
|
||||
ResultQuery Query `json:"result_query"`
|
||||
Issue string `json:"issue"`
|
||||
ExpectedResultLinesCount int `json:"expected_result_lines_count"`
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
@@ -180,7 +184,7 @@ func setUp() {
|
||||
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
|
||||
vmselect.Init()
|
||||
vminsert.Init()
|
||||
go httpserver.Serve(*httpListenAddr, false, requestHandler)
|
||||
go httpserver.Serve(*httpListenAddrs, useProxyProtocol, requestHandler)
|
||||
readyStorageCheckFunc := func() bool {
|
||||
resp, err := http.Get(testHealthHTTPPath)
|
||||
if err != nil {
|
||||
@@ -226,7 +230,7 @@ func waitFor(timeout time.Duration, f func() bool) error {
|
||||
}
|
||||
|
||||
func tearDown() {
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
if err := httpserver.Stop(*httpListenAddrs); err != nil {
|
||||
log.Printf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
vminsert.Stop()
|
||||
@@ -237,8 +241,9 @@ func tearDown() {
|
||||
|
||||
func TestWriteRead(t *testing.T) {
|
||||
t.Run("write", testWrite)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
vmstorage.Storage.DebugFlush()
|
||||
time.Sleep(1 * time.Second)
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
t.Run("read", testRead)
|
||||
}
|
||||
|
||||
@@ -260,6 +265,14 @@ func testWrite(t *testing.T) {
|
||||
httpWrite(t, testPromWriteHTTPPath, test.InsertQuery, bytes.NewBuffer(data))
|
||||
}
|
||||
})
|
||||
t.Run("csv", func(t *testing.T) {
|
||||
for _, test := range readIn("csv", t, insertionTime) {
|
||||
if test.Data == nil {
|
||||
continue
|
||||
}
|
||||
httpWrite(t, testImportCSVWriteHTTPPath, test.InsertQuery, bytes.NewBuffer([]byte(strings.Join(test.Data, "\n"))))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("influxdb", func(t *testing.T) {
|
||||
for _, x := range readIn("influxdb", t, insertionTime) {
|
||||
@@ -301,7 +314,7 @@ func testWrite(t *testing.T) {
|
||||
}
|
||||
|
||||
func testRead(t *testing.T) {
|
||||
for _, engine := range []string{"prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
|
||||
for _, engine := range []string{"csv", "prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
|
||||
t.Run(engine, func(t *testing.T) {
|
||||
for _, x := range readIn(engine, t, insertionTime) {
|
||||
test := x
|
||||
@@ -312,7 +325,12 @@ func testRead(t *testing.T) {
|
||||
if test.Issue != "" {
|
||||
test.Issue = "\nRegression in " + test.Issue
|
||||
}
|
||||
switch true {
|
||||
switch {
|
||||
case strings.HasPrefix(q, "/api/v1/export/csv"):
|
||||
data := strings.Split(string(httpReadData(t, testReadHTTPPath, q)), "\n")
|
||||
if len(data) != test.ExpectedResultLinesCount {
|
||||
t.Fatalf("unexpected number of csv lines; want=%d; got=%d; test=%s.%s\nresponse=%q", test.ExpectedResultLinesCount, len(data), q, test.Issue, strings.Join(data, "\n"))
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/export"):
|
||||
if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
|
||||
t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
|
||||
@@ -351,7 +369,7 @@ func readIn(readFor string, t *testing.T, insertTime time.Time) []test {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
var tt []test
|
||||
s.noError(filepath.Walk(filepath.Join(testFixturesDir, readFor), func(path string, info os.FileInfo, err error) error {
|
||||
s.noError(filepath.Walk(filepath.Join(testFixturesDir, readFor), func(path string, _ os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -413,6 +431,7 @@ func httpReadMetrics(t *testing.T, address, query string) []Metric {
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
@@ -425,6 +444,20 @@ func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
|
||||
s.noError(json.NewDecoder(resp.Body).Decode(dst))
|
||||
}
|
||||
|
||||
func httpReadData(t *testing.T, address, query string) []byte {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Get(address + query)
|
||||
s.noError(err)
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
s.equalInt(resp.StatusCode, 200)
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
s.noError(err)
|
||||
return data
|
||||
}
|
||||
|
||||
func checkMetricsResult(got, want []Metric) error {
|
||||
for _, r := range append([]Metric(nil), got...) {
|
||||
want = removeIfFoundMetrics(r, want)
|
||||
@@ -497,3 +530,73 @@ func (s *suite) greaterThan(a, b int) {
|
||||
s.t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
func TestImportJSONLines(t *testing.T) {
|
||||
f := func(labelsCount, labelLen int) {
|
||||
t.Helper()
|
||||
|
||||
reqURL := fmt.Sprintf("http://localhost%s/api/v1/import", testHTTPListenAddr)
|
||||
line := generateJSONLine(labelsCount, labelLen)
|
||||
req, err := http.NewRequest("POST", reqURL, bytes.NewBufferString(line))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create request: %s", err)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot perform request for labelsCount=%d, labelLen=%d: %s", labelsCount, labelLen, err)
|
||||
}
|
||||
if resp.StatusCode != 204 {
|
||||
t.Fatalf("unexpected statusCode for labelsCount=%d, labelLen=%d; got %d; want 204", labelsCount, labelLen, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// labels with various lengths
|
||||
for i := 0; i < 500; i++ {
|
||||
f(10, i*5)
|
||||
}
|
||||
|
||||
// Too many labels
|
||||
f(1000, 100)
|
||||
|
||||
// Too long labels
|
||||
f(1, 100_000)
|
||||
f(10, 100_000)
|
||||
f(10, 10_000)
|
||||
}
|
||||
|
||||
func generateJSONLine(labelsCount, labelLen int) string {
|
||||
m := make(map[string]string, labelsCount)
|
||||
m["__name__"] = generateSizedRandomString(labelLen)
|
||||
for j := 1; j < labelsCount; j++ {
|
||||
labelName := generateSizedRandomString(labelLen)
|
||||
labelValue := generateSizedRandomString(labelLen)
|
||||
m[labelName] = labelValue
|
||||
}
|
||||
|
||||
type jsonLine struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Values []float64 `json:"values"`
|
||||
Timestamps []int64 `json:"timestamps"`
|
||||
}
|
||||
line := &jsonLine{
|
||||
Metric: m,
|
||||
Values: []float64{1.34},
|
||||
Timestamps: []int64{time.Now().UnixNano() / 1e6},
|
||||
}
|
||||
data, err := json.Marshal(&line)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot marshal JSON: %w", err))
|
||||
}
|
||||
data = append(data, '\n')
|
||||
return string(data)
|
||||
}
|
||||
|
||||
const alphabetSample = `qwertyuiopasdfghjklzxcvbnm`
|
||||
|
||||
func generateSizedRandomString(size int) string {
|
||||
dst := make([]byte, size)
|
||||
for i := range dst {
|
||||
dst[i] = alphabetSample[rand.Intn(len(alphabetSample))]
|
||||
}
|
||||
return string(dst)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
|
||||
@@ -49,16 +50,8 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
var mrs []storage.MetricRow
|
||||
var labels []prompb.Label
|
||||
t := time.NewTicker(scrapeInterval)
|
||||
var currentTimestamp int64
|
||||
for {
|
||||
select {
|
||||
case <-selfScraperStopCh:
|
||||
t.Stop()
|
||||
logger.Infof("stopped self-scraping `/metrics` page")
|
||||
return
|
||||
case currentTime := <-t.C:
|
||||
currentTimestamp = currentTime.UnixNano() / 1e6
|
||||
}
|
||||
f := func(currentTime time.Time, sendStaleMarkers bool) {
|
||||
currentTimestamp := currentTime.UnixNano() / 1e6
|
||||
bb.Reset()
|
||||
appmetrics.WritePrometheusMetrics(&bb)
|
||||
s := bytesutil.ToUnsafeString(bb.B)
|
||||
@@ -83,12 +76,27 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
mr := &mrs[len(mrs)-1]
|
||||
mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels)
|
||||
mr.Timestamp = currentTimestamp
|
||||
mr.Value = r.Value
|
||||
if sendStaleMarkers {
|
||||
mr.Value = decimal.StaleNaN
|
||||
} else {
|
||||
mr.Value = r.Value
|
||||
}
|
||||
}
|
||||
if err := vmstorage.AddRows(mrs); err != nil {
|
||||
logger.Errorf("cannot store self-scraped metrics: %s", err)
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-selfScraperStopCh:
|
||||
f(time.Now(), true)
|
||||
t.Stop()
|
||||
logger.Infof("stopped self-scraping `/metrics` page")
|
||||
return
|
||||
case currentTime := <-t.C:
|
||||
f(currentTime, false)
|
||||
}
|
||||
}
|
||||
}
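The refactor above extracts the scrape body into `f(currentTime, sendStaleMarkers)` so that the final call, triggered by `selfScraperStopCh`, re-sends the last scraped rows with `decimal.StaleNaN` values, letting readers distinguish an ended series from one that merely stopped arriving. A minimal sketch of the staleness-marker convention this relies on, assuming `decimal.StaleNaN` follows the standard Prometheus bit pattern (the constant itself is not shown in this diff):

```go
package main

import (
	"fmt"
	"math"
)

// Prometheus-compatible systems mark the end of a series with a special NaN
// bit pattern rather than an ordinary NaN so that the marker survives a
// round-trip through float64 storage. 0x7ff0000000000002 is the value used by
// Prometheus staleness handling; decimal.StaleNaN is assumed to match it.
const staleNaNBits = 0x7ff0000000000002

var staleNaN = math.Float64frombits(staleNaNBits)

func isStaleNaN(v float64) bool {
	return math.Float64bits(v) == staleNaNBits
}

func main() {
	fmt.Println(math.IsNaN(staleNaN))   // true - it is still a NaN
	fmt.Println(isStaleNaN(staleNaN))   // true - recognized as a staleness marker
	fmt.Println(isStaleNaN(math.NaN())) // false - an ordinary NaN is just a missing value
}
```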
|
||||
|
||||
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
|
||||
|
||||
14
app/victoria-metrics/testdata/csv/basic.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "csv export",
|
||||
"data": [
|
||||
"rfc3339,4,{TIME_MS}",
|
||||
"rfc3339milli,6,{TIME_MS}",
|
||||
"ts,8,{TIME_MS}",
|
||||
"tsms,10,{TIME_MS},"
|
||||
],
|
||||
"insert_query": "?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms",
|
||||
"query": [
|
||||
"/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
|
||||
],
|
||||
"expected_result_lines_count": 4
|
||||
}
|
||||
14
app/victoria-metrics/testdata/csv/with_extra_labels.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "csv export with extra_labels",
|
||||
"data": [
|
||||
"location-1,4,{TIME_MS}",
|
||||
"location-2,6,{TIME_MS}",
|
||||
"location-3,8,{TIME_MS}",
|
||||
"location-4,10,{TIME_MS},"
|
||||
],
|
||||
"insert_query": "?format=1:label:location,2:metric:test_csv_labels,3:time:unix_ms&extra_label=location=location-1",
|
||||
"query": [
|
||||
"/api/v1/export/csv?format=__name__,location,__value__,__timestamp__:unix_ms&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
|
||||
],
|
||||
"expected_result_lines_count": 4
|
||||
}
|
||||
@@ -33,7 +33,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
|
||||
|
||||
timeField := "@timestamp"
|
||||
msgField := "message"
|
||||
processLogMessage := func(timestmap int64, fields []logstorage.Field) {}
|
||||
processLogMessage := func(_ int64, _ []logstorage.Field) {}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(data)))
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
func TestParseJSONRequestFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
n, err := parseJSONRequest([]byte(s), func(timestamp int64, fields []logstorage.Field) {
|
||||
n, err := parseJSONRequest([]byte(s), func(_ int64, _ []logstorage.Field) {
|
||||
t.Fatalf("unexpected call to parseJSONRequest callback!")
|
||||
})
|
||||
if err == nil {
|
||||
|
||||
@@ -27,7 +27,7 @@ func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
data := getJSONBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseJSONRequest(data, func(timestamp int64, fields []logstorage.Field) {})
|
||||
_, err := parseJSONRequest(data, func(_ int64, _ []logstorage.Field) {})
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
body := getProtobufBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseProtobufRequest(body, func(timestamp int64, fields []logstorage.Field) {})
|
||||
_, err := parseProtobufRequest(body, func(_ int64, _ []logstorage.Field) {})
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
)
|
||||
|
||||
@@ -17,13 +18,18 @@ var (
|
||||
)
|
||||
|
||||
// ProcessQueryRequest handles /select/logsql/query request
|
||||
func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}) {
|
||||
func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}, cancel func()) {
|
||||
// Extract tenantID
|
||||
tenantID, err := logstorage.GetTenantIDFromRequest(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
qStr := r.FormValue("query")
|
||||
q, err := logstorage.ParseQuery(qStr)
|
||||
@@ -34,7 +40,7 @@ func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan s
|
||||
w.Header().Set("Content-Type", "application/stream+json; charset=utf-8")
|
||||
|
||||
sw := getSortWriter()
|
||||
sw.Init(w, maxSortBufferSize.IntN())
|
||||
sw.Init(w, maxSortBufferSize.IntN(), limit)
|
||||
tenantIDs := []logstorage.TenantID{tenantID}
|
||||
vlstorage.RunQuery(tenantIDs, q, stopCh, func(columns []logstorage.BlockColumn) {
|
||||
if len(columns) == 0 {
|
||||
@@ -42,11 +48,36 @@ func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan s
|
||||
}
|
||||
rowsCount := len(columns[0].Values)
|
||||
|
||||
// skip entries with empty _stream column
|
||||
// _stream is empty in case indexdb entry was not flushed to the storage yet
|
||||
// skipping such entries makes the result more consistent
|
||||
streamCol := 0
|
||||
|
||||
// fast path
|
||||
// _stream column is a built-in column and it is always supposed to be at the same position
|
||||
if len(columns) >= 2 && columns[1].Name == "_stream" {
|
||||
streamCol = 1
|
||||
} else {
|
||||
for i := 1; i < len(columns); i++ {
|
||||
if columns[i].Name == "_stream" {
|
||||
streamCol = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bb := blockResultPool.Get()
|
||||
for rowIdx := 0; rowIdx < rowsCount; rowIdx++ {
|
||||
if columns[streamCol].Values[rowIdx] == "" {
|
||||
continue
|
||||
}
|
||||
WriteJSONRow(bb, columns, rowIdx)
|
||||
}
|
||||
sw.MustWrite(bb.B)
|
||||
|
||||
if !sw.TryWrite(bb.B) {
|
||||
cancel()
|
||||
}
|
||||
|
||||
blockResultPool.Put(bb)
|
||||
})
|
||||
sw.FinalFlush()
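The new `limit` query arg works by cooperative cancellation: `RequestHandler` (further below) wraps the request context with `context.WithCancel` and hands `cancel` to `ProcessQueryRequest`, which calls it as soon as `sw.TryWrite` reports that no more lines are accepted, so the storage stops producing blocks early. A minimal sketch of the pattern with illustrative names (`runQuery` stands in for `vlstorage.RunQuery`):

```go
package main

import (
	"context"
	"fmt"
)

// runQuery feeds blocks to writeBlock until stopCh is closed,
// mimicking how a storage backend streams query results.
func runQuery(stopCh <-chan struct{}, writeBlock func(block int)) {
	for i := 0; ; i++ {
		select {
		case <-stopCh:
			return
		default:
			writeBlock(i)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	written, limit := 0, 3
	runQuery(ctx.Done(), func(block int) {
		if written >= limit {
			cancel() // limit reached - stop the producer
			return
		}
		written++
		fmt.Println("wrote block", block)
	})
	fmt.Println("stopped after", written, "blocks")
}
```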
|
||||
|
||||
@@ -36,8 +36,12 @@ var sortWriterPool sync.Pool
|
||||
// If the buf isn't empty at FinalFlush() call, then the buffered data
|
||||
// is sorted by _time field.
|
||||
type sortWriter struct {
|
||||
mu sync.Mutex
|
||||
w io.Writer
|
||||
mu sync.Mutex
|
||||
w io.Writer
|
||||
|
||||
maxLines int
|
||||
linesWritten int
|
||||
|
||||
maxBufLen int
|
||||
buf []byte
|
||||
bufFlushed bool
|
||||
@@ -47,58 +51,119 @@ type sortWriter struct {
|
||||
|
||||
func (sw *sortWriter) reset() {
|
||||
sw.w = nil
|
||||
|
||||
sw.maxLines = 0
|
||||
sw.linesWritten = 0
|
||||
|
||||
sw.maxBufLen = 0
|
||||
sw.buf = sw.buf[:0]
|
||||
sw.bufFlushed = false
|
||||
sw.hasErr = false
|
||||
}
|
||||
|
||||
func (sw *sortWriter) Init(w io.Writer, maxBufLen int) {
|
||||
// Init initializes sw.
|
||||
//
|
||||
// If maxLines is set to positive value, then sw accepts up to maxLines
|
||||
// and then rejects all the other lines by returning false from TryWrite.
|
||||
func (sw *sortWriter) Init(w io.Writer, maxBufLen, maxLines int) {
|
||||
sw.reset()
|
||||
|
||||
sw.w = w
|
||||
sw.maxBufLen = maxBufLen
|
||||
sw.maxLines = maxLines
|
||||
}
|
||||
|
||||
func (sw *sortWriter) MustWrite(p []byte) {
|
||||
// TryWrite writes p to sw.
|
||||
//
|
||||
// True is returned on successful write, false otherwise.
|
||||
//
|
||||
// Unsuccessful write may occur on underlying write error or when maxLines lines are already written to sw.
|
||||
func (sw *sortWriter) TryWrite(p []byte) bool {
|
||||
sw.mu.Lock()
|
||||
defer sw.mu.Unlock()
|
||||
|
||||
if sw.hasErr {
|
||||
return
|
||||
return false
|
||||
}
|
||||
|
||||
if sw.bufFlushed {
|
||||
if _, err := sw.w.Write(p); err != nil {
|
||||
if !sw.writeToUnderlyingWriterLocked(p) {
|
||||
sw.hasErr = true
|
||||
return false
|
||||
}
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
if len(sw.buf)+len(p) < sw.maxBufLen {
|
||||
sw.buf = append(sw.buf, p...)
|
||||
return
|
||||
return true
|
||||
}
|
||||
|
||||
sw.bufFlushed = true
|
||||
if len(sw.buf) > 0 {
|
||||
if _, err := sw.w.Write(sw.buf); err != nil {
|
||||
sw.hasErr = true
|
||||
return
|
||||
if !sw.writeToUnderlyingWriterLocked(sw.buf) {
|
||||
sw.hasErr = true
|
||||
return false
|
||||
}
|
||||
sw.buf = sw.buf[:0]
|
||||
|
||||
if !sw.writeToUnderlyingWriterLocked(p) {
|
||||
sw.hasErr = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (sw *sortWriter) writeToUnderlyingWriterLocked(p []byte) bool {
|
||||
if len(p) == 0 {
|
||||
return true
|
||||
}
|
||||
if sw.maxLines > 0 {
|
||||
if sw.linesWritten >= sw.maxLines {
|
||||
return false
|
||||
}
|
||||
sw.buf = sw.buf[:0]
|
||||
var linesLeft int
|
||||
p, linesLeft = trimLines(p, sw.maxLines-sw.linesWritten)
|
||||
sw.linesWritten += linesLeft
|
||||
}
|
||||
if _, err := sw.w.Write(p); err != nil {
|
||||
sw.hasErr = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func trimLines(p []byte, maxLines int) ([]byte, int) {
|
||||
if maxLines <= 0 {
|
||||
return nil, 0
|
||||
}
|
||||
n := bytes.Count(p, newline)
|
||||
if n < maxLines {
|
||||
return p, n
|
||||
}
|
||||
for n >= maxLines {
|
||||
idx := bytes.LastIndexByte(p, '\n')
|
||||
p = p[:idx]
|
||||
n--
|
||||
}
|
||||
return p[:len(p)+1], maxLines
|
||||
}
|
||||
|
||||
var newline = []byte("\n")
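`trimLines` keeps at most `maxLines` newline-terminated lines from `p` and reports how many it kept; the final `p[:len(p)+1]` re-slice restores the trailing newline that the loop cut off, which is safe because that byte is still present in the backing array. A quick test-style sketch of the expected behavior, assuming it lives in the same package as `trimLines` (not part of the diff):

```go
package logsql

import "testing"

func TestTrimLinesSketch(t *testing.T) {
	// Three lines are trimmed down to two; the trailing newline is preserved.
	p, n := trimLines([]byte("a\nb\nc\n"), 2)
	if string(p) != "a\nb\n" || n != 2 {
		t.Fatalf("unexpected result: %q, %d", p, n)
	}
	// Fewer lines than the limit pass through unchanged.
	p, n = trimLines([]byte("a\nb\n"), 5)
	if string(p) != "a\nb\n" || n != 2 {
		t.Fatalf("unexpected result: %q, %d", p, n)
	}
}
```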
|
||||
|
||||
func (sw *sortWriter) FinalFlush() {
|
||||
if sw.hasErr || sw.bufFlushed {
|
||||
return
|
||||
}
|
||||
|
||||
rs := getRowsSorter()
|
||||
rs.parseRows(sw.buf)
|
||||
rs.sort()
|
||||
WriteJSONRows(sw.w, rs.rows)
|
||||
|
||||
rows := rs.rows
|
||||
if sw.maxLines > 0 && len(rows) > sw.maxLines {
|
||||
rows = rows[:sw.maxLines]
|
||||
}
|
||||
WriteJSONRows(sw.w, rows)
|
||||
|
||||
putRowsSorter(rs)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,15 +7,16 @@ import (
|
||||
)
|
||||
|
||||
func TestSortWriter(t *testing.T) {
|
||||
f := func(maxBufLen int, data string, expectedResult string) {
|
||||
f := func(maxBufLen, maxLines int, data string, expectedResult string) {
|
||||
t.Helper()
|
||||
|
||||
var bb bytes.Buffer
|
||||
sw := getSortWriter()
|
||||
sw.Init(&bb, maxBufLen)
|
||||
|
||||
sw.Init(&bb, maxBufLen, maxLines)
|
||||
for _, s := range strings.Split(data, "\n") {
|
||||
sw.MustWrite([]byte(s + "\n"))
|
||||
if !sw.TryWrite([]byte(s + "\n")) {
|
||||
break
|
||||
}
|
||||
}
|
||||
sw.FinalFlush()
|
||||
putSortWriter(sw)
|
||||
@@ -26,14 +27,20 @@ func TestSortWriter(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
f(100, "", "")
|
||||
f(100, "{}", "{}\n")
|
||||
f(100, 0, "", "")
|
||||
f(100, 0, "{}", "{}\n")
|
||||
|
||||
data := `{"_time":"def","_msg":"xxx"}
|
||||
{"_time":"abc","_msg":"foo"}`
|
||||
resultExpected := `{"_time":"abc","_msg":"foo"}
|
||||
{"_time":"def","_msg":"xxx"}
|
||||
`
|
||||
f(100, data, resultExpected)
|
||||
f(10, data, data+"\n")
|
||||
f(100, 0, data, resultExpected)
|
||||
f(10, 0, data, data+"\n")
|
||||
|
||||
// Test with the maxLines
|
||||
f(100, 1, data, `{"_time":"abc","_msg":"foo"}`+"\n")
|
||||
f(10, 1, data, `{"_time":"def","_msg":"xxx"}`+"\n")
|
||||
f(10, 2, data, data+"\n")
|
||||
f(100, 2, data, resultExpected)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package vlselect
|
||||
|
||||
import (
|
||||
"context"
|
||||
"embed"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -101,7 +102,8 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
|
||||
// Limit the number of concurrent queries, which can consume big amounts of CPU.
|
||||
startTime := time.Now()
|
||||
stopCh := r.Context().Done()
|
||||
ctx := r.Context()
|
||||
stopCh := ctx.Done()
|
||||
select {
|
||||
case concurrencyLimitCh <- struct{}{}:
|
||||
defer func() { <-concurrencyLimitCh }()
|
||||
@@ -139,11 +141,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
}
|
||||
}
|
||||
|
||||
ctxWithCancel, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
stopCh = ctxWithCancel.Done()
|
||||
|
||||
switch {
|
||||
case path == "/logsql/query":
|
||||
logsqlQueryRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
logsql.ProcessQueryRequest(w, r, stopCh)
|
||||
logsql.ProcessQueryRequest(w, r, stopCh, cancel)
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.d1313636.css",
|
||||
"main.js": "./static/js/main.1919fefe.js",
|
||||
"static/js/522.da77e7b3.chunk.js": "./static/js/522.da77e7b3.chunk.js",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.8644fd7c964802dd34a9.md",
|
||||
"main.css": "./static/css/main.bc07cc78.css",
|
||||
"main.js": "./static/js/main.034044a7.js",
|
||||
"static/js/685.bebe1265.chunk.js": "./static/js/685.bebe1265.chunk.js",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.10add6e7bdf0f1d98cf7.md",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.d1313636.css",
|
||||
"static/js/main.1919fefe.js"
|
||||
"static/css/main.bc07cc78.css",
|
||||
"static/js/main.034044a7.js"
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.1919fefe.js"></script><link href="./static/css/main.d1313636.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.034044a7.js"></script><link href="./static/css/main.bc07cc78.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
1
app/vlselect/vmui/static/css/main.bc07cc78.css
Normal file
File diff suppressed because one or more lines are too long
1
app/vlselect/vmui/static/js/685.bebe1265.chunk.js
Normal file
File diff suppressed because one or more lines are too long
2
app/vlselect/vmui/static/js/main.034044a7.js
Normal file
File diff suppressed because one or more lines are too long
@@ -4,10 +4,8 @@
|
||||
http://jedwatson.github.io/classnames
|
||||
*/
|
||||
|
||||
/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
|
||||
|
||||
/**
|
||||
* @remix-run/router v1.10.0
|
||||
* @remix-run/router v1.15.1
|
||||
*
|
||||
* Copyright (c) Remix Software Inc.
|
||||
*
|
||||
@@ -18,7 +16,7 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* React Router DOM v6.17.0
|
||||
* React Router DOM v6.22.1
|
||||
*
|
||||
* Copyright (c) Remix Software Inc.
|
||||
*
|
||||
@@ -29,7 +27,7 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* React Router v6.17.0
|
||||
* React Router v6.22.1
|
||||
*
|
||||
* Copyright (c) Remix Software Inc.
|
||||
*
|
||||
File diff suppressed because one or more lines are too long
@@ -26,12 +26,18 @@ and introduction into [basic querying via MetricsQL](https://docs.victoriametric
|
||||
|
||||
The following functionality is implemented differently in MetricsQL compared to PromQL. This improves user experience:
|
||||
|
||||
* MetricsQL takes into account the previous point before the window in square brackets for range functions such as [rate](#rate) and [increase](#increase).
|
||||
This allows returning the exact results users expect for `increase(metric[$__interval])` queries instead of incomplete results Prometheus returns for such queries.
|
||||
* MetricsQL doesn't extrapolate range function results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
|
||||
* MetricsQL takes into account the last [raw sample](https://docs.victoriametrics.com/keyconcepts/#raw-samples) before the lookbehind window
|
||||
in square brackets for [increase](#increase) and [rate](#rate) functions. This allows returning the exact results users expect for `increase(metric[$__interval])` queries
|
||||
instead of incomplete results Prometheus returns for such queries. Prometheus misses the increase between the last sample before the lookbehind window
|
||||
and the first sample inside the lookbehind window. See the worked example after this list.
|
||||
* MetricsQL doesn't extrapolate [rate](#rate) and [increase](#increase) function results, so it always returns the expected results. For example, it returns
|
||||
integer results from `increase()` over slow-changing integer counter. Prometheus in this case returns unexpected fractional results,
|
||||
which may significantly differ from the expected results. This addresses [this issue from Prometheus](https://github.com/prometheus/prometheus/issues/3746).
|
||||
See technical details about VictoriaMetrics and Prometheus calculations for [rate](#rate)
|
||||
and [increase](#increase) [in this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1215#issuecomment-850305711).
|
||||
* MetricsQL returns the expected non-empty responses for [rate](#rate) with `step` values smaller than scrape interval.
|
||||
* MetricsQL returns the expected non-empty responses for [rate](#rate) function when Grafana or [vmui](https://docs.victoriametrics.com/#vmui)
|
||||
passes `step` values smaller than the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query).
|
||||
This addresses [this issue from Grafana](https://github.com/grafana/grafana/issues/11451).
|
||||
See also [this blog post](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/).
|
||||
* MetricsQL treats `scalar` type the same as `instant vector` without labels, since subtle differences between these types usually confuse users.
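To make the first difference above concrete (illustrative numbers, not from the docs): given counter samples 100, 110, 120 and 130 at t = 0s, 15s, 30s and 45s, `increase(metric[30s])` evaluated at t = 45s covers the window (15s, 45s]. MetricsQL also looks at the sample 110 just before the window and returns exactly 130 - 110 = 20, while Prometheus only sees 120 and 130 inside the window, loses the 110 -> 120 increase and returns a smaller, typically fractional estimate after extrapolation.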
|
||||
@@ -61,16 +67,17 @@ The list of MetricsQL features on top of PromQL:
|
||||
|
||||
* Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax.
|
||||
See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics).
|
||||
VictoriaMetrics also can be used as Graphite datasource in Grafana.
|
||||
See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
|
||||
VictoriaMetrics can be used as a Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage) for details.
|
||||
See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
|
||||
* Lookbehind window in square brackets may be omitted. VictoriaMetrics automatically selects the lookbehind window
|
||||
depending on the current step used for building the graph (e.g. `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)).
|
||||
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
|
||||
depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query)
|
||||
and the real interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) (aka `scrape_interval`).
|
||||
For instance, the following query is valid in VictoriaMetrics: `rate(node_network_receive_bytes_total)`.
|
||||
It is equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
||||
It is roughly equivalent to `rate(node_network_receive_bytes_total[$__interval])` when used in Grafana.
|
||||
The difference is documented in [rate() docs](#rate).
|
||||
* Numeric values can contain `_` delimiters for better readability. For example, `1_234_567_890` can be used in queries instead of `1234567890`.
|
||||
* [Series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) accept multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}`
|
||||
selects series with either `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
|
||||
selects series with `{env="prod",job="a"}` or `{env="dev",job="b"}` labels.
|
||||
See [these docs](https://docs.victoriametrics.com/keyConcepts.html#filtering-by-multiple-or-filters) for details.
|
||||
* Support for `group_left(*)` and `group_right(*)` for copying all the labels from time series on the `one` side
|
||||
of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches).
|
||||
@@ -98,7 +105,7 @@ The list of MetricsQL features on top of PromQL:
|
||||
* Trailing commas on all the lists are allowed - label filters, function args and with expressions.
|
||||
For instance, the following queries are valid: `m{foo="bar",}`, `f(a, b,)`, `WITH (x=y,) x`.
|
||||
This simplifies maintenance of multi-line queries.
|
||||
* Metric names and label names may contain any unicode letter. For example `температура{город="Киев"}` is a value MetricsQL expression.
|
||||
* Metric names and label names may contain any unicode letter. For example `температура{город="Київ"}` is a valid MetricsQL expression.
|
||||
* Metric names and label names may contain escaped chars. For example, `foo\-bar{baz\=aa="b"}` is a valid expression.
|
||||
It returns time series with name `foo-bar` containing label `baz=aa` with value `b`.
|
||||
Additionally, the following escape sequences are supported:
|
||||
@@ -117,7 +124,8 @@ The list of MetricsQL features on top of PromQL:
|
||||
Go to [WITH templates playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/expand-with-exprs) and try it.
|
||||
* String literals may be concatenated. This is useful with `WITH` templates:
|
||||
`WITH (commonPrefix="long_metric_prefix_") {__name__=commonPrefix+"suffix1"} / {__name__=commonPrefix+"suffix2"}`.
|
||||
* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions) and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
|
||||
* `keep_metric_names` modifier can be applied to all the [rollup functions](#rollup-functions), [transform functions](#transform-functions)
|
||||
and [binary operators](https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operators).
|
||||
This modifier prevents dropping metric names in function results. See [these docs](#keep_metric_names).
|
||||
|
||||
## keep_metric_names
|
||||
@@ -155,14 +163,15 @@ Additional details:
|
||||
The interval between points is set as `step` query arg passed by Grafana to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query).
|
||||
* If the given [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) returns multiple time series,
|
||||
then rollups are calculated individually per each returned series.
|
||||
* If lookbehind window in square brackets is missing, then MetricsQL automatically sets the lookbehind window
|
||||
to the interval between points on the graph (aka `step` query arg at [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query),
|
||||
`$__interval` value from Grafana or `1i` duration in MetricsQL).
|
||||
For example, `rate(http_requests_total)` is equivalent to `rate(http_requests_total[$__interval])` in Grafana.
|
||||
It is also equivalent to `rate(http_requests_total[1i])`.
|
||||
* If lookbehind window in square brackets is missing, then it is automatically set to the following value:
|
||||
- To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
|
||||
for all the [rollup functions](#rollup-functions) except of [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
|
||||
For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
|
||||
- To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples)
|
||||
for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`. A numeric example follows this list.
|
||||
* Every [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) in MetricsQL must be wrapped into a rollup function.
|
||||
Otherwise, it is automatically wrapped into [default_rollup](#default_rollup). For example, `foo{bar="baz"}`
|
||||
is automatically converted to `default_rollup(foo{bar="baz"}[1i])` before performing the calculations.
|
||||
is automatically converted to `default_rollup(foo{bar="baz"})` before performing the calculations.
|
||||
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) is passed to rollup function,
|
||||
then the inner arg is automatically converted to a [subquery](#subqueries).
|
||||
* All the rollup functions accept optional `keep_metric_names` modifier. If it is set, then the function keeps metric names in results.
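A numeric example of the automatic lookbehind window selection described above (illustrative numbers): with `step=10s` passed to `/api/v1/query_range` and raw samples arriving every `scrape_interval=30s`, `rate(node_network_receive_bytes_total)` gets the lookbehind window `max(10s, 30s) = 30s`, so every evaluated point covers at least one raw sample, and together with the sample just before the window MetricsQL can always compute the rate without gaps; `avg_over_time(temperature)` at the same `step` gets the plain `step`-based window `[10s]`, i.e. `1i`.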
|
||||
@@ -177,7 +186,9 @@ The list of supported rollup functions:
|
||||
`absent_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns 1
|
||||
if the given lookbehind window `d` doesn't contain raw samples. Otherwise, it returns an empty result.
|
||||
|
||||
This function is supported by PromQL. See also [present_over_time](#present_over_time).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [present_over_time](#present_over_time).
|
||||
|
||||
#### aggr_over_time
|
||||
|
||||
@@ -207,7 +218,9 @@ See also [descent_over_time](#descent_over_time).
|
||||
over raw samples on the given lookbehind window `d` per each time series returned
|
||||
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
|
||||
This function is supported by PromQL. See also [median_over_time](#median_over_time).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [median_over_time](#median_over_time).
|
||||
|
||||
#### changes
|
||||
|
||||
@@ -220,7 +233,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
This function is supported by PromQL. See also [changes_prometheus](#changes_prometheus).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [changes_prometheus](#changes_prometheus).
|
||||
|
||||
#### changes_prometheus
|
||||
|
||||
@@ -233,7 +248,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
This function is supported by PromQL. See also [changes](#changes).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [changes](#changes).
|
||||
|
||||
#### count_eq_over_time
|
||||
|
||||
@@ -243,7 +260,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
See also [count_over_time](#count_over_time).
|
||||
See also [count_over_time](#count_over_time), [share_eq_over_time](#share_eq_over_time) and [count_values_over_time](#count_values_over_time).
|
||||
|
||||
#### count_gt_over_time
|
||||
|
||||
@@ -253,7 +270,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
See also [count_over_time](#count_over_time).
|
||||
See also [count_over_time](#count_over_time) and [share_gt_over_time](#share_gt_over_time).
|
||||
|
||||
#### count_le_over_time
|
||||
|
||||
@@ -263,7 +280,7 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
See also [count_over_time](#count_over_time).
|
||||
See also [count_over_time](#count_over_time) and [share_le_over_time](#share_le_over_time).
|
||||
|
||||
#### count_ne_over_time
|
||||
|
||||
@@ -282,8 +299,19 @@ on the given lookbehind window `d` per each time series returned from the given
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
This function is supported by PromQL. See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time),
|
||||
[count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [count_le_over_time](#count_le_over_time), [count_gt_over_time](#count_gt_over_time), [count_eq_over_time](#count_eq_over_time) and [count_ne_over_time](#count_ne_over_time).
|
||||
|
||||
#### count_values_over_time
|
||||
|
||||
`count_values_over_time("label", series_selector[d])` is a [rollup function](#rollup-functions), which counts the number of raw samples
|
||||
with the same value over the given lookbehind window and stores the counts in a time series with an additional `label`, which contains each initial value.
|
||||
The results are calculated independently per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
See also [count_eq_over_time](#count_eq_over_time), [count_values](#count_values), [distinct_over_time](#distinct_over_time) and [label_match](#label_match).
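For example (hypothetical metric `m`), if the lookbehind window contains raw samples with values 1, 1 and 2, then `count_values_over_time("x", m[1m])` returns two series: one with label `x="1"` and value 2, and one with label `x="2"` and value 1.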
|
||||
|
||||
#### decreases_over_time
|
||||
|
||||
@@ -299,6 +327,11 @@ See also [increases_over_time](#increases_over_time).
|
||||
`default_rollup(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
|
||||
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
|
||||
|
||||
If the lookbehind window in square brackets is omitted, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
|
||||
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
|
||||
while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
|
||||
This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.
|
||||
|
||||
#### delta
|
||||
|
||||
`delta(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the difference between
|
||||
@@ -310,7 +343,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
This function is supported by PromQL. See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
|
||||
|
||||
#### delta_prometheus
|
||||
|
||||
@@ -333,7 +368,9 @@ The derivative is calculated using linear regression.
|
||||
|
||||
Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
|
||||
|
||||
This function is supported by PromQL. See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
|
||||
This function is supported by PromQL.
|
||||
|
||||
See also [deriv_fast](#deriv_fast) and [ideriv](#ideriv).
|
||||
|
||||
#### deriv_fast
|
||||
|
||||
@@ -364,6 +401,8 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_values_over_time](#count_values_over_time).

#### duration_over_time

`duration_over_time(series_selector[d], max_interval)` is a [rollup function](#rollup-functions), which returns the duration in seconds
@@ -423,7 +462,9 @@ over the given lookbehind window `d` using the given smoothing factor `sf` and t
Both `sf` and `tf` must be in the range `[0...1]`. It is expected that the [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
returns time series of [gauge type](https://docs.victoriametrics.com/keyConcepts.html#gauge).

This function is supported by PromQL.

See also [range_linear_regression](#range_linear_regression).

#### idelta

@@ -432,7 +473,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [delta](#delta).

#### ideriv

@@ -455,7 +498,9 @@ See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-co

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).

#### increase_prometheus

@@ -499,7 +544,9 @@ It is expected that the `series_selector` returns time series of [counter type](

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [rate](#rate) and [rollup_rate](#rollup_rate).

#### lag

@@ -516,7 +563,9 @@ See also [lifetime](#lifetime) and [duration_over_time](#duration_over_time).
`last_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the last raw sample value on the given lookbehind window `d`
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL.

See also [first_over_time](#first_over_time) and [tlast_over_time](#tlast_over_time).

#### lifetime

@@ -539,7 +588,9 @@ See also [mad](#mad), [range_mad](#range_mad) and [outlier_iqr_over_time](#outli
`max_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the maximum value over raw samples
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL.

See also [tmax_over_time](#tmax_over_time).

#### median_over_time

@@ -554,7 +605,9 @@ See also [avg_over_time](#avg_over_time).
`min_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the minimum value over raw samples
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL.

See also [tmin_over_time](#tmin_over_time).

#### mode_over_time

@@ -570,7 +623,7 @@ if its value is either smaller than the `q25-1.5*iqr` or bigger than `q75+1.5*iq
- `q25` and `q75` are 25th and 75th [percentiles](https://en.wikipedia.org/wiki/Percentile) over raw samples on the lookbehind window `d`.

The `outlier_iqr_over_time()` is useful for detecting anomalies in gauge values based on the previous history of values.
For example, `outlier_iqr_over_time(memory_usage_bytes[1h])` triggers when `memory_usage_bytes` suddenly goes outside the usual value range for the last hour.

See also [outliers_iqr](#outliers_iqr).

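In alerting terms (hypothetical metric): a sample `x` counts as an outlier when `x < q25 - 1.5*iqr` or `x > q75 + 1.5*iqr`, where `iqr = q75 - q25`, so the following expression returns points only while the latest values escape the trailing 24-hour norm:

```metricsql
outlier_iqr_over_time(memory_usage_bytes[24h])
```
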
@@ -580,7 +633,9 @@ See also [outliers_iqr](#outliers_iqr).
linear interpolation over raw samples on the given lookbehind window `d`. The predicted value is calculated individually per each time series
returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

This function is supported by PromQL.

See also [range_linear_regression](#range_linear_regression).

#### present_over_time

@@ -597,7 +652,9 @@ This function is supported by PromQL.
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
The `phi` value must be in the range `[0...1]`.

This function is supported by PromQL.

See also [quantiles_over_time](#quantiles_over_time).

#### quantiles_over_time

@@ -622,9 +679,16 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k
over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).
It is expected that the `series_selector` returns time series of [counter type](https://docs.victoriametrics.com/keyConcepts.html#counter).

If the lookbehind window is skipped in square brackets, then it is automatically calculated as `max(step, scrape_interval)`, where `step` is the query arg value
passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyconcepts/#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query),
while `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for the selected time series.
This allows avoiding unexpected gaps on the graph when `step` is smaller than the `scrape_interval`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [irate](#irate) and [rollup_rate](#rollup_rate).

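Thanks to this, a plain `rate(...)` without an explicit window works reliably on dashboards with any `step` (metric name is illustrative):

```metricsql
sum(rate(http_requests_total)) by (job)  # per-job request rate, window defaults to max(step, scrape_interval)
```
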
#### rate_over_sum

@@ -652,6 +716,7 @@ on the given lookbehind window `d` and returns them in time series with `rollup=
These values are calculated individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

#### rollup_candlestick

@@ -660,7 +725,8 @@ over raw samples on the given lookbehind window `d` and returns them in time ser
The calculations are performed individually per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering). This function is useful for financial applications.

Optional 2nd argument `"open"`, `"high"`, `"low"` or `"close"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

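For example, the following hypothetical query keeps only the closing price of an assumed `trade_price` series per each 5-minute candle:

```metricsql
rollup_candlestick(trade_price[5m], "close")
```
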

#### rollup_delta

@@ -670,6 +736,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -683,6 +750,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -694,6 +762,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [rollup_delta](#rollup_delta).

@@ -707,10 +776,10 @@ See [this article](https://valyala.medium.com/why-irate-from-prometheus-doesnt-c
when to use `rollup_rate()`.

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

#### rollup_scrape_interval

@@ -721,6 +790,7 @@ and returns them in time series with `rollup="min"`, `rollup="max"` and `rollup=
The calculations are performed individually per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Optional 2nd argument `"min"`, `"max"` or `"avg"` can be passed to keep only one calculation result without adding a label.
See also [label_match](#label_match).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names. See also [scrape_interval](#scrape_interval).

@@ -743,7 +813,7 @@ This function is useful for calculating SLI and SLO. Example: `share_gt_over_tim

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [share_le_over_time](#share_le_over_time) and [count_gt_over_time](#count_gt_over_time).

#### share_le_over_time

@@ -756,7 +826,7 @@ the share of time series values for the last 24 hours when memory usage was belo

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [share_gt_over_time](#share_gt_over_time) and [count_le_over_time](#count_le_over_time).

#### share_eq_over_time

@@ -766,6 +836,8 @@ from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.ht

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [count_eq_over_time](#count_eq_over_time).

#### stale_samples_over_time

`stale_samples_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates the number
@@ -781,7 +853,9 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [stdvar_over_time](#stdvar_over_time).

#### stdvar_over_time

@@ -790,7 +864,36 @@ on the given lookbehind window `d` per each time series returned from the given

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [stddev_over_time](#stddev_over_time).

#### sum_eq_over_time

`sum_eq_over_time(series_selector[d], eq)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values equal to `eq`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_eq_over_time](#count_eq_over_time).

#### sum_gt_over_time

`sum_gt_over_time(series_selector[d], gt)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values bigger than `gt`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_gt_over_time](#count_gt_over_time).

#### sum_le_over_time

`sum_le_over_time(series_selector[d], le)` is a [rollup function](#rollup-functions), which calculates the sum of raw sample values smaller than or equal to `le`
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

See also [sum_over_time](#sum_over_time) and [count_le_over_time](#count_le_over_time).
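
For example, the following hypothetical query sums only the sub-`0.5` samples of an assumed `response_time_seconds` gauge over the trailing hour, which can then be compared against [sum_over_time](#sum_over_time) for share-of-total calculations:

```metricsql
sum_le_over_time(response_time_seconds[1h], 0.5)
```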

#### sum_over_time

@@ -810,25 +913,27 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k

#### timestamp

`timestamp(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [time](#time) and [now](#now).
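
"Seconds with millisecond precision" means the returned values are fractional Unix timestamps (metric name below is illustrative):

```metricsql
timestamp(up)  # returns e.g. 1712345678.123 instead of a truncated 1712345678
```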

#### timestamp_with_name

`timestamp_with_name(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last raw sample
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are preserved in the resulting rollups.

See also [timestamp](#timestamp) and [keep_metric_names](#keep_metric_names) modifier.

#### tfirst_over_time

`tfirst_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the first raw sample
on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -837,7 +942,7 @@ See also [first_over_time](#first_over_time).

#### tlast_change_over_time

`tlast_change_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the last change
per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) on the given lookbehind window `d`.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

@@ -852,7 +957,7 @@ See also [tlast_change_over_time](#tlast_change_over_time).

#### tmax_over_time

`tmax_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
with the maximum value on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

@@ -862,7 +967,7 @@ See also [max_over_time](#max_over_time).

#### tmin_over_time

`tmin_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns the timestamp in seconds with millisecond precision for the raw sample
with the minimum value on the given lookbehind window `d`. It is calculated independently per each time series returned
from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

@@ -891,7 +996,7 @@ Additional details:

* If transform function is applied directly to a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  then the [default_rollup()](#default_rollup) function is automatically applied before calculating the transformations.
  For example, `abs(temperature)` is implicitly transformed to `abs(default_rollup(temperature))`.
* All the transform functions accept optional `keep_metric_names` modifier. If it is set,
  then the function doesn't drop metric names from the resulting time series. See [these docs](#keep_metric_names).

@@ -909,7 +1014,9 @@ This function is supported by PromQL.

`absent(q)` is a [transform function](#transform-functions), which returns 1 if `q` has no points. Otherwise, returns an empty result.

This function is supported by PromQL.

See also [absent_over_time](#absent_over_time).

#### acos

@@ -918,7 +1025,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [asin](#asin) and [cos](#cos).

#### acosh

@@ -927,7 +1036,9 @@ This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [cosh](#cosh).

#### asin

@@ -936,7 +1047,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [acos](#acos) and [sin](#sin).

#### asinh

@@ -945,7 +1058,9 @@ This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [sinh](#sinh).

#### atan

@@ -954,7 +1069,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [tan](#tan).

#### atanh

@@ -963,7 +1080,9 @@ This function is supported by PromQL. See also [tan](#tan).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [tanh](#tanh).

#### bitmap_and

@@ -994,25 +1113,33 @@ See also [prometheus_buckets](#prometheus_buckets) and [histogram_quantile](#his

`ceil(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the upper nearest integer.

This function is supported by PromQL.

See also [floor](#floor) and [round](#round).

#### clamp

`clamp(q, min, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` and `max` values.

This function is supported by PromQL.

See also [clamp_min](#clamp_min) and [clamp_max](#clamp_max).

#### clamp_max

`clamp_max(q, max)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `max` value.

This function is supported by PromQL.

See also [clamp](#clamp) and [clamp_min](#clamp_min).

#### clamp_min

`clamp_min(q, min)` is a [transform function](#transform-functions), which clamps every point for every time series returned by `q` with the given `min` value.

This function is supported by PromQL.

See also [clamp](#clamp) and [clamp_max](#clamp_max).

#### cos

@@ -1020,7 +1147,9 @@ This function is supported by PromQL. See also [clamp](#clamp) and [clamp_max](#

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [sin](#sin).

#### cosh

@@ -1029,7 +1158,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [acosh](#acosh).

#### day_of_month

@@ -1040,6 +1171,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_week](#day_of_week) and [day_of_year](#day_of_year).

#### day_of_week

`day_of_week(q)` is a [transform function](#transform-functions), which returns the day of week for every point of every time series returned by `q`.
@@ -1049,6 +1182,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_month](#day_of_month) and [day_of_year](#day_of_year).

#### day_of_year

`day_of_year(q)` is a [transform function](#transform-functions), which returns the day of year for every point of every time series returned by `q`.
@@ -1058,6 +1193,8 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke

This function is supported by PromQL.

See also [day_of_week](#day_of_week) and [day_of_month](#day_of_month).

#### days_in_month

`days_in_month(q)` is a [transform function](#transform-functions), which returns the number of days in the month identified
@@ -1075,7 +1212,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [rad](#rad).

#### drop_empty_series

@@ -1101,13 +1240,17 @@ See also [start](#start), [time](#time) and [now](#now).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [ln](#ln).

#### floor

`floor(q)` is a [transform function](#transform-functions), which rounds every point for every time series returned by `q` to the lower nearest integer.

This function is supported by PromQL.

See also [ceil](#ceil) and [round](#round).

#### histogram_avg

@@ -1130,8 +1273,9 @@ When the [percentile](https://en.wikipedia.org/wiki/Percentile) is calculated ov
then all the input histograms **must** have buckets with identical boundaries, e.g. they must have the same set of `le` or `vmrange` labels.
Otherwise, the returned result may be invalid. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3231) for details.

This function is supported by PromQL (except for the `boundLabel` arg).

See also [histogram_quantiles](#histogram_quantiles), [histogram_share](#histogram_share) and [quantile](#quantile).

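A typical sketch, assuming a classic Prometheus histogram `http_request_duration_seconds` whose buckets share identical `le` boundaries across all input series:

```metricsql
histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))
```
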

#### histogram_quantiles

@@ -1203,7 +1347,9 @@ This allows implementing simple paging for `q` time series. See also [limitk](#l

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [exp](#exp) and [log2](#log2).

#### log2

@@ -1211,7 +1357,9 @@ This function is supported by PromQL. See also [exp](#exp) and [log2](#log2).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [log10](#log10) and [ln](#ln).

#### log10

@@ -1219,7 +1367,9 @@ This function is supported by PromQL. See also [log10](#log10) and [ln](#ln).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [log2](#log2) and [ln](#ln).

#### minute

@@ -1258,7 +1408,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by PromQL.

See also [deg](#deg).

#### prometheus_buckets

@@ -1386,7 +1538,9 @@ for points returned by `q`, e.g. it is equivalent to the following query: `(q -
`round(q, nearest)` is a [transform function](#transform-functions), which rounds every point of every time series returned by `q` to the `nearest` multiple.
If `nearest` is missing then the rounding is performed to the nearest integer.

This function is supported by PromQL.

See also [floor](#floor) and [ceil](#ceil).

#### ru

@@ -1430,7 +1584,9 @@ This function is supported by PromQL.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL.

See also [cos](#cos).

#### sinh

@@ -1439,7 +1595,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL.

See also [cosh](#cosh).

#### tan

@@ -1447,7 +1605,9 @@ This function is supported by MetricsQL. See also [cosh](#cosh).

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL.

See also [atan](#atan).

#### tanh

@@ -1456,7 +1616,9 @@ for every point of every time series returned by `q`.

Metric names are stripped from the resulting series. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is supported by MetricsQL.

See also [atanh](#atanh).

#### smooth_exponential

@@ -1467,13 +1629,17 @@ by `q` using [exponential moving average](https://en.wikipedia.org/wiki/Moving_a

`sort(q)` is a [transform function](#transform-functions), which sorts series in ascending order by the last point in every time series returned by `q`.

This function is supported by PromQL.

See also [sort_desc](#sort_desc) and [sort_by_label](#sort_by_label).

#### sort_desc

`sort_desc(q)` is a [transform function](#transform-functions), which sorts series in descending order by the last point in every time series returned by `q`.

This function is supported by PromQL.

See also [sort](#sort) and [sort_by_label_desc](#sort_by_label_desc).

#### sqrt

@@ -1502,7 +1668,9 @@ See also [start](#start) and [end](#end).

`time()` is a [transform function](#transform-functions), which returns unix timestamp for every returned point.

This function is supported by PromQL.

See also [timestamp](#timestamp), [now](#now), [start](#start) and [end](#end).

#### timezone_offset

@@ -1551,7 +1719,7 @@ Additional details:

* If label manipulation function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  then the [default_rollup()](#default_rollup) function is automatically applied before performing the label transformation.
  For example, `alias(temperature, "foo")` is implicitly transformed to `alias(default_rollup(temperature), "foo")`.

See also [implicit query conversions](#implicit-query-conversions).

@@ -1728,7 +1896,7 @@ Additional details:
  Multiple labels can be put in `by` and `without` modifiers.
* If the aggregate function is applied directly to a [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  then the [default_rollup()](#default_rollup) function is automatically applied before calculating the aggregate.
  For example, `count(up)` is implicitly transformed to `count(default_rollup(up))`.
* Aggregate functions accept arbitrary number of args. For example, `avg(q1, q2, q3)` would return the average values for every point
  across time series returned by `q1`, `q2` and `q3`.
* Aggregate functions support optional `limit N` suffix, which can be used for limiting the number of output groups.
@@ -1756,7 +1924,9 @@ This function is supported by PromQL.
`bottomk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the smallest values across all the time series returned by `q`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL.

See also [topk](#topk), [bottomk_min](#bottomk_min) and [bottomk_last](#bottomk_last).

#### bottomk_avg

@@ -1818,10 +1988,14 @@ The aggregate is calculated individually per each group of points with the same

This function is supported by PromQL.

See also [count_values_over_time](#count_values_over_time) and [label_match](#label_match).

#### distinct

`distinct(q)` is [aggregate function](#aggregate-functions), which calculates the number of unique values per each group of points with the same timestamp.

See also [distinct_over_time](#distinct_over_time).

#### geomean

`geomean(q)` is [aggregate function](#aggregate-functions), which calculates geometric mean per each group of points with the same timestamp.
@@ -1913,7 +2087,9 @@ See also [outliers_iqr](#outliers_iqr) and [outliers_mad](#outliers_mad).
for all the time series returned by `q`. `phi` must be in the range `[0...1]`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL.

See also [quantiles](#quantiles) and [histogram_quantile](#histogram_quantile).

#### quantiles

@@ -1972,7 +2148,9 @@ for all the time series returned by `q`. The aggregate is calculated individuall
`topk(k, q)` is [aggregate function](#aggregate-functions), which returns up to `k` points with the biggest values across all the time series returned by `q`.
The aggregate is calculated individually per each group of points with the same timestamp.

This function is supported by PromQL.

See also [bottomk](#bottomk), [topk_max](#topk_max) and [topk_last](#topk_last).

#### topk_avg

@@ -2032,7 +2210,7 @@ See also [zscore_over_time](#zscore_over_time), [range_trim_zscore](#range_trim_
MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
Any [rollup function](#rollup-functions) applied to something other than a [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering) forms a subquery.
Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions).
For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m))[1i:1i])`, so it becomes a subquery,
since it contains [default_rollup](#default_rollup) nested into [delta](#delta).

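An explicit subquery looks like this (metric name is illustrative): the inner `rate` is evaluated at a `1m` step over the trailing hour, and the outer rollup aggregates those intermediate points:

```metricsql
max_over_time(rate(http_requests_total[5m])[1h:1m])
```
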
VictoriaMetrics performs subqueries in the following way:

@@ -2047,21 +2225,23 @@ VictoriaMetrics performs subqueries in the following way:

VictoriaMetrics performs the following implicit conversions for incoming queries before starting the calculations:

* If lookbehind window in square brackets is missing inside [rollup function](#rollup-functions), then it is automatically set to the following value:
  - To `step` value passed to [/api/v1/query_range](https://docs.victoriametrics.com/keyConcepts.html#range-query) or [/api/v1/query](https://docs.victoriametrics.com/keyconcepts/#instant-query)
    for all the [rollup functions](#rollup-functions) except for [default_rollup](#default_rollup) and [rate](#rate). This value is known as `$__interval` in Grafana or `1i` in MetricsQL.
    For example, `avg_over_time(temperature)` is automatically transformed to `avg_over_time(temperature[1i])`.
  - To the `max(step, scrape_interval)`, where `scrape_interval` is the interval between [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples),
    for [default_rollup](#default_rollup) and [rate](#rate) functions. This allows avoiding unexpected gaps on the graph when `step` is smaller than `scrape_interval`.
* All the [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering),
  which aren't wrapped into [rollup functions](#rollup-functions), are automatically wrapped into [default_rollup](#default_rollup) function.
  Examples (see also the sketch after this list):
  * `foo` is transformed to `default_rollup(foo)`
  * `foo + bar` is transformed to `default_rollup(foo) + default_rollup(bar)`
  * `count(up)` is transformed to `count(default_rollup(up))`, because [count](#count) isn't a [rollup function](#rollup-functions) -
    it is [aggregate function](#aggregate-functions)
  * `abs(temperature)` is transformed to `abs(default_rollup(temperature))`, because [abs](#abs) isn't a [rollup function](#rollup-functions) -
    it is [transform function](#transform-functions)
* If `step` in square brackets is missing inside [subquery](#subqueries), then `1i` step is automatically added there.
  For example, `avg_over_time(rate(http_requests_total[5m])[1h])` is automatically converted to `avg_over_time(rate(http_requests_total[5m])[1h:1i])`.
* If something other than [series selector](https://docs.victoriametrics.com/keyConcepts.html#filtering)
  is passed to [rollup function](#rollup-functions), then a [subquery](#subqueries) with `1i` lookbehind window and `1i` step is automatically formed.
  For example, `rate(sum(up))` is automatically converted to `rate((sum(default_rollup(up)))[1i:1i])`.
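
A short sketch of these conversions in action (hypothetical metric names; the comments show the effective query):

```metricsql
avg_over_time(temperature)   # becomes avg_over_time(temperature[1i]), i.e. the window equals step
rate(http_requests_total)    # window becomes max(step, scrape_interval) instead of plain 1i
delta(sum(m))                # becomes delta(sum(default_rollup(m))[1i:1i]), an implicit subquery
```
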
app/vmagent/datadogsketches/request_handler.go (new file, 95 lines)
@@ -0,0 +1,95 @@
package datadogsketches

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogsketches"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogsketches/stream"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rowsInserted       = metrics.NewCounter(`vmagent_rows_inserted_total{type="datadogsketches"}`)
	rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="datadogsketches"}`)
	rowsPerInsert      = metrics.NewHistogram(`vmagent_rows_per_insert{type="datadogsketches"}`)
)

// InsertHandlerForHTTP processes remote write for DataDog POST /api/beta/sketches request.
func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
	extraLabels, err := parserCommon.GetExtraLabels(req)
	if err != nil {
		return err
	}
	ce := req.Header.Get("Content-Encoding")
	return stream.Parse(req.Body, ce, func(sketches []*datadogsketches.Sketch) error {
		return insertRows(at, sketches, extraLabels)
	})
}

// insertRows converts the parsed DataDog sketches into Prometheus-style time series
// and pushes them to the configured remote storage.
func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels []prompbmarshal.Label) error {
	ctx := common.GetPushCtx()
	defer common.PutPushCtx(ctx)

	rowsTotal := 0
	tssDst := ctx.WriteRequest.Timeseries[:0]
	labels := ctx.Labels[:0]
	samples := ctx.Samples[:0]
	for _, sketch := range sketches {
		ms := sketch.ToSummary()
		for _, m := range ms {
			labelsLen := len(labels)
			labels = append(labels, prompbmarshal.Label{
				Name:  "__name__",
				Value: m.Name,
			})
			for _, label := range m.Labels {
				labels = append(labels, prompbmarshal.Label{
					Name:  label.Name,
					Value: label.Value,
				})
			}
			for _, tag := range sketch.Tags {
				name, value := datadogutils.SplitTag(tag)
				if name == "host" {
					// Rename the DataDog "host" tag in order to avoid clashes with the "host" label.
					name = "exported_host"
				}
				labels = append(labels, prompbmarshal.Label{
					Name:  name,
					Value: value,
				})
			}
			labels = append(labels, extraLabels...)
			samplesLen := len(samples)
			for _, p := range m.Points {
				samples = append(samples, prompbmarshal.Sample{
					Timestamp: p.Timestamp,
					Value:     p.Value,
				})
			}
			rowsTotal += len(m.Points)
			tssDst = append(tssDst, prompbmarshal.TimeSeries{
				Labels:  labels[labelsLen:],
				Samples: samples[samplesLen:],
			})
		}
	}
	ctx.WriteRequest.Timeseries = tssDst
	ctx.Labels = labels
	ctx.Samples = samples
	if !remotewrite.TryPush(at, &ctx.WriteRequest) {
		return remotewrite.ErrQueueFullHTTPRetry
	}
	rowsInserted.Add(rowsTotal)
	if at != nil {
		rowsTenantInserted.Get(at).Add(rowsTotal)
	}
	rowsPerInsert.Update(float64(rowsTotal))
	return nil
}

@@ -8,10 +8,10 @@ import (
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/csvimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadogsketches"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadogv1"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/datadogv2"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/graphite"
@@ -40,15 +40,16 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
	"github.com/VictoriaMetrics/metrics"
)

var (
	httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. "+
		"Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. "+
		"Note that /targets and /metrics pages aren't available if -httpListenAddr=''. See also -tls and -httpListenAddr.useProxyProtocol")
	useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
	influxListenAddr = flag.String("influxListenAddr", "", "TCP and UDP address to listen for InfluxDB line protocol data. Usually :8089 must be set. Doesn't work if empty. "+
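
Since both flags are array-valued now, a single vmagent instance can accept HTTP traffic on several addresses, with proxy protocol toggled per address. A hypothetical invocation: `./vmagent -httpListenAddr=:8429 -httpListenAddr=:8430 -httpListenAddr.useProxyProtocol=false,true -remoteWrite.url=...` (VictoriaMetrics array flags accept repeated flags or comma-separated values).
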
@@ -119,8 +120,13 @@ func main() {
		return
	}

	listenAddrs := *httpListenAddrs
	if len(listenAddrs) == 0 {
		// Fall back to the historical default port when no -httpListenAddr is given.
		listenAddrs = []string{":8429"}
	}
	logger.Infof("starting vmagent at %q...", listenAddrs)
	startTime := time.Now()
	remotewrite.StartIngestionRateLimiter()
	remotewrite.Init()
	common.StartUnmarshalWorkers()
	if len(*influxListenAddr) > 0 {

@@ -142,24 +148,21 @@ func main() {

	promscrape.Init(remotewrite.PushDropSamplesOnFailure)

	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
	logger.Infof("started vmagent in %.3f seconds", time.Since(startTime).Seconds())

	pushmetrics.Init()
	sig := procutil.WaitForSigterm()
	logger.Infof("received signal %s", sig)
	remotewrite.StopIngestionRateLimiter()
	pushmetrics.Stop()

	startTime = time.Now()
	logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
	if err := httpserver.Stop(listenAddrs); err != nil {
		logger.Fatalf("cannot stop the webservice: %s", err)
	}
	logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())

	promscrape.Stop()

@@ -261,7 +264,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		path = strings.TrimSuffix(path, "/")
	}
	switch path {
	case "/prometheus/api/v1/write", "/api/v1/write", "/api/v1/push", "/prometheus/api/v1/push":
		if common.HandleVMProtoServerHandshake(w, r) {
			return true
		}

@@ -313,14 +316,14 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		influxQueryRequests.Inc()
		influxutils.WriteDatabaseNames(w)
		return true
	case "/opentelemetry/api/v1/push", "/opentelemetry/v1/metrics":
		opentelemetryPushRequests.Inc()
		if err := opentelemetry.InsertHandler(nil, r); err != nil {
			opentelemetryPushErrors.Inc()
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		firehose.WriteSuccessResponse(w, r)
		return true
	case "/newrelic":
		newrelicCheckRequest.Inc()

@@ -368,6 +371,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		w.WriteHeader(202)
		fmt.Fprintf(w, `{"status":"ok"}`)
		return true
	case "/datadog/api/beta/sketches":
		datadogsketchesWriteRequests.Inc()
		if err := datadogsketches.InsertHandlerForHTTP(nil, r); err != nil {
			datadogsketchesWriteErrors.Inc()
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		w.WriteHeader(202)
		return true
	case "/datadog/api/v1/validate":
		datadogValidateRequests.Inc()
		// See https://docs.datadoghq.com/api/latest/authentication/#validate-api-key

@@ -449,7 +461,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		w.WriteHeader(http.StatusOK)
		return true
	case "/ready":
		if rdy := promscrape.PendingScrapeConfigs.Load(); rdy > 0 {
			errMsg := fmt.Sprintf("waiting for scrapes to init, left: %d", rdy)
			http.Error(w, errMsg, http.StatusTooEarly)
		} else {

@@ -501,7 +513,7 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
		p.Suffix = strings.TrimSuffix(p.Suffix, "/")
	}
	switch p.Suffix {
	case "prometheus/", "prometheus", "prometheus/api/v1/write", "prometheus/api/v1/push":
		prometheusWriteRequests.Inc()
		if err := promremotewrite.InsertHandler(at, r); err != nil {
			prometheusWriteErrors.Inc()

@@ -550,14 +562,14 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
		influxQueryRequests.Inc()
		influxutils.WriteDatabaseNames(w)
		return true
	case "opentelemetry/api/v1/push", "opentelemetry/v1/metrics":
		opentelemetryPushRequests.Inc()
		if err := opentelemetry.InsertHandler(at, r); err != nil {
			opentelemetryPushErrors.Inc()
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		firehose.WriteSuccessResponse(w, r)
		return true
	case "newrelic":
		newrelicCheckRequest.Inc()

@@ -603,6 +615,15 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
|
||||
w.WriteHeader(202)
|
||||
fmt.Fprintf(w, `{"status":"ok"}`)
|
||||
return true
|
||||
case "datadog/api/beta/sketches":
|
||||
datadogsketchesWriteRequests.Inc()
|
||||
if err := datadogsketches.InsertHandlerForHTTP(at, r); err != nil {
|
||||
datadogsketchesWriteErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(202)
|
||||
return true
|
||||
case "datadog/api/v1/validate":
|
||||
datadogValidateRequests.Inc()
|
||||
// See https://docs.datadoghq.com/api/latest/authentication/#validate-api-key
|
||||
@@ -659,13 +680,16 @@ var (
|
||||
datadogv2WriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v2/series", protocol="datadog"}`)
|
||||
datadogv2WriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/datadog/api/v2/series", protocol="datadog"}`)
|
||||
|
||||
datadogsketchesWriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/beta/sketches", protocol="datadog"}`)
|
||||
datadogsketchesWriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/datadog/api/beta/sketches", protocol="datadog"}`)
|
||||
|
||||
datadogValidateRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/validate", protocol="datadog"}`)
|
||||
datadogCheckRunRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/check_run", protocol="datadog"}`)
|
||||
datadogIntakeRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
|
||||
datadogMetadataRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)
|
||||
|
||||
opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
|
||||
opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
|
||||
opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
|
||||
opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
|
||||
|
||||
newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
|
||||
newrelicWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
|
||||
|
||||
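The counters above use the VictoriaMetrics/metrics package, where the labels are embedded directly in the metric name string, so one counter is created per (path, protocol) pair. A minimal sketch of the same pattern with hypothetical metric names:

package main

import "github.com/VictoriaMetrics/metrics"

// The full name string, including labels, identifies the time series.
var (
	exampleRequests = metrics.NewCounter(`example_http_requests_total{path="/example/push", protocol="example"}`)
	exampleErrors   = metrics.NewCounter(`example_http_request_errors_total{path="/example/push", protocol="example"}`)
)

func handleExamplePush(ok bool) {
	exampleRequests.Inc()
	if !ok {
		exampleErrors.Inc()
	}
}

func main() { handleExamplePush(true) }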
@@ -9,6 +9,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
"github.com/VictoriaMetrics/metrics"

@@ -27,10 +28,15 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
return err
}
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
+ var processBody func([]byte) ([]byte, error)
if req.Header.Get("Content-Type") == "application/json" {
- return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
+ if req.Header.Get("X-Amz-Firehose-Protocol-Version") != "" {
+ processBody = firehose.ProcessRequestBody
+ } else {
+ return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
+ }
}
- return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
+ return stream.ParseStream(req.Body, isGzipped, processBody, func(tss []prompbmarshal.TimeSeries) error {
return insertRows(at, tss, extraLabels)
})
}
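The Firehose branch above plugs a body pre-processor into the stream parser: requests carrying the X-Amz-Firehose-Protocol-Version header get their JSON envelope unwrapped before protobuf decoding. A self-contained sketch of the same hook pattern; parseStream and its signature are illustrative, not the library's actual API:

package streamsketch

import (
	"compress/gzip"
	"io"
)

// parseStream optionally gunzips the body, lets processBody unwrap an
// envelope (such as the Firehose JSON wrapper), then hands the raw
// payload to the callback.
func parseStream(r io.Reader, isGzipped bool, processBody func([]byte) ([]byte, error), callback func([]byte) error) error {
	var rd io.Reader = r
	if isGzipped {
		zr, err := gzip.NewReader(r)
		if err != nil {
			return err
		}
		defer zr.Close()
		rd = zr
	}
	data, err := io.ReadAll(rd)
	if err != nil {
		return err
	}
	if processBody != nil {
		if data, err = processBody(data); err != nil {
			return err
		}
	}
	return callback(data)
}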
@@ -17,6 +17,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
"github.com/VictoriaMetrics/metrics"

@@ -30,11 +31,12 @@ var (

rateLimit = flagutil.NewArrayInt("remoteWrite.rateLimit", 0, "Optional rate limit in bytes per second for data sent to the corresponding -remoteWrite.url. "+
"By default, the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
- "is sent after temporary unavailability of the remote storage")
+ "is sent after temporary unavailability of the remote storage. See also -maxIngestionRate")
sendTimeout = flagutil.NewArrayDuration("remoteWrite.sendTimeout", time.Minute, "Timeout for sending a single block of data to the corresponding -remoteWrite.url")
proxyURL = flagutil.NewArrayString("remoteWrite.proxyURL", "Optional proxy URL for writing data to the corresponding -remoteWrite.url. "+
"Supported proxies: http, https, socks5. Example: -remoteWrite.proxyURL=socks5://proxy:1234")

+ tlsHandshakeTimeout = flagutil.NewArrayDuration("remoteWrite.tlsHandshakeTimeout", 20*time.Second, "The timeout for establishing TLS connections to the corresponding -remoteWrite.url")
tlsInsecureSkipVerify = flagutil.NewArrayBool("remoteWrite.tlsInsecureSkipVerify", "Whether to skip tls verification when connecting to the corresponding -remoteWrite.url")
tlsCertFile = flagutil.NewArrayString("remoteWrite.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting "+
"to the corresponding -remoteWrite.url")

@@ -90,7 +92,7 @@ type client struct {
authCfg *promauth.Config
awsCfg *awsapi.Config

- rl rateLimiter
+ rl *ratelimiter.RateLimiter

bytesSent *metrics.Counter
blocksSent *metrics.Counter

@@ -111,18 +113,13 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
if err != nil {
logger.Fatalf("cannot initialize auth config for -remoteWrite.url=%q: %s", remoteWriteURL, err)
}
- tlsCfg, err := authCfg.NewTLSConfig()
- if err != nil {
- logger.Fatalf("cannot initialize tls config for -remoteWrite.url=%q: %s", remoteWriteURL, err)
- }
awsCfg, err := getAWSAPIConfig(argIdx)
if err != nil {
logger.Fatalf("cannot initialize AWS Config for -remoteWrite.url=%q: %s", remoteWriteURL, err)
}
tr := &http.Transport{
DialContext: statDial,
- TLSClientConfig: tlsCfg,
- TLSHandshakeTimeout: 10 * time.Second,
+ TLSHandshakeTimeout: tlsHandshakeTimeout.GetOptionalArg(argIdx),
MaxConnsPerHost: 2 * concurrency,
MaxIdleConnsPerHost: 2 * concurrency,
IdleConnTimeout: time.Minute,

@@ -140,7 +137,7 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
tr.Proxy = http.ProxyURL(pu)
}
hc := &http.Client{
- Transport: tr,
+ Transport: authCfg.NewRoundTripper(tr),
Timeout: sendTimeout.GetOptionalArg(argIdx),
}
c := &client{
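The previously hard-coded 10s handshake timeout becomes configurable per -remoteWrite.url. A standard-library sketch of a client built this way; the concrete values and parameters are illustrative:

package clientsketch

import (
	"net/http"
	"time"
)

// newClient builds an HTTP client whose TLS handshake timeout comes
// from per-target configuration instead of a fixed constant.
func newClient(tlsHandshakeTimeout, sendTimeout time.Duration, concurrency int) *http.Client {
	tr := &http.Transport{
		TLSHandshakeTimeout: tlsHandshakeTimeout, // previously a fixed 10 * time.Second
		MaxConnsPerHost:     2 * concurrency,
		MaxIdleConnsPerHost: 2 * concurrency,
		IdleConnTimeout:     time.Minute,
	}
	return &http.Client{Transport: tr, Timeout: sendTimeout}
}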
@@ -176,12 +173,11 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
}

func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
+ limitReached := metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_rate_limit_reached_total{url=%q}`, c.sanitizedURL))
if bytesPerSec := rateLimit.GetOptionalArg(argIdx); bytesPerSec > 0 {
logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", bytesPerSec, sanitizedURL)
- c.rl.perSecondLimit = int64(bytesPerSec)
+ c.rl = ratelimiter.New(int64(bytesPerSec), limitReached, c.stopCh)
}
- c.rl.limitReached = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_rate_limit_reached_total{url=%q}`, c.sanitizedURL))

c.bytesSent = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_bytes_sent_total{url=%q}`, c.sanitizedURL))
c.blocksSent = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_blocks_sent_total{url=%q}`, c.sanitizedURL))
c.rateLimit = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_rate_limit{url=%q}`, c.sanitizedURL), func() float64 {

@@ -395,7 +391,7 @@ func (c *client) newRequest(url string, body []byte) (*http.Request, error) {
// The function returns false only if c.stopCh is closed.
// Otherwise it tries sending the block to remote storage indefinitely.
func (c *client) sendBlockHTTP(block []byte) bool {
- c.rl.register(len(block), c.stopCh)
+ c.rl.Register(len(block))
maxRetryDuration := timeutil.AddJitterToDuration(time.Minute)
retryDuration := timeutil.AddJitterToDuration(time.Second)
retriesCount := 0

@@ -478,45 +474,3 @@ again:
}

var remoteWriteRejectedLogger = logger.WithThrottler("remoteWriteRejected", 5*time.Second)

- type rateLimiter struct {
- perSecondLimit int64
-
- // mu protects budget and deadline from concurrent access.
- mu sync.Mutex
-
- // The current budget. It is increased by perSecondLimit every second.
- budget int64
-
- // The next deadline for increasing the budget by perSecondLimit
- deadline time.Time
-
- limitReached *metrics.Counter
- }
-
- func (rl *rateLimiter) register(dataLen int, stopCh <-chan struct{}) {
- limit := rl.perSecondLimit
- if limit <= 0 {
- return
- }
-
- rl.mu.Lock()
- defer rl.mu.Unlock()
-
- for rl.budget <= 0 {
- if d := time.Until(rl.deadline); d > 0 {
- rl.limitReached.Inc()
- t := timerpool.Get(d)
- select {
- case <-stopCh:
- timerpool.Put(t)
- return
- case <-t.C:
- timerpool.Put(t)
- }
- }
- rl.budget += limit
- rl.deadline = time.Now().Add(time.Second)
- }
- rl.budget -= int64(dataLen)
- }
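The deleted type above is a classic budget-plus-deadline token bucket; it now lives in the shared lib/ratelimiter package so vmagent's per-URL limit and the new global -maxIngestionRate limit can reuse one implementation. A self-contained sketch of the same idea, with the blocking wait simplified to time.Sleep (the original waits on a pooled timer and honors a stop channel):

package ratelimitsketch

import (
	"sync"
	"time"
)

type rateLimiter struct {
	perSecondLimit int64

	mu       sync.Mutex
	budget   int64     // bytes still allowed in the current second
	deadline time.Time // when the budget is topped up again
}

func (rl *rateLimiter) register(dataLen int) {
	if rl.perSecondLimit <= 0 {
		return // rate limiting disabled
	}
	rl.mu.Lock()
	defer rl.mu.Unlock()
	for rl.budget <= 0 {
		if d := time.Until(rl.deadline); d > 0 {
			time.Sleep(d) // block until the next top-up
		}
		rl.budget += rl.perSecondLimit
		rl.deadline = time.Now().Add(time.Second)
	}
	rl.budget -= int64(dataLen)
}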
@@ -82,7 +82,7 @@ func (ps *pendingSeries) periodicFlusher() {
ps.mu.Unlock()
return
case <-ticker.C:
- if fasttime.UnixTimestamp()-atomic.LoadUint64(&ps.wr.lastFlushTime) < uint64(flushSeconds) {
+ if fasttime.UnixTimestamp()-ps.wr.lastFlushTime.Load() < uint64(flushSeconds) {
continue
}
}

@@ -93,8 +93,7 @@
}

type writeRequest struct {
- // Move lastFlushTime to the top of the struct in order to guarantee atomic access on 32-bit architectures.
- lastFlushTime uint64
+ lastFlushTime atomic.Uint64

// The queue to send blocks to.
fq *persistentqueue.FastQueue

@@ -155,7 +154,7 @@ func (wr *writeRequest) mustWriteBlock(block []byte) bool {

func (wr *writeRequest) tryFlush() bool {
wr.wr.Timeseries = wr.tss
- atomic.StoreUint64(&wr.lastFlushTime, fasttime.UnixTimestamp())
+ wr.lastFlushTime.Store(fasttime.UnixTimestamp())
if !tryPushWriteRequest(&wr.wr, wr.fq.TryWriteBlock, wr.isVMRemoteWrite) {
return false
}
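The typed atomics introduced in Go 1.19 make the removed placement comment obsolete: atomic.Uint64 carries its own 64-bit alignment guarantee, so the field no longer has to sit first in the struct for 32-bit platforms. A minimal sketch of the resulting pattern:

package flushsketch

import (
	"sync/atomic"
	"time"
)

type writeRequest struct {
	// atomic.Uint64 is guaranteed to be correctly aligned wherever it
	// lives in the struct; no ordering trick is required anymore.
	lastFlushTime atomic.Uint64
}

func (wr *writeRequest) markFlushed() {
	wr.lastFlushTime.Store(uint64(time.Now().Unix()))
}

func (wr *writeRequest) secondsSinceFlush() uint64 {
	return uint64(time.Now().Unix()) - wr.lastFlushTime.Load()
}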
@@ -27,6 +27,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
"github.com/VictoriaMetrics/metrics"

@@ -50,13 +51,17 @@ var (
"By default the data is replicated across all the -remoteWrite.url . See https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages")
shardByURLLabels = flagutil.NewArrayString("remoteWrite.shardByURL.labels", "Optional list of labels, which must be used for sharding outgoing samples "+
"among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
- "even distribution of series over the specified -remoteWrite.url systems")
+ "even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.ignoreLabels")
+ shardByURLIgnoreLabels = flagutil.NewArrayString("remoteWrite.shardByURL.ignoreLabels", "Optional list of labels, which must be ignored when sharding outgoing samples "+
+ "among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
+ "even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.labels")
tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory for storing pending data, which isn't sent to the configured -remoteWrite.url . "+
"See also -remoteWrite.maxDiskUsagePerURL and -remoteWrite.disableOnDiskQueue")
keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
"Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.")
queues = flag.Int("remoteWrite.queues", cgroup.AvailableCPUs()*2, "The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues "+
- "isn't enough for sending high volume of collected data to remote storage. Default value is 2 * numberOfAvailableCPUs")
+ "isn't enough for sending high volume of collected data to remote storage. "+
+ "Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage")
showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")
maxPendingBytesPerURL = flagutil.NewArrayBytes("remoteWrite.maxDiskUsagePerURL", 0, "The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath "+

@@ -79,6 +84,8 @@ var (
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter")
maxDailySeries = flag.Int("remoteWrite.maxDailySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. "+
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter")
+ maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmagent can receive per second. Data ingestion is paused when the limit is exceeded. "+
+ "By default there are no limits on samples ingestion rate. See also -remoteWrite.rateLimit")

streamAggrConfig = flagutil.NewArrayString("remoteWrite.streamAggr.config", "Optional path to file with stream aggregation config. "+
"See https://docs.victoriametrics.com/stream-aggregation.html . "+

@@ -89,8 +96,13 @@ var (
streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop all the input samples after the aggregation "+
"with -remoteWrite.streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples "+
"are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
- streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before being aggregated. "+
- "Only the last sample per each time series per each interval is aggregated if the interval is greater than zero")
+ streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation "+
+ "with -remoteWrite.streamAggr.config . See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
+ streamAggrIgnoreOldSamples = flagutil.NewArrayBool("remoteWrite.streamAggr.ignoreOldSamples", "Whether to ignore input samples with old timestamps outside the current aggregation interval "+
+ "for the corresponding -remoteWrite.streamAggr.config . See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples")
+ streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
+ "before stream de-duplication and aggregation. See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")

disableOnDiskQueue = flag.Bool("remoteWrite.disableOnDiskQueue", false, "Whether to disable storing pending data to -remoteWrite.tmpDataPath "+
"when the configured remote storage systems cannot keep up with the data ingestion rate. See https://docs.victoriametrics.com/vmagent.html#disabling-on-disk-persistence ."+
"See also -remoteWrite.dropSamplesOnOverload")

@@ -140,7 +152,10 @@ func InitSecretFlags() {
}
}

- var shardByURLLabelsMap map[string]struct{}
+ var (
+ shardByURLLabelsMap map[string]struct{}
+ shardByURLIgnoreLabelsMap map[string]struct{}
+ )

// Init initializes remotewrite.
//
@@ -172,19 +187,21 @@ func Init() {
return float64(dailySeriesLimiter.CurrentItems())
})
}

if *queues > maxQueues {
*queues = maxQueues
}
if *queues <= 0 {
*queues = 1
}
- if len(*shardByURLLabels) > 0 {
- m := make(map[string]struct{}, len(*shardByURLLabels))
- for _, label := range *shardByURLLabels {
- m[label] = struct{}{}
- }
- shardByURLLabelsMap = m

+ if len(*shardByURLLabels) > 0 && len(*shardByURLIgnoreLabels) > 0 {
+ logger.Fatalf("-remoteWrite.shardByURL.labels and -remoteWrite.shardByURL.ignoreLabels cannot be set simultaneously; " +
+ "see https://docs.victoriametrics.com/vmagent/#sharding-among-remote-storages")
+ }
+ shardByURLLabelsMap = newMapFromStrings(*shardByURLLabels)
+ shardByURLIgnoreLabelsMap = newMapFromStrings(*shardByURLIgnoreLabels)

initLabelsGlobal()

// Register SIGHUP handler for config reload before loadRelabelConfigs.

@@ -336,6 +353,35 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
var configReloaderStopCh = make(chan struct{})
var configReloaderWG sync.WaitGroup

+ // StartIngestionRateLimiter starts ingestion rate limiter.
+ //
+ // Ingestion rate limiter must be started before Init() call.
+ //
+ // StopIngestionRateLimiter must be called before Stop() call in order to unblock all the callers
+ // to ingestion rate limiter. Otherwise deadlock may occur at Stop() call.
+ func StartIngestionRateLimiter() {
+ if *maxIngestionRate <= 0 {
+ return
+ }
+ ingestionRateLimitReached := metrics.NewCounter(`vmagent_max_ingestion_rate_limit_reached_total`)
+ ingestionRateLimiterStopCh = make(chan struct{})
+ ingestionRateLimiter = ratelimiter.New(int64(*maxIngestionRate), ingestionRateLimitReached, ingestionRateLimiterStopCh)
+ }
+
+ // StopIngestionRateLimiter stops ingestion rate limiter.
+ func StopIngestionRateLimiter() {
+ if ingestionRateLimiterStopCh == nil {
+ return
+ }
+ close(ingestionRateLimiterStopCh)
+ ingestionRateLimiterStopCh = nil
+ }
+
+ var (
+ ingestionRateLimiter *ratelimiter.RateLimiter
+ ingestionRateLimiterStopCh chan struct{}
+ )

// Stop stops remotewrite.
//
// It is expected that nobody calls TryPush during and after the call to this func.
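The doc comments above prescribe a strict start/stop ordering. A hypothetical sketch of a caller honoring it; the real wiring lives in vmagent's startup code and the exact signatures there may differ:

package mainsketch

import "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"

// Start the limiter before Init so Register calls always see a fully
// constructed limiter; stop it before Stop so blocked Register callers
// are released and Stop cannot deadlock.
func run(waitForShutdown func()) {
	remotewrite.StartIngestionRateLimiter()
	remotewrite.Init()

	waitForShutdown() // hypothetical helper

	remotewrite.StopIngestionRateLimiter()
	remotewrite.Stop()
}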
@@ -462,6 +508,9 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, dropSamplesOnFailur
break
}
}

+ ingestionRateLimiter.Register(samplesCount)
+
tssBlock := tss
if i < len(tss) {
tssBlock = tss[:i]

@@ -526,6 +575,15 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
+ } else if len(shardByURLIgnoreLabelsMap) > 0 {
+ hashLabels = tmpLabels.Labels[:0]
+ for _, label := range ts.Labels {
+ if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
+ hashLabels = append(hashLabels, label)
+ }
+ }
+ tmpLabels.Labels = hashLabels
}
h := getLabelsHash(hashLabels)
idx := h % uint64(len(tssByURL))
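Sharding picks a remote storage from the hash of the filtered label set, so series that differ only in ignored labels land on the same shard. A self-contained sketch with a stdlib FNV hash; the actual getLabelsHash implementation may use a different hash function:

package shardsketch

import "hash/fnv"

type label struct{ Name, Value string }

// shardIdx hashes only the labels that pass the keep/ignore filters
// and maps the result onto one of n remote storages.
func shardIdx(labels []label, keep, ignore map[string]struct{}, n int) uint64 {
	h := fnv.New64a()
	for _, lb := range labels {
		if len(keep) > 0 {
			if _, ok := keep[lb.Name]; !ok {
				continue
			}
		} else if len(ignore) > 0 {
			if _, ok := ignore[lb.Name]; ok {
				continue
			}
		}
		h.Write([]byte(lb.Name))
		h.Write([]byte(lb.Value))
	}
	return h.Sum64() % uint64(n)
}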
@@ -536,22 +594,22 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
// Push sharded data to remote storages in parallel in order to reduce
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
- wg.Add(len(rwctxs))
- var anyPushFailed uint64
+ var anyPushFailed atomic.Bool
for i, rwctx := range rwctxs {
tssShard := tssByURL[i]
if len(tssShard) == 0 {
continue
}
+ wg.Add(1)
go func(rwctx *remoteWriteCtx, tss []prompbmarshal.TimeSeries) {
defer wg.Done()
if !rwctx.TryPush(tss) {
- atomic.StoreUint64(&anyPushFailed, 1)
+ anyPushFailed.Store(true)
}
}(rwctx, tssShard)
}
wg.Wait()
- return atomic.LoadUint64(&anyPushFailed) == 0
+ return !anyPushFailed.Load()
}

// Replicate data among rwctxs.

@@ -559,17 +617,17 @@ func tryPushBlockToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prompbmar
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
wg.Add(len(rwctxs))
- var anyPushFailed uint64
+ var anyPushFailed atomic.Bool
for _, rwctx := range rwctxs {
go func(rwctx *remoteWriteCtx) {
defer wg.Done()
if !rwctx.TryPush(tssBlock) {
- atomic.StoreUint64(&anyPushFailed, 1)
+ anyPushFailed.Store(true)
}
}(rwctx)
}
wg.Wait()
- return atomic.LoadUint64(&anyPushFailed) == 0
+ return !anyPushFailed.Load()
}

// sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set.
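Note the concurrency fix hidden in the sharded variant: wg.Add(1) now happens only for goroutines that actually start. The old up-front wg.Add(len(rwctxs)) combined with the continue for empty shards would leave wg.Wait() blocked forever. A compact sketch of the corrected fan-out pattern:

package fanoutsketch

import (
	"sync"
	"sync/atomic"
)

func pushShards(shards [][]int, push func([]int) bool) bool {
	var wg sync.WaitGroup
	var anyPushFailed atomic.Bool
	for _, shard := range shards {
		if len(shard) == 0 {
			continue // no wg.Add for skipped shards
		}
		wg.Add(1)
		go func(shard []int) {
			defer wg.Done()
			if !push(shard) {
				anyPushFailed.Store(true)
			}
		}(shard)
	}
	wg.Wait()
	return !anyPushFailed.Load()
}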
@@ -665,12 +723,14 @@ type remoteWriteCtx struct {
fq *persistentqueue.FastQueue
c *client

- sas atomic.Pointer[streamaggr.Aggregators]
+ sas          atomic.Pointer[streamaggr.Aggregators]
+ deduplicator *streamaggr.Deduplicator

streamAggrKeepInput bool
streamAggrDropInput bool

pss []*pendingSeries
- pssNextIdx uint64
+ pssNextIdx atomic.Uint64

rowsPushedAfterRelabel *metrics.Counter
rowsDroppedByRelabel *metrics.Counter

@@ -738,9 +798,15 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in

// Initialize sas
sasFile := streamAggrConfig.GetOptionalArg(argIdx)
+ dedupInterval := streamAggrDedupInterval.GetOptionalArg(argIdx)
+ ignoreOldSamples := streamAggrIgnoreOldSamples.GetOptionalArg(argIdx)
if sasFile != "" {
- dedupInterval := streamAggrDedupInterval.GetOptionalArg(argIdx)
- sas, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, dedupInterval)
+ opts := &streamaggr.Options{
+ DedupInterval: dedupInterval,
+ DropInputLabels: *streamAggrDropInputLabels,
+ IgnoreOldSamples: ignoreOldSamples,
+ }
+ sas, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
if err != nil {
logger.Fatalf("cannot initialize stream aggregators from -remoteWrite.streamAggr.config=%q: %s", sasFile, err)
}

@@ -749,17 +815,24 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
rwctx.streamAggrDropInput = streamAggrDropInput.GetOptionalArg(argIdx)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(1)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, sasFile)).Set(fasttime.UnixTimestamp())
+ } else if dedupInterval > 0 {
+ rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, *streamAggrDropInputLabels)
}

return rwctx
}

func (rwctx *remoteWriteCtx) MustStop() {
- // sas must be stopped before rwctx is closed
+ // sas and deduplicator must be stopped before rwctx is closed
// because sas can write pending series to rwctx.pss if there are any
sas := rwctx.sas.Swap(nil)
sas.MustStop()

+ if rwctx.deduplicator != nil {
+ rwctx.deduplicator.MustStop()
+ rwctx.deduplicator = nil
+ }
+
for _, ps := range rwctx.pss {
ps.MustStop()
}
@@ -798,7 +871,7 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
rowsCount := getRowsCount(tss)
rwctx.rowsPushedAfterRelabel.Add(rowsCount)

- // Apply stream aggregation if any
+ // Apply stream aggregation or deduplication if they are configured
sas := rwctx.sas.Load()
if sas != nil {
matchIdxs := matchIdxsPool.Get()

@@ -813,6 +886,10 @@ func (rwctx *remoteWriteCtx) TryPush(tss []prompbmarshal.TimeSeries) bool {
tss = dropAggregatedSeries(tss, matchIdxs.B, rwctx.streamAggrDropInput)
}
matchIdxsPool.Put(matchIdxs)
+ } else if rwctx.deduplicator != nil {
+ rwctx.deduplicator.Push(tss)
+ clear(tss)
+ tss = tss[:0]
}

// Try pushing the data to remote storage

@@ -841,7 +918,7 @@ func dropAggregatedSeries(src []prompbmarshal.TimeSeries, matchIdxs []byte, drop
}
}
tail := src[len(dst):]
- _ = prompbmarshal.ResetTimeSeries(tail)
+ clear(tail)
return dst
}
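clear(tail) relies on the builtin added in Go 1.21: applied to a slice it zeroes every element in place, which drops any pointers the recycled tail still holds and lets the garbage collector reclaim them, replacing the hand-written ResetTimeSeries helper. A tiny runnable demonstration:

package main

import "fmt"

func main() {
	s := "payload"
	tail := []*string{&s, &s, &s}
	clear(tail)                  // every element becomes nil; length stays 3
	fmt.Println(tail, len(tail)) // [<nil> <nil> <nil>] 3
}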
@@ -872,7 +949,7 @@ func (rwctx *remoteWriteCtx) tryPushInternal(tss []prompbmarshal.TimeSeries) boo
}

pss := rwctx.pss
- idx := atomic.AddUint64(&rwctx.pssNextIdx, 1) % uint64(len(pss))
+ idx := rwctx.pssNextIdx.Add(1) % uint64(len(pss))

ok := pss[idx].TryPush(tss)

@@ -894,8 +971,12 @@ func (rwctx *remoteWriteCtx) reinitStreamAggr() {

logger.Infof("reloading stream aggregation configs pointed by -remoteWrite.streamAggr.config=%q", sasFile)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_total{path=%q}`, sasFile)).Inc()
- dedupInterval := streamAggrDedupInterval.GetOptionalArg(rwctx.idx)
- sasNew, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, dedupInterval)
+ opts := &streamaggr.Options{
+ DedupInterval: streamAggrDedupInterval.GetOptionalArg(rwctx.idx),
+ DropInputLabels: *streamAggrDropInputLabels,
+ IgnoreOldSamples: streamAggrIgnoreOldSamples.GetOptionalArg(rwctx.idx),
+ }
+ sasNew, err := streamaggr.LoadFromFile(sasFile, rwctx.pushInternalTrackDropped, opts)
if err != nil {
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reloads_errors_total{path=%q}`, sasFile)).Inc()
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, sasFile)).Set(0)

@@ -932,13 +1013,17 @@ func getRowsCount(tss []prompbmarshal.TimeSeries) int {

// CheckStreamAggrConfigs checks configs pointed by -remoteWrite.streamAggr.config
func CheckStreamAggrConfigs() error {
- pushNoop := func(tss []prompbmarshal.TimeSeries) {}
+ pushNoop := func(_ []prompbmarshal.TimeSeries) {}
for idx, sasFile := range *streamAggrConfig {
if sasFile == "" {
continue
}
- dedupInterval := streamAggrDedupInterval.GetOptionalArg(idx)
- sas, err := streamaggr.LoadFromFile(sasFile, pushNoop, dedupInterval)
+ opts := &streamaggr.Options{
+ DedupInterval: streamAggrDedupInterval.GetOptionalArg(idx),
+ DropInputLabels: *streamAggrDropInputLabels,
+ IgnoreOldSamples: streamAggrIgnoreOldSamples.GetOptionalArg(idx),
+ }
+ sas, err := streamaggr.LoadFromFile(sasFile, pushNoop, opts)
if err != nil {
return fmt.Errorf("cannot load -remoteWrite.streamAggr.config=%q: %w", sasFile, err)
}

@@ -946,3 +1031,11 @@ func CheckStreamAggrConfigs() error {
}
return nil
}
+
+ func newMapFromStrings(a []string) map[string]struct{} {
+ m := make(map[string]struct{}, len(a))
+ for _, s := range a {
+ m[s] = struct{}{}
+ }
+ return m
+ }
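pssNextIdx.Add(1) % len(pss) above is a lock-free round-robin selector. The same idiom in isolation:

package rrsketch

import "sync/atomic"

type pool struct {
	next    atomic.Uint64
	workers []func()
}

// pick spreads calls evenly across workers without a mutex; the
// counter may eventually wrap around, which is harmless for modulo
// selection.
func (p *pool) pick() func() {
	idx := p.next.Add(1) % uint64(len(p.workers))
	return p.workers[idx]
}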
@@ -50,7 +50,7 @@ var (
)

type statConn struct {
- closed uint64
+ closed atomic.Int32
net.Conn
}

@@ -76,7 +76,7 @@ func (sc *statConn) Write(p []byte) (int, error) {

func (sc *statConn) Close() error {
err := sc.Conn.Close()
- if atomic.AddUint64(&sc.closed, 1) == 1 {
+ if sc.closed.Add(1) == 1 {
conns.Dec()
}
return err
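The Add(1) == 1 check makes the conns gauge decrement exactly once even if Close is invoked repeatedly or races. An equivalent idiom with atomic.Bool, where CompareAndSwap states the first-close-wins intent directly; openConns stands in for the real metrics gauge:

package connsketch

import (
	"net"
	"sync/atomic"
)

var openConns atomic.Int64 // placeholder for the real gauge

type statConn struct {
	closed atomic.Bool
	net.Conn
}

func (sc *statConn) Close() error {
	err := sc.Conn.Close()
	if sc.closed.CompareAndSwap(false, true) {
		openConns.Add(-1) // runs exactly once per connection
	}
	return err
}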
@@ -25,6 +25,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"

@@ -184,7 +185,8 @@ func processFlags() {

func setUp() {
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
- go httpserver.Serve(httpListenAddr, false, func(w http.ResponseWriter, r *http.Request) bool {
+ var ab flagutil.ArrayBool
+ go httpserver.Serve([]string{httpListenAddr}, &ab, func(w http.ResponseWriter, r *http.Request) bool {
switch r.URL.Path {
case "/prometheus/api/v1/query":
if err := prometheus.QueryHandler(nil, time.Now(), w, r); err != nil {

@@ -225,7 +227,7 @@ checkCheck:
}

func tearDown() {
- if err := httpserver.Stop(httpListenAddr); err != nil {
+ if err := httpserver.Stop([]string{httpListenAddr}); err != nil {
logger.Errorf("cannot stop the webservice: %s", err)
}
vmstorage.Stop()
@@ -1158,9 +1158,9 @@
$labels.pod }}.'
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh
expr: |
- sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (container, pod, namespace)
+ sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (cluster, container, pod, namespace)
/
- sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container, pod, namespace)
+ sum(increase(container_cpu_cfs_periods_total{}[5m])) by (cluster, container, pod, namespace)
> ( 25 / 100 )
for: 15m
labels:
@@ -10,6 +10,7 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

@@ -45,7 +46,7 @@ var (
oauth2TokenURL = flag.String("datasource.oauth2.tokenUrl", "", "Optional OAuth2 tokenURL to use for -datasource.url")
oauth2Scopes = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")

- lookBack = flag.Duration("datasource.lookback", 0, `Will be deprecated soon, please adjust "-search.latencyOffset" at datasource side `+
+ lookBack = flag.Duration("datasource.lookback", 0, `Deprecated: please adjust "-search.latencyOffset" at datasource side `+
`or specify "latency_offset" in rule group's params. Lookback defines how far into the past to look when evaluating queries. `+
`For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fall back to when evaluating queries. "+

@@ -90,10 +91,10 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
logger.Warnf("flag `-datasource.queryTimeAlignment` is deprecated and will be removed in next releases. Please use `eval_alignment` in rule group instead.")
}
if *lookBack != 0 {
- logger.Warnf("flag `-datasource.lookback` will be deprecated soon. Please use `-rule.evalDelay` command-line flag instead. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
+ logger.Warnf("flag `-datasource.lookback` is deprecated and will be removed in next releases. Please adjust `-search.latencyOffset` at datasource side or specify `latency_offset` in rule group's params. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
}

- tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
+ tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}

@@ -132,7 +133,6 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(*addr, "/"),
appendTypePrefix: *appendTypePrefix,
- lookBack: *lookBack,
queryStep: *queryStep,
dataSourceType: datasourcePrometheus,
extraParams: extraParams,
@@ -35,7 +35,6 @@ type VMStorage struct {
authCfg *promauth.Config
datasourceURL string
appendTypePrefix bool
- lookBack time.Duration
queryStep time.Duration
dataSourceType datasourceType

@@ -63,7 +62,6 @@ func (s *VMStorage) Clone() *VMStorage {
authCfg: s.authCfg,
datasourceURL: s.datasourceURL,
appendTypePrefix: s.appendTypePrefix,
- lookBack: s.lookBack,
queryStep: s.queryStep,

dataSourceType: s.dataSourceType,

@@ -122,13 +120,12 @@ func (s *VMStorage) BuildWithParams(params QuerierParams) Querier {
}

// NewVMStorage is a constructor for VMStorage
- func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Duration, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
+ func NewVMStorage(baseURL string, authCfg *promauth.Config, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
return &VMStorage{
c: c,
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(baseURL, "/"),
appendTypePrefix: appendTypePrefix,
- lookBack: lookBack,
queryStep: queryStep,
dataSourceType: datasourcePrometheus,
extraParams: url.Values{},

@@ -137,11 +134,11 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati

// Query executes the given query and returns parsed response
func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error) {
- req, err := s.newQueryRequest(query, ts)
+ req, err := s.newQueryRequest(ctx, query, ts)
if err != nil {
return Result{}, nil, err
}
- resp, err := s.do(ctx, req)
+ resp, err := s.do(req)
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
// Return unexpected error to the caller.

@@ -149,11 +146,11 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Resu
}
// Something in the middle between client and datasource might be closing
// the connection. So we make one more attempt in hope the request will succeed.
- req, err = s.newQueryRequest(query, ts)
+ req, err = s.newQueryRequest(ctx, query, ts)
if err != nil {
return Result{}, nil, fmt.Errorf("second attempt: %w", err)
}
- resp, err = s.do(ctx, req)
+ resp, err = s.do(req)
if err != nil {
return Result{}, nil, fmt.Errorf("second attempt: %w", err)
}

@@ -182,11 +179,11 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
if end.IsZero() {
return res, fmt.Errorf("end param is missing")
}
- req, err := s.newQueryRangeRequest(query, start, end)
+ req, err := s.newQueryRangeRequest(ctx, query, start, end)
if err != nil {
return res, err
}
- resp, err := s.do(ctx, req)
+ resp, err := s.do(req)
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
// Return unexpected error to the caller.

@@ -194,11 +191,11 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
}
// Something in the middle between client and datasource might be closing
// the connection. So we make one more attempt in hope the request will succeed.
- req, err = s.newQueryRangeRequest(query, start, end)
+ req, err = s.newQueryRangeRequest(ctx, query, start, end)
if err != nil {
return res, fmt.Errorf("second attempt: %w", err)
}
- resp, err = s.do(ctx, req)
+ resp, err = s.do(req)
if err != nil {
return res, fmt.Errorf("second attempt: %w", err)
}

@@ -210,7 +207,7 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
return res, err
}

- func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response, error) {
+ func (s *VMStorage) do(req *http.Request) (*http.Response, error) {
ru := req.URL.Redacted()
if *showDatasourceURL {
ru = req.URL.String()

@@ -218,7 +215,7 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
if s.debug {
logger.Infof("DEBUG datasource request: executing %s request with params %q", req.Method, ru)
}
- resp, err := s.c.Do(req.WithContext(ctx))
+ resp, err := s.c.Do(req)
if err != nil {
return nil, fmt.Errorf("error getting response from %s: %w", ru, err)
}

@@ -230,8 +227,8 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
return resp, nil
}

- func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) (*http.Request, error) {
- req, err := s.newRequest()
+ func (s *VMStorage) newQueryRangeRequest(ctx context.Context, query string, start, end time.Time) (*http.Request, error) {
+ req, err := s.newRequest(ctx)
if err != nil {
return nil, fmt.Errorf("cannot create query_range request to datasource %q: %w", s.datasourceURL, err)
}

@@ -239,8 +236,8 @@ func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) (*h
return req, nil
}

- func (s *VMStorage) newQueryRequest(query string, ts time.Time) (*http.Request, error) {
- req, err := s.newRequest()
+ func (s *VMStorage) newQueryRequest(ctx context.Context, query string, ts time.Time) (*http.Request, error) {
+ req, err := s.newRequest(ctx)
if err != nil {
return nil, fmt.Errorf("cannot create query request to datasource %q: %w", s.datasourceURL, err)
}

@@ -248,15 +245,15 @@ func (s *VMStorage) newQueryRequest(query string, ts time.Time) (*http.Request,
case "", datasourcePrometheus:
s.setPrometheusInstantReqParams(req, query, ts)
case datasourceGraphite:
- s.setGraphiteReqParams(req, query, ts)
+ s.setGraphiteReqParams(req, query)
default:
logger.Panicf("BUG: engine not found: %q", s.dataSourceType)
}
return req, nil
}

- func (s *VMStorage) newRequest() (*http.Request, error) {
- req, err := http.NewRequest(http.MethodPost, s.datasourceURL, nil)
+ func (s *VMStorage) newRequest(ctx context.Context) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.datasourceURL, nil)
if err != nil {
logger.Panicf("BUG: unexpected error from http.NewRequest(%q): %s", s.datasourceURL, err)
}
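Binding the context at construction time via http.NewRequestWithContext, as the diff above does, instead of wrapping with req.WithContext inside do, means the request observes cancellation and deadlines on every code path, including the retry that rebuilds it. A minimal stdlib sketch:

package ctxsketch

import (
	"context"
	"net/http"
)

func post(ctx context.Context, c *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
	if err != nil {
		return nil, err
	}
	// No req.WithContext(ctx) needed at each Do call site anymore.
	return c.Do(req)
}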
@@ -4,8 +4,6 @@ import (
"encoding/json"
"fmt"
"net/http"
- "strconv"
- "time"
)

type graphiteResponse []graphiteResponseTarget

@@ -48,17 +46,13 @@ const (
graphitePrefix = "/graphite"
)

- func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string, timestamp time.Time) {
+ func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string) {
if s.appendTypePrefix {
r.URL.Path += graphitePrefix
}
r.URL.Path += graphitePath
q := r.URL.Query()
from := "-5min"
- if s.lookBack > 0 {
- lookBack := timestamp.Add(-s.lookBack)
- from = strconv.FormatInt(lookBack.Unix(), 10)
- }
q.Set("from", from)
q.Set("format", "json")
q.Set("target", query)

@@ -161,9 +161,6 @@ func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string,
r.URL.Path += "/api/v1/query"
}
q := r.URL.Query()
- if s.lookBack > 0 {
- timestamp = timestamp.Add(-s.lookBack)
- }
q.Set("time", timestamp.Format(time.RFC3339))
if !*disableStepParam && s.evaluationInterval > 0 { // set step as evaluationInterval by default
// always convert to seconds to keep compatibility with older
@@ -71,7 +71,7 @@ func TestVMInstantQuery(t *testing.T) {
w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]},"stats":{"seriesFetched": "42"}}`))
}
})
- mux.HandleFunc("/render", func(w http.ResponseWriter, request *http.Request) {
+ mux.HandleFunc("/render", func(w http.ResponseWriter, _ *http.Request) {
c++
switch c {
case 8:

@@ -86,7 +86,7 @@ func TestVMInstantQuery(t *testing.T) {
if err != nil {
t.Fatalf("unexpected: %s", err)
}
- s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())
+ s := NewVMStorage(srv.URL, authCfg, 0, false, srv.Client())

p := datasourcePrometheus
pq := s.BuildWithParams(QuerierParams{DataSourceType: string(p), EvaluationInterval: 15 * time.Second})

@@ -225,7 +225,7 @@ func TestVMInstantQueryWithRetry(t *testing.T) {
srv := httptest.NewServer(mux)
defer srv.Close()

- s := NewVMStorage(srv.URL, nil, time.Minute, 0, false, srv.Client())
+ s := NewVMStorage(srv.URL, nil, 0, false, srv.Client())
pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus)})

expErr := func(err string) {

@@ -334,7 +334,7 @@ func TestVMRangeQuery(t *testing.T) {
if err != nil {
t.Fatalf("unexpected: %s", err)
}
- s := NewVMStorage(srv.URL, authCfg, time.Minute, *queryStep, false, srv.Client())
+ s := NewVMStorage(srv.URL, authCfg, *queryStep, false, srv.Client())

pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus), EvaluationInterval: 15 * time.Second})

@@ -487,17 +487,6 @@ func TestRequestParams(t *testing.T) {
checkEqualString(t, "bar", p)
},
},
- {
- "lookback",
- false,
- &VMStorage{
- lookBack: time.Minute,
- },
- func(t *testing.T, r *http.Request) {
- exp := url.Values{"query": {query}, "time": {timestamp.Add(-time.Minute).Format(time.RFC3339)}}
- checkEqualString(t, exp.Encode(), r.URL.RawQuery)
- },
- },
{
"evaluation interval",
false,

@@ -510,20 +499,6 @@ func TestRequestParams(t *testing.T) {
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
},
},
- {
- "lookback + evaluation interval",
- false,
- &VMStorage{
- lookBack: time.Minute,
- evaluationInterval: 15 * time.Second,
- },
- func(t *testing.T, r *http.Request) {
- evalInterval := 15 * time.Second
- tt := timestamp.Add(-time.Minute)
- exp := url.Values{"query": {query}, "step": {evalInterval.String()}, "time": {tt.Format(time.RFC3339)}}
- checkEqualString(t, exp.Encode(), r.URL.RawQuery)
- },
- },
{
"step override",
false,

@@ -637,7 +612,7 @@ func TestRequestParams(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- req, err := tc.vm.newRequest()
+ req, err := tc.vm.newRequest(ctx)
if err != nil {
t.Fatal(err)
}

@@ -649,7 +624,7 @@ func TestRequestParams(t *testing.T) {
tc.vm.setPrometheusInstantReqParams(req, query, timestamp)
}
case datasourceGraphite:
- tc.vm.setGraphiteReqParams(req, query, timestamp)
+ tc.vm.setGraphiteReqParams(req, query)
}
tc.checkFn(t, req)
})

@@ -735,7 +710,7 @@ func TestHeaders(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
vm := tt.vmFn()
- req, err := vm.newQueryRequest("foo", time.Now())
+ req, err := vm.newQueryRequest(ctx, "foo", time.Now())
if err != nil {
t.Fatal(err)
}
@@ -59,8 +59,8 @@ absolute path to all .tpl files in root.
configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule' or '-notifier.config' files. "+
"By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes.")

- httpListenAddr = flag.String("httpListenAddr", ":8880", "Address to listen for http connections. See also -tls and -httpListenAddr.useProxyProtocol")
- useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
+ httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "Address to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")
+ useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
evaluationInterval = flag.Duration("evaluationInterval", time.Minute, "How often to evaluate the rules")

@@ -178,15 +178,19 @@ func main() {

go configReload(ctx, manager, groupsCfg, sighupCh)

+ listenAddrs := *httpListenAddrs
+ if len(listenAddrs) == 0 {
+ listenAddrs = []string{":8880"}
+ }
rh := &requestHandler{m: manager}
- go httpserver.Serve(*httpListenAddr, *useProxyProtocol, rh.handler)
+ go httpserver.Serve(listenAddrs, useProxyProtocol, rh.handler)

pushmetrics.Init()
sig := procutil.WaitForSigterm()
logger.Infof("service received signal %s", sig)
pushmetrics.Stop()

- if err := httpserver.Stop(*httpListenAddr); err != nil {
+ if err := httpserver.Stop(listenAddrs); err != nil {
logger.Fatalf("cannot stop the webservice: %s", err)
}
cancel()

@@ -248,7 +252,13 @@ func newManager(ctx context.Context) (*manager, error) {
func getExternalURL(customURL string) (*url.URL, error) {
if customURL == "" {
// use local hostname as external URL
- return getHostnameAsExternalURL(*httpListenAddr, httpserver.IsTLS())
+ listenAddr := ":8880"
+ if len(*httpListenAddrs) > 0 {
+ listenAddr = (*httpListenAddrs)[0]
+ }
+ isTLS := httpserver.IsTLS(0)
+
+ return getHostnameAsExternalURL(listenAddr, isTLS)
}
u, err := url.Parse(customURL)
if err != nil {

@@ -260,13 +270,13 @@ func getExternalURL(customURL string) (*url.URL, error) {
return u, nil
}

- func getHostnameAsExternalURL(httpListenAddr string, isSecure bool) (*url.URL, error) {
+ func getHostnameAsExternalURL(addr string, isSecure bool) (*url.URL, error) {
hname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("failed to get hostname: %w", err)
}
port := ""
- if ipport := strings.Split(httpListenAddr, ":"); len(ipport) > 1 {
+ if ipport := strings.Split(addr, ":"); len(ipport) > 1 {
port = ":" + ipport[1]
}
schema := "http://"

@@ -294,7 +304,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
"tpl": externalAlertSource,
}
return func(alert notifier.Alert) string {
- qFn := func(query string) ([]datasource.Metric, error) {
+ qFn := func(_ string) ([]datasource.Metric, error) {
return nil, fmt.Errorf("`query` template isn't supported for alert source template")
}
templated, err := alert.ExecTemplate(qFn, alert.Labels, m)

@@ -178,7 +178,7 @@ func TestAlert_ExecTemplate(t *testing.T) {
},
}

- qFn := func(q string) ([]datasource.Metric, error) {
+ qFn := func(_ string) ([]datasource.Metric, error) {
return []datasource.Metric{
{
Labels: []datasource.Label{
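-httpListenAddr is now an array flag, so vmalert can serve the same handler on several addresses via the shared httpserver.Serve([]string{...}, ...) helper. A standard-library sketch of that fan-out, with error handling reduced to log.Fatal for brevity:

package main

import (
	"log"
	"net/http"
)

// serveAll starts one listener per address, all sharing the handler.
func serveAll(addrs []string, h http.Handler) {
	for _, addr := range addrs {
		go func(addr string) {
			log.Fatal(http.ListenAndServe(addr, h))
		}(addr)
	}
}

func main() {
	serveAll([]string{":8880", ":8881"}, http.NotFoundHandler())
	select {} // block forever
}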
@@ -11,6 +11,7 @@ import (
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)

@@ -104,7 +105,7 @@ func (am *AlertManager) send(ctx context.Context, alerts []Alert, headers map[st
if *showNotifierURL {
amURL = am.addr.String()
}
- if resp.StatusCode != http.StatusOK {
+ if resp.StatusCode/100 != 2 {
body, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read response from %q: %w", amURL, err)

@@ -127,7 +128,7 @@ func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg proma
if authCfg.TLSConfig != nil {
tls = authCfg.TLSConfig
}
- tr, err := utils.Transport(alertManagerURL, tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify)
+ tr, err := httputils.Transport(alertManagerURL, tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}
@@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

var (

@@ -60,7 +61,7 @@ func Init() (datasource.QuerierBuilder, error) {
if *addr == "" {
return nil, nil
}
- tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
+ tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}

@@ -78,5 +79,5 @@ func Init() (datasource.QuerierBuilder, error) {
return nil, fmt.Errorf("failed to configure auth: %w", err)
}
c := &http.Client{Transport: tr}
- return datasource.NewVMStorage(*addr, authCfg, 0, 0, false, c), nil
+ return datasource.NewVMStorage(*addr, authCfg, 0, false, c), nil
}
@@ -151,12 +151,22 @@ func (c *Client) run(ctx context.Context) {
ticker := time.NewTicker(c.flushInterval)
wr := &prompbmarshal.WriteRequest{}
shutdown := func() {
- lastCtx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
- logger.Infof("shutting down remote write client and flushing remained series")
+ shutdownFlushCnt := 0
+ for ts := range c.input {
+ wr.Timeseries = append(wr.Timeseries, ts)
+ if len(wr.Timeseries) >= c.maxBatchSize {
+ shutdownFlushCnt += len(wr.Timeseries)
+ c.flush(lastCtx, wr)
+ }
+ }
+ lastCtx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
+ logger.Infof("shutting down remote write client and flushing remaining %d series", len(wr.Timeseries))
+ // flush the last batch. `flush` will re-check and avoid flushing empty batch.
+ shutdownFlushCnt += len(wr.Timeseries)
+ c.flush(lastCtx, wr)
+
+ logger.Infof("shutting down remote write client flushed %d series", shutdownFlushCnt)
cancel()
}
c.wg.Add(1)

@@ -279,7 +289,7 L:

func (c *Client) send(ctx context.Context, data []byte) error {
r := bytes.NewReader(data)
- req, err := http.NewRequest(http.MethodPost, c.addr, r)
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.addr, r)
if err != nil {
return fmt.Errorf("failed to create new HTTP request: %w", err)
}

@@ -302,7 +312,7 @@ func (c *Client) send(ctx context.Context, data []byte) error {
if !*disablePathAppend {
req.URL.Path = path.Join(req.URL.Path, "/api/v1/write")
}
- resp, err := c.c.Do(req.WithContext(ctx))
+ resp, err := c.c.Do(req)
if err != nil {
return fmt.Errorf("error while sending request to %s: %w; Data len %d(%d)",
req.URL.Redacted(), err, len(data), r.Size())
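The reworked shutdown drains the input channel into maxBatchSize chunks instead of flushing everything as one unbounded write request. The core loop in isolation, with illustrative types; like the real flush, the callback is expected to ignore an empty batch:

package drainsketch

type series struct{}

// drain consumes the closed input channel in batches and returns the
// total number of series handed to flush.
func drain(input <-chan series, maxBatchSize int, flush func([]series)) int {
	flushed := 0
	batch := make([]series, 0, maxBatchSize)
	for ts := range input { // ranges until the channel is closed and empty
		batch = append(batch, ts)
		if len(batch) >= maxBatchSize {
			flushed += len(batch)
			flush(batch)
			batch = batch[:0]
		}
	}
	// flush the final partial batch
	flushed += len(batch)
	flush(batch)
	return flushed
}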
@@ -84,6 +84,70 @@ func TestClient_Push(t *testing.T) {
	}
}

+func TestClient_run_maxBatchSizeDuringShutdown(t *testing.T) {
+	batchSize := 20
+
+	testTable := []struct {
+		name     string // name of the test case
+		pushCnt  int    // how many time series is pushed to the client
+		batchCnt int    // the expected batch count sent by the client
+	}{
+		{
+			name:     "pushCnt % batchSize == 0",
+			pushCnt:  batchSize * 40,
+			batchCnt: 40,
+		},
+		{
+			name:     "pushCnt % batchSize != 0",
+			pushCnt:  batchSize*40 + 1,
+			batchCnt: 40 + 1,
+		},
+	}
+
+	for _, tt := range testTable {
+		t.Run(tt.name, func(t *testing.T) {
+			// run new server
+			bcServer := newBatchCntRWServer()
+
+			// run new client
+			rwClient, err := NewClient(context.Background(), Config{
+				MaxBatchSize: batchSize,
+
+				// Set everything to 1 to simplify the calculation.
+				Concurrency:   1,
+				MaxQueueSize:  1000,
+				FlushInterval: time.Minute,
+
+				// batch count server
+				Addr: bcServer.URL,
+			})
+			if err != nil {
+				t.Fatalf("new remote write client failed, err: %v", err)
+			}
+
+			// push time series to the client.
+			for i := 0; i < tt.pushCnt; i++ {
+				if err = rwClient.Push(prompbmarshal.TimeSeries{}); err != nil {
+					t.Fatalf("push time series to the client failed, err: %v", err)
+				}
+			}
+
+			// close the client so the rest ts will be flushed in `shutdown`
+			if err = rwClient.Close(); err != nil {
+				t.Fatalf("shutdown client failed, err: %v", err)
+			}
+
+			// finally check how many batches is sent.
+			if tt.batchCnt != bcServer.acceptedBatches() {
+				t.Errorf("client sent batch count incorrect, want: %d, get: %d", tt.batchCnt, bcServer.acceptedBatches())
+			}
+			if tt.pushCnt != bcServer.accepted() {
+				t.Errorf("client sent time series count incorrect, want: %d, get: %d", tt.pushCnt, bcServer.accepted())
+			}
+		})
+	}
+}

func newRWServer() *rwServer {
	rw := &rwServer{}
	rw.Server = httptest.NewServer(http.HandlerFunc(rw.handler))
@@ -91,14 +155,12 @@ func newRWServer() *rwServer {
}

type rwServer struct {
-	// WARN: ordering of fields is important for alignment!
-	// see https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	acceptedRows uint64
+	acceptedRows atomic.Uint64
	*httptest.Server
}

func (rw *rwServer) accepted() int {
-	return int(atomic.LoadUint64(&rw.acceptedRows))
+	return int(rw.acceptedRows.Load())
}

func (rw *rwServer) err(w http.ResponseWriter, err error) {
@@ -144,7 +206,7 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {
		rw.err(w, fmt.Errorf("unmarhsal err: %w", err))
		return
	}
-	atomic.AddUint64(&rw.acceptedRows, uint64(len(wr.Timeseries)))
+	rw.acceptedRows.Add(uint64(len(wr.Timeseries)))
	w.WriteHeader(http.StatusNoContent)
}
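Replacing the bare uint64 with atomic.Uint64 lets the struct drop the field-ordering warning: the Go 1.19+ atomic types guarantee their own 64-bit alignment, which plain fields used through atomic.AddUint64 do not get on 32-bit platforms. A minimal sketch of the new style:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type counter struct {
	// atomic.Uint64 is safe at any field offset; no manual alignment
	// bookkeeping is needed, unlike a plain uint64 passed to
	// atomic.AddUint64 on 32-bit platforms.
	n atomic.Uint64
}

func main() {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.n.Add(1)
		}()
	}
	wg.Wait()
	fmt.Println(c.n.Load()) // 100
}
```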
@@ -186,3 +248,27 @@ func (frw *faultyRWServer) handler(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("server overloaded"))
	}
}
+
+type batchCntRWServer struct {
+	*rwServer
+
+	batchCnt atomic.Int64 // accepted batch count, which also equals to request count
+}
+
+func newBatchCntRWServer() *batchCntRWServer {
+	bc := &batchCntRWServer{
+		rwServer: &rwServer{},
+	}
+
+	bc.Server = httptest.NewServer(http.HandlerFunc(bc.handler))
+	return bc
+}
+
+func (bc *batchCntRWServer) handler(w http.ResponseWriter, r *http.Request) {
+	bc.batchCnt.Add(1)
+	bc.rwServer.handler(w, r)
+}
+
+func (bc *batchCntRWServer) acceptedBatches() int {
+	return int(bc.batchCnt.Load())
+}
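batchCntRWServer works by embedding: the outer handler counts each request and then delegates to the embedded rwServer's handler for row accounting, while accepted() is promoted unchanged. The same wrap-and-delegate idea as a standalone sketch:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
)

// countingHandler wraps any http.Handler and counts the requests it serves.
type countingHandler struct {
	inner http.Handler
	calls atomic.Int64
}

func (c *countingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	c.calls.Add(1)
	c.inner.ServeHTTP(w, r) // delegate to the wrapped handler
}

func main() {
	h := &countingHandler{inner: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		io.WriteString(w, "ok")
	})}
	srv := httptest.NewServer(h)
	defer srv.Close()

	for i := 0; i < 3; i++ {
		resp, err := http.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
	}
	fmt.Println("requests served:", h.calls.Load()) // 3
}
```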
@@ -11,7 +11,7 @@ import (

	"github.com/golang/snappy"

-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

@@ -30,7 +30,7 @@ func NewDebugClient() (*DebugClient, error) {
		return nil, nil
	}

-	t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
+	t, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
	if err != nil {
		return nil, fmt.Errorf("failed to create transport: %w", err)
	}
@@ -8,6 +8,7 @@ import (

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

var (
@@ -64,7 +65,7 @@ func Init(ctx context.Context) (*Client, error) {
		return nil, nil
	}

-	t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
+	t, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
	if err != nil {
		return nil, fmt.Errorf("failed to create transport: %w", err)
	}
@@ -310,7 +310,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
	}
	var result []prompbmarshal.TimeSeries
	holdAlertState := make(map[uint64]*notifier.Alert)
-	qFn := func(query string) ([]datasource.Metric, error) {
+	qFn := func(_ string) ([]datasource.Metric, error) {
		return nil, fmt.Errorf("`query` template isn't supported in replay mode")
	}
	for _, s := range res.Data {
@@ -655,15 +655,19 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
// alertsToSend walks through the current alerts of AlertingRule
// and returns only those which should be sent to notifier.
// Isn't concurrent safe.
-func (ar *AlertingRule) alertsToSend(ts time.Time, resolveDuration, resendDelay time.Duration) []notifier.Alert {
+func (ar *AlertingRule) alertsToSend(resolveDuration, resendDelay time.Duration) []notifier.Alert {
+	currentTime := time.Now()
	needsSending := func(a *notifier.Alert) bool {
		if a.State == notifier.StatePending {
			return false
		}
-		if a.ResolvedAt.After(a.LastSent) {
+		if a.State == notifier.StateFiring && a.End.Before(a.LastSent) {
			return true
		}
-		return a.LastSent.Add(resendDelay).Before(ts)
+		if a.State == notifier.StateInactive && a.ResolvedAt.After(a.LastSent) {
+			return true
+		}
+		return a.LastSent.Add(resendDelay).Before(currentTime)
	}

	var alerts []notifier.Alert
@@ -671,11 +675,11 @@ func (ar *AlertingRule) alertsToSend(ts time.Time, resolveDuration, resendDelay
		if !needsSending(a) {
			continue
		}
-		a.End = ts.Add(resolveDuration)
+		a.End = currentTime.Add(resolveDuration)
		if a.State == notifier.StateInactive {
			a.End = a.ResolvedAt
		}
-		a.LastSent = ts
+		a.LastSent = currentTime
		alerts = append(alerts, *a)
	}
	return alerts
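The new needsSending distinguishes three cases: pending alerts are never sent; a firing alert is re-sent immediately when the last notification advertised an earlier End (i.e. it announced a resolve); an inactive alert is re-sent once it resolved after the last notification; everything else falls back to the resendDelay check. A standalone sketch of the same decision table, with simplified state constants:

```go
package main

import (
	"fmt"
	"time"
)

type state int

const (
	pending state = iota
	firing
	inactive
)

type alert struct {
	state      state
	end        time.Time // advertised resolve time of the last notification
	resolvedAt time.Time
	lastSent   time.Time
}

// needsSending mirrors the decision logic from alertsToSend above.
func needsSending(a alert, now time.Time, resendDelay time.Duration) bool {
	if a.state == pending {
		return false
	}
	if a.state == firing && a.end.Before(a.lastSent) {
		return true // last message said "resolved"; announce firing again
	}
	if a.state == inactive && a.resolvedAt.After(a.lastSent) {
		return true // resolved since the last notification
	}
	return a.lastSent.Add(resendDelay).Before(now)
}

func main() {
	now := time.Now()
	// pending: never sent
	fmt.Println(needsSending(alert{state: pending}, now, time.Minute)) // false
	// firing, still advertised as active, resend interval passed
	fmt.Println(needsSending(alert{state: firing, end: now.Add(5 * time.Minute), lastSent: now.Add(-2 * time.Minute)}, now, time.Minute)) // true
	// inactive, resolved after the last send
	fmt.Println(needsSending(alert{state: inactive, resolvedAt: now, lastSent: now.Add(-time.Second)}, now, time.Minute)) // true
}
```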
@@ -43,11 +43,13 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
		},
		{
			newTestAlertingRule("instant extra labels", 0),
-			&notifier.Alert{State: notifier.StateFiring, ActiveAt: timestamp.Add(time.Second),
+			&notifier.Alert{
+				State: notifier.StateFiring, ActiveAt: timestamp.Add(time.Second),
				Labels: map[string]string{
					"job":      "foo",
					"instance": "bar",
-				}},
+				},
+			},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__": alertMetricName,
@@ -66,11 +68,13 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
		},
		{
			newTestAlertingRule("instant labels override", 0),
-			&notifier.Alert{State: notifier.StateFiring, ActiveAt: timestamp.Add(time.Second),
+			&notifier.Alert{
+				State: notifier.StateFiring, ActiveAt: timestamp.Add(time.Second),
				Labels: map[string]string{
					alertStateLabel: "foo",
					"__name__":      "bar",
-				}},
+				},
+			},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__": alertMetricName,
@@ -572,25 +576,33 @@ func TestAlertingRule_ExecRange(t *testing.T) {
			},
		},
		[]*notifier.Alert{
-			{State: notifier.StateFiring, ActiveAt: time.Unix(1, 0),
+			{
+				State: notifier.StateFiring, ActiveAt: time.Unix(1, 0),
				Labels: map[string]string{
					"source": "vm",
-				}},
-			{State: notifier.StateFiring, ActiveAt: time.Unix(100, 0),
+				},
+			},
+			{
+				State: notifier.StateFiring, ActiveAt: time.Unix(100, 0),
				Labels: map[string]string{
					"source": "vm",
-				}},
+				},
+			},
		},
		//
-			{State: notifier.StateFiring, ActiveAt: time.Unix(1, 0),
+			{
+				State: notifier.StateFiring, ActiveAt: time.Unix(1, 0),
				Labels: map[string]string{
					"foo":    "bar",
					"source": "vm",
-				}},
-			{State: notifier.StateFiring, ActiveAt: time.Unix(5, 0),
+				},
+			},
+			{
+				State: notifier.StateFiring, ActiveAt: time.Unix(5, 0),
				Labels: map[string]string{
					"foo":    "bar",
					"source": "vm",
-				}},
+				},
+			},
		},
	},
	nil,
},
@@ -1042,7 +1054,7 @@ func TestAlertsToSend(t *testing.T) {
	for i, a := range alerts {
		ar.alerts[uint64(i)] = a
	}
-	gotAlerts := ar.alertsToSend(ts, resolveDuration, resendDelay)
+	gotAlerts := ar.alertsToSend(resolveDuration, resendDelay)
	if gotAlerts == nil && expAlerts == nil {
		return
	}
@@ -1058,60 +1070,36 @@ func TestAlertsToSend(t *testing.T) {
		})
		for i, exp := range expAlerts {
			got := gotAlerts[i]
-			if got.LastSent != exp.LastSent {
-				t.Fatalf("expected LastSent to be %v; got %v", exp.LastSent, got.LastSent)
-			}
-			if got.End != exp.End {
-				t.Fatalf("expected End to be %v; got %v", exp.End, got.End)
+			if got.Name != exp.Name {
+				t.Fatalf("expected Name to be %v; got %v", exp.Name, got.Name)
			}
		}
	}

-	f( // send firing alert with custom resolve time
-		[]*notifier.Alert{{State: notifier.StateFiring}},
-		[]*notifier.Alert{{LastSent: ts, End: ts.Add(5 * time.Minute)}},
+	f( // check if firing alerts need to be sent with non-zero resendDelay
+		[]*notifier.Alert{
+			{Name: "a", State: notifier.StateFiring, Start: ts},
+			// no need to resend firing
+			{Name: "b", State: notifier.StateFiring, Start: ts, LastSent: ts.Add(-30 * time.Second), End: ts.Add(5 * time.Minute)},
+			// last message is for resolved, send firing message this time
+			{Name: "c", State: notifier.StateFiring, Start: ts, LastSent: ts.Add(-30 * time.Second), End: ts.Add(-1 * time.Minute)},
+			// resend firing
+			{Name: "d", State: notifier.StateFiring, Start: ts, LastSent: ts.Add(-1 * time.Minute)},
+		},
+		[]*notifier.Alert{{Name: "a"}, {Name: "c"}, {Name: "d"}},
		5*time.Minute, time.Minute,
	)
-	f( // resolve inactive alert at the current timestamp
-		[]*notifier.Alert{{State: notifier.StateInactive, ResolvedAt: ts}},
-		[]*notifier.Alert{{LastSent: ts, End: ts}},
-		time.Minute, time.Minute,
-	)
-	f( // mixed case of firing and resolved alerts. Names are added for deterministic sorting
-		[]*notifier.Alert{{Name: "a", State: notifier.StateFiring}, {Name: "b", State: notifier.StateInactive, ResolvedAt: ts}},
-		[]*notifier.Alert{{Name: "a", LastSent: ts, End: ts.Add(5 * time.Minute)}, {Name: "b", LastSent: ts, End: ts}},
+	f( // check if resolved alerts need to be sent with non-zero resendDelay
+		[]*notifier.Alert{
+			{Name: "a", State: notifier.StateInactive, ResolvedAt: ts, LastSent: ts.Add(-30 * time.Second)},
+			// no need to resend resolved
+			{Name: "b", State: notifier.StateInactive, ResolvedAt: ts, LastSent: ts},
+			// resend resolved
+			{Name: "c", State: notifier.StateInactive, ResolvedAt: ts.Add(-1 * time.Minute), LastSent: ts.Add(-1 * time.Minute)},
+		},
+		[]*notifier.Alert{{Name: "a"}, {Name: "c"}},
		5*time.Minute, time.Minute,
	)
	f( // mixed case of pending and resolved alerts. Names are added for deterministic sorting
		[]*notifier.Alert{{Name: "a", State: notifier.StatePending}, {Name: "b", State: notifier.StateInactive, ResolvedAt: ts}},
		[]*notifier.Alert{{Name: "b", LastSent: ts, End: ts}},
		5*time.Minute, time.Minute,
	)
-	f( // attempt to send alert that was already sent in the resendDelay interval
-		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-time.Second)}},
-		nil,
-		time.Minute, time.Minute,
-	)
-	f( // attempt to send alert that was sent out of the resendDelay interval
-		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-2 * time.Minute)}},
-		[]*notifier.Alert{{LastSent: ts, End: ts.Add(time.Minute)}},
-		time.Minute, time.Minute,
-	)
-	f( // alert must be sent even if resendDelay interval is 0
-		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-time.Second)}},
-		[]*notifier.Alert{{LastSent: ts, End: ts.Add(time.Minute)}},
-		time.Minute, 0,
-	)
-	f( // inactive alert which has been sent already
-		[]*notifier.Alert{{State: notifier.StateInactive, LastSent: ts.Add(-time.Second), ResolvedAt: ts.Add(-2 * time.Second)}},
-		nil,
-		time.Minute, time.Minute,
-	)
-	f( // inactive alert which has been resolved after last send
-		[]*notifier.Alert{{State: notifier.StateInactive, LastSent: ts.Add(-time.Second), ResolvedAt: ts}},
-		[]*notifier.Alert{{LastSent: ts, End: ts}},
-		time.Minute, time.Minute,
-	)
}

func newTestRuleWithLabels(name string, labels ...string) *AlertingRule {

@@ -704,7 +704,7 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
		return nil
	}

-	alerts := ar.alertsToSend(ts, resolveDuration, *resendDelay)
+	alerts := ar.alertsToSend(resolveDuration, *resendDelay)
	if len(alerts) < 1 {
		return nil
	}
@@ -724,7 +724,7 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
	return errGr.Err()
}

-// getStaledSeries checks whether there are stale series from previously sent ones.
+// getStaleSeries checks whether there are stale series from previously sent ones.
func (e *executor) getStaleSeries(r Rule, tss []prompbmarshal.TimeSeries, timestamp time.Time) []prompbmarshal.TimeSeries {
	ruleLabels := make(map[string][]prompbmarshal.Label, len(tss))
	for _, ts := range tss {
@@ -1,5 +1,12 @@
function expandAll() {
-    $('.collapse').addClass('show');
+    $('.group-heading').each(function () {
+        let style = $(this).attr("style")
+        // display only elements that are currently visible
+        if (style === "display: none;") {
+            return
+        }
+        $(this).next().addClass('show')
+    });
}

function collapseAll() {
@@ -15,6 +22,100 @@ function toggleByID(id) {
    }
}

+function debounce(func, delay) {
+    let timer;
+    return function (...args) {
+        clearTimeout(timer);
+        timer = setTimeout(() => {
+            func.apply(this, args);
+        }, delay);
+    };
+}
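debounce defers a handler until input has been quiet for the given delay, so typing in the search box fires one search instead of one per keystroke. The same idea in Go (the language used elsewhere in this changeset), as a minimal sketch built on time.AfterFunc:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// debounce returns a function that runs f only after calls have stopped
// arriving for the given delay, resetting the timer on each call.
func debounce(f func(), delay time.Duration) func() {
	var mu sync.Mutex
	var timer *time.Timer
	return func() {
		mu.Lock()
		defer mu.Unlock()
		if timer != nil {
			timer.Stop() // drop the previously scheduled run
		}
		timer = time.AfterFunc(delay, f)
	}
}

func main() {
	search := debounce(func() { fmt.Println("searching...") }, 500*time.Millisecond)
	for i := 0; i < 5; i++ { // five rapid "keystrokes"
		search()
		time.Sleep(50 * time.Millisecond)
	}
	time.Sleep(time.Second) // prints "searching..." once
}
```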
+
+$('#search').on("keyup", debounce(search, 500));
+
+// search shows or hides groups&rules that satisfy the search phrase.
+// case-insensitive, respects GET param `search`.
+function search() {
+    $(".rule").show();
+
+    let groupHeader = $(".group-heading")
+    let searchPhrase = $("#search").val().toLowerCase()
+    if (searchPhrase.length === 0) {
+        groupHeader.show()
+        setParamURL('search', '')
+        return
+    }
+
+    $(".rule-table").removeClass('show');
+    groupHeader.hide()
+
+    searchPhrase = searchPhrase.toLowerCase()
+    filterRuleByName(searchPhrase);
+    filterRuleByLabels(searchPhrase);
+    filterGroupsByName(searchPhrase);
+
+    setParamURL('search', searchPhrase)
+}
+
+function setParamURL(key, value) {
+    let url = new URL(location.href)
+    url.searchParams.set(key, value);
+    window.history.replaceState(null, null, `?${url.searchParams.toString()}`);
+}
+
+function getParamURL(key) {
+    let url = new URL(location.href)
+    return url.searchParams.get(key)
+}
+
+function filterGroupsByName(searchPhrase) {
+    $(".group-heading").each(function () {
+        const groupName = $(this).attr('data-group-name').toLowerCase();
+        const hasValue = groupName.indexOf(searchPhrase) >= 0
+
+        if (!hasValue) {
+            return
+        }
+
+        const target = $(this).attr("data-bs-target");
+        $(`div[id="${target}"] .rule`).show();
+        $(this).show();
+    });
+}
+
+function filterRuleByName(searchPhrase) {
+    $(".rule").each(function () {
+        const ruleName = $(this).attr("data-rule-name").toLowerCase();
+        const hasValue = ruleName.indexOf(searchPhrase) >= 0
+        if (!hasValue) {
+            $(this).hide();
+            return
+        }
+
+        const target = $(this).attr('data-bs-target')
+        $(`#rules-${target}`).addClass('show');
+        $(`div[data-bs-target='rules-${target}']`).show();
+        $(this).show();
+    });
+}
+
+function filterRuleByLabels(searchPhrase) {
+    $(".rule").each(function () {
+        const matches = $(".label", this).filter(function () {
+            const label = $(this).text().toLowerCase();
+            return label.indexOf(searchPhrase) >= 0;
+        }).length;
+
+        if (matches > 0) {
+            const target = $(this).attr('data-bs-target')
+            $(`#rules-${target}`).addClass('show');
+            $(`div[data-bs-target='rules-${target}']`).show();
+            $(this).show();
+        }
+    });
+}
+
$(document).ready(function () {
    $(".group-heading a").click(function (e) {
        e.stopPropagation(); // prevent collapse logic on link click
@@ -32,6 +133,13 @@ $(document).ready(function () {
        });
    });

+    // update search element with value from URL, if any
+    let searchPhrase = getParamURL('search')
+    $("#search").val(searchPhrase)
+
+    // apply filtering by search phrase
+    search()
+
    let hash = window.location.hash.substr(1);
    toggleByID(hash);
});

@@ -476,7 +476,7 @@ func templateFuncs() textTpl.FuncMap {
		// For example, {{ query "foo" | first | value }} will
		// execute "/api/v1/query?query=foo" request and will return
		// the first value in response.
-		"query": func(q string) ([]metric, error) {
+		"query": func(_ string) ([]metric, error) {
			// query function supposed to be substituted at FuncsWithQuery().
			// it is present here only for validation purposes, when there is no
			// provided datasource.

@@ -14,6 +14,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
)
@@ -87,7 +88,10 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
		WriteRuleDetails(w, r, rule)
		return true
	case "/vmalert/groups":
-		WriteListGroups(w, r, rh.groups())
+		var data []apiGroup
+		rf := extractRulesFilter(r)
+		data = rh.groups(rf)
+		WriteListGroups(w, r, data)
		return true
	case "/vmalert/notifiers":
		WriteListTargets(w, r, notifier.GetTargets())
@@ -98,12 +102,20 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
	case "/rules":
		// Grafana makes an extra request to `/rules`
		// handler in addition to `/api/v1/rules` calls in alerts UI,
-		WriteListGroups(w, r, rh.groups())
+		var data []apiGroup
+		rf := extractRulesFilter(r)
+		data = rh.groups(rf)
+		WriteListGroups(w, r, data)
		return true

	case "/vmalert/api/v1/rules", "/api/v1/rules":
		// path used by Grafana for ng alerting
-		data, err := rh.listGroups()
+		var data []byte
+		var err error
+
+		rf := extractRulesFilter(r)
+		data, err = rh.listGroups(rf)
+
		if err != nil {
			httpserver.Errorf(w, r, "%s", err)
			return true
@@ -111,6 +123,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
		w.Header().Set("Content-Type", "application/json")
		w.Write(data)
		return true
+
	case "/vmalert/api/v1/alerts", "/api/v1/alerts":
		// path used by Grafana for ng alerting
		data, err := rh.listAlerts()
@@ -207,26 +220,94 @@ type listGroupsResponse struct {
	} `json:"data"`
}

-func (rh *requestHandler) groups() []apiGroup {
+// see https://prometheus.io/docs/prometheus/latest/querying/api/#rules
+type rulesFilter struct {
+	files         []string
+	groupNames    []string
+	ruleNames     []string
+	ruleType      string
+	excludeAlerts bool
+}
+
+func extractRulesFilter(r *http.Request) rulesFilter {
+	rf := rulesFilter{}
+
+	var ruleType string
+	ruleTypeParam := r.URL.Query().Get("type")
+	// for some reason, `type` in filter doesn't match `type` in response,
+	// so we use this matching here
+	if ruleTypeParam == "alert" {
+		ruleType = ruleTypeAlerting
+	} else if ruleTypeParam == "record" {
+		ruleType = ruleTypeRecording
+	}
+	rf.ruleType = ruleType
+
+	rf.excludeAlerts = httputils.GetBool(r, "exclude_alerts")
+	rf.ruleNames = append([]string{}, r.Form["rule_name[]"]...)
+	rf.groupNames = append([]string{}, r.Form["rule_group[]"]...)
+	rf.files = append([]string{}, r.Form["file[]"]...)
+	return rf
+}

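extractRulesFilter mirrors the Prometheus /api/v1/rules filter parameters: repeated rule_name[], rule_group[] and file[] args narrow the listing, type selects alerting vs recording rules, and exclude_alerts strips active alerts from the payload. A sketch of how such a query string decomposes (the URL is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A request Grafana might issue against vmalert's rules API.
	u, err := url.Parse("/api/v1/rules?type=alert&exclude_alerts=true" +
		"&rule_group[]=group&file[]=rules.yaml&rule_name[]=alert")
	if err != nil {
		panic(err)
	}
	q := u.Query()
	// Repeated params arrive as slices, matching r.Form["rule_group[]"] etc.
	fmt.Println("type:", q.Get("type"))                     // alert
	fmt.Println("exclude_alerts:", q.Get("exclude_alerts")) // true
	fmt.Println("groups:", q["rule_group[]"])               // [group]
	fmt.Println("files:", q["file[]"])                      // [rules.yaml]
	fmt.Println("rules:", q["rule_name[]"])                 // [alert]
}
```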
+func (rh *requestHandler) groups(rf rulesFilter) []apiGroup {
	rh.m.groupsMu.RLock()
	defer rh.m.groupsMu.RUnlock()

-	groups := make([]apiGroup, 0)
-	for _, g := range rh.m.groups {
-		groups = append(groups, groupToAPI(g))
+	isInList := func(list []string, needle string) bool {
+		if len(list) < 1 {
+			return true
+		}
+		for _, i := range list {
+			if i == needle {
+				return true
+			}
+		}
+		return false
	}

-	// sort list of alerts for deterministic output
-	sort.Slice(groups, func(i, j int) bool {
-		return groups[i].Name < groups[j].Name
-	})
+	groups := make([]apiGroup, 0)
+	for _, group := range rh.m.groups {
+		if !isInList(rf.groupNames, group.Name) {
+			continue
+		}
+		if !isInList(rf.files, group.File) {
+			continue
+		}
+
+		g := groupToAPI(group)
+		// the returned list should always be non-nil
+		// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4221
+		filteredRules := make([]apiRule, 0)
+		for _, r := range g.Rules {
+			if rf.ruleType != "" && rf.ruleType != r.Type {
+				continue
+			}
+			if !isInList(rf.ruleNames, r.Name) {
+				continue
+			}
+			if rf.excludeAlerts {
+				r.Alerts = nil
+			}
+			filteredRules = append(filteredRules, r)
+		}
+		g.Rules = filteredRules
+		groups = append(groups, g)
+	}
+	// sort list of groups for deterministic output
+	sort.Slice(groups, func(i, j int) bool {
+		a, b := groups[i], groups[j]
+		if a.Name != b.Name {
+			return a.Name < b.Name
+		}
+		return a.File < b.File
+	})
	return groups
}
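Note the convention in isInList above: an empty filter list matches everything, so requests that omit rule_group[], file[] or rule_name[] keep returning the full listing. A tiny sketch of that convention in isolation:

```go
package main

import "fmt"

// matchAllWhenEmpty reports whether needle is in list,
// treating an empty list as "no filter" (always true).
func matchAllWhenEmpty(list []string, needle string) bool {
	if len(list) == 0 {
		return true
	}
	for _, v := range list {
		if v == needle {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matchAllWhenEmpty(nil, "group"))                      // true: no filter set
	fmt.Println(matchAllWhenEmpty([]string{"foo", "group"}, "group")) // true: explicit match
	fmt.Println(matchAllWhenEmpty([]string{"foo"}, "group"))          // false
}
```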

-func (rh *requestHandler) listGroups() ([]byte, error) {
+func (rh *requestHandler) listGroups(rf rulesFilter) ([]byte, error) {
	lr := listGroupsResponse{Status: "success"}
-	lr.Data.Groups = rh.groups()
+	lr.Data.Groups = rh.groups(rf)
	b, err := json.Marshal(lr)
	if err != nil {
		return nil, &httpserver.ErrorWithStatusCode{
@@ -70,15 +70,29 @@ btn-primary
		}
	}
%}
-<a class="btn {%= buttonActive(filter, "") %}" role="button" onclick="window.location = window.location.pathname">All</a>
-<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
-<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
-<a class="btn {%= buttonActive(filter, "unhealthy") %}" role="button" onclick="location.href='?filter=unhealthy'" title="Show only rules with errors">Unhealthy</a>
-<a class="btn {%= buttonActive(filter, "noMatch") %}" role="button" onclick="location.href='?filter=noMatch'" title="Show only rules matching no time series during last evaluation">NoMatch</a>
+<div class="btn-toolbar mb-3" role="toolbar">
+    <div>
+        <a class="btn {%= buttonActive(filter, "") %}" role="button" onclick="window.location = window.location.pathname">All</a>
+        <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
+        <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
+        <a class="btn {%= buttonActive(filter, "unhealthy") %}" role="button" onclick="location.href='?filter=unhealthy'" title="Show only rules with errors">Unhealthy</a>
+        <a class="btn {%= buttonActive(filter, "noMatch") %}" role="button" onclick="location.href='?filter=noMatch'" title="Show only rules matching no time series during last evaluation">NoMatch</a>
+    </div>
+    <div class="col-md-4 col-lg-5">
+        <div class="px-3 input-group">
+            <div class="input-group-prepend">
+                <span class="input-group-text">
+                    <svg fill="#000000" height="25px" width="20px" version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 490.4 490.4" xml:space="preserve"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <g> <path d="M484.1,454.796l-110.5-110.6c29.8-36.3,47.6-82.8,47.6-133.4c0-116.3-94.3-210.6-210.6-210.6S0,94.496,0,210.796 s94.3,210.6,210.6,210.6c50.8,0,97.4-18,133.8-48l110.5,110.5c12.9,11.8,25,4.2,29.2,0C492.5,475.596,492.5,463.096,484.1,454.796z M41.1,210.796c0-93.6,75.9-169.5,169.5-169.5s169.6,75.9,169.6,169.5s-75.9,169.5-169.5,169.5S41.1,304.396,41.1,210.796z"></path> </g> </g></svg>
+                </span>
+            </div>
+            <input id="search" placeholder="Filter by group, rule or labels" type="text" class="form-control"/>
+        </div>
+    </div>
+</div>
{% if len(groups) > 0 %}
    {% for _, g := range groups %}
	<div
-		class="group-heading{% if rNotOk[g.ID] > 0 %} alert-danger{%endif%}" data-bs-target="rules-{%s g.ID %}">
+		class="group-heading{% if rNotOk[g.ID] > 0 %} alert-danger{%endif%}" data-bs-target="rules-{%s g.ID %}" data-group-name="{%s g.Name %}">
		<span class="anchor" id="group-{%s g.ID %}"></span>
		<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%f.0 g.Interval %}s) #</a>
		{% if rNotOk[g.ID] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d rNotOk[g.ID] %}</span> {% endif %}
@@ -100,7 +114,7 @@ btn-primary
		</div>
		{% endif %}
	</div>
-	<div class="collapse" id="rules-{%s g.ID %}">
+	<div class="collapse rule-table" id="rules-{%s g.ID %}">
		<table class="table table-striped table-hover table-sm">
			<thead>
				<tr>
@@ -111,7 +125,7 @@ btn-primary
			</thead>
			<tbody>
			{% for _, r := range g.Rules %}
-				<tr{% if r.LastError != "" %} class="alert-danger"{% endif %}>
+				<tr class="rule{% if r.LastError != "" %} alert-danger{% endif %}" data-rule-name="{%s r.Name %}" data-bs-target="{%s g.ID %}">
					<td>
						<div class="row">
							<div class="col-12 mb-2">
@@ -134,7 +148,7 @@ btn-primary
							<div class="col-12 mb-2">
								{% if len(r.Labels) > 0 %} <b>Labels:</b>{% endif %}
								{% for k, v := range r.Labels %}
-									<span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
+									<span class="ms-1 badge bg-primary label">{%s k %}={%s v %}</span>
								{% endfor %}
							</div>
							{% if r.LastError != "" %}
@@ -170,11 +184,25 @@ btn-primary
{%code prefix := utils.Prefix(r.URL.Path) %}
{%= tpl.Header(r, navItems, "Alerts", getLastConfigError()) %}
{% if len(groupAlerts) > 0 %}
-    <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
-    <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
+    <div class="btn-toolbar mb-3" role="toolbar">
+        <div>
+            <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
+            <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
+        </div>
+        <div class="col-md-4 col-lg-5">
+            <div class="px-3 input-group">
+                <div class="input-group-prepend">
+                    <span class="input-group-text">
+                        <svg fill="#000000" height="25px" width="20px" version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 490.4 490.4" xml:space="preserve"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <g> <path d="M484.1,454.796l-110.5-110.6c29.8-36.3,47.6-82.8,47.6-133.4c0-116.3-94.3-210.6-210.6-210.6S0,94.496,0,210.796 s94.3,210.6,210.6,210.6c50.8,0,97.4-18,133.8-48l110.5,110.5c12.9,11.8,25,4.2,29.2,0C492.5,475.596,492.5,463.096,484.1,454.796z M41.1,210.796c0-93.6,75.9-169.5,169.5-169.5s169.6,75.9,169.6,169.5s-75.9,169.5-169.5,169.5S41.1,304.396,41.1,210.796z"></path> </g> </g></svg>
+                    </span>
+                </div>
+                <input id="search" placeholder="Filter by group, rule or labels" type="text" class="form-control"/>
+            </div>
+        </div>
+    </div>
    {% for _, ga := range groupAlerts %}
        {%code g := ga.Group %}
-        <div class="group-heading alert-danger" data-bs-target="rules-{%s g.ID %}">
+        <div class="group-heading alert-danger" data-bs-target="rules-{%s g.ID %}" data-group-name="{%s g.Name %}">
            <span class="anchor" id="group-{%s g.ID %}"></span>
            <a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %}</a>
            <span class="badge bg-danger" title="Number of active alerts">{%d len(ga.Alerts) %}</span>
@@ -192,7 +220,7 @@ btn-primary
	}
	sort.Strings(keys)
%}
-<div class="collapse" id="rules-{%s g.ID %}">
+<div class="collapse rule-table" id="rules-{%s g.ID %}">
    {% for _, ruleID := range keys %}
    {%code
        defaultAR := alertsByRule[ruleID][0]
@@ -203,45 +231,46 @@ btn-primary
	sort.Strings(labelKeys)
%}
	<br>
-	<b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})
-	| <span><a target="_blank" href="{%s defaultAR.SourceLink %}">Source</a></span>
-	<br>
-	<b>expr:</b><code><pre>{%s defaultAR.Expression %}</pre></code>
-	<table class="table table-striped table-hover table-sm">
-		<thead>
-			<tr>
-				<th scope="col">Labels</th>
-				<th scope="col">State</th>
-				<th scope="col">Active at</th>
-				<th scope="col">Value</th>
-				<th scope="col">Link</th>
-			</tr>
-		</thead>
-		<tbody>
-		{% for _, ar := range alertsByRule[ruleID] %}
-			<tr>
-				<td>
-					{% for _, k := range labelKeys %}
-						<span class="ms-1 badge bg-primary">{%s k %}={%s ar.Labels[k] %}</span>
-					{% endfor %}
-				</td>
-				<td>{%= badgeState(ar.State) %}</td>
-				<td>
-					{%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
-					{% if ar.Restored %}{%= badgeRestored() %}{% endif %}
-					{% if ar.Stabilizing %}{%= badgeStabilizing() %}{% endif %}
-				</td>
-				<td>{%s ar.Value %}</td>
-				<td>
-					<a href="{%s prefix+ar.WebLink() %}">Details</a>
-				</td>
-			</tr>
-		{% endfor %}
-		</tbody>
-	</table>
+	<div class="rule" data-rule-name="{%s defaultAR.Name %}" data-bs-target="{%s g.ID %}">
+		<b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})
+		| <span><a target="_blank" href="{%s defaultAR.SourceLink %}">Source</a></span>
+		<br>
+		<b>expr:</b><code><pre>{%s defaultAR.Expression %}</pre></code>
+		<table class="table table-striped table-hover table-sm">
+			<thead>
+				<tr>
+					<th scope="col">Labels</th>
+					<th scope="col">State</th>
+					<th scope="col">Active at</th>
+					<th scope="col">Value</th>
+					<th scope="col">Link</th>
+				</tr>
+			</thead>
+			<tbody>
+			{% for _, ar := range alertsByRule[ruleID] %}
+				<tr>
+					<td>
+						{% for _, k := range labelKeys %}
+							<span class="ms-1 badge bg-primary label">{%s k %}={%s ar.Labels[k] %}</span>
+						{% endfor %}
+					</td>
+					<td>{%= badgeState(ar.State) %}</td>
+					<td>
+						{%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
+						{% if ar.Restored %}{%= badgeRestored() %}{% endif %}
+						{% if ar.Stabilizing %}{%= badgeStabilizing() %}{% endif %}
+					</td>
+					<td>{%s ar.Value %}</td>
+					<td>
+						<a href="{%s prefix+ar.WebLink() %}">Details</a>
+					</td>
+				</tr>
+			{% endfor %}
+			</tbody>
+		</table>
+	</div>
    {% endfor %}
</div>
<br>
{% endfor %}

{% else %}

(File diff suppressed because it is too large.)
@@ -23,6 +23,7 @@ func TestHandler(t *testing.T) {
	})
	g := &rule.Group{
		Name:        "group",
+		File:        "rules.yaml",
		Concurrency: 1,
	}
	ar := rule.NewAlertingRule(fq, g, config.Rule{ID: 0, Alert: "alert"})
@@ -35,7 +36,7 @@ func TestHandler(t *testing.T) {
	}}
	rh := &requestHandler{m: m}

-	getResp := func(url string, to interface{}, code int) {
+	getResp := func(t *testing.T, url string, to interface{}, code int) {
		t.Helper()
		resp, err := http.Get(url)
		if err != nil {
@@ -59,43 +60,43 @@ func TestHandler(t *testing.T) {
	defer ts.Close()

	t.Run("/", func(t *testing.T) {
-		getResp(ts.URL, nil, 200)
-		getResp(ts.URL+"/vmalert", nil, 200)
-		getResp(ts.URL+"/vmalert/alerts", nil, 200)
-		getResp(ts.URL+"/vmalert/groups", nil, 200)
-		getResp(ts.URL+"/vmalert/notifiers", nil, 200)
-		getResp(ts.URL+"/rules", nil, 200)
+		getResp(t, ts.URL, nil, 200)
+		getResp(t, ts.URL+"/vmalert", nil, 200)
+		getResp(t, ts.URL+"/vmalert/alerts", nil, 200)
+		getResp(t, ts.URL+"/vmalert/groups", nil, 200)
+		getResp(t, ts.URL+"/vmalert/notifiers", nil, 200)
+		getResp(t, ts.URL+"/rules", nil, 200)
	})

	t.Run("/vmalert/rule", func(t *testing.T) {
		a := ruleToAPI(ar)
-		getResp(ts.URL+"/vmalert/"+a.WebLink(), nil, 200)
+		getResp(t, ts.URL+"/vmalert/"+a.WebLink(), nil, 200)
		r := ruleToAPI(rr)
-		getResp(ts.URL+"/vmalert/"+r.WebLink(), nil, 200)
+		getResp(t, ts.URL+"/vmalert/"+r.WebLink(), nil, 200)
	})
	t.Run("/vmalert/alert", func(t *testing.T) {
		alerts := ruleToAPIAlert(ar)
		for _, a := range alerts {
-			getResp(ts.URL+"/vmalert/"+a.WebLink(), nil, 200)
+			getResp(t, ts.URL+"/vmalert/"+a.WebLink(), nil, 200)
		}
	})
	t.Run("/vmalert/rule?badParam", func(t *testing.T) {
		params := fmt.Sprintf("?%s=0&%s=1", paramGroupID, paramRuleID)
-		getResp(ts.URL+"/vmalert/rule"+params, nil, 404)
+		getResp(t, ts.URL+"/vmalert/rule"+params, nil, 404)

		params = fmt.Sprintf("?%s=1&%s=0", paramGroupID, paramRuleID)
-		getResp(ts.URL+"/vmalert/rule"+params, nil, 404)
+		getResp(t, ts.URL+"/vmalert/rule"+params, nil, 404)
	})

	t.Run("/api/v1/alerts", func(t *testing.T) {
		lr := listAlertsResponse{}
-		getResp(ts.URL+"/api/v1/alerts", &lr, 200)
+		getResp(t, ts.URL+"/api/v1/alerts", &lr, 200)
		if length := len(lr.Data.Alerts); length != 1 {
			t.Errorf("expected 1 alert got %d", length)
		}

		lr = listAlertsResponse{}
-		getResp(ts.URL+"/vmalert/api/v1/alerts", &lr, 200)
+		getResp(t, ts.URL+"/vmalert/api/v1/alerts", &lr, 200)
		if length := len(lr.Data.Alerts); length != 1 {
			t.Errorf("expected 1 alert got %d", length)
		}
@@ -103,13 +104,13 @@ func TestHandler(t *testing.T) {
	t.Run("/api/v1/alert?alertID&groupID", func(t *testing.T) {
		expAlert := newAlertAPI(ar, ar.GetAlerts()[0])
		alert := &apiAlert{}
-		getResp(ts.URL+"/"+expAlert.APILink(), alert, 200)
+		getResp(t, ts.URL+"/"+expAlert.APILink(), alert, 200)
		if !reflect.DeepEqual(alert, expAlert) {
			t.Errorf("expected %v is equal to %v", alert, expAlert)
		}

		alert = &apiAlert{}
-		getResp(ts.URL+"/vmalert/"+expAlert.APILink(), alert, 200)
+		getResp(t, ts.URL+"/vmalert/"+expAlert.APILink(), alert, 200)
		if !reflect.DeepEqual(alert, expAlert) {
			t.Errorf("expected %v is equal to %v", alert, expAlert)
		}
@@ -117,28 +118,28 @@ func TestHandler(t *testing.T) {

	t.Run("/api/v1/alert?badParams", func(t *testing.T) {
		params := fmt.Sprintf("?%s=0&%s=1", paramGroupID, paramAlertID)
-		getResp(ts.URL+"/api/v1/alert"+params, nil, 404)
-		getResp(ts.URL+"/vmalert/api/v1/alert"+params, nil, 404)
+		getResp(t, ts.URL+"/api/v1/alert"+params, nil, 404)
+		getResp(t, ts.URL+"/vmalert/api/v1/alert"+params, nil, 404)

		params = fmt.Sprintf("?%s=1&%s=0", paramGroupID, paramAlertID)
-		getResp(ts.URL+"/api/v1/alert"+params, nil, 404)
-		getResp(ts.URL+"/vmalert/api/v1/alert"+params, nil, 404)
+		getResp(t, ts.URL+"/api/v1/alert"+params, nil, 404)
+		getResp(t, ts.URL+"/vmalert/api/v1/alert"+params, nil, 404)

		// bad request, alertID is missing
		params = fmt.Sprintf("?%s=1", paramGroupID)
-		getResp(ts.URL+"/api/v1/alert"+params, nil, 400)
-		getResp(ts.URL+"/vmalert/api/v1/alert"+params, nil, 400)
+		getResp(t, ts.URL+"/api/v1/alert"+params, nil, 400)
+		getResp(t, ts.URL+"/vmalert/api/v1/alert"+params, nil, 400)
	})

	t.Run("/api/v1/rules", func(t *testing.T) {
		lr := listGroupsResponse{}
-		getResp(ts.URL+"/api/v1/rules", &lr, 200)
+		getResp(t, ts.URL+"/api/v1/rules", &lr, 200)
		if length := len(lr.Data.Groups); length != 1 {
			t.Errorf("expected 1 group got %d", length)
		}

		lr = listGroupsResponse{}
-		getResp(ts.URL+"/vmalert/api/v1/rules", &lr, 200)
+		getResp(t, ts.URL+"/vmalert/api/v1/rules", &lr, 200)
		if length := len(lr.Data.Groups); length != 1 {
			t.Errorf("expected 1 group got %d", length)
		}
@@ -146,25 +147,93 @@ func TestHandler(t *testing.T) {
	t.Run("/api/v1/rule?ruleID&groupID", func(t *testing.T) {
		expRule := ruleToAPI(ar)
		gotRule := apiRule{}
-		getResp(ts.URL+"/"+expRule.APILink(), &gotRule, 200)
+		getResp(t, ts.URL+"/"+expRule.APILink(), &gotRule, 200)

		if expRule.ID != gotRule.ID {
			t.Errorf("expected to get Rule %q; got %q instead", expRule.ID, gotRule.ID)
		}

		gotRule = apiRule{}
-		getResp(ts.URL+"/vmalert/"+expRule.APILink(), &gotRule, 200)
+		getResp(t, ts.URL+"/vmalert/"+expRule.APILink(), &gotRule, 200)

		if expRule.ID != gotRule.ID {
			t.Errorf("expected to get Rule %q; got %q instead", expRule.ID, gotRule.ID)
		}

		gotRuleWithUpdates := apiRuleWithUpdates{}
-		getResp(ts.URL+"/"+expRule.APILink(), &gotRuleWithUpdates, 200)
+		getResp(t, ts.URL+"/"+expRule.APILink(), &gotRuleWithUpdates, 200)
		if gotRuleWithUpdates.StateUpdates == nil || len(gotRuleWithUpdates.StateUpdates) < 1 {
			t.Fatalf("expected %+v to have state updates field not empty", gotRuleWithUpdates.StateUpdates)
		}
	})

+	t.Run("/api/v1/rules&filters", func(t *testing.T) {
+		check := func(url string, expGroups, expRules int) {
+			t.Helper()
+			lr := listGroupsResponse{}
+			getResp(t, ts.URL+url, &lr, 200)
+			if length := len(lr.Data.Groups); length != expGroups {
+				t.Errorf("expected %d groups got %d", expGroups, length)
+			}
+			if len(lr.Data.Groups) < 1 {
+				return
+			}
+			var rulesN int
+			for _, gr := range lr.Data.Groups {
+				rulesN += len(gr.Rules)
+			}
+			if rulesN != expRules {
+				t.Errorf("expected %d rules got %d", expRules, rulesN)
+			}
+		}
+
+		check("/api/v1/rules?type=alert", 1, 1)
+		check("/api/v1/rules?type=record", 1, 1)
+
+		check("/vmalert/api/v1/rules?type=alert", 1, 1)
+		check("/vmalert/api/v1/rules?type=record", 1, 1)
+
+		// no filtering expected due to bad params
+		check("/api/v1/rules?type=badParam", 1, 2)
+		check("/api/v1/rules?foo=bar", 1, 2)
+
+		check("/api/v1/rules?rule_group[]=foo&rule_group[]=bar", 0, 0)
+		check("/api/v1/rules?rule_group[]=foo&rule_group[]=group&rule_group[]=bar", 1, 2)
+
+		check("/api/v1/rules?rule_group[]=group&file[]=foo", 0, 0)
+		check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml", 1, 2)
+
+		check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 1, 0)
+		check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert", 1, 1)
+		check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert&rule_name[]=record", 1, 2)
+	})
+	t.Run("/api/v1/rules&exclude_alerts=true", func(t *testing.T) {
+		// check if response returns active alerts by default
+		lr := listGroupsResponse{}
+		getResp(t, ts.URL+"/api/v1/rules?rule_group[]=group&file[]=rules.yaml", &lr, 200)
+		activeAlerts := 0
+		for _, gr := range lr.Data.Groups {
+			for _, r := range gr.Rules {
+				activeAlerts += len(r.Alerts)
+			}
+		}
+		if activeAlerts == 0 {
+			t.Fatalf("expected at least 1 active alert in response; got 0")
+		}
+
+		// disable returning alerts via param
+		lr = listGroupsResponse{}
+		getResp(t, ts.URL+"/api/v1/rules?rule_group[]=group&file[]=rules.yaml&exclude_alerts=true", &lr, 200)
+		activeAlerts = 0
+		for _, gr := range lr.Data.Groups {
+			for _, r := range gr.Rules {
+				activeAlerts += len(r.Alerts)
+			}
+		}
+		if activeAlerts != 0 {
+			t.Fatalf("expected to get 0 active alert in response; got %d", activeAlerts)
+		}
+	})
}

func TestEmptyResponse(t *testing.T) {
@@ -172,7 +241,7 @@ func TestEmptyResponse(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithNoGroups.handler(w, r) }))
	defer ts.Close()

-	getResp := func(url string, to interface{}, code int) {
+	getResp := func(t *testing.T, url string, to interface{}, code int) {
		t.Helper()
		resp, err := http.Get(url)
		if err != nil {
@@ -195,13 +264,13 @@ func TestEmptyResponse(t *testing.T) {

	t.Run("no groups /api/v1/alerts", func(t *testing.T) {
		lr := listAlertsResponse{}
-		getResp(ts.URL+"/api/v1/alerts", &lr, 200)
+		getResp(t, ts.URL+"/api/v1/alerts", &lr, 200)
		if lr.Data.Alerts == nil {
			t.Errorf("expected /api/v1/alerts response to have non-nil data")
		}

		lr = listAlertsResponse{}
-		getResp(ts.URL+"/vmalert/api/v1/alerts", &lr, 200)
+		getResp(t, ts.URL+"/vmalert/api/v1/alerts", &lr, 200)
		if lr.Data.Alerts == nil {
			t.Errorf("expected /api/v1/alerts response to have non-nil data")
		}
@@ -209,13 +278,13 @@ func TestEmptyResponse(t *testing.T) {

	t.Run("no groups /api/v1/rules", func(t *testing.T) {
		lr := listGroupsResponse{}
-		getResp(ts.URL+"/api/v1/rules", &lr, 200)
+		getResp(t, ts.URL+"/api/v1/rules", &lr, 200)
		if lr.Data.Groups == nil {
			t.Errorf("expected /api/v1/rules response to have non-nil data")
		}

		lr = listGroupsResponse{}
-		getResp(ts.URL+"/vmalert/api/v1/rules", &lr, 200)
+		getResp(t, ts.URL+"/vmalert/api/v1/rules", &lr, 200)
		if lr.Data.Groups == nil {
			t.Errorf("expected /api/v1/rules response to have non-nil data")
		}
@@ -226,13 +295,13 @@ func TestEmptyResponse(t *testing.T) {

	t.Run("empty group /api/v1/rules", func(t *testing.T) {
		lr := listGroupsResponse{}
-		getResp(ts.URL+"/api/v1/rules", &lr, 200)
+		getResp(t, ts.URL+"/api/v1/rules", &lr, 200)
		if lr.Data.Groups == nil {
			t.Fatalf("expected /api/v1/rules response to have non-nil data")
		}

		lr = listGroupsResponse{}
-		getResp(ts.URL+"/vmalert/api/v1/rules", &lr, 200)
+		getResp(t, ts.URL+"/vmalert/api/v1/rules", &lr, 200)
		if lr.Data.Groups == nil {
			t.Fatalf("expected /api/v1/rules response to have non-nil data")
		}

@@ -193,10 +193,15 @@ func ruleToAPI(r interface{}) apiRule {
	return apiRule{}
}

+const (
+	ruleTypeRecording = "recording"
+	ruleTypeAlerting  = "alerting"
+)
+
func recordingToAPI(rr *rule.RecordingRule) apiRule {
	lastState := rule.GetLastEntry(rr)
	r := apiRule{
-		Type:           "recording",
+		Type:           ruleTypeRecording,
		DatasourceType: rr.Type.String(),
		Name:           rr.Name,
		Query:          rr.Expr,
@@ -224,7 +229,7 @@ func recordingToAPI(rr *rule.RecordingRule) apiRule {
func alertingToAPI(ar *rule.AlertingRule) apiRule {
	lastState := rule.GetLastEntry(ar)
	r := apiRule{
-		Type:           "alerting",
+		Type:           ruleTypeAlerting,
		DatasourceType: ar.Type.String(),
		Name:           ar.Name,
		Query:          ar.Expr,

@@ -2,15 +2,17 @@ package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"flag"
	"fmt"
	"math"
	"net"
	"net/http"
	"net/url"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
@@ -36,22 +38,34 @@ var (
	defaultRetryStatusCodes = flagutil.NewArrayInt("retryStatusCodes", 0, "Comma-separated list of default HTTP response status codes when vmauth re-tries the request on other backends. "+
		"See https://docs.victoriametrics.com/vmauth.html#load-balancing for details")
	defaultLoadBalancingPolicy = flag.String("loadBalancingPolicy", "least_loaded", "The default load balancing policy to use for backend urls specified inside url_prefix section. "+
-		"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth.html#load-balancing for more details")
+		"Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/vmauth.html#load-balancing")
+	discoverBackendIPsGlobal = flag.Bool("discoverBackendIPs", false, "Whether to discover backend IPs via periodic DNS queries to hostnames specified in url_prefix. "+
+		"This may be useful when url_prefix points to a hostname with dynamically scaled instances behind it. See https://docs.victoriametrics.com/vmauth.html#discovering-backend-ips")
+	discoverBackendIPsInterval = flag.Duration("discoverBackendIPsInterval", 10*time.Second, "The interval for re-discovering backend IPs if -discoverBackendIPs command-line flag is set. "+
+		"Too low value may lead to DNS errors")
	httpAuthHeader = flagutil.NewArrayString("httpAuthHeader", "HTTP request header to use for obtaining authorization tokens. By default auth tokens are read from Authorization request header")
)

// AuthConfig represents auth config.
type AuthConfig struct {
	Users            []UserInfo `yaml:"users,omitempty"`
	UnauthorizedUser *UserInfo  `yaml:"unauthorized_user,omitempty"`

	// ms holds all the metrics for the given AuthConfig
	ms *metrics.Set
}

// UserInfo is user information read from authConfigPath
type UserInfo struct {
-	Name        string `yaml:"name,omitempty"`
-	BearerToken string `yaml:"bearer_token,omitempty"`
-	Username    string `yaml:"username,omitempty"`
-	Password    string `yaml:"password,omitempty"`
+	Name string `yaml:"name,omitempty"`
+
+	BearerToken string `yaml:"bearer_token,omitempty"`
+	AuthToken   string `yaml:"auth_token,omitempty"`
+	Username    string `yaml:"username,omitempty"`
+	Password    string `yaml:"password,omitempty"`

	URLPrefix             *URLPrefix  `yaml:"url_prefix,omitempty"`
+	DiscoverBackendIPs    *bool       `yaml:"discover_backend_ips,omitempty"`
	URLMaps               []URLMap    `yaml:"url_map,omitempty"`
	HeadersConf           HeadersConf `yaml:",inline"`
	MaxConcurrentRequests int         `yaml:"max_concurrent_requests,omitempty"`
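With the regrouped UserInfo fields, a user entry may authenticate via bearer_token, the new auth_token, or username/password, and can opt into per-user backend IP discovery through the *bool field. A sketch of parsing such an entry with gopkg.in/yaml.v2; the struct below is a reduced stand-in for UserInfo, and url_prefix is simplified to a plain string:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// userInfo mirrors a subset of vmauth's UserInfo, for illustration only.
type userInfo struct {
	Name               string `yaml:"name,omitempty"`
	BearerToken        string `yaml:"bearer_token,omitempty"`
	AuthToken          string `yaml:"auth_token,omitempty"`
	URLPrefix          string `yaml:"url_prefix,omitempty"`
	DiscoverBackendIPs *bool  `yaml:"discover_backend_ips,omitempty"`
}

func main() {
	data := []byte(`
name: dev-team
auth_token: secret-token
url_prefix: http://vmselect:8481/select/0/prometheus
discover_backend_ips: true
`)
	var ui userInfo
	if err := yaml.Unmarshal(data, &ui); err != nil {
		panic(err)
	}
	// The *bool distinguishes "unset" (nil, fall back to the global
	// -discoverBackendIPs flag) from an explicit true/false.
	fmt.Printf("%s -> %s (discover: %v)\n", ui.Name, ui.URLPrefix, *ui.DiscoverBackendIPs)
}
```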
@@ -106,6 +120,8 @@ func (ui *UserInfo) getMaxConcurrentRequests() int {
type Header struct {
	Name  string
	Value string
+
+	sOriginal string
}

// UnmarshalYAML unmarshals h from f.
@@ -114,6 +130,8 @@ func (h *Header) UnmarshalYAML(f func(interface{}) error) error {
	if err := f(&s); err != nil {
		return err
	}
+	h.sOriginal = s
+
	n := strings.IndexByte(s, ':')
	if n < 0 {
		return fmt.Errorf("missing speparator char ':' between Name and Value in the header %q; expected format - 'Name: Value'", s)
@@ -125,21 +143,29 @@ func (h *Header) UnmarshalYAML(f func(interface{}) error) error {

// MarshalYAML marshals h to yaml.
func (h *Header) MarshalYAML() (interface{}, error) {
-	s := fmt.Sprintf("%s: %s", h.Name, h.Value)
-	return s, nil
+	return h.sOriginal, nil
}

// URLMap is a mapping from source paths to target urls.
type URLMap struct {
-	// SrcHosts is the list of regular expressions, which match the request hostname.
+	// SrcPaths is an optional list of regular expressions, which must match the request path.
+	SrcPaths []*Regex `yaml:"src_paths,omitempty"`
+
+	// SrcHosts is an optional list of regular expressions, which must match the request hostname.
	SrcHosts []*Regex `yaml:"src_hosts,omitempty"`

-	// SrcPaths is the list of regular expressions, which match the request path.
-	SrcPaths []*Regex `yaml:"src_paths,omitempty"`
+	// SrcQueryArgs is an optional list of query args, which must match request URL query args.
+	SrcQueryArgs []QueryArg `yaml:"src_query_args,omitempty"`
+
+	// SrcHeaders is an optional list of headers, which must match request headers.
+	SrcHeaders []Header `yaml:"src_headers,omitempty"`

	// UrlPrefix contains backend url prefixes for the proxied request url.
	URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`

+	// DiscoverBackendIPs instructs discovering URLPrefix backend IPs via DNS.
+	DiscoverBackendIPs *bool `yaml:"discover_backend_ips,omitempty"`
+
	// HeadersConf is the config for augumenting request and response headers.
	HeadersConf HeadersConf `yaml:",inline"`

@@ -155,25 +181,70 @@ type URLMap struct {

// Regex represents a regex
type Regex struct {
-	re *regexp.Regexp
+	sOriginal string
+	re        *regexp.Regexp
}

+// QueryArg represents HTTP query arg
+type QueryArg struct {
+	Name  string
+	Value string
+
+	sOriginal string
+}
+
+// UnmarshalYAML unmarshals up from yaml.
+func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
+	var s string
+	if err := f(&s); err != nil {
+		return err
+	}
+	qa.sOriginal = s
+
+	n := strings.IndexByte(s, '=')
+	if n >= 0 {
+		qa.Name = s[:n]
+		qa.Value = s[n+1:]
+	}
+	return nil
+}
+
+// MarshalYAML marshals up to yaml.
+func (qa *QueryArg) MarshalYAML() (interface{}, error) {
+	return qa.sOriginal, nil
+}
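Header and QueryArg both keep the raw string they were parsed from in sOriginal, so marshaling the config back to YAML reproduces the user's input byte-for-byte instead of a normalized rendering (the old Header.MarshalYAML rebuilt the string with fmt.Sprintf). The same round-trip trick for the name=value form, as a minimal sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// queryArg parses "name=value" but remembers the original string,
// so serialization can return it unchanged.
type queryArg struct {
	name, value string
	sOriginal   string
}

func parseQueryArg(s string) queryArg {
	qa := queryArg{sOriginal: s}
	if n := strings.IndexByte(s, '='); n >= 0 {
		qa.name = s[:n]
		qa.value = s[n+1:]
	}
	return qa
}

func (qa queryArg) String() string { return qa.sOriginal }

func main() {
	qa := parseQueryArg("db=%20prod") // value deliberately left URL-encoded
	fmt.Println(qa.name, qa.value)    // db %20prod
	fmt.Println(qa)                   // db=%20prod — original preserved verbatim
}
```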

// URLPrefix represents passed `url_prefix`
type URLPrefix struct {
-	n uint32
-
-	// the list of backend urls
-	bus []*backendURL
-
	// requests are re-tried on other backend urls for these http response status codes
	retryStatusCodes []int

	// load balancing policy used
	loadBalancingPolicy string

-	// how many request path prefix parts to drop before routing the request to backendURL.
+	// how many request path prefix parts to drop before routing the request to backendURL
	dropSrcPathPrefixParts int

+	// busOriginal contains the original list of backends specified in yaml config.
+	busOriginal []*url.URL
+
+	// n is an atomic counter, which is used for balancing load among available backends.
+	n atomic.Uint32
+
+	// the list of backend urls
+	//
+	// the list can be dynamically updated if `discover_backend_ips` option is set.
+	bus atomic.Pointer[[]*backendURL]
+
+	// if this option is set, then backend ips for busOriginal are periodically re-discovered and put to bus.
+	discoverBackendIPs bool
+
+	// The next deadline for DNS-based discovery of backend IPs
+	nextDiscoveryDeadline atomic.Uint64
+
	// vOriginal contains the original yaml value for URLPrefix.
	vOriginal interface{}
}

func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
@@ -189,49 +260,146 @@ func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
}

type backendURL struct {
-	brokenDeadline     uint64
-	concurrentRequests int32
-	url                *url.URL
+	brokenDeadline     atomic.Uint64
+	concurrentRequests atomic.Int32
+
+	url *url.URL
}

func (bu *backendURL) isBroken() bool {
	ct := fasttime.UnixTimestamp()
-	return ct < atomic.LoadUint64(&bu.brokenDeadline)
+	return ct < bu.brokenDeadline.Load()
}

func (bu *backendURL) setBroken() {
	deadline := fasttime.UnixTimestamp() + uint64((*failTimeout).Seconds())
-	atomic.StoreUint64(&bu.brokenDeadline, deadline)
+	bu.brokenDeadline.Store(deadline)
}

func (bu *backendURL) get() {
-	atomic.AddInt32(&bu.concurrentRequests, 1)
+	bu.concurrentRequests.Add(1)
}

func (bu *backendURL) put() {
-	atomic.AddInt32(&bu.concurrentRequests, -1)
+	bu.concurrentRequests.Add(-1)
}

func (up *URLPrefix) getBackendsCount() int {
-	return len(up.bus)
+	pbus := up.bus.Load()
+	return len(*pbus)
}

// getBackendURL returns the backendURL depending on the load balance policy.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func (up *URLPrefix) getBackendURL() *backendURL {
+	up.discoverBackendIPsIfNeeded()
+
+	pbus := up.bus.Load()
+	bus := *pbus
	if up.loadBalancingPolicy == "first_available" {
-		return up.getFirstAvailableBackendURL()
+		return getFirstAvailableBackendURL(bus)
	}
-	return up.getLeastLoadedBackendURL()
+	return getLeastLoadedBackendURL(bus, &up.n)
}

+func (up *URLPrefix) discoverBackendIPsIfNeeded() {
+	if !up.discoverBackendIPs {
+		// The discovery is disabled.
+		return
+	}
+
+	ct := fasttime.UnixTimestamp()
+	deadline := up.nextDiscoveryDeadline.Load()
+	if ct < deadline {
+		// There is no need in discovering backends.
+		return
+	}
+
+	intervalSec := math.Ceil(discoverBackendIPsInterval.Seconds())
+	if intervalSec <= 0 {
+		intervalSec = 1
+	}
+	nextDeadline := ct + uint64(intervalSec)
+	if !up.nextDiscoveryDeadline.CompareAndSwap(deadline, nextDeadline) {
+		// Concurrent goroutine already started the discovery.
+		return
+	}
+
+	// Discover ips for all the backendURLs
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(intervalSec))
+	hostToIPs := make(map[string][]string)
+	for _, bu := range up.busOriginal {
+		host := bu.Hostname()
+		if hostToIPs[host] != nil {
+			// ips for the given host have been already discovered
+			continue
+		}
+		addrs, err := resolver.LookupIPAddr(ctx, host)
+		var ips []string
+		if err != nil {
+			logger.Warnf("cannot discover backend IPs for %s: %s; use it literally", bu, err)
+			ips = []string{host}
+		} else {
+			ips = make([]string, len(addrs))
+			for i, addr := range addrs {
+				ips[i] = addr.String()
+			}
+			// sort ips, so they could be compared below in areEqualBackendURLs()
+			sort.Strings(ips)
+		}
+		hostToIPs[host] = ips
+	}
+	cancel()
+
+	// generate new backendURLs for the resolved IPs
+	var busNew []*backendURL
+	for _, bu := range up.busOriginal {
+		host := bu.Hostname()
+		port := bu.Port()
+		for _, ip := range hostToIPs[host] {
+			buCopy := *bu
+			buCopy.Host = ip
+			if port != "" {
+				buCopy.Host += ":" + port
+			}
+			busNew = append(busNew, &backendURL{
+				url: &buCopy,
+			})
+		}
+	}
+
+	pbus := up.bus.Load()
+	if areEqualBackendURLs(*pbus, busNew) {
+		return
+	}
+
+	// Store new backend urls
+	up.bus.Store(&busNew)
+}
|
||||
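The CompareAndSwap on `nextDiscoveryDeadline` above is a cheap way to guarantee that, out of many concurrent request handlers noticing an expired deadline, exactly one performs the expensive DNS refresh per interval. A minimal sketch of the same trick:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var nextRefresh atomic.Uint64

func refreshIfNeeded(now, intervalSec uint64, refresh func()) {
	deadline := nextRefresh.Load()
	if now < deadline {
		return // too early
	}
	if !nextRefresh.CompareAndSwap(deadline, now+intervalSec) {
		return // another goroutine won the race and does the refresh instead
	}
	refresh()
}

func main() {
	var refreshes atomic.Int32
	now := uint64(time.Now().Unix())
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			refreshIfNeeded(now, 10, func() { refreshes.Add(1) })
		}()
	}
	wg.Wait()
	fmt.Println("refreshes:", refreshes.Load()) // 1
}
```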
+func areEqualBackendURLs(a, b []*backendURL) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, aURL := range a {
+		bURL := b[i]
+		if aURL.url.String() != bURL.url.String() {
+			return false
+		}
+	}
+	return true
+}
+
+var resolver = &net.Resolver{
+	PreferGo:     true,
+	StrictErrors: true,
+}
// getFirstAvailableBackendURL returns the first available backendURL, which isn't broken.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
-func (up *URLPrefix) getFirstAvailableBackendURL() *backendURL {
-	bus := up.bus
-
+func getFirstAvailableBackendURL(bus []*backendURL) *backendURL {
	bu := bus[0]
	if !bu.isBroken() {
		// Fast path - send the request to the first url.
@@ -253,8 +421,7 @@ func (up *URLPrefix) getFirstAvailableBackendURL() *backendURL {
// getLeastLoadedBackendURL returns the backendURL with the minimum number of concurrent requests.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
-func (up *URLPrefix) getLeastLoadedBackendURL() *backendURL {
-	bus := up.bus
+func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *backendURL {
	if len(bus) == 1 {
		// Fast path - return the only backend url.
		bu := bus[0]
@@ -263,7 +430,7 @@ func (up *URLPrefix) getLeastLoadedBackendURL() *backendURL {
	}

	// Slow path - select other backend urls.
-	n := atomic.AddUint32(&up.n, 1)
+	n := atomicCounter.Add(1)

	for i := uint32(0); i < uint32(len(bus)); i++ {
		idx := (n + i) % uint32(len(bus))
@@ -271,22 +438,22 @@ func (up *URLPrefix) getLeastLoadedBackendURL() *backendURL {
		if bu.isBroken() {
			continue
		}
-		if atomic.LoadInt32(&bu.concurrentRequests) == 0 {
+		if bu.concurrentRequests.Load() == 0 {
			// Fast path - return the backend with zero concurrently executed requests.
-			// Do not use atomic.CompareAndSwapInt32(), since it is much slower on systems with many CPU cores.
-			atomic.AddInt32(&bu.concurrentRequests, 1)
+			// Do not use CompareAndSwap() instead of Load(), since it is much slower on systems with many CPU cores.
+			bu.concurrentRequests.Add(1)
			return bu
		}
	}

	// Slow path - return the backend with the minimum number of concurrently executed requests.
	buMin := bus[n%uint32(len(bus))]
-	minRequests := atomic.LoadInt32(&buMin.concurrentRequests)
+	minRequests := buMin.concurrentRequests.Load()
	for _, bu := range bus {
		if bu.isBroken() {
			continue
		}
-		if n := atomic.LoadInt32(&bu.concurrentRequests); n < minRequests {
+		if n := bu.concurrentRequests.Load(); n < minRequests {
			buMin = bu
			minRequests = n
		}
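A minimal standalone sketch of the least-loaded selection shown above: pick the backend with the fewest concurrent requests, skipping ones marked broken (the round-robin counter and atomics are omitted for brevity):

```go
package main

import "fmt"

type backend struct {
	name     string
	inFlight int
	broken   bool
}

// leastLoaded scans all backends and returns the healthy one
// with the minimum number of in-flight requests.
func leastLoaded(bus []*backend) *backend {
	var min *backend
	for _, b := range bus {
		if b.broken {
			continue
		}
		if min == nil || b.inFlight < min.inFlight {
			min = b
		}
	}
	return min
}

func main() {
	bus := []*backend{
		{name: "b1", inFlight: 3},
		{name: "b2", inFlight: 1},
		{name: "b3", inFlight: 0, broken: true},
	}
	fmt.Println(leastLoaded(bus).name) // b2
}
```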
@@ -301,6 +468,7 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error {
	if err := f(&v); err != nil {
		return err
	}
+	up.vOriginal = v

	var urls []string
	switch x := v.(type) {
@@ -323,38 +491,21 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error {
		return fmt.Errorf("unexpected type for `url_prefix`: %T; want string or []string", v)
	}

-	bus := make([]*backendURL, len(urls))
+	bus := make([]*url.URL, len(urls))
	for i, u := range urls {
		pu, err := url.Parse(u)
		if err != nil {
			return fmt.Errorf("cannot unmarshal %q into url: %w", u, err)
		}
-		bus[i] = &backendURL{
-			url: pu,
-		}
+		bus[i] = pu
	}
-	up.bus = bus
+	up.busOriginal = bus
	return nil
}

// MarshalYAML marshals up to yaml.
func (up *URLPrefix) MarshalYAML() (interface{}, error) {
-	var b []byte
-	if len(up.bus) == 1 {
-		u := up.bus[0].url.String()
-		b = strconv.AppendQuote(b, u)
-		return string(b), nil
-	}
-	b = append(b, '[')
-	for i, bu := range up.bus {
-		u := bu.url.String()
-		b = strconv.AppendQuote(b, u)
-		if i+1 < len(up.bus) {
-			b = append(b, ',')
-		}
-	}
-	b = append(b, ']')
-	return string(b), nil
+	return up.vOriginal, nil
}
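Storing `vOriginal` means marshalling now round-trips exactly what the user wrote instead of re-serializing the parsed URLs. For illustration, both accepted shapes of `url_prefix` (the hostnames below are placeholders):

```yaml
users:
- username: single
  url_prefix: http://vmselect:8481/select/0/prometheus
- username: balanced
  url_prefix:
  - http://vmselect-1:8481/select/0/prometheus
  - http://vmselect-2:8481/select/0/prometheus
```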
func (r *Regex) match(s string) bool {
@@ -375,12 +526,13 @@ func (r *Regex) UnmarshalYAML(f func(interface{}) error) error {
	if err := f(&s); err != nil {
		return err
	}
+	r.sOriginal = s

	sAnchored := "^(?:" + s + ")$"
	re, err := regexp.Compile(sAnchored)
	if err != nil {
		return fmt.Errorf("cannot build regexp from %q: %w", s, err)
	}
-	r.sOriginal = s
	r.re = re
	return nil
}
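A small demonstration of why the pattern is wrapped as `"^(?:" + s + ")$"`: config regexps must match the whole string, not a substring, even when the user's pattern contains alternation:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	raw := "/api/v1/query|/api/v1/query_range"
	// Anchoring with a non-capturing group keeps the alternation intact.
	re := regexp.MustCompile("^(?:" + raw + ")$")
	fmt.Println(re.MatchString("/api/v1/query"))       // true
	fmt.Println(re.MatchString("/api/v1/query/extra")) // false: anchored
}
```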
@@ -503,6 +655,11 @@ func loadAuthConfig() (bool, error) {
	}
	logger.Infof("loaded information about %d users from -auth.config=%q", len(m), *authConfigPath)

+	prevAc := authConfig.Load()
+	if prevAc != nil {
+		metrics.UnregisterSet(prevAc.ms)
+	}
+	metrics.RegisterSet(ac.ms)
	authConfig.Store(ac)
	authConfigData.Store(&data)
	authUsers.Store(&m)
@@ -515,10 +672,13 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
	if err != nil {
		return nil, fmt.Errorf("cannot expand environment vars: %w", err)
	}
-	var ac AuthConfig
-	if err = yaml.UnmarshalStrict(data, &ac); err != nil {
+	ac := &AuthConfig{
+		ms: metrics.NewSet(),
+	}
+	if err = yaml.UnmarshalStrict(data, ac); err != nil {
		return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %w", err)
	}

	ui := ac.UnauthorizedUser
	if ui != nil {
		if ui.Username != "" {
@@ -530,6 +690,9 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
		if ui.BearerToken != "" {
			return nil, fmt.Errorf("field bearer_token can't be specified for unauthorized_user section")
		}
+		if ui.AuthToken != "" {
+			return nil, fmt.Errorf("field auth_token can't be specified for unauthorized_user section")
+		}
		if ui.Name != "" {
			return nil, fmt.Errorf("field name can't be specified for unauthorized_user section")
		}
@@ -541,15 +704,15 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
		if err != nil {
			return nil, fmt.Errorf("cannot parse metric_labels for unauthorized_user: %w", err)
		}
-		ui.requests = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_requests_total` + metricLabels)
-		ui.backendErrors = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_request_backend_errors_total` + metricLabels)
-		ui.requestsDuration = metrics.GetOrCreateSummary(`vmauth_unauthorized_user_request_duration_seconds` + metricLabels)
+		ui.requests = ac.ms.NewCounter(`vmauth_unauthorized_user_requests_total` + metricLabels)
+		ui.backendErrors = ac.ms.NewCounter(`vmauth_unauthorized_user_request_backend_errors_total` + metricLabels)
+		ui.requestsDuration = ac.ms.NewSummary(`vmauth_unauthorized_user_request_duration_seconds` + metricLabels)
		ui.concurrencyLimitCh = make(chan struct{}, ui.getMaxConcurrentRequests())
-		ui.concurrencyLimitReached = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_concurrent_requests_limit_reached_total` + metricLabels)
-		_ = metrics.GetOrCreateGauge(`vmauth_unauthorized_user_concurrent_requests_capacity`+metricLabels, func() float64 {
+		ui.concurrencyLimitReached = ac.ms.NewCounter(`vmauth_unauthorized_user_concurrent_requests_limit_reached_total` + metricLabels)
+		_ = ac.ms.NewGauge(`vmauth_unauthorized_user_concurrent_requests_capacity`+metricLabels, func() float64 {
			return float64(cap(ui.concurrencyLimitCh))
		})
-		_ = metrics.GetOrCreateGauge(`vmauth_unauthorized_user_concurrent_requests_current`+metricLabels, func() float64 {
+		_ = ac.ms.NewGauge(`vmauth_unauthorized_user_concurrent_requests_current`+metricLabels, func() float64 {
			return float64(len(ui.concurrencyLimitCh))
		})

@@ -559,7 +722,7 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
		}
		ui.httpTransport = tr
	}
-	return &ac, nil
+	return ac, nil
}
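The switch to a per-config `metrics.Set` is what makes hot reloads clean: instead of leaking per-user counters into the global registry forever, the whole set belonging to the previous config is unregistered at once. A minimal sketch of the reload pattern, assuming the github.com/VictoriaMetrics/metrics package used in the diff:

```go
package main

import "github.com/VictoriaMetrics/metrics"

type config struct {
	ms *metrics.Set
}

func loadConfig() *config {
	c := &config{ms: metrics.NewSet()}
	// Per-user metrics live in the config's own set.
	c.ms.NewCounter(`vmauth_user_requests_total{username="demo"}`).Inc()
	return c
}

func main() {
	var prev *config
	for i := 0; i < 2; i++ { // simulate two config reloads
		c := loadConfig()
		if prev != nil {
			metrics.UnregisterSet(prev.ms) // drop all metrics of the old config at once
		}
		metrics.RegisterSet(c.ms) // expose metrics of the new config
		prev = c
	}
}
```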
func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
@@ -570,42 +733,34 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
	byAuthToken := make(map[string]*UserInfo, len(uis))
	for i := range uis {
		ui := &uis[i]
-		if ui.BearerToken == "" && ui.Username == "" {
-			return nil, fmt.Errorf("either bearer_token or username must be set")
-		}
-		if ui.BearerToken != "" && ui.Username != "" {
-			return nil, fmt.Errorf("bearer_token=%q and username=%q cannot be set simultaneously", ui.BearerToken, ui.Username)
-		}
-		at1, at2 := getAuthTokens(ui.BearerToken, ui.Username, ui.Password)
-		if byAuthToken[at1] != nil {
-			return nil, fmt.Errorf("duplicate auth token found for bearer_token=%q, username=%q: %q", ui.BearerToken, ui.Username, at1)
-		}
-		if byAuthToken[at2] != nil {
-			return nil, fmt.Errorf("duplicate auth token found for bearer_token=%q, username=%q: %q", ui.BearerToken, ui.Username, at2)
-		}

-		if err := ui.initURLs(); err != nil {
+		ats, err := getAuthTokens(ui.AuthToken, ui.BearerToken, ui.Username, ui.Password)
+		if err != nil {
			return nil, err
		}

-		if ui.BearerToken != "" && ui.Password != "" {
-			return nil, fmt.Errorf("password shouldn't be set for bearer_token %q", ui.BearerToken)
+		for _, at := range ats {
+			if uiOld := byAuthToken[at]; uiOld != nil {
+				return nil, fmt.Errorf("duplicate auth token=%q found for username=%q, name=%q; the previous one is set for username=%q, name=%q",
+					at, ui.Username, ui.Name, uiOld.Username, uiOld.Name)
+			}
+		}
+		if err := ui.initURLs(); err != nil {
+			return nil, err
		}

		metricLabels, err := ui.getMetricLabels()
		if err != nil {
			return nil, fmt.Errorf("cannot parse metric_labels: %w", err)
		}
-		ui.requests = metrics.GetOrCreateCounter(`vmauth_user_requests_total` + metricLabels)
-		ui.backendErrors = metrics.GetOrCreateCounter(`vmauth_user_request_backend_errors_total` + metricLabels)
-		ui.requestsDuration = metrics.GetOrCreateSummary(`vmauth_user_request_duration_seconds` + metricLabels)
+		ui.requests = ac.ms.GetOrCreateCounter(`vmauth_user_requests_total` + metricLabels)
+		ui.backendErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_errors_total` + metricLabels)
+		ui.requestsDuration = ac.ms.GetOrCreateSummary(`vmauth_user_request_duration_seconds` + metricLabels)
		mcr := ui.getMaxConcurrentRequests()
		ui.concurrencyLimitCh = make(chan struct{}, mcr)
-		ui.concurrencyLimitReached = metrics.GetOrCreateCounter(`vmauth_user_concurrent_requests_limit_reached_total` + metricLabels)
-		_ = metrics.GetOrCreateGauge(`vmauth_user_concurrent_requests_capacity`+metricLabels, func() float64 {
+		ui.concurrencyLimitReached = ac.ms.GetOrCreateCounter(`vmauth_user_concurrent_requests_limit_reached_total` + metricLabels)
+		_ = ac.ms.GetOrCreateGauge(`vmauth_user_concurrent_requests_capacity`+metricLabels, func() float64 {
			return float64(cap(ui.concurrencyLimitCh))
		})
-		_ = metrics.GetOrCreateGauge(`vmauth_user_concurrent_requests_current`+metricLabels, func() float64 {
+		_ = ac.ms.GetOrCreateGauge(`vmauth_user_concurrent_requests_current`+metricLabels, func() float64 {
			return float64(len(ui.concurrencyLimitCh))
		})

@@ -615,8 +770,9 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
		}
		ui.httpTransport = tr

-		byAuthToken[at1] = ui
-		byAuthToken[at2] = ui
+		for _, at := range ats {
+			byAuthToken[at] = ui
+		}
	}
	return byAuthToken, nil
}
@@ -648,8 +804,9 @@ func (ui *UserInfo) initURLs() error {
	retryStatusCodes := defaultRetryStatusCodes.Values()
	loadBalancingPolicy := *defaultLoadBalancingPolicy
	dropSrcPathPrefixParts := 0
+	discoverBackendIPs := *discoverBackendIPsGlobal
	if ui.URLPrefix != nil {
-		if err := ui.URLPrefix.sanitize(); err != nil {
+		if err := ui.URLPrefix.sanitizeAndInitialize(); err != nil {
			return err
		}
		if ui.RetryStatusCodes != nil {
@@ -661,30 +818,35 @@ func (ui *UserInfo) initURLs() error {
		if ui.DropSrcPathPrefixParts != nil {
			dropSrcPathPrefixParts = *ui.DropSrcPathPrefixParts
		}
+		if ui.DiscoverBackendIPs != nil {
+			discoverBackendIPs = *ui.DiscoverBackendIPs
+		}
		ui.URLPrefix.retryStatusCodes = retryStatusCodes
		ui.URLPrefix.dropSrcPathPrefixParts = dropSrcPathPrefixParts
+		ui.URLPrefix.discoverBackendIPs = discoverBackendIPs
		if err := ui.URLPrefix.setLoadBalancingPolicy(loadBalancingPolicy); err != nil {
			return err
		}
	}
	if ui.DefaultURL != nil {
-		if err := ui.DefaultURL.sanitize(); err != nil {
+		if err := ui.DefaultURL.sanitizeAndInitialize(); err != nil {
			return err
		}
	}
	for _, e := range ui.URLMaps {
-		if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 {
-			return fmt.Errorf("missing `src_paths` and `src_hosts` in `url_map`")
+		if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 && len(e.SrcQueryArgs) == 0 && len(e.SrcHeaders) == 0 {
+			return fmt.Errorf("missing `src_paths`, `src_hosts`, `src_query_args` and `src_headers` in `url_map`")
		}
		if e.URLPrefix == nil {
			return fmt.Errorf("missing `url_prefix` in `url_map`")
		}
-		if err := e.URLPrefix.sanitize(); err != nil {
+		if err := e.URLPrefix.sanitizeAndInitialize(); err != nil {
			return err
		}
		rscs := retryStatusCodes
		lbp := loadBalancingPolicy
		dsp := dropSrcPathPrefixParts
+		dbd := discoverBackendIPs
		if e.RetryStatusCodes != nil {
			rscs = e.RetryStatusCodes
		}
@@ -694,14 +856,18 @@ func (ui *UserInfo) initURLs() error {
		if e.DropSrcPathPrefixParts != nil {
			dsp = *e.DropSrcPathPrefixParts
		}
+		if e.DiscoverBackendIPs != nil {
+			dbd = *e.DiscoverBackendIPs
+		}
		e.URLPrefix.retryStatusCodes = rscs
		if err := e.URLPrefix.setLoadBalancingPolicy(lbp); err != nil {
			return err
		}
		e.URLPrefix.dropSrcPathPrefixParts = dsp
+		e.URLPrefix.discoverBackendIPs = dbd
	}
	if len(ui.URLMaps) == 0 && ui.URLPrefix == nil {
-		return fmt.Errorf("missing `url_prefix`")
+		return fmt.Errorf("missing `url_prefix` or `url_map`")
	}
	return nil
}
@@ -717,37 +883,97 @@ func (ui *UserInfo) name() string {
		h := xxhash.Sum64([]byte(ui.BearerToken))
		return fmt.Sprintf("bearer_token:hash:%016X", h)
	}
+	if ui.AuthToken != "" {
+		h := xxhash.Sum64([]byte(ui.AuthToken))
+		return fmt.Sprintf("auth_token:hash:%016X", h)
+	}
	return ""
}

-func getAuthTokens(bearerToken, username, password string) (string, string) {
-	if bearerToken != "" {
-		// Accept the bearerToken as Basic Auth username with empty password
-		at1 := getAuthToken(bearerToken, "", "")
-		at2 := getAuthToken("", bearerToken, "")
-		return at1, at2
+func getAuthTokens(authToken, bearerToken, username, password string) ([]string, error) {
+	if authToken != "" {
+		if bearerToken != "" {
+			return nil, fmt.Errorf("bearer_token cannot be specified if auth_token is set")
+		}
+		if username != "" || password != "" {
+			return nil, fmt.Errorf("username and password cannot be specified if auth_token is set")
+		}
+		at := getHTTPAuthToken(authToken)
+		return []string{at}, nil
	}
-	at := getAuthToken("", username, password)
-	return at, at
+	if bearerToken != "" {
+		if username != "" || password != "" {
+			return nil, fmt.Errorf("username and password cannot be specified if bearer_token is set")
+		}
+		// Accept the bearerToken as Basic Auth username with empty password
+		at1 := getHTTPAuthBearerToken(bearerToken)
+		at2 := getHTTPAuthBasicToken(bearerToken, "")
+		return []string{at1, at2}, nil
+	}
+	if username != "" {
+		at := getHTTPAuthBasicToken(username, password)
+		return []string{at}, nil
+	}
+	return nil, fmt.Errorf("missing authorization options; bearer_token or username must be set")
}

-func getAuthToken(bearerToken, username, password string) string {
-	if bearerToken != "" {
-		return "Bearer " + bearerToken
-	}
+func getHTTPAuthToken(authToken string) string {
+	return "http_auth:" + authToken
+}

+func getHTTPAuthBearerToken(bearerToken string) string {
+	return "http_auth:Bearer " + bearerToken
+}

+func getHTTPAuthBasicToken(username, password string) string {
	token := username + ":" + password
	token64 := base64.StdEncoding.EncodeToString([]byte(token))
-	return "Basic " + token64
+	return "http_auth:Basic " + token64
}
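A quick check of the token keys built above: the lookup key for a Basic Auth user is the literal prefix `http_auth:Basic ` followed by base64 of `username:password`:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func getHTTPAuthBasicToken(username, password string) string {
	token := username + ":" + password
	token64 := base64.StdEncoding.EncodeToString([]byte(token))
	return "http_auth:Basic " + token64
}

func main() {
	fmt.Println(getHTTPAuthBasicToken("foo", "bar"))
	// http_auth:Basic Zm9vOmJhcg==
}
```

Note that a bearer-token user deliberately gets two keys (`Bearer foo` and `Basic` with `foo` as username and empty password), so clients that send the token either way still authenticate.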
-func (up *URLPrefix) sanitize() error {
-	for _, bu := range up.bus {
-		puNew, err := sanitizeURLPrefix(bu.url)
+var defaultHeaderNames = []string{"Authorization"}
+
+func getAuthTokensFromRequest(r *http.Request) []string {
+	var ats []string
+
+	// Obtain possible auth tokens from one of the allowed auth headers
+	headerNames := *httpAuthHeader
+	if len(headerNames) == 0 {
+		headerNames = defaultHeaderNames
+	}
+	for _, headerName := range headerNames {
+		if ah := r.Header.Get(headerName); ah != "" {
+			if strings.HasPrefix(ah, "Token ") {
+				// Handle InfluxDB's proprietary token authentication scheme as a bearer token authentication
+				// See https://docs.influxdata.com/influxdb/v2.0/api/
+				ah = strings.Replace(ah, "Token", "Bearer", 1)
+			}
+			at := "http_auth:" + ah
+			ats = append(ats, at)
+		}
+	}
+
+	return ats
+}
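A minimal sketch of that extraction, including the InfluxDB `Token ...` scheme being normalized to `Bearer ...` before the lookup:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func tokensFromRequest(r *http.Request, headerNames []string) []string {
	var ats []string
	for _, name := range headerNames {
		if ah := r.Header.Get(name); ah != "" {
			if strings.HasPrefix(ah, "Token ") {
				// InfluxDB clients send "Token <secret>"; treat it as a bearer token.
				ah = strings.Replace(ah, "Token", "Bearer", 1)
			}
			ats = append(ats, "http_auth:"+ah)
		}
	}
	return ats
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://example/api", nil)
	r.Header.Set("Authorization", "Token abc123") // InfluxDB-style client
	fmt.Println(tokensFromRequest(r, []string{"Authorization"}))
	// [http_auth:Bearer abc123]
}
```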
func (up *URLPrefix) sanitizeAndInitialize() error {
	for i, bu := range up.busOriginal {
		puNew, err := sanitizeURLPrefix(bu)
		if err != nil {
			return err
		}
-		bu.url = puNew
+		up.busOriginal[i] = puNew
	}

	// Initialize up.bus
	bus := make([]*backendURL, len(up.busOriginal))
	for i, bu := range up.busOriginal {
		bus[i] = &backendURL{
			url: bu,
		}
	}
	up.bus.Store(&bus)

	return nil
}

@@ -17,9 +17,9 @@ func TestParseAuthConfigFailure(t *testing.T) {
	if err != nil {
		return
	}
-	_, err = parseAuthConfigUsers(ac)
+	users, err := parseAuthConfigUsers(ac)
	if err == nil {
-		t.Fatalf("expecting non-nil error")
+		t.Fatalf("expecting non-nil error; got %v", users)
	}
}

@@ -88,6 +88,22 @@ users:
  url_prefix: []
`)

+	// auth_token and username in a single config
+	f(`
+users:
+- auth_token: foo
+  username: bbb
+  url_prefix: http://foo.bar
+`)
+
+	// auth_token and bearer_token in a single config
+	f(`
+users:
+- auth_token: foo
+  bearer_token: bbb
+  url_prefix: http://foo.bar
+`)
+
	// Username and bearer_token in a single config
	f(`
users:
@@ -192,7 +208,7 @@ users:
  - url_prefix: http://foobar
`)

-	// Invalid regexp in src_path.
+	// Invalid regexp in src_paths
	f(`
users:
- username: a
@@ -210,6 +226,24 @@ users:
    url_prefix: http://foobar
`)

+	// Invalid src_query_args
+	f(`
+users:
+- username: a
+  url_map:
+  - src_query_args: abc
+    url_prefix: http://foobar
+`)
+
+	// Invalid src_headers
+	f(`
+users:
+- username: a
+  url_map:
+  - src_headers: abc
+    url_prefix: http://foobar
+`)
+
	// Invalid headers in url_map (missing ':')
	f(`
users:
@@ -257,8 +291,9 @@ func TestParseAuthConfigSuccess(t *testing.T) {
	}
}

-	// Single user
+	insecureSkipVerifyTrue := true
+
+	// Single user
	f(`
users:
- username: foo
@@ -267,7 +302,7 @@ users:
  max_concurrent_requests: 5
  tls_insecure_skip_verify: true
`, map[string]*UserInfo{
-	getAuthToken("", "foo", "bar"): {
+	getHTTPAuthBasicToken("foo", "bar"): {
		Username:  "foo",
		Password:  "bar",
		URLPrefix: mustParseURL("http://aaa:343/bbb"),
@@ -276,6 +311,22 @@ users:
	},
})

+	// Single user with auth_token
+	f(`
+users:
+- auth_token: foo
+  url_prefix: http://aaa:343/bbb
+  max_concurrent_requests: 5
+  tls_insecure_skip_verify: true
+`, map[string]*UserInfo{
+	getHTTPAuthToken("foo"): {
+		AuthToken:             "foo",
+		URLPrefix:             mustParseURL("http://aaa:343/bbb"),
+		MaxConcurrentRequests: 5,
+		TLSInsecureSkipVerify: &insecureSkipVerifyTrue,
+	},
+})
+
	// Multiple url_prefix entries
	insecureSkipVerifyFalse := false
	f(`
@@ -290,7 +341,7 @@ users:
  load_balancing_policy: first_available
  drop_src_path_prefix_parts: 1
`, map[string]*UserInfo{
-	getAuthToken("", "foo", "bar"): {
+	getHTTPAuthBasicToken("foo", "bar"): {
		Username: "foo",
		Password: "bar",
		URLPrefix: mustParseURLs([]string{
@@ -310,19 +361,60 @@ users:
- username: foo
  url_prefix: http://foo
- username: bar
-  url_prefix: https://bar/x///
+  url_prefix: https://bar/x/
`, map[string]*UserInfo{
-	getAuthToken("", "foo", ""): {
+	getHTTPAuthBasicToken("foo", ""): {
		Username:  "foo",
		URLPrefix: mustParseURL("http://foo"),
	},
-	getAuthToken("", "bar", ""): {
+	getHTTPAuthBasicToken("bar", ""): {
		Username:  "bar",
-		URLPrefix: mustParseURL("https://bar/x"),
+		URLPrefix: mustParseURL("https://bar/x/"),
	},
})
+	// non-empty URLMap
+	sharedUserInfo := &UserInfo{
+		BearerToken: "foo",
+		URLMaps: []URLMap{
+			{
+				SrcPaths:  getRegexs([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
+				URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
+			},
+			{
+				SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
+				SrcPaths: getRegexs([]string{"/api/v1/write"}),
+				SrcQueryArgs: []QueryArg{
+					{
+						Name:  "foo",
+						Value: "bar",
+					},
+				},
+				SrcHeaders: []Header{
+					{
+						Name:  "TenantID",
+						Value: "345",
+					},
+				},
+				URLPrefix: mustParseURLs([]string{
+					"http://vminsert1/insert/0/prometheus",
+					"http://vminsert2/insert/0/prometheus",
+				}),
+				HeadersConf: HeadersConf{
+					RequestHeaders: []Header{
+						{
+							Name:  "foo",
+							Value: "bar",
+						},
+						{
+							Name:  "xxx",
+							Value: "y",
+						},
+					},
+				},
+			},
+		},
+	}
	f(`
users:
- bearer_token: foo
@@ -331,71 +423,18 @@ users:
    url_prefix: http://vmselect/select/0/prometheus
  - src_paths: ["/api/v1/write"]
    src_hosts: ["foo\\.bar", "baz:1234"]
+    src_query_args: ['foo=bar']
+    src_headers: ['TenantID: 345']
    url_prefix: ["http://vminsert1/insert/0/prometheus","http://vminsert2/insert/0/prometheus"]
    headers:
    - "foo: bar"
    - "xxx: y"
`, map[string]*UserInfo{
-	getAuthToken("foo", "", ""): {
-		BearerToken: "foo",
-		URLMaps: []URLMap{
-			{
-				SrcPaths:  getRegexs([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
-				URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
-			},
-			{
-				SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
-				SrcPaths: getRegexs([]string{"/api/v1/write"}),
-				URLPrefix: mustParseURLs([]string{
-					"http://vminsert1/insert/0/prometheus",
-					"http://vminsert2/insert/0/prometheus",
-				}),
-				HeadersConf: HeadersConf{
-					RequestHeaders: []Header{
-						{
-							Name:  "foo",
-							Value: "bar",
-						},
-						{
-							Name:  "xxx",
-							Value: "y",
-						},
-					},
-				},
-			},
-		},
-	},
-	getAuthToken("", "foo", ""): {
-		BearerToken: "foo",
-		URLMaps: []URLMap{
-			{
-				SrcPaths:  getRegexs([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
-				URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
-			},
-			{
-				SrcHosts: getRegexs([]string{"foo\\.bar", "baz:1234"}),
-				SrcPaths: getRegexs([]string{"/api/v1/write"}),
-				URLPrefix: mustParseURLs([]string{
-					"http://vminsert1/insert/0/prometheus",
-					"http://vminsert2/insert/0/prometheus",
-				}),
-				HeadersConf: HeadersConf{
-					RequestHeaders: []Header{
-						{
-							Name:  "foo",
-							Value: "bar",
-						},
-						{
-							Name:  "xxx",
-							Value: "y",
-						},
-					},
-				},
-			},
-		},
-	},
+	getHTTPAuthBearerToken("foo"):    sharedUserInfo,
+	getHTTPAuthBasicToken("foo", ""): sharedUserInfo,
})
-	// Multiple users with the same name

+	// Multiple users with the same name - this should work, since these users have different passwords
	f(`
users:
- username: foo-same
@@ -403,19 +442,20 @@ users:
  url_prefix: http://foo
- username: foo-same
  password: bar
-  url_prefix: https://bar/x///
+  url_prefix: https://bar/x
`, map[string]*UserInfo{
-	getAuthToken("", "foo-same", "baz"): {
+	getHTTPAuthBasicToken("foo-same", "baz"): {
		Username:  "foo-same",
		Password:  "baz",
		URLPrefix: mustParseURL("http://foo"),
	},
-	getAuthToken("", "foo-same", "bar"): {
+	getHTTPAuthBasicToken("foo-same", "bar"): {
		Username:  "foo-same",
		Password:  "bar",
		URLPrefix: mustParseURL("https://bar/x"),
	},
})

	// with default url
	f(`
users:
@@ -432,7 +472,7 @@ users:
    - http://default1/select/0/prometheus
    - http://default2/select/0/prometheus
`, map[string]*UserInfo{
-	getAuthToken("foo", "", ""): {
+	getHTTPAuthBearerToken("foo"): {
		BearerToken: "foo",
		URLMaps: []URLMap{
			{
@@ -464,7 +504,7 @@ users:
			"http://default2/select/0/prometheus",
		}),
	},
-	getAuthToken("", "foo", ""): {
+	getHTTPAuthBasicToken("foo", ""): {
		BearerToken: "foo",
		URLMaps: []URLMap{
			{
@@ -497,6 +537,7 @@ users:
		}),
	},
})

	// With metric_labels
	f(`
users:
@@ -508,12 +549,12 @@ users:
    team: dev
- username: foo-same
  password: bar
-  url_prefix: https://bar/x///
+  url_prefix: https://bar/x
  metric_labels:
    backend_env: test
    team: accounting
`, map[string]*UserInfo{
-	getAuthToken("", "foo-same", "baz"): {
+	getHTTPAuthBasicToken("foo-same", "baz"): {
		Username:  "foo-same",
		Password:  "baz",
		URLPrefix: mustParseURL("http://foo"),
@@ -522,7 +563,7 @@ users:
			"team": "dev",
		},
	},
-	getAuthToken("", "foo-same", "bar"): {
+	getHTTPAuthBasicToken("foo-same", "bar"): {
		Username:  "foo-same",
		Password:  "bar",
		URLPrefix: mustParseURL("https://bar/x"),
@@ -558,7 +599,7 @@ unauthorized_user:
		t.Fatalf("unexpected error: %s", err)
	}

-	ui := m[getAuthToken("", "foo", "bar")]
+	ui := m[getHTTPAuthBasicToken("foo", "bar")]
	if !isSetBool(ui.TLSInsecureSkipVerify, true) || !ui.httpTransport.TLSClientConfig.InsecureSkipVerify {
		t.Fatalf("unexpected TLSInsecureSkipVerify value for user foo")
	}
@@ -693,6 +734,7 @@ func mustParseURL(u string) *URLPrefix {
func mustParseURLs(us []string) *URLPrefix {
	bus := make([]*backendURL, len(us))
+	urls := make([]*url.URL, len(us))
	for i, u := range us {
		pu, err := url.Parse(u)
		if err != nil {
@@ -701,10 +743,17 @@ func mustParseURLs(us []string) *URLPrefix {
		bus[i] = &backendURL{
			url: pu,
		}
+		urls[i] = pu
	}
-	return &URLPrefix{
-		bus: bus,
+	up := &URLPrefix{}
+	if len(us) == 1 {
+		up.vOriginal = us[0]
+	} else {
+		up.vOriginal = us
	}
+	up.bus.Store(&bus)
+	up.busOriginal = urls
+	return up
}

func intp(n int) *int {
@@ -13,6 +13,7 @@ import (
	"net/textproto"
	"net/url"
	"os"
+	"slices"
	"strings"
	"sync"
	"time"
@@ -33,8 +34,8 @@ import (
)

var (
-	httpListenAddr   = flag.String("httpListenAddr", ":8427", "TCP address to listen for http connections. See also -tls and -httpListenAddr.useProxyProtocol")
-	useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
+	httpListenAddrs  = flagutil.NewArrayString("httpListenAddr", "TCP address to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")
+	useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
	maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host. "+
@@ -65,10 +66,14 @@ func main() {
	buildinfo.Init()
	logger.Init()

-	logger.Infof("starting vmauth at %q...", *httpListenAddr)
+	listenAddrs := *httpListenAddrs
+	if len(listenAddrs) == 0 {
+		listenAddrs = []string{":8427"}
+	}
+	logger.Infof("starting vmauth at %q...", listenAddrs)
	startTime := time.Now()
	initAuthConfig()
-	go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
+	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
	logger.Infof("started vmauth in %.3f seconds", time.Since(startTime).Seconds())

	pushmetrics.Init()
@@ -77,8 +82,8 @@ func main() {
	pushmetrics.Stop()

	startTime = time.Now()
-	logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
-	if err := httpserver.Stop(*httpListenAddr); err != nil {
+	logger.Infof("gracefully shutting down webservice at %q", listenAddrs)
+	if err := httpserver.Stop(listenAddrs); err != nil {
		logger.Fatalf("cannot stop the webservice: %s", err)
	}
	logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
@@ -97,8 +102,9 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		w.WriteHeader(http.StatusOK)
		return true
	}
-	authToken := r.Header.Get("Authorization")
-	if authToken == "" {
+
+	ats := getAuthTokensFromRequest(r)
+	if len(ats) == 0 {
		// Process requests for unauthorized users
		ui := authConfig.Load().UnauthorizedUser
		if ui != nil {
@@ -110,18 +116,12 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		http.Error(w, "missing `Authorization` request header", http.StatusUnauthorized)
		return true
	}
-	if strings.HasPrefix(authToken, "Token ") {
-		// Handle InfluxDB's proprietary token authentication scheme as a bearer token authentication
-		// See https://docs.influxdata.com/influxdb/v2.0/api/
-		authToken = strings.Replace(authToken, "Token", "Bearer", 1)
-	}

-	ac := *authUsers.Load()
-	ui := ac[authToken]
+	ui := getUserInfoByAuthTokens(ats)
	if ui == nil {
		invalidAuthTokenRequests.Inc()
		if *logInvalidAuthTokens {
-			err := fmt.Errorf("cannot find the provided auth token %q in config", authToken)
+			err := fmt.Errorf("cannot authorize request with auth tokens %q", ats)
			err = &httpserver.ErrorWithStatusCode{
				Err:        err,
				StatusCode: http.StatusUnauthorized,
@@ -137,6 +137,17 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	return true
}

+func getUserInfoByAuthTokens(ats []string) *UserInfo {
+	ac := *authUsers.Load()
+	for _, at := range ats {
+		ui := ac[at]
+		if ui != nil {
+			return ui
+		}
+	}
+	return nil
+}
func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
	startTime := time.Now()
	defer ui.requestsDuration.UpdateDuration(startTime)
@@ -150,20 +161,12 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
		if err := ui.beginConcurrencyLimit(); err != nil {
			handleConcurrencyLimitError(w, r, err)
			<-concurrencyLimitCh
-
-			// Requests failed because of concurrency limit must be counted as errors,
-			// since this usually means the backend cannot keep up with the current load.
-			ui.backendErrors.Inc()
			return
		}
	default:
		concurrentRequestsLimitReached.Inc()
		err := fmt.Errorf("cannot serve more than -maxConcurrentRequests=%d concurrent requests", cap(concurrencyLimitCh))
		handleConcurrencyLimitError(w, r, err)
-
-		// Requests failed because of concurrency limit must be counted as errors,
-		// since this usually means the backend cannot keep up with the current load.
-		ui.backendErrors.Inc()
		return
	}
	processRequest(w, r, ui)
@@ -173,7 +176,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {

func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
	u := normalizeURL(r.URL)
-	up, hc := ui.getURLPrefixAndHeaders(u)
+	up, hc := ui.getURLPrefixAndHeaders(u, r.Header)
	isDefault := false
	if up == nil {
		if ui.DefaultURL == nil {
@@ -228,7 +231,14 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
	// This code has been copied from net/http/httputil/reverseproxy.go
	req := sanitizeRequestHeaders(r)
	req.URL = targetURL
-	req.Host = targetURL.Host
+
+	if req.URL.Scheme == "https" {
+		// Override req.Host only for https requests, since https server verifies hostnames during TLS handshake,
+		// so it expects the targetURL.Host in the request.
+		// There is no need in overriding the req.Host for http requests, since it is expected that backend server
+		// may properly process queries with the original req.Host.
+		req.Host = targetURL.Host
+	}
	updateHeadersByConfig(req.Header, hc.RequestHeaders)
	res, err := ui.httpTransport.RoundTrip(req)
	rtb, rtbOK := req.Body.(*readTrackingBody)
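A hedged sketch of that Host-header decision using the stdlib reverse proxy (not the actual vmauth wiring): keep the client's original Host for plain-HTTP backends, but present the backend's own host when the upstream is HTTPS, since the TLS handshake and virtual hosting on the backend expect it.

```go
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
)

func newProxy(target *url.URL) *httputil.ReverseProxy {
	return &httputil.ReverseProxy{
		Director: func(req *http.Request) {
			req.URL.Scheme = target.Scheme
			req.URL.Host = target.Host
			if target.Scheme == "https" {
				req.Host = target.Host // backend verifies this during TLS handshake
			}
			// for http backends req.Host keeps the client-supplied value
		},
	}
}

func main() {
	target, _ := url.Parse("https://backend.example:8443")
	_ = newProxy(target) // plug into an http.Server handler as needed
}
```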
@@ -261,7 +271,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
		logger.Warnf("remoteAddr: %s; requestURI: %s; retrying the request to %s because of response error: %s", remoteAddr, req.URL, targetURL, err)
		return false
	}
-	if hasInt(retryStatusCodes, res.StatusCode) {
+	if slices.Contains(retryStatusCodes, res.StatusCode) {
		_ = res.Body.Close()
		if !rtbOK || !rtb.canRetry() {
			// If we get an error from the retry_status_codes list, but cannot execute retry,
@@ -303,15 +313,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
	return true
}

-func hasInt(a []int, n int) bool {
-	for _, x := range a {
-		if x == n {
-			return true
-		}
-	}
-	return false
-}
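The hand-rolled `hasInt` is dropped in favor of the generic `slices.Contains` from the standard library (Go 1.21+); a quick equivalence check:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	retryStatusCodes := []int{500, 502, 503}
	fmt.Println(slices.Contains(retryStatusCodes, 502)) // true
	fmt.Println(slices.Contains(retryStatusCodes, 200)) // false
}
```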
var copyBufPool bytesutil.ByteBufferPool

func copyHeader(dst, src http.Header) {

@@ -1,8 +1,10 @@
package main

import (
+	"net/http"
	"net/url"
	"path"
+	"slices"
	"strings"
)

@@ -49,11 +51,22 @@ func dropPrefixParts(path string, parts int) string {
	return path
}

-func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, HeadersConf) {
+func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL, h http.Header) (*URLPrefix, HeadersConf) {
	for _, e := range ui.URLMaps {
-		if matchAnyRegex(e.SrcHosts, u.Host) && matchAnyRegex(e.SrcPaths, u.Path) {
-			return e.URLPrefix, e.HeadersConf
+		if !matchAnyRegex(e.SrcHosts, u.Host) {
+			continue
+		}
+		if !matchAnyRegex(e.SrcPaths, u.Path) {
+			continue
+		}
+		if !matchAnyQueryArg(e.SrcQueryArgs, u.Query()) {
+			continue
+		}
+		if !matchAnyHeader(e.SrcHeaders, h) {
+			continue
		}
+
+		return e.URLPrefix, e.HeadersConf
	}
	if ui.URLPrefix != nil {
		return ui.URLPrefix, ui.HeadersConf

@@ -73,6 +86,30 @@ func matchAnyRegex(rs []*Regex, s string) bool {
	return false
}

+func matchAnyQueryArg(qas []QueryArg, args url.Values) bool {
+	if len(qas) == 0 {
+		return true
+	}
+	for _, qa := range qas {
+		if slices.Contains(args[qa.Name], qa.Value) {
+			return true
+		}
+	}
+	return false
+}
+
+func matchAnyHeader(headers []Header, h http.Header) bool {
+	if len(headers) == 0 {
+		return true
+	}
+	for _, header := range headers {
+		if slices.Contains(h.Values(header.Name), header.Value) {
+			return true
+		}
+	}
+	return false
+}
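A minimal sketch of the new query-arg and header matching semantics: an empty matcher list matches everything, otherwise any one configured name/value pair must be present in the request.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"slices"
)

type pair struct{ Name, Value string }

func matchAnyQueryArg(pairs []pair, args url.Values) bool {
	if len(pairs) == 0 {
		return true // nothing configured - match all requests
	}
	for _, p := range pairs {
		if slices.Contains(args[p.Name], p.Value) {
			return true
		}
	}
	return false
}

func main() {
	u, _ := url.Parse("http://host/api/v1/query?db=foo")
	fmt.Println(matchAnyQueryArg([]pair{{"db", "foo"}}, u.Query())) // true

	h := http.Header{}
	h.Set("TenantID", "345")
	fmt.Println(slices.Contains(h.Values("TenantID"), "345")) // true
}
```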
func normalizeURL(uOrig *url.URL) *url.URL {
	u := *uOrig
	// Prevent from attacks with using `..` in r.URL.Path

@@ -4,6 +4,7 @@ import (
	"fmt"
	"net/url"
	"reflect"
+	"strings"
	"testing"
)

@@ -89,19 +90,21 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		t.Fatalf("cannot parse %q: %s", requestURI, err)
	}
	u = normalizeURL(u)
-	up, hc := ui.getURLPrefixAndHeaders(u)
+	up, hc := ui.getURLPrefixAndHeaders(u, nil)
	if up == nil {
		t.Fatalf("cannot determine backend: %s", err)
	}
-	bu := up.getLeastLoadedBackendURL()
+	bu := up.getBackendURL()
	target := mergeURLs(bu.url, u, up.dropSrcPathPrefixParts)
	bu.put()
	if target.String() != expectedTarget {
		t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
	}
-	headersStr := fmt.Sprintf("%q", hc.RequestHeaders)
-	if headersStr != expectedRequestHeaders {
-		t.Fatalf("unexpected request headers; got %s; want %s", headersStr, expectedRequestHeaders)
+	if s := headersToString(hc.RequestHeaders); s != expectedRequestHeaders {
+		t.Fatalf("unexpected request headers; got %q; want %q", s, expectedRequestHeaders)
	}
+	if s := headersToString(hc.ResponseHeaders); s != expectedResponseHeaders {
+		t.Fatalf("unexpected response headers; got %q; want %q", s, expectedResponseHeaders)
+	}
	if !reflect.DeepEqual(up.retryStatusCodes, expectedRetryStatusCodes) {
		t.Fatalf("unexpected retryStatusCodes; got %d; want %d", up.retryStatusCodes, expectedRetryStatusCodes)
@@ -116,41 +119,55 @@ func TestCreateTargetURLSuccess(t *testing.T) {
	// Simple routing with `url_prefix`
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar"),
-	}, "", "http://foo.bar/.", "[]", "[]", nil, "least_loaded", 0)
+	}, "", "http://foo.bar/.", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar"),
		HeadersConf: HeadersConf{
-			RequestHeaders: []Header{{
-				Name:  "bb",
-				Value: "aaa",
-			}},
+			RequestHeaders: []Header{
+				{
+					Name:  "bb",
+					Value: "aaa",
+				},
+			},
+			ResponseHeaders: []Header{
+				{
+					Name:  "x",
+					Value: "y",
+				},
+			},
		},
		RetryStatusCodes:       []int{503, 501},
		LoadBalancingPolicy:    "first_available",
		DropSrcPathPrefixParts: intp(2),
-	}, "/a/b/c", "http://foo.bar/c", `[{"bb" "aaa"}]`, `[]`, []int{503, 501}, "first_available", 2)
+	}, "/a/b/c", "http://foo.bar/c", `bb: aaa`, `x: y`, []int{503, 501}, "first_available", 2)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar/federate"),
-	}, "/", "http://foo.bar/federate", "[]", "[]", nil, "least_loaded", 0)
+	}, "/", "http://foo.bar/federate", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar"),
-	}, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]", "[]", nil, "least_loaded", 0)
+	}, "a/b?c=d", "http://foo.bar/a/b?c=d", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("https://sss:3894/x/y"),
-	}, "/z", "https://sss:3894/x/y/z", "[]", "[]", nil, "least_loaded", 0)
+	}, "/z", "https://sss:3894/x/y/z", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("https://sss:3894/x/y"),
-	}, "/../../aaa", "https://sss:3894/x/y/aaa", "[]", "[]", nil, "least_loaded", 0)
+	}, "/../../aaa", "https://sss:3894/x/y/aaa", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("https://sss:3894/x/y"),
-	}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]", "[]", nil, "least_loaded", 0)
+	}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "", "", nil, "least_loaded", 0)

	// Complex routing with `url_map`
	ui := &UserInfo{
		URLMaps: []URLMap{
			{
-				SrcHosts: getRegexs([]string{"host42"}),
-				SrcPaths: getRegexs([]string{"/vmsingle/api/v1/query"}),
+				SrcHosts: getRegexs([]string{"host42"}),
+				SrcPaths: getRegexs([]string{"/vmsingle/api/v1/query"}),
+				SrcQueryArgs: []QueryArg{
+					{
+						Name:  "db",
+						Value: "foo",
+					},
+				},
				URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
				HeadersConf: HeadersConf{
					RequestHeaders: []Header{
@@ -195,12 +212,12 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		RetryStatusCodes:       []int{502},
		DropSrcPathPrefixParts: intp(2),
	}
-	f(ui, "http://host42/vmsingle/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up",
-		`[{"xx" "aa"} {"yy" "asdf"}]`, `[{"qwe" "rty"}]`, []int{503, 500, 501}, "first_available", 1)
+	f(ui, "http://host42/vmsingle/api/v1/query?query=up&db=foo", "http://vmselect/0/prometheus/api/v1/query?db=foo&query=up",
+		"xx: aa\nyy: asdf", "qwe: rty", []int{503, 500, 501}, "first_available", 1)
	f(ui, "http://host123/vmsingle/api/v1/query?query=up", "http://default-server/v1/query?query=up",
-		`[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502}, "least_loaded", 2)
-	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", []int{}, "least_loaded", 0)
-	f(ui, "https://foo-host/foo/bar/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`, `[{"x" "y"}]`, []int{502}, "least_loaded", 2)
+		"bb: aaa", "x: y", []int{502}, "least_loaded", 2)
+	f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", []int{}, "least_loaded", 0)
+	f(ui, "https://foo-host/foo/bar/api/v1/query_range", "http://default-server/api/v1/query_range", "bb: aaa", "x: y", []int{502}, "least_loaded", 2)

	// Complex routing regexp paths in `url_map`
	ui = &UserInfo{
@@ -220,19 +237,19 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		},
		URLPrefix: mustParseURL("http://default-server"),
	}
-	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]", "[]", nil, "least_loaded", 0)
-	f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]", "[]", nil, "least_loaded", 0)
-	f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]", "[]", nil, "least_loaded", 0)
-	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]", "[]", nil, "least_loaded", 0)
-	f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]", "[]", nil, "least_loaded", 0)
-	f(ui, "https://vmui.foobar.com/a/b?c=d", "http://vmui.host:1234/vmui/a/b?c=d", "[]", "[]", nil, "least_loaded", 0)
+	f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "", "", nil, "least_loaded", 0)
+	f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "", "", nil, "least_loaded", 0)
+	f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "", "", nil, "least_loaded", 0)
+	f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", nil, "least_loaded", 0)
+	f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "", "", nil, "least_loaded", 0)
+	f(ui, "https://vmui.foobar.com/a/b?c=d", "http://vmui.host:1234/vmui/a/b?c=d", "", "", nil, "least_loaded", 0)

	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar?extra_label=team=dev"),
-	}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]", "[]", nil, "least_loaded", 0)
+	}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "", "", nil, "least_loaded", 0)
	f(&UserInfo{
		URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
-	}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]", "[]", nil, "least_loaded", 0)
+	}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "", "", nil, "least_loaded", 0)
}

func TestCreateTargetURLFailure(t *testing.T) {
@@ -243,7 +260,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
		t.Fatalf("cannot parse %q: %s", requestURI, err)
	}
	u = normalizeURL(u)
-	up, hc := ui.getURLPrefixAndHeaders(u)
+	up, hc := ui.getURLPrefixAndHeaders(u, nil)
	if up != nil {
		t.Fatalf("unexpected non-empty up=%#v", up)
	}
@@ -264,3 +281,11 @@ func TestCreateTargetURLFailure(t *testing.T) {
		},
	}, "/api/v1/write")
}

+func headersToString(hs []Header) string {
+	a := make([]string, len(hs))
+	for i, h := range hs {
+		a[i] = fmt.Sprintf("%s: %s", h.Name, h.Value)
+	}
+	return strings.Join(a, "\n")
+}
@@ -20,6 +20,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot/snapshotutil"
)

var (
@@ -93,7 +94,8 @@ func main() {
		}
	}

-	go httpserver.Serve(*httpListenAddr, false, nil)
+	listenAddrs := []string{*httpListenAddr}
+	go httpserver.Serve(listenAddrs, nil, nil)

	pushmetrics.Init()
	err := makeBackup()
@@ -104,8 +106,8 @@ func main() {
	pushmetrics.Stop()

	startTime := time.Now()
-	logger.Infof("gracefully shutting down http server for metrics at %q", *httpListenAddr)
-	if err := httpserver.Stop(*httpListenAddr); err != nil {
+	logger.Infof("gracefully shutting down http server for metrics at %q", listenAddrs)
+	if err := httpserver.Stop(listenAddrs); err != nil {
		logger.Fatalf("cannot stop http server for metrics: %s", err)
	}
	logger.Infof("successfully shut down http server for metrics in %.3f seconds", time.Since(startTime).Seconds())
@@ -168,7 +170,7 @@ See the docs at https://docs.victoriametrics.com/vmbackup.html .
}

func newSrcFS() (*fslocal.FS, error) {
-	if err := snapshot.Validate(*snapshotName); err != nil {
+	if err := snapshotutil.Validate(*snapshotName); err != nil {
		return nil, fmt.Errorf("invalid -snapshotName=%q: %w", *snapshotName, err)
	}
	snapshotPath := filepath.Join(*storageDataPath, "snapshots", *snapshotName)

@@ -40,6 +40,11 @@ const (
	vmSignificantFigures = "vm-significant-figures"
	vmRoundDigits        = "vm-round-digits"
	vmDisableProgressBar = "vm-disable-progress-bar"
+	vmCertFile           = "vm-cert-file"
+	vmKeyFile            = "vm-key-file"
+	vmCAFile             = "vm-CA-file"
+	vmServerName         = "vm-server-name"
+	vmInsecureSkipVerify = "vm-insecure-skip-verify"

	// also used in vm-native
	vmExtraLabel = "vm-extra-label"
@@ -119,19 +124,45 @@ var (
		Name:  vmDisableProgressBar,
		Usage: "Whether to disable progress bar per each worker during the import.",
	},
+	&cli.StringFlag{
+		Name:  vmCertFile,
+		Usage: "Optional path to client-side TLS certificate file to use when connecting to '--vmAddr'",
+	},
+	&cli.StringFlag{
+		Name:  vmKeyFile,
+		Usage: "Optional path to client-side TLS key to use when connecting to '--vmAddr'",
+	},
+	&cli.StringFlag{
+		Name:  vmCAFile,
+		Usage: "Optional path to TLS CA file to use for verifying connections to '--vmAddr'. By default, system CA is used",
+	},
+	&cli.StringFlag{
+		Name:  vmServerName,
+		Usage: "Optional TLS server name to use for connections to '--vmAddr'. By default, the server name from '--vmAddr' is used",
+	},
+	&cli.BoolFlag{
+		Name:  vmInsecureSkipVerify,
+		Usage: "Whether to skip tls verification when connecting to '--vmAddr'",
+		Value: false,
+	},
}
)
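The new cert/key/CA/server-name/insecure flags follow the usual pattern for wiring client TLS options. A hedged sketch of how such flags typically translate into a `tls.Config` (illustrative only, not the actual vmctl wiring):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

func newTLSConfig(certFile, keyFile, caFile, serverName string, insecure bool) (*tls.Config, error) {
	cfg := &tls.Config{
		ServerName:         serverName, // overrides the hostname checked during the handshake
		InsecureSkipVerify: insecure,   // corresponds to the *-insecure-skip-verify flags
	}
	if certFile != "" {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, fmt.Errorf("cannot load client cert: %w", err)
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	if caFile != "" {
		pem, err := os.ReadFile(caFile)
		if err != nil {
			return nil, fmt.Errorf("cannot read CA file: %w", err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("cannot parse CA certs from %s", caFile)
		}
		cfg.RootCAs = pool // replaces the system CA pool
	}
	return cfg, nil
}

func main() {
	cfg, err := newTLSConfig("", "", "", "", true)
	fmt.Println(cfg != nil, err)
}
```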
const (
-	otsdbAddr        = "otsdb-addr"
-	otsdbConcurrency = "otsdb-concurrency"
-	otsdbQueryLimit  = "otsdb-query-limit"
-	otsdbOffsetDays  = "otsdb-offset-days"
-	otsdbHardTSStart = "otsdb-hard-ts-start"
-	otsdbRetentions  = "otsdb-retentions"
-	otsdbFilters     = "otsdb-filters"
-	otsdbNormalize   = "otsdb-normalize"
-	otsdbMsecsTime   = "otsdb-msecstime"
+	otsdbAddr               = "otsdb-addr"
+	otsdbConcurrency        = "otsdb-concurrency"
+	otsdbQueryLimit         = "otsdb-query-limit"
+	otsdbOffsetDays         = "otsdb-offset-days"
+	otsdbHardTSStart        = "otsdb-hard-ts-start"
+	otsdbRetentions         = "otsdb-retentions"
+	otsdbFilters            = "otsdb-filters"
+	otsdbNormalize          = "otsdb-normalize"
+	otsdbMsecsTime          = "otsdb-msecstime"
+	otsdbCertFile           = "otsdb-cert-file"
+	otsdbKeyFile            = "otsdb-key-file"
+	otsdbCAFile             = "otsdb-CA-file"
+	otsdbServerName         = "otsdb-server-name"
+	otsdbInsecureSkipVerify = "otsdb-insecure-skip-verify"
)

var (
@@ -191,6 +222,27 @@ var (
		Value: false,
		Usage: "Whether to normalize all data received to lower case before forwarding to VictoriaMetrics",
	},
+	&cli.StringFlag{
+		Name:  otsdbCertFile,
+		Usage: "Optional path to client-side TLS certificate file to use when connecting to -otsdb-addr",
+	},
+	&cli.StringFlag{
+		Name:  otsdbKeyFile,
+		Usage: "Optional path to client-side TLS key to use when connecting to -otsdb-addr",
+	},
+	&cli.StringFlag{
+		Name:  otsdbCAFile,
+		Usage: "Optional path to TLS CA file to use for verifying connections to -otsdb-addr. By default, system CA is used",
+	},
+	&cli.StringFlag{
+		Name:  otsdbServerName,
+		Usage: "Optional TLS server name to use for connections to -otsdb-addr. By default, the server name from -otsdb-addr is used",
+	},
+	&cli.BoolFlag{
+		Name:  otsdbInsecureSkipVerify,
+		Usage: "Whether to skip tls verification when connecting to -otsdb-addr",
+		Value: false,
+	},
}
)

@@ -208,6 +260,11 @@ const (
	influxMeasurementFieldSeparator = "influx-measurement-field-separator"
	influxSkipDatabaseLabel         = "influx-skip-database-label"
	influxPrometheusMode            = "influx-prometheus-mode"
+	influxCertFile                  = "influx-cert-file"
+	influxKeyFile                   = "influx-key-file"
+	influxCAFile                    = "influx-CA-file"
+	influxServerName                = "influx-server-name"
+	influxInsecureSkipVerify        = "influx-insecure-skip-verify"
)

var (
@@ -272,7 +329,28 @@ var (
	},
	&cli.BoolFlag{
		Name:  influxPrometheusMode,
-		Usage: "Wether to restore the original timeseries name previously written from Prometheus to InfluxDB v1 via remote_write.",
+		Usage: "Whether to restore the original timeseries name previously written from Prometheus to InfluxDB v1 via remote_write.",
		Value: false,
	},
+	&cli.StringFlag{
+		Name:  influxCertFile,
+		Usage: "Optional path to client-side TLS certificate file to use when connecting to -influx-addr",
+	},
+	&cli.StringFlag{
+		Name:  influxKeyFile,
+		Usage: "Optional path to client-side TLS key to use when connecting to -influx-addr",
+	},
+	&cli.StringFlag{
+		Name:  influxCAFile,
+		Usage: "Optional path to TLS CA file to use for verifying connections to -influx-addr. By default, system CA is used",
+	},
+	&cli.StringFlag{
+		Name:  influxServerName,
+		Usage: "Optional TLS server name to use for connections to -influx-addr. By default, the server name from -influx-addr is used",
+	},
+	&cli.BoolFlag{
+		Name:  influxInsecureSkipVerify,
+		Usage: "Whether to skip tls verification when connecting to -influx-addr",
+		Value: false,
+	},
}
@@ -335,6 +413,10 @@ const (
	vmNativeSrcPassword    = "vm-native-src-password"
	vmNativeSrcHeaders     = "vm-native-src-headers"
	vmNativeSrcBearerToken = "vm-native-src-bearer-token"
+	vmNativeSrcCertFile           = "vm-native-src-cert-file"
+	vmNativeSrcKeyFile            = "vm-native-src-key-file"
+	vmNativeSrcCAFile             = "vm-native-src-ca-file"
+	vmNativeSrcServerName         = "vm-native-src-server-name"
+	vmNativeSrcInsecureSkipVerify = "vm-native-src-insecure-skip-verify"

	vmNativeDstAddr = "vm-native-dst-addr"
@@ -342,6 +424,10 @@ const (
	vmNativeDstPassword    = "vm-native-dst-password"
	vmNativeDstHeaders     = "vm-native-dst-headers"
	vmNativeDstBearerToken = "vm-native-dst-bearer-token"
+	vmNativeDstCertFile           = "vm-native-dst-cert-file"
+	vmNativeDstKeyFile            = "vm-native-dst-key-file"
+	vmNativeDstCAFile             = "vm-native-dst-ca-file"
+	vmNativeDstServerName         = "vm-native-dst-server-name"
+	vmNativeDstInsecureSkipVerify = "vm-native-dst-insecure-skip-verify"
)

@@ -406,6 +492,28 @@ var (
		Name:  vmNativeSrcBearerToken,
		Usage: "Optional bearer auth token to use for the corresponding `--vm-native-src-addr`",
	},
+	&cli.StringFlag{
+		Name:  vmNativeSrcCertFile,
+		Usage: "Optional path to client-side TLS certificate file to use when connecting to `--vm-native-src-addr`",
+	},
+	&cli.StringFlag{
+		Name:  vmNativeSrcKeyFile,
+		Usage: "Optional path to client-side TLS key to use when connecting to `--vm-native-src-addr`",
+	},
+	&cli.StringFlag{
+		Name:  vmNativeSrcCAFile,
+		Usage: "Optional path to TLS CA file to use for verifying connections to `--vm-native-src-addr`. By default, system CA is used",
+	},
+	&cli.StringFlag{
+		Name:  vmNativeSrcServerName,
+		Usage: "Optional TLS server name to use for connections to `--vm-native-src-addr`. By default, the server name from `--vm-native-src-addr` is used",
+	},
+	&cli.BoolFlag{
+		Name:  vmNativeSrcInsecureSkipVerify,
+		Usage: "Whether to skip TLS certificate verification when connecting to `--vm-native-src-addr`",
+		Value: false,
+	},

	&cli.StringFlag{
		Name: vmNativeDstAddr,
		Usage: "VictoriaMetrics address to perform import to. \n" +
@@ -433,6 +541,28 @@ var (
		Name:  vmNativeDstBearerToken,
		Usage: "Optional bearer auth token to use for the corresponding `--vm-native-dst-addr`",
	},
+	&cli.StringFlag{
+		Name:  vmNativeDstCertFile,
+		Usage: "Optional path to client-side TLS certificate file to use when connecting to `--vm-native-dst-addr`",
+	},
+	&cli.StringFlag{
+		Name:  vmNativeDstKeyFile,
+		Usage: "Optional path to client-side TLS key to use when connecting to `--vm-native-dst-addr`",
+	},
+	&cli.StringFlag{
+		Name:  vmNativeDstCAFile,
+		Usage: "Optional path to TLS CA file to use for verifying connections to `--vm-native-dst-addr`. By default, system CA is used",
+	},
+	&cli.StringFlag{
+		Name:  vmNativeDstServerName,
+		Usage: "Optional TLS server name to use for connections to `--vm-native-dst-addr`. By default, the server name from `--vm-native-dst-addr` is used",
+	},
+	&cli.BoolFlag{
+		Name:  vmNativeDstInsecureSkipVerify,
+		Usage: "Whether to skip TLS certificate verification when connecting to `--vm-native-dst-addr`",
+		Value: false,
+	},

	&cli.StringSliceFlag{
		Name:  vmExtraLabel,
		Value: nil,
@@ -468,16 +598,6 @@ var (
		"Non-binary export/import API is less efficient, but supports deduplication if it is configured on vm-native-src-addr side.",
		Value: false,
	},
-	&cli.BoolFlag{
-		Name:  vmNativeSrcInsecureSkipVerify,
-		Usage: "Whether to skip TLS certificate verification when connecting to the source address",
-		Value: false,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: vmNativeDstInsecureSkipVerify,
|
||||
Usage: "Whether to skip TLS certificate verification when connecting to the destination address",
|
||||
Value: false,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -496,6 +616,10 @@ const (
|
||||
remoteReadPassword = "remote-read-password"
|
||||
remoteReadHTTPTimeout = "remote-read-http-timeout"
|
||||
remoteReadHeaders = "remote-read-headers"
|
||||
remoteReadCertFile = "remote-read-cert-file"
|
||||
remoteReadKeyFile = "remote-read-key-file"
|
||||
remoteReadCAFile = "remote-read-CA-file"
|
||||
remoteReadServerName = "remote-read-server-name"
|
||||
remoteReadInsecureSkipVerify = "remote-read-insecure-skip-verify"
|
||||
remoteReadDisablePathAppend = "remote-read-disable-path-append"
|
||||
)
|
||||
@@ -574,6 +698,22 @@ var (
|
||||
"For example, --remote-read-headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding remote source storage. \n" +
|
||||
"Multiple headers must be delimited by '^^': --remote-read-headers='header1:value1^^header2:value2'",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: remoteReadCertFile,
|
||||
Usage: "Optional path to client-side TLS certificate file to use when connecting to -remote-read-src-addr",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: remoteReadKeyFile,
|
||||
Usage: "Optional path to client-side TLS key to use when connecting to -remote-read-src-addr",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: remoteReadCAFile,
|
||||
Usage: "Optional path to TLS CA file to use for verifying connections to -remote-read-src-addr. By default, system CA is used",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: remoteReadServerName,
|
||||
Usage: "Optional TLS server name to use for connections to remoteReadSrcAddr. By default, the server name from -remote-read-src-addr is used",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: remoteReadInsecureSkipVerify,
|
||||
Usage: "Whether to skip TLS certificate verification when connecting to the remote read address",
|
||||
|
||||
@@ -1,6 +1,7 @@
package influx

import (
"crypto/tls"
"fmt"
"io"
"log"
@@ -33,7 +34,8 @@ type Config struct {
Retention string
ChunkSize int

Filter Filter
Filter Filter
TLSConfig *tls.Config
}

// Filter contains configuration for filtering
@@ -86,10 +88,10 @@ type LabelPair struct {
// configured with passed Config
func NewClient(cfg Config) (*Client, error) {
c := influx.HTTPConfig{
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
InsecureSkipVerify: true,
Addr: cfg.Addr,
Username: cfg.Username,
Password: cfg.Password,
TLSConfig: cfg.TLSConfig,
}
hc, err := influx.NewHTTPClient(c)
if err != nil {

@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
@@ -25,6 +24,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native/stream"
|
||||
)
|
||||
@@ -49,8 +49,20 @@ func main() {
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("OpenTSDB import mode")
|
||||
|
||||
// create Transport with given TLS config
|
||||
certFile := c.String(otsdbCertFile)
|
||||
keyFile := c.String(otsdbKeyFile)
|
||||
caFile := c.String(otsdbCAFile)
|
||||
serverName := c.String(otsdbServerName)
|
||||
insecureSkipVerify := c.Bool(otsdbInsecureSkipVerify)
|
||||
addr := c.String(otsdbAddr)
|
||||
|
||||
tr, err := httputils.Transport(addr, certFile, keyFile, caFile, serverName, insecureSkipVerify)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create Transport: %s", err)
|
||||
}
|
||||
oCfg := opentsdb.Config{
|
||||
Addr: c.String(otsdbAddr),
|
||||
Addr: addr,
|
||||
Limit: c.Int(otsdbQueryLimit),
|
||||
Offset: c.Int64(otsdbOffsetDays),
|
||||
HardTS: c.Int64(otsdbHardTSStart),
|
||||
@@ -58,13 +70,17 @@ func main() {
|
||||
Filters: c.StringSlice(otsdbFilters),
|
||||
Normalize: c.Bool(otsdbNormalize),
|
||||
MsecsTime: c.Bool(otsdbMsecsTime),
|
||||
Transport: tr,
|
||||
}
|
||||
otsdbClient, err := opentsdb.NewClient(oCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create opentsdb client: %s", err)
|
||||
}
|
||||
|
||||
vmCfg := initConfigVM(c)
|
||||
vmCfg, err := initConfigVM(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to init VM configuration: %s", err)
|
||||
}
|
||||
// disable progress bars since openTSDB implementation
|
||||
// does not use progress bar pool
|
||||
vmCfg.DisableProgressBar = true
|
||||
@@ -84,6 +100,18 @@ func main() {
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("InfluxDB import mode")
|
||||
|
||||
// create TLS config
|
||||
certFile := c.String(influxCertFile)
|
||||
keyFile := c.String(influxKeyFile)
|
||||
caFile := c.String(influxCAFile)
|
||||
serverName := c.String(influxServerName)
|
||||
insecureSkipVerify := c.Bool(influxInsecureSkipVerify)
|
||||
|
||||
tc, err := httputils.TLSConfig(certFile, keyFile, caFile, serverName, insecureSkipVerify)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create TLS Config: %s", err)
|
||||
}
|
||||
|
||||
iCfg := influx.Config{
|
||||
Addr: c.String(influxAddr),
|
||||
Username: c.String(influxUser),
|
||||
@@ -96,13 +124,18 @@ func main() {
|
||||
TimeEnd: c.String(influxFilterTimeEnd),
|
||||
},
|
||||
ChunkSize: c.Int(influxChunkSize),
|
||||
TLSConfig: tc,
|
||||
}
|
||||
|
||||
influxClient, err := influx.NewClient(iCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create influx client: %s", err)
|
||||
}
|
||||
|
||||
vmCfg := initConfigVM(c)
|
||||
vmCfg, err := initConfigVM(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to init VM configuration: %s", err)
|
||||
}
|
||||
importer, err = vm.NewImporter(ctx, vmCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create VM importer: %s", err)
|
||||
@@ -125,24 +158,42 @@ func main() {
|
||||
Usage: "Migrate time series via Prometheus remote-read protocol",
|
||||
Flags: mergeFlags(globalFlags, remoteReadFlags, vmFlags),
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("Remote-read import mode")
|
||||
|
||||
addr := c.String(remoteReadSrcAddr)
|
||||
|
||||
// create TLS config
|
||||
certFile := c.String(remoteReadCertFile)
|
||||
keyFile := c.String(remoteReadKeyFile)
|
||||
caFile := c.String(remoteReadCAFile)
|
||||
serverName := c.String(remoteReadServerName)
|
||||
insecureSkipVerify := c.Bool(remoteReadInsecureSkipVerify)
|
||||
|
||||
tr, err := httputils.Transport(addr, certFile, keyFile, caFile, serverName, insecureSkipVerify)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create transport: %s", err)
|
||||
}
|
||||
|
||||
rr, err := remoteread.NewClient(remoteread.Config{
|
||||
Addr: c.String(remoteReadSrcAddr),
|
||||
Username: c.String(remoteReadUser),
|
||||
Password: c.String(remoteReadPassword),
|
||||
Timeout: c.Duration(remoteReadHTTPTimeout),
|
||||
UseStream: c.Bool(remoteReadUseStream),
|
||||
Headers: c.String(remoteReadHeaders),
|
||||
LabelName: c.String(remoteReadFilterLabel),
|
||||
LabelValue: c.String(remoteReadFilterLabelValue),
|
||||
InsecureSkipVerify: c.Bool(remoteReadInsecureSkipVerify),
|
||||
DisablePathAppend: c.Bool(remoteReadDisablePathAppend),
|
||||
Addr: addr,
|
||||
Transport: tr,
|
||||
Username: c.String(remoteReadUser),
|
||||
Password: c.String(remoteReadPassword),
|
||||
Timeout: c.Duration(remoteReadHTTPTimeout),
|
||||
UseStream: c.Bool(remoteReadUseStream),
|
||||
Headers: c.String(remoteReadHeaders),
|
||||
LabelName: c.String(remoteReadFilterLabel),
|
||||
LabelValue: c.String(remoteReadFilterLabelValue),
|
||||
DisablePathAppend: c.Bool(remoteReadDisablePathAppend),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error create remote read client: %s", err)
|
||||
}
|
||||
|
||||
vmCfg := initConfigVM(c)
|
||||
|
||||
vmCfg, err := initConfigVM(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to init VM configuration: %s", err)
|
||||
}
|
||||
importer, err := vm.NewImporter(ctx, vmCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create VM importer: %s", err)
|
||||
@@ -171,7 +222,10 @@ func main() {
|
||||
Action: func(c *cli.Context) error {
|
||||
fmt.Println("Prometheus import mode")
|
||||
|
||||
vmCfg := initConfigVM(c)
|
||||
vmCfg, err := initConfigVM(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to init VM configuration: %s", err)
|
||||
}
|
||||
importer, err = vm.NewImporter(ctx, vmCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create VM importer: %s", err)
|
||||
@@ -213,7 +267,6 @@ func main() {
|
||||
|
||||
var srcExtraLabels []string
|
||||
srcAddr := strings.Trim(c.String(vmNativeSrcAddr), "/")
|
||||
srcInsecureSkipVerify := c.Bool(vmNativeSrcInsecureSkipVerify)
|
||||
srcAuthConfig, err := auth.Generate(
|
||||
auth.WithBasicAuth(c.String(vmNativeSrcUser), c.String(vmNativeSrcPassword)),
|
||||
auth.WithBearer(c.String(vmNativeSrcBearerToken)),
|
||||
@@ -221,16 +274,26 @@ func main() {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initilize auth config for source: %s", srcAddr)
|
||||
}
|
||||
|
||||
// create TLS config
|
||||
srcCertFile := c.String(vmNativeSrcCertFile)
|
||||
srcKeyFile := c.String(vmNativeSrcKeyFile)
|
||||
srcCAFile := c.String(vmNativeSrcCAFile)
|
||||
srcServerName := c.String(vmNativeSrcServerName)
|
||||
srcInsecureSkipVerify := c.Bool(vmNativeSrcInsecureSkipVerify)
|
||||
|
||||
srcTC, err := httputils.TLSConfig(srcCertFile, srcKeyFile, srcCAFile, srcServerName, srcInsecureSkipVerify)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create TLS Config: %s", err)
|
||||
}
|
||||
|
||||
srcHTTPClient := &http.Client{Transport: &http.Transport{
|
||||
DisableKeepAlives: disableKeepAlive,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: srcInsecureSkipVerify,
|
||||
},
|
||||
TLSClientConfig: srcTC,
|
||||
}}
|
||||
|
||||
dstAddr := strings.Trim(c.String(vmNativeDstAddr), "/")
|
||||
dstExtraLabels := c.StringSlice(vmExtraLabel)
|
||||
dstInsecureSkipVerify := c.Bool(vmNativeDstInsecureSkipVerify)
|
||||
dstAuthConfig, err := auth.Generate(
|
||||
auth.WithBasicAuth(c.String(vmNativeDstUser), c.String(vmNativeDstPassword)),
|
||||
auth.WithBearer(c.String(vmNativeDstBearerToken)),
|
||||
@@ -238,11 +301,22 @@ func main() {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initilize auth config for destination: %s", dstAddr)
|
||||
}
|
||||
|
||||
// create TLS config
|
||||
dstCertFile := c.String(vmNativeDstCertFile)
|
||||
dstKeyFile := c.String(vmNativeDstKeyFile)
|
||||
dstCAFile := c.String(vmNativeDstCAFile)
|
||||
dstServerName := c.String(vmNativeDstServerName)
|
||||
dstInsecureSkipVerify := c.Bool(vmNativeDstInsecureSkipVerify)
|
||||
|
||||
dstTC, err := httputils.TLSConfig(dstCertFile, dstKeyFile, dstCAFile, dstServerName, dstInsecureSkipVerify)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create TLS Config: %s", err)
|
||||
}
|
||||
|
||||
dstHTTPClient := &http.Client{Transport: &http.Transport{
|
||||
DisableKeepAlives: disableKeepAlive,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: dstInsecureSkipVerify,
|
||||
},
|
||||
TLSClientConfig: dstTC,
|
||||
}}
|
||||
|
||||
p := vmNativeProcessor{
|
||||
@@ -298,14 +372,15 @@ func main() {
if err != nil {
return cli.Exit(fmt.Errorf("cannot open exported block at path=%q err=%w", blockPath, err), 1)
}
var blocksCount uint64
if err := stream.Parse(f, isBlockGzipped, func(block *stream.Block) error {
atomic.AddUint64(&blocksCount, 1)
defer f.Close()
var blocksCount atomic.Uint64
if err := stream.Parse(f, isBlockGzipped, func(_ *stream.Block) error {
blocksCount.Add(1)
return nil
}); err != nil {
return cli.Exit(fmt.Errorf("cannot parse block at path=%q, blocksCount=%d, err=%w", blockPath, blocksCount, err), 1)
return cli.Exit(fmt.Errorf("cannot parse block at path=%q, blocksCount=%d, err=%w", blockPath, blocksCount.Load(), err), 1)
}
log.Printf("successfully verified block at path=%q, blockCount=%d", blockPath, blocksCount)
log.Printf("successfully verified block at path=%q, blockCount=%d", blockPath, blocksCount.Load())
return nil
},
},
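The hunk above swaps a plain uint64 counter accessed via atomic.AddUint64 for the sync/atomic.Uint64 type introduced in Go 1.19. A minimal self-contained sketch of the same pattern (names here are illustrative, not from the diff):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// atomic.Uint64 bundles the counter with its atomic accessors, so it
	// cannot accidentally be read or written non-atomically.
	var count atomic.Uint64

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			count.Add(1) // replaces atomic.AddUint64(&count, 1)
		}()
	}
	wg.Wait()
	fmt.Println(count.Load()) // replaces atomic.LoadUint64(&count)
}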
@@ -330,9 +405,24 @@ func main() {
log.Printf("Total time: %v", time.Since(start))
}

func initConfigVM(c *cli.Context) vm.Config {
func initConfigVM(c *cli.Context) (vm.Config, error) {
addr := c.String(vmAddr)

// create Transport with given TLS config
certFile := c.String(vmCertFile)
keyFile := c.String(vmKeyFile)
caFile := c.String(vmCAFile)
serverName := c.String(vmServerName)
insecureSkipVerify := c.Bool(vmInsecureSkipVerify)

tr, err := httputils.Transport(addr, certFile, keyFile, caFile, serverName, insecureSkipVerify)
if err != nil {
return vm.Config{}, fmt.Errorf("failed to create Transport: %s", err)
}

return vm.Config{
Addr: c.String(vmAddr),
Addr: addr,
Transport: tr,
User: c.String(vmUser),
Password: c.String(vmPassword),
Concurrency: uint8(c.Int(vmConcurrency)),
@@ -344,5 +434,5 @@ func initConfigVM(c *cli.Context) vm.Config {
ExtraLabels: c.StringSlice(vmExtraLabel),
RateLimit: c.Int64(vmRateLimit),
DisableProgressBar: c.Bool(vmDisableProgressBar),
}
}, nil
}

@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"net/http"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
)
@@ -34,7 +35,7 @@ type Response struct {
}

// Explore finds metric names by provided filter from api/v1/label/__name__/values
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) ([]string, error) {
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string, start, end time.Time) ([]string, error) {
url := fmt.Sprintf("%s/%s", c.Addr, nativeMetricNamesAddr)
if tenantID != "" {
url = fmt.Sprintf("%s/select/%s/prometheus/%s", c.Addr, tenantID, nativeMetricNamesAddr)
@@ -45,12 +46,8 @@ func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) ([]stri
}

params := req.URL.Query()
if f.TimeStart != "" {
params.Set("start", f.TimeStart)
}
if f.TimeEnd != "" {
params.Set("end", f.TimeEnd)
}
params.Set("start", start.Format(time.RFC3339))
params.Set("end", end.Format(time.RFC3339))
params.Set("match[]", f.Match)
req.URL.RawQuery = params.Encode()

@@ -63,11 +60,7 @@ func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) ([]stri
if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
return nil, fmt.Errorf("cannot decode series response: %s", err)
}

if err := resp.Body.Close(); err != nil {
return nil, fmt.Errorf("cannot close series response body: %s", err)
}
return response.MetricNames, nil
return response.MetricNames, resp.Body.Close()
}

// ImportPipe uses pipe reader in request to process data

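With the new signature, callers pass explicit chunk boundaries instead of the raw filter strings, so each exploration request covers exactly one time range. A hedged usage sketch (the wrapper name is hypothetical; it assumes context, time and the surrounding native package imports):

// exploreLastDay illustrates the new call shape of Explore.
func exploreLastDay(ctx context.Context, client *native.Client, f native.Filter) ([]string, error) {
	end := time.Now()
	start := end.Add(-24 * time.Hour)
	// tenantID is empty for single-node VictoriaMetrics.
	return client.Explore(ctx, f, "", start, end)
}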
@@ -47,6 +47,8 @@ type Client struct {
|
||||
Normalize bool
|
||||
HardTS int64
|
||||
MsecsTime bool
|
||||
|
||||
c *http.Client
|
||||
}
|
||||
|
||||
// Config contains fields required
|
||||
@@ -60,6 +62,7 @@ type Config struct {
|
||||
Filters []string
|
||||
Normalize bool
|
||||
MsecsTime bool
|
||||
Transport *http.Transport
|
||||
}
|
||||
|
||||
// TimeRange contains data about time ranges to query
|
||||
@@ -107,7 +110,8 @@ type Metric struct {
|
||||
// FindMetrics discovers all metrics that OpenTSDB knows about (given a filter)
|
||||
// e.g. /api/suggest?type=metrics&q=system&max=100000
|
||||
func (c Client) FindMetrics(q string) ([]string, error) {
|
||||
resp, err := http.Get(q)
|
||||
|
||||
resp, err := c.c.Get(q)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
|
||||
}
|
||||
@@ -131,7 +135,7 @@ func (c Client) FindMetrics(q string) ([]string, error) {
|
||||
// e.g. /api/search/lookup?m=system.load5&limit=1000000
|
||||
func (c Client) FindSeries(metric string) ([]Meta, error) {
|
||||
q := fmt.Sprintf("%s/api/search/lookup?m=%s&limit=%d", c.Addr, metric, c.Limit)
|
||||
resp, err := http.Get(q)
|
||||
resp, err := c.c.Get(q)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set GET request to %q: %s", q, err)
|
||||
}
|
||||
@@ -184,7 +188,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
|
||||
series.Metric, tagStr)
|
||||
|
||||
q := fmt.Sprintf("%s/api/query?%s", c.Addr, queryStr)
|
||||
resp, err := http.Get(q)
|
||||
resp, err := c.c.Get(q)
|
||||
if err != nil {
|
||||
return Metric{}, fmt.Errorf("failed to send GET request to %q: %s", q, err)
|
||||
}
|
||||
@@ -325,6 +329,7 @@ func NewClient(cfg Config) (*Client, error) {
|
||||
Normalize: cfg.Normalize,
|
||||
HardTS: cfg.HardTS,
|
||||
MsecsTime: cfg.MsecsTime,
|
||||
c: &http.Client{Transport: cfg.Transport},
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -61,7 +62,8 @@ func TestRemoteRead(t *testing.T) {
|
||||
{
|
||||
name: "step month on month time range",
|
||||
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
|
||||
vmCfg: vm.Config{Addr: "", Concurrency: 1, DisableProgressBar: true},
|
||||
vmCfg: vm.Config{Addr: "", Concurrency: 1, DisableProgressBar: true,
|
||||
Transport: http.DefaultTransport.(*http.Transport)},
|
||||
start: "2022-09-26T11:23:05+02:00",
|
||||
end: "2022-11-26T11:24:05+02:00",
|
||||
numOfSamples: 2,
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
@@ -46,6 +45,8 @@ type Client struct {
|
||||
type Config struct {
|
||||
// Addr of remote storage
|
||||
Addr string
|
||||
// Transport allows specifying custom http.Transport
|
||||
Transport *http.Transport
|
||||
// DisablePathAppend disable automatic appending of the remote read path
|
||||
DisablePathAppend bool
|
||||
// Timeout defines timeout for HTTP requests
|
||||
@@ -64,8 +65,6 @@ type Config struct {
|
||||
// LabelName, LabelValue stands for label=~value pair used for read requests.
|
||||
// Is optional.
|
||||
LabelName, LabelValue string
|
||||
// TLSSkipVerify defines whether to skip TLS certificate verification when connecting to the remote read address.
|
||||
InsecureSkipVerify bool
|
||||
}
|
||||
|
||||
// Filter defines a list of filters applied to requested data
|
||||
@@ -103,11 +102,13 @@ func NewClient(cfg Config) (*Client, error) {
|
||||
}
|
||||
}
|
||||
|
||||
client := &http.Client{Timeout: cfg.Timeout}
|
||||
if cfg.Transport != nil {
|
||||
client.Transport = cfg.Transport
|
||||
}
|
||||
|
||||
c := &Client{
|
||||
c: &http.Client{
|
||||
Timeout: cfg.Timeout,
|
||||
Transport: utils.Transport(cfg.Addr, cfg.InsecureSkipVerify),
|
||||
},
|
||||
c: client,
|
||||
addr: strings.TrimSuffix(cfg.Addr, "/"),
|
||||
disablePathAppend: cfg.DisablePathAppend,
|
||||
user: cfg.Username,
|
||||
@@ -170,7 +171,7 @@ func (c *Client) fetch(ctx context.Context, data []byte, streamCb StreamCallback
|
||||
if c.disablePathAppend {
|
||||
u = c.addr
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPost, u, r)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new HTTP request: %w", err)
|
||||
}
|
||||
@@ -183,7 +184,7 @@ func (c *Client) fetch(ctx context.Context, data []byte, streamCb StreamCallback
|
||||
}
|
||||
req.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")
|
||||
|
||||
resp, err := c.do(req.WithContext(ctx))
|
||||
resp, err := c.do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while sending request to %s: %w; Data len %d(%d)",
|
||||
req.URL.Redacted(), err, len(data), r.Size())
|
||||
|
||||
@@ -13,13 +13,13 @@ const (
maxTimeMsecs = int64(1<<63-1) / 1e6
)

// GetTime returns time from the given string.
func GetTime(s string) (time.Time, error) {
secs, err := promutils.ParseTime(s)
// ParseTime parses time in s string and returns time.Time object
// if parse correctly or error if not
func ParseTime(s string) (time.Time, error) {
msecs, err := promutils.ParseTimeMsec(s)
if err != nil {
return time.Time{}, fmt.Errorf("cannot parse %s: %w", s, err)
}
msecs := int64(secs * 1e3)
if msecs < minTimeMsecs {
msecs = 0
}

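The renamed helper now obtains milliseconds directly from promutils.ParseTimeMsec and clamps them into the supported range. A rough standalone sketch of the clamping step, with time.Parse standing in for the Prometheus-style parser (which also accepts relative values such as now-1h):

package main

import (
	"fmt"
	"time"
)

// These mirror the constants in the hunk above.
const (
	minTimeMsecs = 0
	maxTimeMsecs = int64(1<<63-1) / 1e6
)

// parseTime mimics the clamping behaviour of ParseTime; time.Parse is only a
// stand-in for promutils.ParseTimeMsec.
func parseTime(s string) (time.Time, error) {
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return time.Time{}, fmt.Errorf("cannot parse %s: %w", s, err)
	}
	msecs := t.UnixMilli()
	if msecs < minTimeMsecs {
		msecs = 0
	}
	if msecs > maxTimeMsecs {
		msecs = maxTimeMsecs
	}
	return time.UnixMilli(msecs), nil
}

func main() {
	t, err := parseTime("2023-01-02T15:04:05Z")
	fmt.Println(t, err)
}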
@@ -165,7 +165,7 @@ func TestGetTime(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GetTime(tt.s)
got, err := ParseTime(tt.s)
if (err != nil) != tt.wantErr {
t.Errorf("ParseTime() error = %v, wantErr %v", err, tt.wantErr)
return

@@ -1,25 +0,0 @@
package utils

import (
"crypto/tls"
"net/http"
"strings"
)

// Transport creates http.Transport object based on provided URL.
// Returns Transport with TLS configuration if URL contains `https` prefix
func Transport(URL string, insecureSkipVerify bool) *http.Transport {
t := http.DefaultTransport.(*http.Transport).Clone()
if !strings.HasPrefix(URL, "https") {
return t
}
t.TLSClientConfig = TLSConfig(insecureSkipVerify)
return t
}

// TLSConfig creates tls.Config object from provided arguments
func TLSConfig(insecureSkipVerify bool) *tls.Config {
return &tls.Config{
InsecureSkipVerify: insecureSkipVerify,
}
}
@@ -26,6 +26,8 @@ type Config struct {
|
||||
// --httpListenAddr value for single node version
|
||||
// --httpListenAddr value of vmselect component for cluster version
|
||||
Addr string
|
||||
// Transport allows specifying custom http.Transport
|
||||
Transport *http.Transport
|
||||
// Concurrency defines number of worker
|
||||
// performing the import requests concurrently
|
||||
Concurrency uint8
|
||||
@@ -62,6 +64,7 @@ type Config struct {
|
||||
// see https://docs.victoriametrics.com/#how-to-import-time-series-data
|
||||
type Importer struct {
|
||||
addr string
|
||||
client *http.Client
|
||||
importPath string
|
||||
compress bool
|
||||
user string
|
||||
@@ -128,8 +131,14 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &http.Client{}
|
||||
if cfg.Transport != nil {
|
||||
client.Transport = cfg.Transport
|
||||
}
|
||||
|
||||
im := &Importer{
|
||||
addr: addr,
|
||||
client: client,
|
||||
importPath: importPath,
|
||||
compress: cfg.Compress,
|
||||
user: cfg.User,
|
||||
@@ -291,7 +300,7 @@ func (im *Importer) Ping() error {
|
||||
if im.user != "" {
|
||||
req.SetBasicAuth(im.user, im.password)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
resp, err := im.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -321,7 +330,7 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
|
||||
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
errCh <- do(req)
|
||||
errCh <- im.do(req)
|
||||
close(errCh)
|
||||
}()
|
||||
|
||||
@@ -375,8 +384,8 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
|
||||
// ErrBadRequest represents bad request error.
|
||||
var ErrBadRequest = errors.New("bad request")
|
||||
|
||||
func do(req *http.Request) error {
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
func (im *Importer) do(req *http.Request) error {
|
||||
resp, err := im.client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected error when performing request: %s", err)
|
||||
}
|
||||
|
||||
@@ -54,14 +54,14 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
|
||||
startTime: time.Now(),
|
||||
}
|
||||
|
||||
start, err := utils.GetTime(p.filter.TimeStart)
|
||||
start, err := utils.ParseTime(p.filter.TimeStart)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse %s, provided: %s, error: %w", vmNativeFilterTimeStart, p.filter.TimeStart, err)
|
||||
}
|
||||
|
||||
end := time.Now().In(start.Location())
|
||||
if p.filter.TimeEnd != "" {
|
||||
end, err = utils.GetTime(p.filter.TimeEnd)
|
||||
end, err = utils.ParseTime(p.filter.TimeEnd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse %s, provided: %s, error: %w", vmNativeFilterTimeEnd, p.filter.TimeEnd, err)
|
||||
}
|
||||
@@ -175,28 +175,29 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
|
||||
dstURL = fmt.Sprintf("%s/insert/%s/prometheus/%s", p.dst.Addr, tenantID, importAddr)
|
||||
}
|
||||
|
||||
barPrefix := "Requests to make"
|
||||
initMessage := "Initing import process from %q to %q with filter %s"
|
||||
initParams := []interface{}{srcURL, dstURL, p.filter.String()}
|
||||
if p.interCluster {
|
||||
barPrefix = fmt.Sprintf("Requests to make for tenant %s", tenantID)
|
||||
initMessage = "Initing import process from %q to %q with filter %s for tenant %s"
|
||||
initParams = []interface{}{srcURL, dstURL, p.filter.String(), tenantID}
|
||||
}
|
||||
|
||||
fmt.Println("") // extra line for better output formatting
|
||||
log.Printf(initMessage, initParams...)
|
||||
if len(ranges) > 1 {
|
||||
log.Printf("Selected time range will be split into %d ranges according to %q step", len(ranges), p.filter.Chunk)
|
||||
}
|
||||
|
||||
var foundSeriesMsg string
|
||||
|
||||
metrics := []string{p.filter.Match}
|
||||
var requestsToMake int
|
||||
var metrics = map[string][][]time.Time{
|
||||
"": ranges,
|
||||
}
|
||||
if !p.disablePerMetricRequests {
|
||||
log.Printf("Exploring metrics...")
|
||||
metrics, err = p.src.Explore(ctx, p.filter, tenantID)
|
||||
metrics, err = p.explore(ctx, p.src, tenantID, ranges, silent)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot get metrics from source %s: %w", p.src.Addr, err)
|
||||
return fmt.Errorf("failed to explore metric names: %s", err)
|
||||
}
|
||||
|
||||
if len(metrics) == 0 {
|
||||
errMsg := "no metrics found"
|
||||
if tenantID != "" {
|
||||
@@ -205,7 +206,10 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
|
||||
log.Println(errMsg)
|
||||
return nil
|
||||
}
|
||||
foundSeriesMsg = fmt.Sprintf("Found %d metrics to import", len(metrics))
|
||||
for _, m := range metrics {
|
||||
requestsToMake += len(m)
|
||||
}
|
||||
foundSeriesMsg = fmt.Sprintf("Found %d unique metric names to import. Total import/export requests to make %d", len(metrics), requestsToMake)
|
||||
}
|
||||
|
||||
if !p.interCluster {
|
||||
@@ -219,15 +223,13 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
|
||||
log.Print(foundSeriesMsg)
|
||||
}
|
||||
|
||||
processingMsg := fmt.Sprintf("Requests to make: %d", len(metrics)*len(ranges))
|
||||
if len(ranges) > 1 {
|
||||
processingMsg = fmt.Sprintf("Selected time range will be split into %d ranges according to %q step. %s", len(ranges), p.filter.Chunk, processingMsg)
|
||||
}
|
||||
log.Print(processingMsg)
|
||||
|
||||
var bar *pb.ProgressBar
|
||||
barPrefix := "Requests to make"
|
||||
if p.interCluster {
|
||||
barPrefix = fmt.Sprintf("Requests to make for tenant %s", tenantID)
|
||||
}
|
||||
if !silent {
|
||||
bar = barpool.NewSingleProgress(fmt.Sprintf(nativeWithBackoffTpl, barPrefix), len(metrics)*len(ranges))
|
||||
bar = barpool.NewSingleProgress(fmt.Sprintf(nativeWithBackoffTpl, barPrefix), requestsToMake)
|
||||
if p.disablePerMetricRequests {
|
||||
bar = barpool.NewSingleProgress(nativeSingleProcessTpl, 0)
|
||||
}
|
||||
@@ -263,20 +265,19 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
|
||||
}
|
||||
|
||||
// any error breaks the import
|
||||
for _, s := range metrics {
|
||||
|
||||
match, err := buildMatchWithFilter(p.filter.Match, s)
|
||||
for mName, mRanges := range metrics {
|
||||
match, err := buildMatchWithFilter(p.filter.Match, mName)
|
||||
if err != nil {
|
||||
logger.Errorf("failed to build export filters: %s", err)
|
||||
logger.Errorf("failed to build filter %q for metric name %q: %s", p.filter.Match, mName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, times := range ranges {
|
||||
for _, times := range mRanges {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("context canceled")
|
||||
case infErr := <-errCh:
|
||||
return fmt.Errorf("native error: %s", infErr)
|
||||
return fmt.Errorf("export/import error: %s", infErr)
|
||||
case filterCh <- native.Filter{
|
||||
Match: match,
|
||||
TimeStart: times[0].Format(time.RFC3339),
|
||||
@@ -297,6 +298,32 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *vmNativeProcessor) explore(ctx context.Context, src *native.Client, tenantID string, ranges [][]time.Time, silent bool) (map[string][][]time.Time, error) {
log.Printf("Exploring metrics...")

var bar *pb.ProgressBar
if !silent {
bar = barpool.NewSingleProgress(fmt.Sprintf(nativeWithBackoffTpl, "Explore requests to make"), len(ranges))
bar.Start()
defer bar.Finish()
}

metrics := make(map[string][][]time.Time)
for _, r := range ranges {
ms, err := src.Explore(ctx, p.filter, tenantID, r[0], r[1])
if err != nil {
return nil, fmt.Errorf("cannot get metrics from %s on interval %v-%v: %w", src.Addr, r[0], r[1], err)
}
for i := range ms {
metrics[ms[i]] = append(metrics[ms[i]], r)
}
if bar != nil {
bar.Increment()
}
}
return metrics, nil
}

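explore() keys each metric name to the ranges in which it was actually observed, so the importer only issues export/import requests for (metric, range) pairs that exist. An illustrative shape of the result (metric names and boundaries are hypothetical):

var (
	start1 = time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end1   = start1.Add(24 * time.Hour)
	start2 = end1
	end2   = start2.Add(24 * time.Hour)
)

// metric name -> the time ranges in which that metric was observed
var metricsExample = map[string][][]time.Time{
	"http_requests_total":       {{start1, end1}, {start2, end2}},
	"process_cpu_seconds_total": {{start2, end2}}, // absent in the first range, so no request is made for it there
}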
// stats represents client statistic
|
||||
// when processing data
|
||||
type stats struct {
|
||||
|
||||
@@ -142,7 +142,7 @@ func (ctx *InsertCtx) ApplyRelabeling() {
|
||||
// FlushBufs flushes buffered rows to the underlying storage.
|
||||
func (ctx *InsertCtx) FlushBufs() error {
|
||||
sas := sasGlobal.Load()
|
||||
if sas != nil && !ctx.skipStreamAggr {
|
||||
if (sas != nil || deduplicator != nil) && !ctx.skipStreamAggr {
|
||||
matchIdxs := matchIdxsPool.Get()
|
||||
matchIdxs.B = ctx.streamAggrCtx.push(ctx.mrs, matchIdxs.B)
|
||||
if !*streamAggrKeepInput {
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
@@ -28,8 +28,12 @@ var (
|
||||
streamAggrDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation with -streamAggr.config. "+
|
||||
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
|
||||
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation.html")
|
||||
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before being aggregated. "+
|
||||
"Only the last sample per each time series per each interval is aggregated if the interval is greater than zero")
|
||||
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . "+
|
||||
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation.html#deduplication")
|
||||
streamAggrDropInputLabels = flagutil.NewArrayString("streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
|
||||
"before stream de-duplication and aggregation . See https://docs.victoriametrics.com/stream-aggregation.html#dropping-unneeded-labels")
|
||||
streamAggrIgnoreOldSamples = flag.Bool("streamAggr.ignoreOldSamples", false, "Whether to ignore input samples with old timestamps outside the current aggregation interval. "+
|
||||
"See https://docs.victoriametrics.com/stream-aggregation.html#ignoring-old-samples")
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -41,7 +45,8 @@ var (
|
||||
saCfgSuccess = metrics.NewGauge(`vminsert_streamagg_config_last_reload_successful`, nil)
|
||||
saCfgTimestamp = metrics.NewCounter(`vminsert_streamagg_config_last_reload_success_timestamp_seconds`)
|
||||
|
||||
sasGlobal atomic.Pointer[streamaggr.Aggregators]
|
||||
sasGlobal atomic.Pointer[streamaggr.Aggregators]
|
||||
deduplicator *streamaggr.Deduplicator
|
||||
)
|
||||
|
||||
// CheckStreamAggrConfig checks config pointed by -stramaggr.config
@@ -49,8 +54,13 @@ func CheckStreamAggrConfig() error {
if *streamAggrConfig == "" {
return nil
}
pushNoop := func(tss []prompbmarshal.TimeSeries) {}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushNoop, *streamAggrDedupInterval)
pushNoop := func(_ []prompbmarshal.TimeSeries) {}
opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushNoop, opts)
if err != nil {
return fmt.Errorf("error when loading -streamAggr.config=%q: %w", *streamAggrConfig, err)
}
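The same three-field streamaggr.Options literal is repeated in CheckStreamAggrConfig, InitStreamAggr and reloadStreamAggrConfig below. A hedged sketch of a small helper that could collapse that repetition (a possible follow-up refactor, not part of this diff; it assumes the package-level flag variables shown above):

// streamAggrOptions builds the shared options once; the three call sites in
// this diff could then pass streamAggrOptions() instead of repeating the literal.
func streamAggrOptions() *streamaggr.Options {
	return &streamaggr.Options{
		DedupInterval:    *streamAggrDedupInterval,
		DropInputLabels:  *streamAggrDropInputLabels,
		IgnoreOldSamples: *streamAggrIgnoreOldSamples,
	}
}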
@@ -65,15 +75,24 @@ func InitStreamAggr() {
saCfgReloaderStopCh = make(chan struct{})

if *streamAggrConfig == "" {
if *streamAggrDedupInterval > 0 {
deduplicator = streamaggr.NewDeduplicator(pushAggregateSeries, *streamAggrDedupInterval, *streamAggrDropInputLabels)
}
return
}

sighupCh := procutil.NewSighupChan()

sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, *streamAggrDedupInterval)
opts := &streamaggr.Options{
DedupInterval: *streamAggrDedupInterval,
DropInputLabels: *streamAggrDropInputLabels,
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
}
sas, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
if err != nil {
logger.Fatalf("cannot load -streamAggr.config=%q: %s", *streamAggrConfig, err)
}

sasGlobal.Store(sas)
saCfgSuccess.Set(1)
saCfgTimestamp.Set(fasttime.UnixTimestamp())
@@ -97,7 +116,12 @@ func reloadStreamAggrConfig() {
|
||||
logger.Infof("reloading -streamAggr.config=%q", *streamAggrConfig)
|
||||
saCfgReloads.Inc()
|
||||
|
||||
sasNew, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, *streamAggrDedupInterval)
|
||||
opts := &streamaggr.Options{
|
||||
DedupInterval: *streamAggrDedupInterval,
|
||||
DropInputLabels: *streamAggrDropInputLabels,
|
||||
IgnoreOldSamples: *streamAggrIgnoreOldSamples,
|
||||
}
|
||||
sasNew, err := streamaggr.LoadFromFile(*streamAggrConfig, pushAggregateSeries, opts)
|
||||
if err != nil {
|
||||
saCfgSuccess.Set(0)
|
||||
saCfgReloadErr.Inc()
|
||||
@@ -124,61 +148,101 @@ func MustStopStreamAggr() {
|
||||
|
||||
sas := sasGlobal.Swap(nil)
|
||||
sas.MustStop()
|
||||
|
||||
if deduplicator != nil {
|
||||
deduplicator.MustStop()
|
||||
deduplicator = nil
|
||||
}
|
||||
}
|
||||
|
||||
type streamAggrCtx struct {
|
||||
mn storage.MetricName
|
||||
tss [1]prompbmarshal.TimeSeries
|
||||
mn storage.MetricName
|
||||
tss []prompbmarshal.TimeSeries
|
||||
labels []prompbmarshal.Label
|
||||
samples []prompbmarshal.Sample
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (ctx *streamAggrCtx) Reset() {
|
||||
ctx.mn.Reset()
|
||||
ts := &ctx.tss[0]
|
||||
promrelabel.CleanLabels(ts.Labels)
|
||||
|
||||
clear(ctx.tss)
|
||||
ctx.tss = ctx.tss[:0]
|
||||
|
||||
clear(ctx.labels)
|
||||
ctx.labels = ctx.labels[:0]
|
||||
|
||||
ctx.samples = ctx.samples[:0]
|
||||
ctx.buf = ctx.buf[:0]
|
||||
}
|
||||
|
||||
func (ctx *streamAggrCtx) push(mrs []storage.MetricRow, matchIdxs []byte) []byte {
|
||||
matchIdxs = bytesutil.ResizeNoCopyMayOverallocate(matchIdxs, len(mrs))
|
||||
for i := 0; i < len(matchIdxs); i++ {
|
||||
matchIdxs[i] = 0
|
||||
}
|
||||
|
||||
mn := &ctx.mn
|
||||
tss := ctx.tss[:]
|
||||
ts := &tss[0]
|
||||
labels := ts.Labels
|
||||
samples := ts.Samples
|
||||
sas := sasGlobal.Load()
|
||||
var matchIdxsLocal []byte
|
||||
for idx, mr := range mrs {
|
||||
tss := ctx.tss
|
||||
labels := ctx.labels
|
||||
samples := ctx.samples
|
||||
buf := ctx.buf
|
||||
|
||||
tssLen := len(tss)
|
||||
for _, mr := range mrs {
|
||||
if err := mn.UnmarshalRaw(mr.MetricNameRaw); err != nil {
|
||||
logger.Panicf("BUG: cannot unmarshal recently marshaled MetricName: %s", err)
|
||||
}
|
||||
|
||||
labels = append(labels[:0], prompbmarshal.Label{
|
||||
labelsLen := len(labels)
|
||||
|
||||
bufLen := len(buf)
|
||||
buf = append(buf, mn.MetricGroup...)
|
||||
metricGroup := bytesutil.ToUnsafeString(buf[bufLen:])
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: "__name__",
|
||||
Value: bytesutil.ToUnsafeString(mn.MetricGroup),
|
||||
Value: metricGroup,
|
||||
})
|
||||
|
||||
for _, tag := range mn.Tags {
|
||||
bufLen = len(buf)
|
||||
buf = append(buf, tag.Key...)
|
||||
name := bytesutil.ToUnsafeString(buf[bufLen:])
|
||||
|
||||
bufLen = len(buf)
|
||||
buf = append(buf, tag.Value...)
|
||||
value := bytesutil.ToUnsafeString(buf[bufLen:])
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: bytesutil.ToUnsafeString(tag.Key),
|
||||
Value: bytesutil.ToUnsafeString(tag.Value),
|
||||
Name: name,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
|
||||
samples = append(samples[:0], prompbmarshal.Sample{
|
||||
samplesLen := len(samples)
|
||||
samples = append(samples, prompbmarshal.Sample{
|
||||
Timestamp: mr.Timestamp,
|
||||
Value: mr.Value,
|
||||
})
|
||||
|
||||
ts.Labels = labels
|
||||
ts.Samples = samples
|
||||
|
||||
matchIdxsLocal = sas.Push(tss, matchIdxsLocal)
|
||||
if matchIdxsLocal[0] != 0 {
|
||||
matchIdxs[idx] = 1
|
||||
}
|
||||
tss = append(tss, prompbmarshal.TimeSeries{
Labels: labels[labelsLen:],
Samples: samples[samplesLen:],
})
}
ctx.tss = tss
ctx.labels = labels
ctx.samples = samples
ctx.buf = buf

tss = tss[tssLen:]

sas := sasGlobal.Load()
if sas != nil {
matchIdxs = sas.Push(tss, matchIdxs)
} else if deduplicator != nil {
matchIdxs = bytesutil.ResizeNoCopyMayOverallocate(matchIdxs, len(tss))
for i := range matchIdxs {
matchIdxs[i] = 1
}
deduplicator.Push(tss)
}

ctx.Reset()

return matchIdxs
}

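The rewritten push collects all rows of an insert request into one tss batch and backs every label string with a single append-only buf, so a single Push or Deduplicator call handles the whole batch without per-row allocations. A simplified sketch of that shared-buffer technique, with unsafe.String (Go 1.20+) standing in for bytesutil.ToUnsafeString:

package main

import (
	"fmt"
	"unsafe"
)

// byteSliceToString reinterprets b as a string without copying, mirroring
// bytesutil.ToUnsafeString in the diff above.
func byteSliceToString(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(&b[0], len(b))
}

func main() {
	var buf []byte
	var names []string
	for _, raw := range [][]byte{[]byte("foo"), []byte("bar")} {
		bufLen := len(buf)
		buf = append(buf, raw...)
		// The string aliases buf's backing array. If a later append grows buf,
		// the old array stays reachable through the string, so this is safe as
		// long as buf is not truncated and reused while names is still alive.
		names = append(names, byteSliceToString(buf[bufLen:]))
	}
	fmt.Println(names)
}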
app/vminsert/datadogsketches/request_handler.go (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
package datadogsketches
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogsketches"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogsketches/stream"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadogutils"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="datadogsketches"}`)
|
||||
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="datadogsketches"}`)
|
||||
)
|
||||
|
||||
// InsertHandlerForHTTP processes remote write for DataDog POST /api/beta/sketches request.
|
||||
func InsertHandlerForHTTP(req *http.Request) error {
|
||||
extraLabels, err := parserCommon.GetExtraLabels(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ce := req.Header.Get("Content-Encoding")
|
||||
return stream.Parse(req.Body, ce, func(sketches []*datadogsketches.Sketch) error {
|
||||
return insertRows(sketches, extraLabels)
|
||||
})
|
||||
}
|
||||
|
||||
func insertRows(sketches []*datadogsketches.Sketch, extraLabels []prompbmarshal.Label) error {
|
||||
ctx := common.GetInsertCtx()
|
||||
defer common.PutInsertCtx(ctx)
|
||||
|
||||
rowsLen := 0
|
||||
for _, sketch := range sketches {
|
||||
rowsLen += sketch.RowsCount()
|
||||
}
|
||||
ctx.Reset(rowsLen)
|
||||
rowsTotal := 0
|
||||
hasRelabeling := relabel.HasRelabeling()
|
||||
for _, sketch := range sketches {
|
||||
ms := sketch.ToSummary()
|
||||
for _, m := range ms {
|
||||
ctx.Labels = ctx.Labels[:0]
|
||||
ctx.AddLabel("", m.Name)
|
||||
for _, label := range m.Labels {
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
for _, tag := range sketch.Tags {
|
||||
name, value := datadogutils.SplitTag(tag)
|
||||
if name == "host" {
|
||||
name = "exported_host"
|
||||
}
|
||||
ctx.AddLabel(name, value)
|
||||
}
|
||||
for j := range extraLabels {
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
for _, p := range m.Points {
|
||||
metricNameRaw, err = ctx.WriteDataPointExt(metricNameRaw, ctx.Labels, p.Timestamp, p.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
rowsTotal += len(m.Points)
|
||||
}
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return ctx.FlushBufs()
|
||||
}
|
||||
@@ -6,13 +6,13 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
vminsertCommon "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/csvimport"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/datadogsketches"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/datadogv1"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/datadogv2"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite"
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
@@ -100,7 +101,7 @@ func Init() {
|
||||
if len(*opentsdbHTTPListenAddr) > 0 {
|
||||
opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, *opentsdbHTTPUseProxyProtocol, opentsdbhttp.InsertHandler)
|
||||
}
|
||||
promscrape.Init(func(at *auth.Token, wr *prompbmarshal.WriteRequest) {
|
||||
promscrape.Init(func(_ *auth.Token, wr *prompbmarshal.WriteRequest) {
|
||||
prompush.Push(wr)
|
||||
})
|
||||
}
|
||||
@@ -162,7 +163,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path = strings.TrimSuffix(path, "/")
|
||||
}
|
||||
switch path {
|
||||
case "/prometheus/api/v1/write", "/api/v1/write":
|
||||
case "/prometheus/api/v1/write", "/api/v1/write", "/api/v1/push", "/prometheus/api/v1/push":
|
||||
if common.HandleVMProtoServerHandshake(w, r) {
|
||||
return true
|
||||
}
|
||||
@@ -216,14 +217,14 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
addInfluxResponseHeaders(w)
|
||||
influxutils.WriteDatabaseNames(w)
|
||||
return true
|
||||
case "/opentelemetry/api/v1/push":
|
||||
case "/opentelemetry/api/v1/push", "/opentelemetry/v1/metrics":
|
||||
opentelemetryPushRequests.Inc()
|
||||
if err := opentelemetry.InsertHandler(r); err != nil {
|
||||
opentelemetryPushErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
firehose.WriteSuccessResponse(w, r)
|
||||
return true
|
||||
case "/newrelic":
|
||||
newrelicCheckRequest.Inc()
|
||||
@@ -271,6 +272,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
w.WriteHeader(202)
|
||||
fmt.Fprintf(w, `{"status":"ok"}`)
|
||||
return true
|
||||
case "/datadog/api/beta/sketches":
|
||||
datadogsketchesWriteRequests.Inc()
|
||||
if err := datadogsketches.InsertHandlerForHTTP(r); err != nil {
|
||||
datadogsketchesWriteErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(202)
|
||||
return true
|
||||
case "/datadog/api/v1/validate":
|
||||
datadogValidateRequests.Inc()
|
||||
// See https://docs.datadoghq.com/api/latest/authentication/#validate-api-key
|
||||
@@ -341,10 +351,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
}
|
||||
promscrapeConfigReloadRequests.Inc()
|
||||
procutil.SelfSIGHUP()
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return true
|
||||
case "/ready":
|
||||
if rdy := atomic.LoadInt32(&promscrape.PendingScrapeConfigs); rdy > 0 {
|
||||
if rdy := promscrape.PendingScrapeConfigs.Load(); rdy > 0 {
|
||||
errMsg := fmt.Sprintf("waiting for scrape config to init targets, configs left: %d", rdy)
|
||||
http.Error(w, errMsg, http.StatusTooEarly)
|
||||
} else {
|
||||
@@ -394,13 +404,16 @@ var (
|
||||
datadogv2WriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v2/series", protocol="datadog"}`)
|
||||
datadogv2WriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/datadog/api/v2/series", protocol="datadog"}`)
|
||||
|
||||
datadogsketchesWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/beta/sketches", protocol="datadog"}`)
|
||||
datadogsketchesWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/datadog/api/beta/sketches", protocol="datadog"}`)
|
||||
|
||||
datadogValidateRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/validate", protocol="datadog"}`)
|
||||
datadogCheckRunRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/check_run", protocol="datadog"}`)
|
||||
datadogIntakeRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
|
||||
datadogMetadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)
|
||||
|
||||
opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
|
||||
opentelemetryPushErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
|
||||
opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
|
||||
opentelemetryPushErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
|
||||
|
||||
newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
|
||||
newrelicWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
|
||||
@@ -422,12 +435,12 @@ var (
|
||||
promscrapeConfigReloadRequests = metrics.NewCounter(`vm_http_requests_total{path="/-/reload"}`)
|
||||
|
||||
_ = metrics.NewGauge(`vm_metrics_with_dropped_labels_total`, func() float64 {
|
||||
return float64(atomic.LoadUint64(&storage.MetricsWithDroppedLabels))
|
||||
return float64(storage.MetricsWithDroppedLabels.Load())
|
||||
})
|
||||
_ = metrics.NewGauge(`vm_too_long_label_names_total`, func() float64 {
|
||||
return float64(atomic.LoadUint64(&storage.TooLongLabelNames))
|
||||
return float64(storage.TooLongLabelNames.Load())
|
||||
})
|
||||
_ = metrics.NewGauge(`vm_too_long_label_values_total`, func() float64 {
|
||||
return float64(atomic.LoadUint64(&storage.TooLongLabelValues))
|
||||
return float64(storage.TooLongLabelValues.Load())
|
||||
})
|
||||
)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
@@ -24,10 +25,15 @@ func InsertHandler(req *http.Request) error {
|
||||
return err
|
||||
}
|
||||
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
|
||||
var processBody func([]byte) ([]byte, error)
|
||||
if req.Header.Get("Content-Type") == "application/json" {
|
||||
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
|
||||
if req.Header.Get("X-Amz-Firehose-Protocol-Version") != "" {
|
||||
processBody = firehose.ProcessRequestBody
|
||||
} else {
|
||||
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
|
||||
}
|
||||
}
|
||||
return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
|
||||
return stream.ParseStream(req.Body, isGzipped, processBody, func(tss []prompbmarshal.TimeSeries) error {
|
||||
return insertRows(tss, extraLabels)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -37,7 +37,8 @@ func main() {
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
|
||||
go httpserver.Serve(*httpListenAddr, false, nil)
|
||||
listenAddrs := []string{*httpListenAddr}
|
||||
go httpserver.Serve(listenAddrs, nil, nil)
|
||||
|
||||
srcFS, err := newSrcFS()
|
||||
if err != nil {
|
||||
@@ -62,8 +63,8 @@ func main() {
|
||||
dstFS.MustStop()
|
||||
|
||||
startTime := time.Now()
|
||||
logger.Infof("gracefully shutting down http server for metrics at %q", *httpListenAddr)
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
logger.Infof("gracefully shutting down http server for metrics at %q", listenAddrs)
|
||||
if err := httpserver.Stop(listenAddrs); err != nil {
|
||||
logger.Fatalf("cannot stop http server for metrics: %s", err)
|
||||
}
|
||||
logger.Infof("successfully shut down http server for metrics in %.3f seconds", time.Since(startTime).Seconds())
|
||||
|
||||
@@ -160,7 +160,7 @@ func newNextSeriesForSearchQuery(ec *evalConfig, sq *storage.SearchQuery, expr g
|
||||
seriesCh := make(chan *series, cgroup.AvailableCPUs())
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
err := rss.RunParallel(nil, func(rs *netstorage.Result, workerID uint) error {
|
||||
err := rss.RunParallel(nil, func(rs *netstorage.Result, _ uint) error {
|
||||
nameWithTags := getCanonicalPath(&rs.MetricName)
|
||||
tags := unmarshalTags(nameWithTags)
|
||||
s := &series{
|
||||
|
||||
@@ -3279,6 +3279,102 @@ func TestExecExprSuccess(t *testing.T) {
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`aggregateSeriesLists(
summarize(
time('foo.bar.baz',10),
'45s'
),
summarize(
time('bar.foo.bad',10),
'45s'
), 'sum')`, []*series{
{
Timestamps: []int64{120000, 165000},
Values: []float64{1170, 2000},
Name: `sumSeries(summarize(foo.bar.baz,'45s','sum'),summarize(bar.foo.bad,'45s','sum'))`,
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`sumSeriesLists(
summarize(
time('foo.bar.baz',10),
'45s'
),
summarize(
time('bar.foo.bad',10),
'45s'
))`, []*series{
{
Timestamps: []int64{120000, 165000},
Values: []float64{1170, 2000},
Name: `sumSeries(summarize(foo.bar.baz,'45s','sum'),summarize(bar.foo.bad,'45s','sum'))`,
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`aggregateSeriesLists(
summarize(
time('foo.bar.baz',10),
'45s'
),
summarize(
time('bar.foo.bad',10),
'45s'
), 'diff')`, []*series{
{
Timestamps: []int64{120000, 165000},
Values: []float64{0, 0},
Name: `diffSeries(summarize(foo.bar.baz,'45s','sum'),summarize(bar.foo.bad,'45s','sum'))`,
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`diffSeriesLists(
summarize(
time('foo.bar.baz',10),
'45s'
),
summarize(
time('bar.foo.bad',10),
'45s'
))`, []*series{
{
Timestamps: []int64{120000, 165000},
Values: []float64{0, 0},
Name: `diffSeries(summarize(foo.bar.baz,'45s','sum'),summarize(bar.foo.bad,'45s','sum'))`,
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`aggregateSeriesLists(
summarize(
time('foo.bar.baz',10),
'45s'
),
summarize(
time('bar.foo.bad',10),
'45s'
), 'multiply')`, []*series{
{
Timestamps: []int64{120000, 165000},
Values: []float64{342225, 1e+06},
Name: `multiplySeries(summarize(foo.bar.baz,'45s','sum'),summarize(bar.foo.bad,'45s','sum'))`,
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`multiplySeriesLists(
summarize(
time('foo.bar.baz',10),
'45s'
),
summarize(
time('bar.foo.bad',10),
'45s'
))`, []*series{
{
Timestamps: []int64{120000, 165000},
Values: []float64{342225, 1e+06},
Name: `multiplySeries(summarize(foo.bar.baz,'45s','sum'),summarize(bar.foo.bad,'45s','sum'))`,
Tags: map[string]string{"name": "foo.bar.baz", "summarize": "45s", "summarizeFunction": "sum"},
},
})
f(`weightedAverage(
summarize(
group(

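The expected values in the tests above follow from the element-wise pairing that the *SeriesLists functions perform: both summarize(time(...),'45s') inputs produce the same points (585 and 1000 at the two output timestamps), so 'sum' yields 585+585=1170 and 1000+1000=2000, 'diff' yields 0 at both points, and 'multiply' yields 585*585=342225 and 1000*1000=1e+06.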
@@ -40,6 +40,52 @@
}
]
},
"aggregateSeriesLists": {
"name": "aggregateSeriesLists",
"function": "aggregateSeriesLists(seriesListFirstPos, seriesListSecondPos, func, xFilesFactor=None)",
"description": "Iterates over a two lists and aggregates using specified function list1[0] to list2[0], list1[1] to list2[1] and so on. The lists will need to be the same length\n\nPosition of seriesList matters. For example using “sum” function aggregateSeriesLists(list1[0..n], list2[0..n], \"sum\") it would find sum for each member of the list list1[0] + list2[0], list1[1] + list2[1], list1[n] + list2[n].",
"module": "graphite.render.functions",
"group": "Combine",
"params": [
{
"name": "seriesListFirstPos",
"type": "seriesList",
"required": true
},
{
"name": "seriesListSecondPos",
"type": "seriesList",
"required": true
},
{
"name": "func",
"type": "aggFunc",
"required": true,
"options": [
"average",
"avg",
"avg_zero",
"count",
"current",
"diff",
"last",
"max",
"median",
"min",
"multiply",
"range",
"rangeOf",
"stddev",
"sum",
"total"
]
},
{
"name": "xFilesFactor",
"type": "float"
}
]
},
"aggregateWithWildcards": {
"name": "aggregateWithWildcards",
"function": "aggregateWithWildcards(seriesList, func, *positions)",
@@ -211,6 +257,25 @@
}
]
},
"diffSeriesLists": {
"name": "diffSeriesLists",
"function": "diffSeriesLists(seriesListFirstPos, seriesListSecondPos)",
"description": "Iterates over a two lists and subtracts series lists 2 through n from series 1 list1[0] to list2[0], list1[1] to list2[1] and so on. The lists will need to be the same length",
"module": "graphite.render.functions",
"group": "Combine",
"params": [
{
"name": "seriesListFirstPos",
"type": "seriesList",
"required": true
},
{
"name": "seriesListSecondPos",
"type": "seriesList",
"required": true
}
]
},
"divideSeries": {
"name": "divideSeries",
"function": "divideSeries(dividendSeriesList, divisorSeries)",
@@ -529,6 +594,25 @@
}
]
},
"multiplySeriesLists": {
"name": "multiplySeriesLists",
"function": "multiplySeriesLists(seriesListFirstPos, seriesListSecondPos)",
"description": "Iterates over a two lists and multiply series lists 2 through n from series 1 list1[0] to list2[0], list1[1] to list2[1] and so on. The lists will need to be the same length",
"module": "graphite.render.functions",
"group": "Combine",
"params": [
{
"name": "seriesListFirstPos",
"type": "seriesList",
"required": true
},
{
"name": "seriesListSecondPos",
"type": "seriesList",
"required": true
}
]
},
"multiplySeriesWithWildcards": {
"name": "multiplySeriesWithWildcards",
"function": "multiplySeriesWithWildcards(seriesList, *position)",
@@ -715,6 +799,26 @@
}
]
},
"sumSeriesLists": {
"name": "sumSeriesLists",
"function": "sumSeriesLists(seriesListFirstPos, seriesListSecondPos)",
"description": "Iterates over a two lists and sums series lists 2 through n from series 1 list1[0] to list2[0], list1[1] to list2[1] and so on. The lists will need to be the same length",
"module": "graphite.render.functions",
"group": "Combine",
"params": [
{
"name": "seriesListFirstPos",
"type": "seriesList",
"required": true
},
{
"name": "seriesListSecondPos",
"type": "seriesList",
"required": true
}
]
},

"sumSeriesWithWildcards": {
"name": "sumSeriesWithWildcards",
"function": "sumSeriesWithWildcards(seriesList, *position)",

@@ -36,6 +36,7 @@ func init() {
"add": transformAdd,
"aggregate": transformAggregate,
"aggregateLine": transformAggregateLine,
"aggregateSeriesLists": transformAggregateSeriesLists,
"aggregateWithWildcards": transformAggregateWithWildcards,
"alias": transformAlias,
"aliasByMetric": transformAliasByMetric,
@@ -66,6 +67,7 @@ func init() {
"delay": transformDelay,
"derivative": transformDerivative,
"diffSeries": transformDiffSeries,
"diffSeriesLists": transformDiffSeriesLists,
"divideSeries": transformDivideSeries,
"divideSeriesLists": transformDivideSeriesLists,
"drawAsInfinite": transformDrawAsInfinite,
@@ -125,6 +127,7 @@ func init() {
"movingSum": transformMovingSum,
"movingWindow": transformMovingWindow,
"multiplySeries": transformMultiplySeries,
"multiplySeriesLists": transformMultiplySeriesLists,
"multiplySeriesWithWildcards": transformMultiplySeriesWithWildcards,
"nPercentile": transformNPercentile,
"nonNegativeDerivative": transformNonNegativeDerivative,
@@ -172,6 +175,7 @@ func init() {
"substr": transformSubstr,
"sum": transformSumSeries,
"sumSeries": transformSumSeries,
"sumSeriesLists": transformSumSeriesLists,
"sumSeriesWithWildcards": transformSumSeriesWithWildcards,
"summarize": transformSummarize,
"threshold": transformThreshold,
@@ -401,7 +405,7 @@ func aggregateSeriesWithWildcards(ec *evalConfig, expr graphiteql.Expr, nextSeri
for _, pos := range positions {
positionsMap[pos] = struct{}{}
}
keyFunc := func(name string, tags map[string]string) string {
keyFunc := func(name string, _ map[string]string) string {
parts := strings.Split(getPathFromName(name), ".")
dstParts := parts[:0]
for i, part := range parts {
@@ -1316,6 +1320,88 @@ func transformDivideSeries(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesF
return f, nil
}

func aggregateSeriesListsGeneric(ec *evalConfig, fe *graphiteql.FuncExpr, funcName string) (nextSeriesFunc, error) {
args := fe.Args
agg, err := getAggrFunc(funcName)
if err != nil {
return nil, err
}
nextSeriesFirst, err := evalSeriesList(ec, args, "seriesListFirstPos", 0)
if err != nil {
return nil, err
}
nextSeriesSecond, err := evalSeriesList(ec, args, "seriesListSecondPos", 1)
if err != nil {
_, _ = drainAllSeries(nextSeriesFirst)
return nil, err
}
return aggregateSeriesList(ec, fe, nextSeriesFirst, nextSeriesSecond, agg, funcName)
}

// See https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.aggregateSeriesLists
func transformAggregateSeriesLists(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, error) {
args := fe.Args
if len(args) != 3 && len(args) != 4 {
return nil, fmt.Errorf("unexpected number of args; got %d; want 3 or 4", len(args))
}

funcName, err := getString(args, "func", 2)
if err != nil {
return nil, err
}

return aggregateSeriesListsGeneric(ec, fe, funcName)
}

// See https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.sumSeriesLists
func transformSumSeriesLists(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, error) {
return aggregateSeriesListsGeneric(ec, fe, "sum")
}

// See https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.multiplySeriesLists
func transformMultiplySeriesLists(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, error) {
return aggregateSeriesListsGeneric(ec, fe, "multiply")
}

// See https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.diffSeriesLists
func transformDiffSeriesLists(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, error) {
return aggregateSeriesListsGeneric(ec, fe, "diff")
}

func aggregateSeriesList(ec *evalConfig, fe *graphiteql.FuncExpr, nextSeriesFirst, nextSeriesSecond nextSeriesFunc, agg aggrFunc, funcName string) (nextSeriesFunc, error) {
ssFirst, stepFirst, err := fetchNormalizedSeries(ec, nextSeriesFirst, false)
if err != nil {
_, _ = drainAllSeries(nextSeriesSecond)
return nil, err
}
ssSecond, stepSecond, err := fetchNormalizedSeries(ec, nextSeriesSecond, false)
if err != nil {
return nil, err
}

if len(ssFirst) != len(ssSecond) {
return nil, fmt.Errorf("First and second lists must have equal number of series; got %d vs %d series", len(ssFirst), len(ssSecond))
}
if stepFirst != stepSecond {
return nil, fmt.Errorf("step mismatch for first and second: %d vs %d", stepFirst, stepSecond)
}

valuePair := make([]float64, 2)
for i, s := range ssFirst {
sSecond := ssSecond[i]
values := s.Values
secondValues := sSecond.Values
for j, v := range values {
valuePair[0], valuePair[1] = v, secondValues[j]
values[j] = agg(valuePair)
}
s.Name = fmt.Sprintf("%sSeries(%s,%s)", funcName, s.Name, sSecond.Name)
s.expr = fe
s.pathExpression = s.Name
}
return multiSeriesFunc(ssFirst), nil
}

// See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.divideSeriesLists
func transformDivideSeriesLists(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, error) {
args := fe.Args
@@ -1326,36 +1412,14 @@ func transformDivideSeriesLists(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSe
if err != nil {
return nil, err
}
ssDividend, stepDivident, err := fetchNormalizedSeries(ec, nextDividend, false)
if err != nil {
return nil, err
}
nextDivisor, err := evalSeriesList(ec, args, "divisorSeriesList", 1)
if err != nil {
return nil, err
}
ssDivisor, stepDivisor, err := fetchNormalizedSeries(ec, nextDivisor, false)
if err != nil {
return nil, err
}
if len(ssDividend) != len(ssDivisor) {
return nil, fmt.Errorf("divident and divisor must have equal number of series; got %d vs %d series", len(ssDividend), len(ssDivisor))
}
if stepDivident != stepDivisor {
return nil, fmt.Errorf("step mismatch for divident and divisor: %d vs %d", stepDivident, stepDivisor)
}
for i, s := range ssDividend {
sDivisor := ssDivisor[i]
values := s.Values
divisorValues := sDivisor.Values
for j, v := range values {
values[j] = v / divisorValues[j]
}
s.Name = fmt.Sprintf("divideSeries(%s,%s)", s.Name, sDivisor.Name)
s.expr = fe
s.pathExpression = s.Name
}
return multiSeriesFunc(ssDividend), nil

return aggregateSeriesList(ec, fe, nextDividend, nextDivisor, func(values []float64) float64 {
return values[0] / values[1]
}, "divide")
}

// See https://graphite.readthedocs.io/en/stable/functions.html#graphite.render.functions.drawAsInfinite
@@ -1819,7 +1883,7 @@ func transformGroupByTags(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFu
if err != nil {
return nil, err
}
keyFunc := func(name string, tags map[string]string) string {
keyFunc := func(_ string, tags map[string]string) string {
return formatKeyFromTags(tags, tagKeys, callback)
}
return groupByKeyFunc(ec, fe, nextSeries, callback, keyFunc)
@@ -3842,18 +3906,18 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
errCh <- err
close(errCh)
}()
var skipProcessing uint32
var skipProcessing atomic.Bool
for i := 0; i < goroutines; i++ {
go func() {
defer wg.Done()
for s := range seriesCh {
if atomic.LoadUint32(&skipProcessing) != 0 {
if skipProcessing.Load() {
continue
}
sNew, err := f(s)
if err != nil {
// Drain the rest of series and do not call f for them in order to conserve CPU time.
atomic.StoreUint32(&skipProcessing, 1)
skipProcessing.Store(true)
resultCh <- &result{
err: err,
}
@@ -5609,9 +5673,9 @@ func (nsf *nextSeriesFunc) peekStep(step int64) (int64, error) {
if s != nil {
step = s.step
}
calls := uint64(0)
var calls atomic.Uint64
*nsf = func() (*series, error) {
if atomic.AddUint64(&calls, 1) == 1 {
if calls.Add(1) == 1 {
return s, nil
}
return nextSeries()

@@ -5,13 +5,15 @@ import (
"errors"
"flag"
"fmt"
"reflect"
"sort"
"sync"
"sync/atomic"
"time"
"unsafe"

"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/metricsql"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -19,16 +21,17 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/metricsql"
)

var (
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned from /api/v1/labels")
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned from /api/v1/label/<label_name>/values")
maxSamplesPerSeries = flag.Int("search.maxSamplesPerSeries", 30e6, "The maximum number of raw samples a single query can scan per each time series. This option allows limiting memory usage")
maxSamplesPerQuery = flag.Int("search.maxSamplesPerQuery", 1e9, "The maximum number of raw samples a single query can process across all time series. This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries")
maxWorkersPerQuery = flag.Int("search.maxWorkersPerQuery", defaultMaxWorkersPerQuery, "The maximum number of CPU cores a single query can use. "+
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned from /api/v1/labels . "+
"See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration")
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned from /api/v1/label/<label_name>/values . "+
"See also -search.maxLabelsAPISeries and -search.maxLabelsAPIDuration")
maxSamplesPerSeries = flag.Int("search.maxSamplesPerSeries", 30e6, "The maximum number of raw samples a single query can scan per each time series. This option allows limiting memory usage")
maxSamplesPerQuery = flag.Int("search.maxSamplesPerQuery", 1e9, "The maximum number of raw samples a single query can process across all time series. "+
"This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries")
maxWorkersPerQuery = flag.Int("search.maxWorkersPerQuery", defaultMaxWorkersPerQuery, "The maximum number of CPU cores a single query can use. "+
"The default value should work good for most cases. "+
"The flag can be set to lower values for improving performance of big number of concurrently executed queries. "+
"The flag can be set to bigger values for improving performance of heavy queries, which scan big number of time series (>10K) and/or big number of samples (>100M). "+
@@ -81,7 +84,7 @@ func (rss *Results) mustClose() {
}

type timeseriesWork struct {
mustStop *uint32
mustStop *atomic.Bool
rss *Results
pts *packedTimeseries
f func(rs *Result, workerID uint) error
@@ -91,22 +94,22 @@ type timeseriesWork struct {
}

func (tsw *timeseriesWork) do(r *Result, workerID uint) error {
if atomic.LoadUint32(tsw.mustStop) != 0 {
if tsw.mustStop.Load() {
return nil
}
rss := tsw.rss
if rss.deadline.Exceeded() {
atomic.StoreUint32(tsw.mustStop, 1)
tsw.mustStop.Store(true)
return fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.String())
}
if err := tsw.pts.Unpack(r, rss.tbf, rss.tr); err != nil {
atomic.StoreUint32(tsw.mustStop, 1)
tsw.mustStop.Store(true)
return fmt.Errorf("error during time series unpacking: %w", err)
}
tsw.rowsProcessed = len(r.Timestamps)
if len(r.Timestamps) > 0 {
if err := tsw.f(r, workerID); err != nil {
atomic.StoreUint32(tsw.mustStop, 1)
tsw.mustStop.Store(true)
return err
}
}
@@ -238,7 +241,7 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
return 0, nil
}

var mustStop uint32
var mustStop atomic.Bool
initTimeseriesWork := func(tsw *timeseriesWork, pts *packedTimeseries) {
tsw.rss = rss
tsw.pts = pts
@@ -1008,7 +1011,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
var (
errGlobal error
errGlobalLock sync.Mutex
mustStop uint32
mustStop atomic.Bool
)
var wg sync.WaitGroup
wg.Add(gomaxprocs)
@@ -1020,7 +1023,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
errGlobalLock.Lock()
if errGlobal == nil {
errGlobal = err
atomic.StoreUint32(&mustStop, 1)
mustStop.Store(true)
}
errGlobalLock.Unlock()
}
@@ -1038,7 +1041,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
if deadline.Exceeded() {
return fmt.Errorf("timeout exceeded while fetching data block #%d from storage: %s", blocksRead, deadline.String())
}
if atomic.LoadUint32(&mustStop) != 0 {
if mustStop.Load() {
break
}
xw := exportWorkPool.Get().(*exportWork)
@@ -1193,8 +1196,8 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
if *maxSamplesPerQuery > 0 && samples > *maxSamplesPerQuery {
putTmpBlocksFile(tbf)
putStorageSearch(sr)
return nil, fmt.Errorf("cannot select more than -search.maxSamplesPerQuery=%d samples; possible solutions: to increase the -search.maxSamplesPerQuery; "+
"to reduce time range for the query; to use more specific label filters in order to select lower number of series", *maxSamplesPerQuery)
return nil, fmt.Errorf("cannot select more than -search.maxSamplesPerQuery=%d samples; possible solutions: increase the -search.maxSamplesPerQuery; "+
"reduce time range for the query; use more specific label filters in order to select fewer series", *maxSamplesPerQuery)
}

buf = br.Marshal(buf[:0])
@@ -1227,7 +1230,7 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
// bigger than maxFastAllocBlockSize bytes at append() below.
brsPool = make([]blockRef, 0, maxFastAllocBlockSize/unsafe.Sizeof(blockRef{}))
}
if brs.brs == nil || haveSameBlockRefTails(brs.brs, brsPool) {
if canAppendToBlockRefPool(brsPool, brs.brs) {
// It is safe appending blockRef to brsPool, since there are no other items added there yet.
brsPool = append(brsPool, blockRef{
partRef: partRef,
@@ -1295,10 +1298,25 @@ type blockRef struct {
addr tmpBlockAddr
}

func haveSameBlockRefTails(a, b []blockRef) bool {
sha := (*reflect.SliceHeader)(unsafe.Pointer(&a))
shb := (*reflect.SliceHeader)(unsafe.Pointer(&b))
return sha.Data+uintptr(sha.Len)*unsafe.Sizeof(blockRef{}) == shb.Data+uintptr(shb.Len)*unsafe.Sizeof(blockRef{})
// canAppendToBlockRefPool returns true if a points to the pool and the last item in a is the last item in the pool.
//
// In this case it is safe appending an item to the pool and then updating the a, so it refers to the extended slice.
//
// True is also returned if a is nil, since in this case it is safe appending an item to the pool and pointing a
// to the last item in the pool.
func canAppendToBlockRefPool(pool, a []blockRef) bool {
if a == nil {
return true
}
if len(a) > len(pool) {
// a doesn't belong to pool
return false
}
return getBlockRefsEnd(pool) == getBlockRefsEnd(a)
}

func getBlockRefsEnd(a []blockRef) uintptr {
return uintptr(unsafe.Pointer(unsafe.SliceData(a))) + uintptr(len(a))*unsafe.Sizeof(blockRef{})
}

func setupTfss(qt *querytracer.Tracer, tr storage.TimeRange, tagFilterss [][]storage.TagFilter, maxMetrics int, deadline searchutils.Deadline) ([]*storage.TagFilters, error) {

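The canAppendToBlockRefPool rewrite above replaces reflect.SliceHeader arithmetic with the unsafe.SliceData function added in Go 1.20: two slices share a tail exactly when their one-past-the-end addresses coincide. A standalone sketch of the same end-address comparison (the element type and names are illustrative, not from this repository):

package main

import (
	"fmt"
	"unsafe"
)

type item struct{ a, b int64 }

// sliceEnd returns the address one past the last element of a.
func sliceEnd(a []item) uintptr {
	return uintptr(unsafe.Pointer(unsafe.SliceData(a))) + uintptr(len(a))*unsafe.Sizeof(item{})
}

func main() {
	pool := make([]item, 4, 8)
	tail := pool[1:4] // shares the backing array and ends where pool ends
	fmt.Println(sliceEnd(tail) == sliceEnd(pool)) // true: appending to pool keeps tail's items in place
	other := make([]item, 4)
	fmt.Println(sliceEnd(other) == sliceEnd(pool)) // false: a separate allocation
}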
@@ -136,21 +136,25 @@ func (tbf *tmpBlocksFile) Finalize() error {
return fmt.Errorf("cannot write the remaining %d bytes to %q: %w", len(tbf.buf), fname, err)
}
tbf.buf = tbf.buf[:0]
r := fs.MustOpenReaderAt(fname)
r := fs.NewReaderAt(tbf.f)

// Hint the OS that the file is read almost sequentiallly.
// This should reduce the number of disk seeks, which is important
// for HDDs.
r.MustFadviseSequentialRead(true)

// Collect local stats in order to improve performance on systems with big number of CPU cores.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3966
r.SetUseLocalStats()

tbf.r = r
tbf.f = nil
return nil
}

func (tbf *tmpBlocksFile) MustReadBlockRefAt(partRef storage.PartRef, addr tmpBlockAddr) storage.BlockRef {
var buf []byte
if tbf.f == nil {
if tbf.r == nil {
buf = tbf.buf[addr.offset : addr.offset+uint64(addr.size)]
} else {
bb := tmpBufPool.Get()
@@ -169,21 +173,45 @@ func (tbf *tmpBlocksFile) MustReadBlockRefAt(partRef storage.PartRef, addr tmpBl
var tmpBufPool bytesutil.ByteBufferPool

func (tbf *tmpBlocksFile) MustClose() {
if tbf.f == nil {
if tbf.f != nil {
// tbf.f could be non-nil if Finalize wasn't called.
// In this case tbf.r must be nil.
if tbf.r != nil {
logger.Panicf("BUG: tbf.r must be nil when tbf.f!=nil")
}

// Try removing the file before closing it in order to prevent from flushing the in-memory data
// from page cache to the disk and save disk write IO. This may fail on non-posix systems such as Windows.
// Gracefully handle this case by attempting to remove the file after closing it.
fname := tbf.f.Name()
errRemove := os.Remove(fname)
if err := tbf.f.Close(); err != nil {
logger.Panicf("FATAL: cannot close %q: %s", fname, err)
}
if errRemove != nil {
if err := os.Remove(fname); err != nil {
logger.Panicf("FATAL: cannot remove %q: %s", fname, err)
}
}
tbf.f = nil
return
}
if tbf.r != nil {
// tbf.r could be nil if Finalize wasn't called.
tbf.r.MustClose()
}
fname := tbf.f.Name()

if err := tbf.f.Close(); err != nil {
logger.Panicf("FATAL: cannot close %q: %s", fname, err)
if tbf.r == nil {
// Nothing to do
return
}
// We cannot remove unclosed at non-posix filesystems, like windows
if err := os.Remove(fname); err != nil {
logger.Panicf("FATAL: cannot remove %q: %s", fname, err)

// Try removing the file before closing it in order to prevent from flushing the in-memory data
// from page cache to the disk and save disk write IO. This may fail on non-posix systems such as Windows.
// Gracefully handle this case by attempting to remove the file after closing it.
fname := tbf.r.Path()
errRemove := os.Remove(fname)
tbf.r.MustClose()
if errRemove != nil {
if err := os.Remove(fname); err != nil {
logger.Panicf("FATAL: cannot remove %q: %s", fname, err)
}
}
tbf.f = nil
tbf.r = nil
}

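The MustClose rewrite above keeps the "remove before close" trick: unlinking an open file on POSIX systems lets the kernel drop its dirty page-cache pages instead of flushing them to disk, while on Windows the unlink fails and is retried after Close. A standalone sketch of the same pattern (function names are illustrative):

package main

import (
	"log"
	"os"
)

// removeAndClose deletes f, tolerating non-POSIX systems where an open
// file cannot be unlinked (e.g. Windows) by retrying after Close.
func removeAndClose(f *os.File) {
	fname := f.Name()
	errRemove := os.Remove(fname) // may fail on Windows while f is open
	if err := f.Close(); err != nil {
		log.Fatalf("cannot close %q: %s", fname, err)
	}
	if errRemove != nil {
		if err := os.Remove(fname); err != nil {
			log.Fatalf("cannot remove %q: %s", fname, err)
		}
	}
}

func main() {
	f, err := os.CreateTemp("", "tbf-*")
	if err != nil {
		log.Fatal(err)
	}
	removeAndClose(f)
}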
@@ -24,6 +24,7 @@
{% endfor %}
{% endfunc %}

{%code const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" %}
{% func exportCSVField(mn *storage.MetricName, fieldName string, timestamp int64, value float64) %}
{% if fieldName == "__value__" %}
{%f= value %}
@@ -45,7 +46,7 @@
{% case "rfc3339" %}
{% code
bb := quicktemplate.AcquireByteBuffer()
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], time.RFC3339)
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], rfc3339Milli)
%}
{%z= bb.B %}
{% code

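The rfc3339Milli layout introduced above differs from time.RFC3339 only in emitting up to millisecond precision; the .999 fraction is trimmed when it is zero. A quick standalone comparison (the timestamp is illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
	t := time.UnixMilli(1700000000123).UTC()
	fmt.Println(t.Format(time.RFC3339)) // 2023-11-14T22:13:20Z (milliseconds dropped)
	fmt.Println(t.Format(rfc3339Milli)) // 2023-11-14T22:13:20.123Z
}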
@@ -87,586 +87,589 @@ func ExportCSVLine(xb *exportBlock, fieldNames []string) string {
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:27
|
||||
func streamexportCSVField(qw422016 *qt422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
|
||||
const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:28
|
||||
if fieldName == "__value__" {
|
||||
func streamexportCSVField(qw422016 *qt422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
|
||||
//line app/vmselect/prometheus/export.qtpl:29
|
||||
qw422016.N().F(value)
|
||||
if fieldName == "__value__" {
|
||||
//line app/vmselect/prometheus/export.qtpl:30
|
||||
return
|
||||
qw422016.N().F(value)
|
||||
//line app/vmselect/prometheus/export.qtpl:31
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:32
|
||||
if fieldName == "__timestamp__" {
|
||||
//line app/vmselect/prometheus/export.qtpl:33
|
||||
qw422016.N().DL(timestamp)
|
||||
//line app/vmselect/prometheus/export.qtpl:34
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:35
|
||||
//line app/vmselect/prometheus/export.qtpl:32
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:33
|
||||
if fieldName == "__timestamp__" {
|
||||
//line app/vmselect/prometheus/export.qtpl:34
|
||||
qw422016.N().DL(timestamp)
|
||||
//line app/vmselect/prometheus/export.qtpl:35
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:36
|
||||
if strings.HasPrefix(fieldName, "__timestamp__:") {
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:37
|
||||
if strings.HasPrefix(fieldName, "__timestamp__:") {
|
||||
//line app/vmselect/prometheus/export.qtpl:38
|
||||
timeFormat := fieldName[len("__timestamp__:"):]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:38
|
||||
switch timeFormat {
|
||||
//line app/vmselect/prometheus/export.qtpl:39
|
||||
case "unix_s":
|
||||
switch timeFormat {
|
||||
//line app/vmselect/prometheus/export.qtpl:40
|
||||
qw422016.N().DL(timestamp / 1000)
|
||||
case "unix_s":
|
||||
//line app/vmselect/prometheus/export.qtpl:41
|
||||
case "unix_ms":
|
||||
qw422016.N().DL(timestamp / 1000)
|
||||
//line app/vmselect/prometheus/export.qtpl:42
|
||||
qw422016.N().DL(timestamp)
|
||||
case "unix_ms":
|
||||
//line app/vmselect/prometheus/export.qtpl:43
|
||||
case "unix_ns":
|
||||
qw422016.N().DL(timestamp)
|
||||
//line app/vmselect/prometheus/export.qtpl:44
|
||||
qw422016.N().DL(timestamp * 1e6)
|
||||
case "unix_ns":
|
||||
//line app/vmselect/prometheus/export.qtpl:45
|
||||
qw422016.N().DL(timestamp * 1e6)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
case "rfc3339":
|
||||
//line app/vmselect/prometheus/export.qtpl:47
|
||||
//line app/vmselect/prometheus/export.qtpl:48
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], time.RFC3339)
|
||||
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], rfc3339Milli)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:50
|
||||
//line app/vmselect/prometheus/export.qtpl:51
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:52
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:54
|
||||
default:
|
||||
//line app/vmselect/prometheus/export.qtpl:55
|
||||
default:
|
||||
//line app/vmselect/prometheus/export.qtpl:56
|
||||
if strings.HasPrefix(timeFormat, "custom:") {
|
||||
//line app/vmselect/prometheus/export.qtpl:57
|
||||
//line app/vmselect/prometheus/export.qtpl:58
|
||||
layout := timeFormat[len("custom:"):]
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
bb.B = time.Unix(timestamp/1000, (timestamp%1000)*1e6).AppendFormat(bb.B[:0], layout)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:61
|
||||
if bytes.ContainsAny(bb.B, `"`+",\n") {
|
||||
//line app/vmselect/prometheus/export.qtpl:62
|
||||
qw422016.E().QZ(bb.B)
|
||||
if bytes.ContainsAny(bb.B, `"`+",\n") {
|
||||
//line app/vmselect/prometheus/export.qtpl:63
|
||||
} else {
|
||||
qw422016.E().QZ(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:64
|
||||
qw422016.N().Z(bb.B)
|
||||
} else {
|
||||
//line app/vmselect/prometheus/export.qtpl:65
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:66
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:67
|
||||
//line app/vmselect/prometheus/export.qtpl:68
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:69
|
||||
} else {
|
||||
//line app/vmselect/prometheus/export.qtpl:69
|
||||
qw422016.N().S(`Unsupported timeFormat=`)
|
||||
//line app/vmselect/prometheus/export.qtpl:70
|
||||
qw422016.N().S(timeFormat)
|
||||
} else {
|
||||
//line app/vmselect/prometheus/export.qtpl:70
|
||||
qw422016.N().S(`Unsupported timeFormat=`)
|
||||
//line app/vmselect/prometheus/export.qtpl:71
|
||||
}
|
||||
qw422016.N().S(timeFormat)
|
||||
//line app/vmselect/prometheus/export.qtpl:72
|
||||
}
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
return
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:74
|
||||
}
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:75
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:76
|
||||
v := mn.GetTagValue(fieldName)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:76
|
||||
if bytes.ContainsAny(v, `"`+",\n") {
|
||||
//line app/vmselect/prometheus/export.qtpl:77
|
||||
qw422016.N().QZ(v)
|
||||
if bytes.ContainsAny(v, `"`+",\n") {
|
||||
//line app/vmselect/prometheus/export.qtpl:78
|
||||
} else {
|
||||
qw422016.N().QZ(v)
|
||||
//line app/vmselect/prometheus/export.qtpl:79
|
||||
qw422016.N().Z(v)
|
||||
} else {
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
qw422016.N().Z(v)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
func writeexportCSVField(qq422016 qtio422016.Writer, mn *storage.MetricName, fieldName string, timestamp int64, value float64) {
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
streamexportCSVField(qw422016, mn, fieldName, timestamp, value)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
func exportCSVField(mn *storage.MetricName, fieldName string, timestamp int64, value float64) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
writeexportCSVField(qb422016, mn, fieldName, timestamp, value)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:81
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:83
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
func StreamExportPrometheusLine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
if len(xb.timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:85
|
||||
if len(xb.timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:85
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:85
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:86
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:86
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
writeprometheusMetricName(bb, xb.mn)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
//line app/vmselect/prometheus/export.qtpl:88
|
||||
for i, ts := range xb.timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:88
|
||||
//line app/vmselect/prometheus/export.qtpl:89
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:88
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:89
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
qw422016.N().F(xb.values[i])
|
||||
//line app/vmselect/prometheus/export.qtpl:89
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().DL(ts)
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:92
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
func WriteExportPrometheusLine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
StreamExportPrometheusLine(qw422016, xb)
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
func ExportPrometheusLine(xb *exportBlock) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
WriteExportPrometheusLine(qb422016, xb)
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:93
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
//line app/vmselect/prometheus/export.qtpl:96
|
||||
func StreamExportJSONLine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
||||
//line app/vmselect/prometheus/export.qtpl:96
|
||||
//line app/vmselect/prometheus/export.qtpl:97
|
||||
if len(xb.timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:96
|
||||
//line app/vmselect/prometheus/export.qtpl:97
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:96
|
||||
//line app/vmselect/prometheus/export.qtpl:97
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:96
|
||||
//line app/vmselect/prometheus/export.qtpl:97
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:98
|
||||
//line app/vmselect/prometheus/export.qtpl:99
|
||||
streammetricNameObject(qw422016, xb.mn)
|
||||
//line app/vmselect/prometheus/export.qtpl:98
|
||||
//line app/vmselect/prometheus/export.qtpl:99
|
||||
qw422016.N().S(`,"values":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:100
|
||||
if len(xb.values) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:101
|
||||
if len(xb.values) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:102
|
||||
values := xb.values
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:102
|
||||
streamconvertValueToSpecialJSON(qw422016, values[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:103
|
||||
streamconvertValueToSpecialJSON(qw422016, values[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:104
|
||||
values = values[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:104
|
||||
for _, v := range values {
|
||||
//line app/vmselect/prometheus/export.qtpl:104
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:105
|
||||
streamconvertValueToSpecialJSON(qw422016, v)
|
||||
for _, v := range values {
|
||||
//line app/vmselect/prometheus/export.qtpl:105
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:106
|
||||
streamconvertValueToSpecialJSON(qw422016, v)
|
||||
//line app/vmselect/prometheus/export.qtpl:107
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:107
|
||||
//line app/vmselect/prometheus/export.qtpl:108
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:107
|
||||
//line app/vmselect/prometheus/export.qtpl:108
|
||||
qw422016.N().S(`],"timestamps":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:110
|
||||
if len(xb.timestamps) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:111
|
||||
if len(xb.timestamps) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:112
|
||||
timestamps := xb.timestamps
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:112
|
||||
qw422016.N().DL(timestamps[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:113
|
||||
qw422016.N().DL(timestamps[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:114
|
||||
timestamps = timestamps[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:114
|
||||
for _, ts := range timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:114
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:115
|
||||
qw422016.N().DL(ts)
|
||||
for _, ts := range timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:115
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:116
|
||||
qw422016.N().DL(ts)
|
||||
//line app/vmselect/prometheus/export.qtpl:117
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:117
|
||||
//line app/vmselect/prometheus/export.qtpl:118
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:117
|
||||
//line app/vmselect/prometheus/export.qtpl:118
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:119
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
func WriteExportJSONLine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
StreamExportJSONLine(qw422016, xb)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
func ExportJSONLine(xb *exportBlock) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
WriteExportJSONLine(qb422016, xb)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:120
|
||||
//line app/vmselect/prometheus/export.qtpl:121
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:122
|
||||
//line app/vmselect/prometheus/export.qtpl:123
|
||||
func StreamExportPromAPILine(qw422016 *qt422016.Writer, xb *exportBlock) {
|
||||
//line app/vmselect/prometheus/export.qtpl:122
|
||||
//line app/vmselect/prometheus/export.qtpl:123
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:124
|
||||
//line app/vmselect/prometheus/export.qtpl:125
|
||||
streammetricNameObject(qw422016, xb.mn)
|
||||
//line app/vmselect/prometheus/export.qtpl:124
|
||||
//line app/vmselect/prometheus/export.qtpl:125
|
||||
qw422016.N().S(`,"values":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:125
|
||||
//line app/vmselect/prometheus/export.qtpl:126
|
||||
streamvaluesWithTimestamps(qw422016, xb.values, xb.timestamps)
|
||||
//line app/vmselect/prometheus/export.qtpl:125
|
||||
//line app/vmselect/prometheus/export.qtpl:126
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
func WriteExportPromAPILine(qq422016 qtio422016.Writer, xb *exportBlock) {
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
StreamExportPromAPILine(qw422016, xb)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
func ExportPromAPILine(xb *exportBlock) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
WriteExportPromAPILine(qb422016, xb)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:127
|
||||
//line app/vmselect/prometheus/export.qtpl:128
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:129
|
||||
//line app/vmselect/prometheus/export.qtpl:130
|
||||
func StreamExportPromAPIHeader(qw422016 *qt422016.Writer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:129
|
||||
//line app/vmselect/prometheus/export.qtpl:130
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
func WriteExportPromAPIHeader(qq422016 qtio422016.Writer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
StreamExportPromAPIHeader(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
func ExportPromAPIHeader() string {
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
WriteExportPromAPIHeader(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:135
|
||||
//line app/vmselect/prometheus/export.qtpl:136
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:137
|
||||
//line app/vmselect/prometheus/export.qtpl:138
|
||||
func StreamExportPromAPIFooter(qw422016 *qt422016.Writer, qt *querytracer.Tracer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:137
|
||||
//line app/vmselect/prometheus/export.qtpl:138
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:141
|
||||
//line app/vmselect/prometheus/export.qtpl:142
|
||||
qt.Donef("export format=promapi")
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:143
|
||||
//line app/vmselect/prometheus/export.qtpl:144
|
||||
streamdumpQueryTrace(qw422016, qt)
|
||||
//line app/vmselect/prometheus/export.qtpl:143
|
||||
//line app/vmselect/prometheus/export.qtpl:144
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
func WriteExportPromAPIFooter(qq422016 qtio422016.Writer, qt *querytracer.Tracer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
StreamExportPromAPIFooter(qw422016, qt)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
func ExportPromAPIFooter(qt *querytracer.Tracer) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
WriteExportPromAPIFooter(qb422016, qt)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:145
|
||||
//line app/vmselect/prometheus/export.qtpl:146
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:148
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/export.qtpl:149
	qw422016.N().Z(mn.MetricGroup)
//line app/vmselect/prometheus/export.qtpl:150
	if len(mn.Tags) > 0 {
//line app/vmselect/prometheus/export.qtpl:150
		qw422016.N().S(`{`)
//line app/vmselect/prometheus/export.qtpl:152
		tags := mn.Tags

//line app/vmselect/prometheus/export.qtpl:153
		qw422016.N().Z(tags[0].Key)
//line app/vmselect/prometheus/export.qtpl:153
		qw422016.N().S(`=`)
//line app/vmselect/prometheus/export.qtpl:153
		streamescapePrometheusLabel(qw422016, tags[0].Value)
//line app/vmselect/prometheus/export.qtpl:154
		tags = tags[1:]

//line app/vmselect/prometheus/export.qtpl:155
		for i := range tags {
//line app/vmselect/prometheus/export.qtpl:156
			tag := &tags[i]

//line app/vmselect/prometheus/export.qtpl:156
			qw422016.N().S(`,`)
//line app/vmselect/prometheus/export.qtpl:157
			qw422016.N().Z(tag.Key)
//line app/vmselect/prometheus/export.qtpl:157
			qw422016.N().S(`=`)
//line app/vmselect/prometheus/export.qtpl:157
			streamescapePrometheusLabel(qw422016, tag.Value)
//line app/vmselect/prometheus/export.qtpl:158
		}
//line app/vmselect/prometheus/export.qtpl:158
		qw422016.N().S(`}`)
//line app/vmselect/prometheus/export.qtpl:159
	}
//line app/vmselect/prometheus/export.qtpl:161
}

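streamprometheusMetricName renders a series name in Prometheus exposition format: the metric group first, then an optional {key="value",...} block. The first tag is emitted before the loop so that a comma only ever precedes the second and later tags. Below is a standalone sketch of the same output shape, assuming simplified string-based Tag and formatMetricName stand-ins rather than the real storage.MetricName:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Tag is a simplified stand-in for a storage.MetricName tag.
type Tag struct {
	Key, Value string
}

// formatMetricName mirrors the template logic: metric group first,
// then an optional {k="v",...} block of comma-separated tags.
func formatMetricName(group string, tags []Tag) string {
	var sb strings.Builder
	sb.WriteString(group)
	if len(tags) > 0 {
		sb.WriteByte('{')
		for i, tag := range tags {
			if i > 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(tag.Key)
			sb.WriteByte('=')
			// strconv.Quote escapes the backslash, newline and quote
			// characters handled by escapePrometheusLabel, among others.
			sb.WriteString(strconv.Quote(tag.Value))
		}
		sb.WriteByte('}')
	}
	return sb.String()
}

func main() {
	// Prints: http_requests_total{job="api",instance="host:9090"}
	fmt.Println(formatMetricName("http_requests_total", []Tag{
		{Key: "job", Value: "api"},
		{Key: "instance", Value: "host:9090"},
	}))
}
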
//line app/vmselect/prometheus/export.qtpl:161
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
//line app/vmselect/prometheus/export.qtpl:161
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:161
	streamprometheusMetricName(qw422016, mn)
//line app/vmselect/prometheus/export.qtpl:161
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:161
}

//line app/vmselect/prometheus/export.qtpl:161
func prometheusMetricName(mn *storage.MetricName) string {
//line app/vmselect/prometheus/export.qtpl:161
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:161
	writeprometheusMetricName(qb422016, mn)
//line app/vmselect/prometheus/export.qtpl:161
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:161
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:161
	return qs422016
//line app/vmselect/prometheus/export.qtpl:161
}

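The Acquire/Release pairs in these wrappers come from quicktemplate's object pools: writers and byte buffers are recycled across renders so the hot export path avoids a per-call allocation. A minimal sketch of the same pooling idea using sync.Pool with a plain bytes.Buffer (the real runtime uses its own ByteBuffer type):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

// renderToString rents a buffer, renders into it, copies the result out
// as a string and returns the buffer to the pool, which is exactly the
// shape of the generated prometheusMetricName wrapper above.
func renderToString(render func(*bytes.Buffer)) string {
	bb := bufPool.Get().(*bytes.Buffer)
	render(bb)
	s := bb.String() // copy out before the buffer is reused
	bb.Reset()
	bufPool.Put(bb)
	return s
}

func main() {
	fmt.Println(renderToString(func(bb *bytes.Buffer) {
		bb.WriteString(`up{job="node"}`)
	}))
}
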
//line app/vmselect/prometheus/export.qtpl:163
func streamconvertValueToSpecialJSON(qw422016 *qt422016.Writer, v float64) {
//line app/vmselect/prometheus/export.qtpl:164
	if math.IsNaN(v) {
//line app/vmselect/prometheus/export.qtpl:164
		qw422016.N().S(`null`)
//line app/vmselect/prometheus/export.qtpl:166
	} else if math.IsInf(v, 0) {
//line app/vmselect/prometheus/export.qtpl:167
		if v > 0 {
//line app/vmselect/prometheus/export.qtpl:167
			qw422016.N().S(`"Infinity"`)
//line app/vmselect/prometheus/export.qtpl:169
		} else {
//line app/vmselect/prometheus/export.qtpl:169
			qw422016.N().S(`"-Infinity"`)
//line app/vmselect/prometheus/export.qtpl:170
		}
//line app/vmselect/prometheus/export.qtpl:171
	} else {
//line app/vmselect/prometheus/export.qtpl:173
		qw422016.N().F(v)
//line app/vmselect/prometheus/export.qtpl:174
	}
//line app/vmselect/prometheus/export.qtpl:175
}

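convertValueToSpecialJSON exists because JSON has no literals for NaN or the infinities: NaN is written as null, ±Inf as the quoted strings "Infinity" and "-Infinity", and every other value as a bare number. A standalone sketch of the same mapping (formatSpecialJSON is an illustrative name):

package main

import (
	"fmt"
	"math"
	"strconv"
)

// formatSpecialJSON renders a float the way the template does: values
// that are not representable as JSON numbers get JSON-safe substitutes.
func formatSpecialJSON(v float64) string {
	switch {
	case math.IsNaN(v):
		return `null`
	case math.IsInf(v, 1):
		return `"Infinity"`
	case math.IsInf(v, -1):
		return `"-Infinity"`
	default:
		return strconv.FormatFloat(v, 'g', -1, 64)
	}
}

func main() {
	for _, v := range []float64{1.5, math.NaN(), math.Inf(1), math.Inf(-1)} {
		fmt.Println(formatSpecialJSON(v)) // 1.5, null, "Infinity", "-Infinity"
	}
}
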
//line app/vmselect/prometheus/export.qtpl:175
func writeconvertValueToSpecialJSON(qq422016 qtio422016.Writer, v float64) {
//line app/vmselect/prometheus/export.qtpl:175
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:175
	streamconvertValueToSpecialJSON(qw422016, v)
//line app/vmselect/prometheus/export.qtpl:175
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:175
}

//line app/vmselect/prometheus/export.qtpl:175
func convertValueToSpecialJSON(v float64) string {
//line app/vmselect/prometheus/export.qtpl:175
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:175
	writeconvertValueToSpecialJSON(qb422016, v)
//line app/vmselect/prometheus/export.qtpl:175
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:175
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:175
	return qs422016
//line app/vmselect/prometheus/export.qtpl:175
}

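The string wrapper lets callers embed these rendered values directly into exported JSON lines. For comparison, the standard encoding/json encoder rejects such values outright, which is why a substitute encoding is needed at all:

package main

import (
	"encoding/json"
	"fmt"
	"math"
)

func main() {
	// encoding/json refuses NaN and the infinities, so they must be
	// rewritten before a sample can appear in a JSON document.
	_, err := json.Marshal(math.Inf(1))
	fmt.Println(err) // json: unsupported value: +Inf
}
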
//line app/vmselect/prometheus/export.qtpl:177
func streamescapePrometheusLabel(qw422016 *qt422016.Writer, b []byte) {
//line app/vmselect/prometheus/export.qtpl:177
	qw422016.N().S(`"`)
//line app/vmselect/prometheus/export.qtpl:179
	for len(b) > 0 {
//line app/vmselect/prometheus/export.qtpl:180
		n := bytes.IndexAny(b, "\\\n\"")

//line app/vmselect/prometheus/export.qtpl:181
		if n < 0 {
//line app/vmselect/prometheus/export.qtpl:182
			qw422016.N().Z(b)
//line app/vmselect/prometheus/export.qtpl:183
			break
//line app/vmselect/prometheus/export.qtpl:184
		}
//line app/vmselect/prometheus/export.qtpl:185
		qw422016.N().Z(b[:n])
//line app/vmselect/prometheus/export.qtpl:186
		switch b[n] {
//line app/vmselect/prometheus/export.qtpl:187
		case '\\':
//line app/vmselect/prometheus/export.qtpl:187
			qw422016.N().S(`\\`)
//line app/vmselect/prometheus/export.qtpl:189
		case '\n':
//line app/vmselect/prometheus/export.qtpl:189
			qw422016.N().S(`\n`)
//line app/vmselect/prometheus/export.qtpl:191
		case '"':
//line app/vmselect/prometheus/export.qtpl:191
			qw422016.N().S(`\"`)
//line app/vmselect/prometheus/export.qtpl:193
		}
//line app/vmselect/prometheus/export.qtpl:194
		b = b[n+1:]

//line app/vmselect/prometheus/export.qtpl:195
	}
//line app/vmselect/prometheus/export.qtpl:195
	qw422016.N().S(`"`)
//line app/vmselect/prometheus/export.qtpl:197
}

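streamescapePrometheusLabel quotes a label value while escaping only the three characters that would break the exposition format: backslash, newline and double quote. bytes.IndexAny locates the next character needing an escape, so clean runs are copied in bulk rather than byte-by-byte. A standalone sketch of the same scan-and-escape loop (escapeLabel is an illustrative name):

package main

import (
	"bytes"
	"fmt"
)

// escapeLabel wraps b in double quotes, escaping \, newline and ";
// unescaped runs are appended in one call, as in the template above.
func escapeLabel(b []byte) []byte {
	out := append([]byte(nil), '"')
	for len(b) > 0 {
		n := bytes.IndexAny(b, "\\\n\"")
		if n < 0 {
			out = append(out, b...)
			break
		}
		out = append(out, b[:n]...)
		switch b[n] {
		case '\\':
			out = append(out, `\\`...)
		case '\n':
			out = append(out, `\n`...)
		case '"':
			out = append(out, `\"`...)
		}
		b = b[n+1:]
	}
	return append(out, '"')
}

func main() {
	// Prints: "path=\"C:\\tmp\"\n"
	fmt.Println(string(escapeLabel([]byte("path=\"C:\\tmp\"\n"))))
}
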
//line app/vmselect/prometheus/export.qtpl:197
func writeescapePrometheusLabel(qq422016 qtio422016.Writer, b []byte) {
//line app/vmselect/prometheus/export.qtpl:197
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/export.qtpl:197
	streamescapePrometheusLabel(qw422016, b)
//line app/vmselect/prometheus/export.qtpl:197
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/export.qtpl:197
}

//line app/vmselect/prometheus/export.qtpl:197
func escapePrometheusLabel(b []byte) string {
//line app/vmselect/prometheus/export.qtpl:197
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/export.qtpl:197
	writeescapePrometheusLabel(qb422016, b)
//line app/vmselect/prometheus/export.qtpl:197
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/export.qtpl:197
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/export.qtpl:197
	return qs422016
//line app/vmselect/prometheus/export.qtpl:197
}
