Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2026-05-17 08:36:55 +03:00

Compare commits: feature/bu ... v1.31.5 (654 commits)
654 commits compared, listed by abbreviated SHA1 only (192b51c246 at the top of the list through eff0debe14 at the bottom); the author, date and commit-message columns are empty in this mirror snapshot.
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Version**
The line returned when passing `--version` command line flag to binary. For example:
```
$ ./victoria-metrics-prod --version
victoria-metrics-20190730-121249-heads-single-node-0-g671d9e55
```

**Additional context**
Add any other context about the problem here such as error logs, `/metrics` output, screenshots from [the official Grafana dashboard for VictoriaMetrics](https://grafana.com/dashboards/10229).

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

.github/workflows/github-pages.yml (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
name: github-pages
on:
  push:
    paths:
      - 'docs/*.md'
      - 'README.md'
    branches:
      - master
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: publish
        shell: bash
        env:
          TOKEN: ${{secrets.CI_TOKEN}}
        run: |
          git clone https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.github.io.git gpages
          cp docs/*.md gpages
          cp README.md gpages
          cd gpages
          git config --local user.email "info@victoriametrics.com"
          git config --local user.name "Vika"
          git add "*.md"
          git commit -m "update github pages"
          remote_repo="https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.github.io.git"
          git push "${remote_repo}"
          cd ..
          rm -rf gpages

.github/workflows/main.yml (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
name: main
on:
  push:
    paths-ignore:
      - 'docs/**'
      - '**.md'
  pull_request:
    paths-ignore:
      - 'docs/**'
      - '**.md'
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Setup Go
        uses: actions/setup-go@v1
        with:
          go-version: 1.13
        id: go
      - name: Code checkout
        uses: actions/checkout@v1
      - name: Dependencies
        env:
          GO111MODULE: off
        run: |
          go get -v golang.org/x/lint/golint
          go get -u github.com/kisielk/errcheck
      - name: Build
        env:
          GO111MODULE: on
        run: |
          export PATH=$PATH:$(go env GOPATH)/bin # temporary fix. See https://github.com/actions/setup-go/issues/14
          make check-all
          git diff --exit-code
          make test-full
          make test-pure
          make test-full-386
          make victoria-metrics
          make victoria-metrics-pure
          make victoria-metrics-arm
          make victoria-metrics-arm64
          make vmutils
          GOOS=freebsd go build -mod=vendor ./app/victoria-metrics
          GOOS=darwin go build -mod=vendor ./app/victoria-metrics
      - name: Publish coverage
        uses: codecov/codecov-action@v1.0.4
        with:
          token: ${{secrets.CODECOV_TOKEN}}
          file: ./coverage.txt

.github/workflows/wiki.yml (vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
name: wiki
on:
  push:
    paths:
      - 'docs/*.md'
    branches:
      - master
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: publish
        shell: bash
        env:
          TOKEN: ${{secrets.CI_TOKEN}}
        run: |
          cd docs
          git clone https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git wiki
          find ./ -name '*.md' -exec cp -prv '{}' 'wiki' ';'
          cd wiki
          git config --local user.email "info@victoriametrics.com"
          git config --local user.name "Vika"
          git add "*.md"
          git commit -m "update wiki pages"
          remote_repo="https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git"
          git push "${remote_repo}"
          cd ..
          rm -rf wiki

.gitignore (vendored, 5 lines changed)
@@ -1,3 +1,4 @@
/tmp
/tags
/pkg
*.pprof
@@ -9,3 +10,7 @@
/victoria-metrics-data
/vmstorage-data
/vmselect-cache
/package/temp-deb-*
/package/temp-rpm-*
/package/*.deb
/package/*.rpm

CODE_OF_CONDUCT.md (new file, 76 lines)
@@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at info@victoriametrics.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

CONTRIBUTING.md (new file, 16 lines)
@@ -0,0 +1,16 @@
If you like VictoriaMetrics and want to contribute, then we need the following:

- Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
- Spreading a word about VictoriaMetrics: conference talks, articles, comments, experience sharing with colleagues.
- Updating documentation.

We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):

- Prefer simple code and architecture.
- Avoid complex abstractions.
- Avoid magic code and fancy algorithms.
- Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
- Minimize the number of moving parts in the distributed system.
- Avoid automated decisions, which may hurt cluster availability, consistency or performance.

Adhering `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.

Makefile (86 lines changed)
@@ -1,7 +1,7 @@
PKG_PREFIX := github.com/VictoriaMetrics/VictoriaMetrics

BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
    git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | sha1sum | grep -oP '^.{8}')))
    git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | openssl sha1 | cut -c 10-17)))

PKG_TAG ?= $(shell git tag -l --points-at HEAD)
ifeq ($(PKG_TAG),)
@@ -19,46 +19,106 @@ include deployment/*/Makefile

clean:
    rm -rf bin/*

release: victoria-metrics-prod
    cd bin && tar czf victoria-metrics-$(PKG_TAG).tar.gz victoria-metrics-prod
publish: \
    publish-victoria-metrics \
    publish-vmbackup \
    publish-vmrestore

package: \
    package-victoria-metrics \
    package-vmbackup \
    package-vmrestore

vmutils: \
    vmbackup \
    vmrestore

release: \
    release-victoria-metrics \
    release-vmutils

release-victoria-metrics: victoria-metrics-prod
    cd bin && tar czf victoria-metrics-$(PKG_TAG).tar.gz victoria-metrics-prod && \
        sha256sum victoria-metrics-$(PKG_TAG).tar.gz > victoria-metrics-$(PKG_TAG)_checksums.txt

release-vmutils: \
    vmbackup-prod \
    vmrestore-prod
    cd bin && tar czf vmutils-$(PKG_TAG).tar.gz vmbackup-prod vmrestore-prod && \
        sha256sum vmutils-$(PKG_TAG).tar.gz > vmutils-$(PKG_TAG)_checksums.txt

pprof-cpu:
    go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)

fmt:
    go fmt $(PKG_PREFIX)/lib/...
    go fmt $(PKG_PREFIX)/app/...
    GO111MODULE=on gofmt -l -w -s ./lib
    GO111MODULE=on gofmt -l -w -s ./app

vet:
    go vet $(PKG_PREFIX)/lib/...
    go vet $(PKG_PREFIX)/app/...
    GO111MODULE=on go vet -mod=vendor ./lib/...
    GO111MODULE=on go vet -mod=vendor ./app/...

lint: install-golint
    golint lib/...
    golint app/...

install-golint:
    which golint || GO111MODULE=off go get -u github.com/golang/lint/golint
    which golint || GO111MODULE=off go get -u golang.org/x/lint/golint

errcheck: install-errcheck
    errcheck -exclude=errcheck_excludes.txt ./lib/...
    errcheck -exclude=errcheck_excludes.txt ./app/vminsert/...
    errcheck -exclude=errcheck_excludes.txt ./app/vmselect/...
    errcheck -exclude=errcheck_excludes.txt ./app/vmstorage/...
    errcheck -exclude=errcheck_excludes.txt ./app/vmbackup/...
    errcheck -exclude=errcheck_excludes.txt ./app/vmrestore/...

install-errcheck:
    which errcheck || GO111MODULE=off go get -u github.com/kisielk/errcheck

check-all: fmt vet lint errcheck golangci-lint

test:
    go test $(PKG_PREFIX)/lib/...
    GO111MODULE=on go test -tags=integration -mod=vendor ./lib/... ./app/...

test-pure:
    GO111MODULE=on CGO_ENABLED=0 go test -tags=integration -mod=vendor ./lib/... ./app/...

test-full:
    GO111MODULE=on go test -tags=integration -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

test-full-386:
    GO111MODULE=on GOARCH=386 go test -tags=integration -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

benchmark:
    go test -bench=. $(PKG_PREFIX)/lib/...
    GO111MODULE=on go test -mod=vendor -bench=. ./lib/...
    GO111MODULE=on go test -mod=vendor -bench=. ./app/...

benchmark-pure:
    GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./lib/...
    GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./app/...

vendor-update:
    go get -u
    go mod tidy
    go mod vendor
    GO111MODULE=on go get -u ./lib/...
    GO111MODULE=on go get -u ./app/...
    GO111MODULE=on go mod tidy
    GO111MODULE=on go mod vendor

app-local:
    CGO_ENABLED=1 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

app-local-pure:
    CGO_ENABLED=0 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)

quicktemplate-gen: install-qtc
    qtc

install-qtc:
    which qtc || GO111MODULE=off go get -u github.com/valyala/quicktemplate/qtc

golangci-lint: install-golangci-lint
    golangci-lint run --exclude '(SA4003|SA1019):' -D errcheck -D structcheck

install-golangci-lint:
    which golangci-lint || GO111MODULE=off go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
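
Taken together, these targets give a short local workflow. The sequence below is only a sketch; `v1.31.5` is an illustrative value for `PKG_TAG`, which can be overridden as noted in the README:

```
make check-all                 # fmt + vet + lint + errcheck + golangci-lint
make test-full                 # integration tests with coverage.txt
make victoria-metrics-prod     # production binary in ./bin (requires docker)
PKG_TAG=v1.31.5 make release   # tarballs plus sha256 checksum files for victoria-metrics and vmutils
```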

README.md (727 lines changed)
@@ -1,43 +1,63 @@
<img text-align="center" alt="Victoria Metrics" src="logo.png">

## Single-node VictoriaMetrics

[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
[](https://hub.docker.com/r/victoriametrics/victoria-metrics)
[](http://slack.victoriametrics.com/)
[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)

VictoriaMetrics is a long-term remote storage for Prometheus.
<img alt="Victoria Metrics" src="logo.png" height="200px">

## VictoriaMetrics

VictoriaMetrics is fast, cost-effective and scalable time-series database. It can be used as long-term remote storage for Prometheus.
It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
[docker images](https://hub.docker.com/r/valyala/victoria-metrics/) and
in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
[docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and
in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics). Just download VictoriaMetrics and see [how to start it](#how-to-start-victoriametrics).

Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

## Case studies

* [Wix.com](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#wixcom)
* [Wedos.com](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#wedoscom)
* [Dreamteam](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/CaseStudies#dreamteam)

## Prominent features

* Supports [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/), so it can be used as Prometheus drop-in replacement in Grafana.
Additionally, VictoriaMetrics extends PromQL with opt-in [useful features](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL).
VictoriaMetrics implements [MetricsQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL) query language, which is inspired by PromQL.
* Supports global query view. Multiple Prometheus instances may write data into VictoriaMetrics. Later this data may be used in a single query.
* High performance and good scalability for both [inserts](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
and [selects](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
[Outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
* [Uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) when working with millions of unique time series (aka high cardinality).
* Optimized for time series with high churn rate. Think about [prometheus-operator](https://github.com/coreos/prometheus-operator) metrics from frequent deployments in Kubernetes.
* High data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
may be crammed into a limited storage comparing to TimescaleDB.
* Optimized for storage with high-latency IO and low iops (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
may be crammed into limited storage comparing to TimescaleDB.
* Optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, Uber M3, Cortex, InfluxDB or TimescaleDB.
See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae)
and [comparing Thanos to VictoriaMetrics cluster](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683).
* Easy operation:
* VictoriaMetrics consists of a single executable without external dependencies.
* VictoriaMetrics consists of a single [small executable](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) without external dependencies.
* All the configuration is done via explicit command-line flags with reasonable defaults.
* All the data is stored in a single directory pointed by `-storageDataPath` flag.
* Easy backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* Storage is protected from corruption on unclean shutdown (i.e. hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* Supports metrics' ingestion and backfilling via the following protocols:
* Easy and fast backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
to S3 or GCS with [vmbackup](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md) / [vmrestore](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmrestore/README.md).
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
* Storage is protected from corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* Supports metrics' ingestion and [backfilling](#backfilling) via the following protocols:
* [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
* [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
* [Graphite plaintext protocol](https://graphite.readthedocs.io/en/latest/feeding-carbon.html) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon)
* [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon)
if `-graphiteListenAddr` is set.
* [OpenTSDB put message](http://opentsdb.net/docs/build/html/api_telnet/put.html) if `-opentsdbListenAddr` is set.
* Ideally works with big amounts of time series data from IoT sensors, connected car sensors and industrial sensors.
* [OpenTSDB put message](#sending-data-via-telnet-put-protocol) if `-opentsdbListenAddr` is set.
* [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests) if `-opentsdbHTTPListenAddr` is set.
* [/api/v1/import](#how-to-import-time-series-data)
* Ideally works with big amounts of time series data from Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various Enterprise workloads.
* Has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

@@ -46,82 +66,83 @@ Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaM

### Table of contents

* [How to build from sources](#how-to-build-from-sources)
* [How to start VictoriaMetrics](#how-to-start-victoriametrics)
* [Prometheus setup](#prometheus-setup)
* [Grafana setup](#grafana-setup)
* [How to send data from InfluxDB-compatible agents such as Telegraf](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [How to send data from Graphite-compatible agents such as StatsD](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
* [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents)
* [How to apply new config / ugrade VictoriaMetrics](#how-to-apply-new-config--upgrade-victoriametrics)
* [How to work with snapshots](#how-to-work-with-snapshots)
* [How to delete time series](#how-to-delete-time-series)
* [How to export time series](#how-to-export-time-series)
* [Federation](#federation)
* [Capacity planning](#capacity-planning)
* [High Availability](#high-availability)
* [Multiple retentions](#multiple-retentions)
* [Scalability and cluster version](#scalability-and-cluster-version)
* [Security](#security)
* [Tuning](#tuning)
* [Monitoring](#monitoring)
* [Troubleshooting](#troubleshooting)
* [Community and contributions](#community-and-contributions)
* [Reporting bugs](#reporting-bugs)

### How to build from sources

We recommend using either [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or
[docker images](https://hub.docker.com/r/valyala/victoria-metrics/) instead of building VictoriaMetrics
from sources. Building from sources is reasonable when developing an additional features specific
to your needs.

#### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
2. Run `go build ./app/victoria-metrics` from the root folder of the repository.
It will build `victoria-metrics` binary in the root folder of the repository.

#### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make victoria-metrics-prod` from the root folder of the respository.
It will build `victoria-metrics-prod` binary and put it into the `bin` folder.

#### Building docker images

Run `make package-victoria-metrics`. It will build `valyala/victoria-metrics:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package`.

- [How to start VictoriaMetrics](#how-to-start-victoriametrics)
- [Prometheus setup](#prometheus-setup)
- [Grafana setup](#grafana-setup)
- [How to upgrade VictoriaMetrics?](#how-to-upgrade-victoriametrics)
- [How to apply new config to VictoriaMetrics?](#how-to-apply-new-config-to-victoriametrics)
- [How to send data from InfluxDB-compatible agents such as Telegraf?](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
- [How to send data from Graphite-compatible agents such as StatsD?](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
- [Querying Graphite data](#querying-graphite-data)
- [How to send data from OpenTSDB-compatible agents?](#how-to-send-data-from-opentsdb-compatible-agents)
- [Prometheus querying API usage](#prometheus-querying-api-usage)
- [How to build from sources](#how-to-build-from-sources)
- [Development build](#development-build)
- [Production build](#production-build)
- [ARM build](#arm-build)
- [Pure Go build (CGO_ENABLED=0)](#pure-go-build-cgo_enabled0)
- [Building docker images](#building-docker-images)
- [Start with docker-compose](#start-with-docker-compose)
- [Setting up service](#setting-up-service)
- [Third-party contributions](#third-party-contributions)
- [How to work with snapshots?](#how-to-work-with-snapshots)
- [How to delete time series?](#how-to-delete-time-series)
- [How to export time series?](#how-to-export-time-series)
- [How to import time series data?](#how-to-import-time-series-data)
- [Federation](#federation)
- [Capacity planning](#capacity-planning)
- [High availability](#high-availability)
- [Multiple retentions](#multiple-retentions)
- [Downsampling](#downsampling)
- [Multi-tenancy](#multi-tenancy)
- [Scalability and cluster version](#scalability-and-cluster-version)
- [Alerting](#alerting)
- [Security](#security)
- [Tuning](#tuning)
- [Monitoring](#monitoring)
- [Troubleshooting](#troubleshooting)
- [Backfilling](#backfilling)
- [Profiling](#profiling)
- [Integrations](#integrations)
- [Roadmap](#roadmap)
- [Contacts](#contacts)
- [Community and contributions](#community-and-contributions)
- [Reporting bugs](#reporting-bugs)
- [Victoria Metrics Logo](#victoria-metrics-logo)
- [Logo Usage Guidelines](#logo-usage-guidelines)
- [Font used:](#font-used)
- [Color Palette:](#color-palette)
- [We kindly ask:](#we-kindly-ask)

### How to start VictoriaMetrics

Just start VictoriaMetrics executable or docker image with the desired command-line flags.
Just start VictoriaMetrics [executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
or [docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) with the desired command-line flags.

The following command line flags are used the most:
The following command-line flags are used the most:

* `-storageDataPath` - path to data directory. VictoriaMetrics stores all the data in this directory.
* `-retentionPeriod` - retention period in months for the data. Older data is automatically deleted.
* `-httpListenAddr` - TCP address to listen to for http requests. By default it listens port `8428` on all the network interfaces.
* `-graphiteListenAddr` - TCP and UDP address to listen to for Graphite data. By default it is disabled.
* `-opentsdbListenAddr` - TCP and UDP address to listen to for OpenTSDB data. By default it is disabled.
* `-storageDataPath` - path to data directory. VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in current working directory.
* `-retentionPeriod` - retention period in months for the data. Older data is automatically deleted. Default period is 1 month.
* `-httpListenAddr` - TCP address to listen to for http requests. By default, it listens port `8428` on all the network interfaces.
* `-graphiteListenAddr` - TCP and UDP address to listen to for Graphite data. By default, it is disabled.
* `-opentsdbListenAddr` - TCP and UDP address to listen to for OpenTSDB data over telnet protocol. By default, it is disabled.
* `-opentsdbHTTPListenAddr` - TCP address to listen to for HTTP OpenTSDB data over `/api/put`. By default, it is disabled.

Pass `-help` to see all the available flags with description and default values.

It is recommended setting up [monitoring](#monitoring) for VictoriaMetrics.
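
For example, a single-node instance might be started like this; the data path and retention value below are illustrative, not defaults:

```
/path/to/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics-data -retentionPeriod=12 -httpListenAddr=:8428
```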

### Prometheus setup

Add the following lines to Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`):
Prometheus must be configured with [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
in order to send data to VictoriaMetrics. Add the following lines
to Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`):

```yml
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
```

Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
@@ -131,11 +152,11 @@ Then apply the new config via the following command:
kill -HUP `pidof prometheus`
```

Prometheus writes incoming data to local storage and to remote storage in parallel.
Prometheus writes incoming data to local storage and replicates it to remote storage in parallel.
This means the data remains available in local storage for `--storage.tsdb.retention.time` duration
if remote storage stops working.
even if remote storage is unavailable.

If you plan sending data to VictoriaMetrics from multiple Prometheus instances, then add the following lines into `global` section
If you plan to send data to VictoriaMetrics from multiple Prometheus instances, then add the following lines into `global` section
of [Prometheus config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file):

```yml
@@ -146,7 +167,27 @@ global:

This instructs Prometheus to add `datacenter=dc-123` label to each time series sent to remote storage.
The label name may be arbitrary - `datacenter` is just an example. The label value must be unique
across Prometheus instances, so time series may be filtered and grouped by this label.
across Prometheus instances, so those time series may be filtered and grouped by this label.

For highly loaded Prometheus instances (400k+ samples per second)
the following tuning may be applied:
```
remote_write:
  - url: http://<victoriametrics-addr>:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
      capacity: 20000
      max_shards: 30
```

Using remote write increases memory usage for Prometheus up to ~25%
and depends on the shape of data. If you are experiencing issues with
too high memory consumption try to lower `max_samples_per_send`
and `capacity` params (keep in mind that these two params are tightly connected).
Read more about tuning remote write for Prometheus [here](https://prometheus.io/docs/practices/remote_write).

It is recommended upgrading Prometheus to [v2.12.0](https://github.com/prometheus/prometheus/releases) or newer,
since the previous versions may have issues with `remote_write`.

### Grafana setup

@@ -160,7 +201,35 @@ http://<victoriametrics-addr>:8428

Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.

Then build graphs with the created datasource using [Prometheus query language](https://prometheus.io/docs/prometheus/latest/querying/basics/).
VictoriaMetrics supports native PromQL and [extends it with useful features](ExtendedPromQL).
VictoriaMetrics supports native PromQL and [extends it with useful features](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL).

### How to upgrade VictoriaMetrics?

It is safe upgrading VictoriaMetrics to new versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
say otherwise. It is recommended performing regular upgrades to the latest version,
since it may contain important bug fixes, performance optimizations or new features.

Follow the following steps during the upgrade:

1) Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
2) Wait until the process stops. This can take a few seconds.
3) Start the upgraded VictoriaMetrics.

Prometheus doesn't drop data during VictoriaMetrics restart.
See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details.
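
Assuming the binary runs under its default name, steps 1 and 2 might look like the following sketch; adapt it to your init system or service manager:

```
kill -INT `pidof victoria-metrics-prod`                            # step 1: ask for a graceful stop
while pidof victoria-metrics-prod > /dev/null; do sleep 1; done    # step 2: wait until the process exits
```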

### How to apply new config to VictoriaMetrics?

VictoriaMetrics must be restarted for applying new config:

1) Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
2) Wait until the process stops. This can take a few seconds.
3) Start VictoriaMetrics with the new config.

Prometheus doesn't drop data during VictoriaMetrics restart.
See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details.

### How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)?

@@ -177,9 +246,49 @@ Do not forget substituting `<victoriametrics-addr>` with the real address where

VictoriaMetrics maps Influx data using the following rules:
* [`db` query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
* Field names are mapped to time series names prefixed by `{measurement}.` value
* Field values are mapped to time series values
* Tags are mapped to Prometheus labels as-is
unless `db` tag exists in the Influx line.
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value,
where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag.
See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty, then time series names correspond to field names.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels as-is.

For example, the following Influx line:

```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted into the following Prometheus data points:

```
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```

Example for writing data with [Influx line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
to local VictoriaMetrics using `curl`:

```
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'
```

An arbitrary number of lines delimited by '\n' may be sent in a single request.
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:

```
curl -G 'http://localhost:8428/api/v1/export' -d 'match={__name__=~"measurement_.*"}'
```

The `/api/v1/export` endpoint should return the following response:

```
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560272508147]}
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1560272508147]}
```

Note that Influx line protocol expects [timestamps in *nanoseconds* by default](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/#timestamp),
while VictoriaMetrics stores them with *milliseconds* precision.
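
For example, when a timestamp is passed explicitly it has to be given in nanoseconds on the wire, even though it is stored with millisecond precision; the value below is simply the sample timestamp from the export above expressed in nanoseconds:

```
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23 1560272508147000000' -X POST 'http://localhost:8428/write'
```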

### How to send data from Graphite-compatible agents such as [StatsD](https://github.com/etsy/statsd)?

@@ -188,36 +297,216 @@ VictoriaMetrics maps Influx data using the following rules:

the following command will enable Graphite receiver in VictoriaMetrics on TCP and UDP port `2003`:

```
/path/to/victoria-metrics-prod ... -graphiteListenAddr=:2003
/path/to/victoria-metrics-prod -graphiteListenAddr=:2003
```

2) Use the configured address in Graphite-compatible agents. For instance, set `graphiteHost`
to the VictoriaMetrics host in `StatsD` configs.

### How to send data from OpenTSDB-compatible agents?

1) Enable OpenTSDB receiver in VictoriaMetrics by setting `-opentsdbListenAddr` command line flag. For instance,
the following command will enable OpenTSDB receiver in VictoriaMetrics on TCP and UDP port `4242`:
Example for writing data with Graphite plaintext protocol to local VictoriaMetrics using `nc`:

```
/path/to/victoria-metrics-prod ... -opentsdbListenAddr=:4242
echo "foo.bar.baz;tag1=value1;tag2=value2 123 `date +%s`" | nc -N localhost 2003
```

VictoriaMetrics sets the current time if the timestamp is omitted.
An arbitrary number of lines delimited by `\n` may be sent in one go.
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:

```
curl -G 'http://localhost:8428/api/v1/export' -d 'match=foo.bar.baz'
```

The `/api/v1/export` endpoint should return the following response:

```
{"metric":{"__name__":"foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560277406000]}
```

### Querying Graphite data

Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read either via
[Prometheus querying API](#prometheus-querying-api-usage)
or via [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/master/cmd/carbonapi/carbonapi.example.prometheus.yaml).
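
For instance, the sample metric ingested above could be fetched through the Prometheus querying API with an `__name__` selector; this is only a sketch, adjust the address as needed:

```
curl -G 'http://localhost:8428/api/v1/query' -d 'query={__name__="foo.bar.baz"}'
```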
|
||||
|
||||
|
||||
### How to send data from OpenTSDB-compatible agents?
|
||||
|
||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||
and [HTTP /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html) for ingesting OpenTSDB data.
|
||||
|
||||
#### Sending data via `telnet put` protocol
|
||||
|
||||
1) Enable OpenTSDB receiver in VictoriaMetrics by setting `-opentsdbListenAddr` command line flag. For instance,
|
||||
the following command enables OpenTSDB receiver in VictoriaMetrics on TCP and UDP port `4242`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod -opentsdbListenAddr=:4242
|
||||
```
|
||||
|
||||
2) Send data to the given address from OpenTSDB-compatible agents.
|
||||
|
||||
|
||||
Example for writing data with OpenTSDB protocol to local VictoriaMetrics using `nc`:

```
echo "put foo.bar.baz `date +%s` 123 tag1=value1 tag2=value2" | nc -N localhost 4242
```

An arbitrary number of lines delimited by `\n` may be sent in one go.
|
||||
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:
|
||||
|
||||
```
|
||||
curl -G 'http://localhost:8428/api/v1/export' -d 'match=foo.bar.baz'
|
||||
```
|
||||
|
||||
The `/api/v1/export` endpoint should return the following response:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560277292000]}
|
||||
```
|
||||
|
||||
|
||||
#### Sending OpenTSDB data via HTTP `/api/put` requests
|
||||
|
||||
1) Enable HTTP server for OpenTSDB `/api/put` requests by setting `-opentsdbHTTPListenAddr` command line flag. For instance,
|
||||
the following command enables OpenTSDB HTTP server on port `4242`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod -opentsdbHTTPListenAddr=:4242
|
||||
```
|
||||
|
||||
2) Send data to the given address from OpenTSDB-compatible agents.
|
||||
|
||||
Example for writing a single data point:
|
||||
|
||||
```
|
||||
curl -H 'Content-Type: application/json' -d '{"metric":"x.y.z","value":45.34,"tags":{"t1":"v1","t2":"v2"}}' http://localhost:4242/api/put
|
||||
```
|
||||
|
||||
Example for writing multiple data points in a single request:
|
||||
|
||||
```
|
||||
curl -H 'Content-Type: application/json' -d '[{"metric":"foo","value":45.34},{"metric":"bar","value":43}]' http://localhost:4242/api/put
|
||||
```
|
||||
|
||||
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:
|
||||
|
||||
```
|
||||
curl -G 'http://localhost:8428/api/v1/export' -d 'match[]=x.y.z' -d 'match[]=foo' -d 'match[]=bar'
|
||||
```
|
||||
|
||||
The `/api/v1/export` endpoint should return the following response:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"foo"},"values":[45.34],"timestamps":[1566464846000]}
|
||||
{"metric":{"__name__":"bar"},"values":[43],"timestamps":[1566464846000]}
|
||||
{"metric":{"__name__":"x.y.z","t1":"v1","t2":"v2"},"values":[45.34],"timestamps":[1566464763000]}
|
||||
```
|
||||
|
||||
|
||||
### Prometheus querying API usage

VictoriaMetrics supports the following handlers from [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/):

* [/api/v1/query](https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries)
* [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries)
* [/api/v1/series](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers)
* [/api/v1/labels](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names)
* [/api/v1/label/.../values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values)

These handlers can be queried from Prometheus-compatible clients such as Grafana or curl.

VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers.
See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details:

* Any number of [time series selectors](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) via `match[]` query arg.
* Optional `start` and `end` query args for limiting the time range for the selected labels or label values.

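For example, assuming a local single-node instance on port 8428 (the selector and label names below are arbitrary examples), the `match[]`, `start` and `end` args can be combined with these handlers like this:

```
# List series matching a selector:
curl -G 'http://localhost:8428/api/v1/series' -d 'match[]={__name__="foo.bar.baz"}'

# List label names only for series matching the selector within a time range:
curl -G 'http://localhost:8428/api/v1/labels' -d 'match[]={__name__="foo.bar.baz"}' -d 'start=1560277000' -d 'end=1560280000'

# List values of the `tag1` label for the same selector:
curl -G 'http://localhost:8428/api/v1/label/tag1/values' -d 'match[]={__name__="foo.bar.baz"}'
```
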
|
||||
Additionally VictoriaMetrics provides the following handlers:
|
||||
|
||||
* `/api/v1/series/count` - it returns the total number of time series in the database. Note that this handler scans the whole inverted index,
so it can be slow if the database contains tens of millions of time series.
|
||||
* `/api/v1/labels/count` - it returns a list of `label: values_count` entries. It can be used for determining labels with the maximum number of values.
|
||||
|
||||
|
||||
### How to build from sources
|
||||
|
||||
We recommend using either [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or
|
||||
[docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/) instead of building VictoriaMetrics
|
||||
from sources. Building from sources is reasonable when developing additional features specific
|
||||
to your needs.
|
||||
|
||||
|
||||
#### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make victoria-metrics` from the root folder of the repository.
|
||||
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Production build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-prod` from the root folder of the repository.
|
||||
It builds `victoria-metrics-prod` binary and puts it into the `bin` folder.
|
||||
|
||||
#### ARM build
|
||||
|
||||
ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).
|
||||
|
||||
#### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of the repository.
|
||||
It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
#### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-arm-prod` or `make victoria-metrics-arm64-prod` from the root folder of the repository.
|
||||
It builds `victoria-metrics-arm-prod` or `victoria-metrics-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
#### Pure Go build (CGO_ENABLED=0)
|
||||
|
||||
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
||||
This is an experimental mode, which may result in a lower compression ratio and slower decompression performance.
|
||||
Use it with caution!
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make victoria-metrics-pure` from the root folder of the repository.
|
||||
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Building docker images
|
||||
|
||||
Run `make package-victoria-metrics`. It builds `victoriametrics/victoria-metrics:<PKG_TAG>` docker image locally.
|
||||
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
|
||||
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-victoria-metrics`.
|
||||
|
||||
|
||||
### Start with docker-compose
|
||||
|
||||
[Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)
|
||||
helps to spin up VictoriaMetrics, Prometheus and Grafana with one command.
|
||||
More details may be found [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker#folder-contains-basic-images-and-tools-for-building-and-running-victoria-metrics-in-docker).
|
||||
|
||||
|
||||
### Setting up service
|
||||
|
||||
Read [these instructions](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/43) on how to set up VictoriaMetrics as a service in your OS.
|
||||
|
||||
|
||||
### Third-party contributions
|
||||
|
||||
* [Unofficial yum repository](https://copr.fedorainfracloud.org/coprs/antonpatsev/VictoriaMetrics/) ([source code](https://github.com/patsevanton/victoriametrics-rpm))
|
||||
|
||||
|
||||
### How to work with snapshots?
|
||||
|
||||
VictoriaMetrics can create [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
|
||||
for all the data stored under `-storageDataPath` directory.
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/create` in order to create an instant snapshot.
|
||||
The page will return the following JSON response:
|
||||
|
||||
@@ -226,8 +515,8 @@ The page will return the following JSON response:
|
||||
```
|
||||
|
||||
Snapshots are created under `<-storageDataPath>/snapshots` directory, where `<-storageDataPath>`
is the command-line flag value. Snapshots can be archived to backup storage at any time
with [vmbackup](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md).
|
||||
|
||||
The `http://<victoriametrics-addr>:8428/snapshot/list` page contains the list of available snapshots.
|
||||
|
||||
@@ -236,6 +525,12 @@ to delete `<snapshot-name>` snapshot.
|
||||
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete_all` in order to delete all the snapshots.
|
||||
|
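A quick sketch of the snapshot workflow described above with `curl` against a local instance (snapshot names are returned by the create call):

```
# Create an instant snapshot; the response contains the snapshot name:
curl http://localhost:8428/snapshot/create

# List available snapshots:
curl http://localhost:8428/snapshot/list

# Delete all snapshots after they have been archived:
curl http://localhost:8428/snapshot/delete_all
```
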
||||
Steps for restoring from a snapshot:
|
||||
1. Stop VictoriaMetrics with `kill -INT`.
|
||||
2. Restore snapshot contents from backup with [vmrestore](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmrestore/README.md)
|
||||
to the directory pointed by `-storageDataPath`.
|
||||
3. Start VictoriaMetrics.
|
||||
|
||||
|
||||
### How to delete time series?
|
||||
|
||||
@@ -244,12 +539,16 @@ where `<timeseries_selector_for_delete>` may contain any [time series selector](
|
||||
for metrics to delete. After that all the time series matching the given selector are deleted. Storage space for
|
||||
the deleted time series isn't freed instantly - it is freed during subsequent merges of data files.
|
||||
|
||||
It is recommended to verify which metrics will be deleted by calling `http://<victoria-metrics-addr>:8428/api/v1/series?match[]=<timeseries_selector_for_delete>`
before actually deleting the metrics.
|
||||
|
||||
|
||||
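A sketch of this check-then-delete flow with `curl` (the selector is an arbitrary example; the delete handler lives at `/api/v1/admin/tsdb/delete_series`, mirroring the Prometheus admin API, so verify the path against your VictoriaMetrics version):

```
# Preview which series would be deleted:
curl -G 'http://localhost:8428/api/v1/series' -d 'match[]={__name__="foo.bar.baz"}'

# Delete the matching series; disk space is freed later during background merges:
curl -G 'http://localhost:8428/api/v1/admin/tsdb/delete_series' -d 'match[]={__name__="foo.bar.baz"}'
```
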
### How to export time series?
|
||||
|
||||
Send a request to `http://<victoriametrics-addr>:8428/api/v1/export?match[]=<timeseries_selector_for_export>`,
|
||||
where `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
for metrics to export. Use `{__name__!=""}` selector for fetching all the time series.
The response would contain all the data for the selected time series in [JSON streaming format](https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON).
|
||||
Each JSON line would contain data for a single time series. An example output:
|
||||
|
||||
```
|
||||
@@ -260,6 +559,52 @@ Each JSON line would contain data for a single time series. An example output:
|
||||
Optional `start` and `end` args may be added to the request in order to limit the time frame for the exported data. These args may contain either
|
||||
unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values.
|
||||
|
||||
Pass `Accept-Encoding: gzip` HTTP header in the request to `/api/v1/export` in order to reduce network bandwidth when exporting big amounts
of time series data. This enables gzip compression for the exported data. Example for exporting gzipped data:
|
||||
|
||||
```
|
||||
curl -H 'Accept-Encoding: gzip' http://localhost:8428/api/v1/export -d 'match[]={__name__!=""}' > data.jsonl.gz
|
||||
```
|
||||
|
||||
The maximum duration for each request to `/api/v1/export` is limited by `-search.maxExportDuration` command-line flag.
|
||||
|
||||
Exported data can be imported via POST'ing it to [/api/v1/import](#how-to-import-time-series-data).
|
||||
|
||||
|
||||
### How to import time series data?
|
||||
|
||||
Time series data can be imported via any supported ingestion protocol:
|
||||
|
||||
* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
|
||||
* [Influx line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
|
||||
* [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
|
||||
* [OpenTSDB telnet put protocol](#sending-data-via-telnet-put-protocol)
|
||||
* [OpenTSDB http /api/put](#sending-opentsdb-data-via-http-apiput-requests)
|
||||
* `/api/v1/import` http POST handler, which accepts data from [/api/v1/export](#how-to-export-time-series).
|
||||
|
||||
The most efficient protocol for importing data into VictoriaMetrics is `/api/v1/import`. Example for importing data obtained via `/api/v1/export`:
|
||||
|
||||
```
|
||||
# Export the data from <source-victoriametrics>:
|
||||
curl http://source-victoriametrics:8428/api/v1/export -d 'match={__name__!=""}' > exported_data.jsonl
|
||||
|
||||
# Import the data to <destination-victoriametrics>:
|
||||
curl -X POST http://destination-victoriametrics:8428/api/v1/import -T exported_data.jsonl
|
||||
```
|
||||
|
||||
Pass `Content-Encoding: gzip` HTTP request header to `/api/v1/import` for importing gzipped data:
|
||||
|
||||
```
|
||||
# Export gzipped data from <source-victoriametrics>:
|
||||
curl -H 'Accept-Encoding: gzip' http://source-victoriametrics:8428/api/v1/export -d 'match={__name__!=""}' > exported_data.jsonl.gz
|
||||
|
||||
# Import gzipped data to <destination-victoriametrics>:
|
||||
curl -X POST -H 'Content-Encoding: gzip' http://destination-victoriametrics:8428/api/v1/import -T exported_data.jsonl.gz
|
||||
```
|
||||
|
||||
Each request to `/api/v1/import` can load up to a single vCPU core on VictoriaMetrics. Import speed can be improved by splitting the original file into smaller parts
|
||||
and importing them concurrently. Note that the original file must be split on newlines.
|
||||
|
||||
|
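A rough sketch of such a concurrent import, assuming GNU `split` and an export file produced as shown above (file names and the part size are arbitrary):

```
# Split the exported file on newlines into ~100k-line chunks:
split -l 100000 exported_data.jsonl part_

# Import the chunks concurrently:
for f in part_*; do
  curl -X POST http://destination-victoriametrics:8428/api/v1/import -T "$f" &
done
wait
```
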
||||
### Federation
|
||||
|
||||
@@ -267,31 +612,55 @@ VictoriaMetrics exports [Prometheus-compatible federation data](https://promethe
|
||||
at `http://<victoriametrics-addr>:8428/federate?match[]=<timeseries_selector_for_federation>`.
|
||||
|
||||
Optional `start` and `end` args may be added to the request in order to scrape the last point for each selected time series on the `[start ... end]` interval.
|
||||
`start` and `end` may contain either unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values. By default, the last point
on the interval `[now - max_lookback ... now]` is scraped for each time series. The default value for `max_lookback` is `5m` (5 minutes), but it can be overridden.
|
||||
For instance, `/federate?match[]=up&max_lookback=1h` would return last points on the `[now - 1h ... now]` interval. This may be useful for time series federation
|
||||
with scrape intervals exceeding `5m`.
|
||||
|
||||
|
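For example, the federation call mentioned above can be issued with `curl` against a local instance on port 8428:

```
curl -G 'http://localhost:8428/federate' -d 'match[]=up' -d 'max_lookback=1h'
```
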
||||
### Capacity planning
|
||||
|
||||
A rough estimation of the required resources for the ingestion path:

* RAM size: less than 1KB per active time series. So, ~1GB of RAM is required for 1M active time series.
Time series is considered active if new data points have been added to it recently or if it has been recently queried.
VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited by `-memory.allowedPercent` flag.

* CPU cores: a CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing
the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with a high number of labels.
See [this article](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) for details.
If you see lower numbers per CPU core, then it is likely that active time series info doesn't fit caches,
so you need more RAM for lowering CPU usage.

* Storage space: less than a byte per data point on average. So, ~260GB is required for storing a month-long insert stream
of 100K data points per second (see the worked example below this list).
|
||||
The actual storage size heavily depends on data randomness (entropy). Higher randomness means higher storage size requirements.
|
||||
Read [this article](https://medium.com/faun/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932)
|
||||
for details.
|
||||
|
||||
* Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via
|
||||
[Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
|
||||
The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size
|
||||
of label values. The higher number of per-metric labels and longer label values mean the higher ingress bandwidth.
|
||||
|
||||
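As a worked example of the storage estimate above (a rough back-of-the-envelope calculation, not a guarantee):

```
100,000 data points/sec * 2,592,000 sec/month ≈ 259 billion data points/month
259 billion data points * <1 byte/data point ≈ up to ~260GB of storage per month
```
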
|
||||
The required resources for query path:
|
||||
|
||||
* RAM size: depends on the number of time series to scan in each query and the `step`
|
||||
argument passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries).
|
||||
The higher number of scanned time series and lower `step` argument results in the higher RAM usage.
|
||||
|
||||
* CPU cores: a CPU core per 30 million scanned data points per second.
|
||||
|
||||
* Network usage: depends on the frequency and the type of incoming requests. Typical Grafana dashboards usually
|
||||
require negligible network bandwidth.
|
||||
|
||||
|
||||
### High availability
|
||||
|
||||
1) Install multiple VictoriaMetrics instances in distinct datacenters (availability zones).
|
||||
2) Add addresses of these instances to `remote_write` section in Prometheus config:
|
||||
|
||||
```yml
|
||||
@@ -316,6 +685,10 @@ kill -HUP `pidof prometheus`
|
||||
6) Set up Prometheus datasource in Grafana that points to Promxy.
|
||||
|
||||
|
||||
If you have Prometheus HA pairs with replicas `r1` and `r2` in each pair, then configure each `r1`
|
||||
to write data to `victoriametrics-addr-1`, while each `r2` should write data to `victoriametrics-addr-2`.
|
||||
|
||||
|
||||
### Multiple retentions
|
||||
|
||||
Just start multiple VictoriaMetrics instances with distinct values for the following flags:
|
||||
@@ -325,20 +698,43 @@ Just start multiple VictoriaMetrics instances with distinct values for the follo
|
||||
* `-httpListenAddr`, so clients may reach VictoriaMetrics instance with proper retention
|
||||
|
||||
|
||||
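A minimal sketch of such a setup with two instances and distinct `-retentionPeriod`, `-storageDataPath` and `-httpListenAddr` values (paths, ports and retention values below are illustrative placeholders):

```
# Instance with 1 month retention:
/path/to/victoria-metrics-prod -retentionPeriod=1 -storageDataPath=/var/lib/vm-short -httpListenAddr=:8428

# Instance with 12 months retention:
/path/to/victoria-metrics-prod -retentionPeriod=12 -storageDataPath=/var/lib/vm-long -httpListenAddr=:8429
```
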
### Downsampling
|
||||
|
||||
There is no downsampling support at the moment, but:
|
||||
- VictoriaMetrics is optimized for querying big amounts of raw data. See benchmark results for heavy queries
|
||||
in [this article](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
- VictoriaMetrics has good compression for on-disk data. See [this article](https://medium.com/@valyala/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932)
|
||||
for details.
|
||||
|
||||
These properties reduce the need in downsampling. We plan to implement downsampling in the future.
|
||||
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36) for details.
|
||||
|
||||
|
||||
### Multi-tenancy
|
||||
|
||||
Single-node VictoriaMetrics doesn't support multi-tenancy. Use [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) instead.
|
||||
|
||||
|
||||
### Scalability and cluster version
|
||||
|
||||
Though single-node VictoriaMetrics cannot scale to multiple nodes, it is optimized for resource usage - storage size / bandwidth / IOPS, RAM, CPU.
|
||||
This means that a single-node VictoriaMetrics may scale vertically and substitute a moderately sized cluster built with competing solutions
such as Thanos, Uber M3, InfluxDB or TimescaleDB. See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
|
||||
So try single-node VictoriaMetrics at first and then [switch to cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) if you still need
|
||||
horizontally scalable long-term remote storage for really large Prometheus deployments.
|
||||
[Contact us](mailto:info@victoriametrics.com) for paid support.
|
||||
|
||||
|
||||
### Alerting
|
||||
|
||||
VictoriaMetrics doesn't support rule evaluation and alerting yet, so these actions must be performed either
|
||||
on [Prometheus side](https://prometheus.io/docs/alerting/overview/) or on [Grafana side](https://grafana.com/docs/alerting/rules/).
|
||||
|
||||
|
||||
### Security
|
||||
|
||||
Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing it to untrusted networks such as the internet.
|
||||
Consider setting the following command-line flags:
|
||||
|
||||
* `-tls`, `-tlsCertFile` and `-tlsKeyFile` for switching from HTTP to HTTPS.
|
||||
@@ -353,22 +749,46 @@ For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<i
|
||||
|
||||
### Tuning
|
||||
|
||||
* There is no need to tune VictoriaMetrics since it uses reasonable defaults for command-line flags,
which are automatically adjusted for the available CPU and RAM resources.
* There is no need to tune the Operating System since VictoriaMetrics is optimized for default OS settings.
The only option is increasing the limit on [the number of open files in the OS](https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a),
so Prometheus instances could establish more connections to VictoriaMetrics.
|
||||
* The recommended filesystem is `ext4`, the recommended persistent storage is [persistent HDD-based disk on GCP](https://cloud.google.com/compute/docs/disks/#pdspecs),
|
||||
since it is protected from hardware failures via internal replication and it can be [resized on the fly](https://cloud.google.com/compute/docs/disks/add-persistent-disk#resize_pd).
|
||||
If you plan storing more than 1TB of data on `ext4` partition or plan extending it to more than 16TB,
|
||||
then the following options are recommended to pass to `mkfs.ext4`:
|
||||
|
||||
```
|
||||
mkfs.ext4 ... -O 64bit,huge_file,extent -T huge
|
||||
```
|
||||
|
||||
|
||||
### Monitoring
|
||||
|
||||
VictoriaMetrics exports internal metrics in Prometheus format on the `/metrics` page.
|
||||
Add this page to Prometheus' scrape config in order to collect VictoriaMetrics metrics.
|
||||
There are official Grafana dashboards for [single-node VictoriaMetrics](https://grafana.com/dashboards/10229) and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176).
|
||||
|
||||
The most interesting metrics are:
|
||||
|
||||
* `vm_cache_entries{type="storage/hour_metric_ids"}` - the number of time series with new data points during the last hour
|
||||
aka active time series.
|
||||
* `rate(vm_new_timeseries_created_total[5m])` - time series churn rate.
|
||||
* `vm_rows{type="indexdb"}` - the number of rows in the inverted index. A high value for this number usually means a high churn rate for time series.
|
||||
* Sum of `vm_rows{type="storage/big"}` and `vm_rows{type="storage/small"}` - total number of `(timestamp, value)` data points
|
||||
in the database.
|
||||
* Sum of all the `vm_cache_size_bytes` metrics - the total size of all the caches in the database.
|
||||
* `vm_allowed_memory_bytes` - the maximum allowed size for caches in the database. It is calculated as `system_memory * <-memory.allowedPercent> / 100`,
|
||||
where `system_memory` is the amount of system memory and `-memory.allowedPercent` is the corresponding flag value.
|
||||
* `vm_rows_inserted_total` - the total number of inserted rows since VictoriaMetrics start.
|
||||
|
||||
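For a quick look at these metrics without Grafana, the `/metrics` page can be queried directly (a simple sketch assuming a local instance on port 8428):

```
# Current number of active time series:
curl -s http://localhost:8428/metrics | grep 'vm_cache_entries{type="storage/hour_metric_ids"}'

# Total inserted rows since start:
curl -s http://localhost:8428/metrics | grep '^vm_rows_inserted_total'
```
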
|
||||
### Troubleshooting
|
||||
|
||||
* It is recommended to use default command-line flag values (i.e. don't set them explicitly) until the need
to tweak these flag values arises.
|
||||
|
||||
* If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second,
|
||||
then it is likely you have too many active time series for the current amount of RAM.
|
||||
It is recommended to increase the amount of RAM on the node with VictoriaMetrics in order to improve
|
||||
@@ -376,10 +796,85 @@ There is [an official Grafana dashboard for single-node VictoriaMetrics](https:/
|
||||
Another option is to increase `-memory.allowedPercent` command-line flag value. Be careful with this
|
||||
option, since too big value for `-memory.allowedPercent` may result in high I/O usage.
|
||||
|
||||
* VictoriaMetrics requires free disk space for [merging data files to bigger ones](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
It may slow down when there is not enough free space left. So make sure the `-storageDataPath` directory
has at least 20% of free space compared to the disk size.

* If VictoriaMetrics doesn't work because certain parts are corrupted due to disk errors,
then just remove the directories with broken parts. This will recover VictoriaMetrics at the cost
of losing the data stored in the broken parts. In the future, a `vmrecover` tool will be created
for automatic recovery from such errors.
|
||||
|
||||
|
||||
### Backfilling
|
||||
|
||||
Make sure that the configured `-retentionPeriod` covers timestamps for the backfilled data.
|
||||
|
||||
It is recommended to disable the query cache with the `-search.disableCache` command-line flag when writing
historical data with timestamps from the past, since the cache assumes that the data is written with
the current timestamps. The query cache can be enabled again after the backfilling is complete.
|
||||
|
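A sketch of a backfilling run under these recommendations (the flag values and the data file name are placeholders):

```
# Start VictoriaMetrics with the query cache disabled and a retention that covers the old timestamps:
/path/to/victoria-metrics-prod -retentionPeriod=36 -search.disableCache

# Import historical data exported from another instance:
curl -X POST http://localhost:8428/api/v1/import -T historical_data.jsonl

# Restart without -search.disableCache once backfilling is complete.
```
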
||||
|
||||
### Profiling
|
||||
|
||||
VictoriaMetrics provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||
|
||||
- Memory profile. It can be collected with the following command:
|
||||
```
|
||||
curl -s http://<victoria-metrics-host>:8428/debug/pprof/heap > mem.pprof
|
||||
```
|
||||
|
||||
- CPU profile. It can be collected with the following command:
|
||||
```
|
||||
curl -s http://<victoria-metrics-host>:8428/debug/pprof/profile > cpu.pprof
|
||||
```
|
||||
|
||||
The command for collecting CPU profile waits for 30 seconds before returning.
|
||||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
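For example, once collected as shown above, the profiles can be inspected locally:

```
# Interactive analysis of the memory profile:
go tool pprof mem.pprof

# Top CPU consumers from the CPU profile:
go tool pprof -top cpu.pprof
```
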
|
||||
|
||||
## Integrations
|
||||
|
||||
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
|
||||
See [these docs](https://github.com/netdata/netdata#integrations).
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
See [this example](https://github.com/go-graphite/carbonapi/blob/master/cmd/carbonapi/carbonapi.example.prometheus.yaml).
|
||||
* [Ansible role for installing VictoriaMetrics](https://github.com/dreamteam-gg/ansible-victoriametrics-role).
|
||||
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [ ] Replication [#118](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/118)
|
||||
- [ ] Support of Object Storages (GCS, S3, Azure Storage) [#38](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/38)
|
||||
- [ ] Data downsampling [#36](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36)
|
||||
- [ ] Alert Manager Integration [#119](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/119)
|
||||
- [ ] CLI tool for data migration, re-balancing and adding/removing nodes [#103](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/103)
|
||||
|
||||
|
||||
The discussion happens [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/129). Feel free to comment on any item or add your own.
|
||||
|
||||
|
||||
## Contacts
|
||||
|
||||
Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics.com](mailto:info@victoriametrics.com).
|
||||
|
||||
|
||||
## Community and contributions
|
||||
|
||||
Feel free to ask any questions regarding VictoriaMetrics:
|
||||
|
||||
- [slack](http://slack.victoriametrics.com/)
|
||||
- [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||
- [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||
- [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||
|
||||
|
||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||
|
||||
- Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
|
||||
- Spreading the word about VictoriaMetrics: conference talks, articles, comments, sharing experience with colleagues.
|
||||
- Updating documentation.
|
||||
|
||||
We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):
|
||||
|
||||
|
||||
@@ -1,21 +1,107 @@
|
||||
# All these commands must run from repository root.
|
||||
|
||||
victoria-metrics:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-local
|
||||
|
||||
victoria-metrics-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker
|
||||
|
||||
victoria-metrics-pure-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker-pure
|
||||
|
||||
victoria-metrics-amd64-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker-amd64
|
||||
|
||||
victoria-metrics-arm-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker-arm
|
||||
|
||||
victoria-metrics-arm64-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker-arm64
|
||||
|
||||
victoria-metrics-ppc64le-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker-ppc64le
|
||||
|
||||
victoria-metrics-386-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker-386
|
||||
|
||||
package-victoria-metrics:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker
|
||||
|
||||
package-victoria-metrics-pure:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker-pure
|
||||
|
||||
package-victoria-metrics-amd64:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-victoria-metrics-arm:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker-arm
|
||||
|
||||
package-victoria-metrics-arm64:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-victoria-metrics-ppc64le:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-victoria-metrics-386:
|
||||
APP_NAME=victoria-metrics $(MAKE) package-via-docker-386
|
||||
|
||||
publish-victoria-metrics:
|
||||
APP_NAME=victoria-metrics $(MAKE) publish-via-docker
|
||||
|
||||
run-victoria-metrics:
|
||||
mkdir -p victoria-metrics-data
|
||||
DOCKER_OPTS='-v $(shell pwd)/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 -p 2003:2003 -p 2003:2003/udp' \
|
||||
DOCKER_OPTS='-v $(shell pwd)/victoria-metrics-data:/victoria-metrics-data' \
|
||||
APP_NAME=victoria-metrics \
|
||||
ARGS='-graphiteListenAddr=:2003 -opentsdbListenAddr=:4242 -retentionPeriod=12 -search.maxUniqueTimeseries=1000000 -search.maxQueryDuration=10m' \
|
||||
$(MAKE) run-via-docker
|
||||
|
||||
victoria-metrics-amd64:
|
||||
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-amd64 ./app/victoria-metrics
|
||||
|
||||
victoria-metrics-arm:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm ./app/victoria-metrics
|
||||
|
||||
victoria-metrics-arm64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm64 ./app/victoria-metrics
|
||||
|
||||
victoria-metrics-ppc64le:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-ppc64le ./app/victoria-metrics
|
||||
|
||||
victoria-metrics-386:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=386 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-386 ./app/victoria-metrics
|
||||
|
||||
victoria-metrics-pure:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-local-pure
|
||||
|
||||
### Packaging as DEB - amd64
|
||||
victoria-metrics-package-deb: victoria-metrics-prod
|
||||
./package/package_deb.sh amd64
|
||||
|
||||
### Packaging as DEB - arm64
|
||||
victoria-metrics-package-deb-arm64: victoria-metrics-arm64-prod
|
||||
./package/package_deb.sh arm64
|
||||
|
||||
### Packaging as DEB - all
|
||||
victoria-metrics-package-deb-all: \
|
||||
victoria-metrics-package-deb \
|
||||
victoria-metrics-package-deb-arm64
|
||||
|
||||
### Packaging as RPM - amd64
|
||||
victoria-metrics-package-rpm: victoria-metrics-prod
|
||||
./package/package_rpm.sh amd64
|
||||
|
||||
### Packaging as RPM - arm64
|
||||
victoria-metrics-package-rpm-arm64: victoria-metrics-arm64-prod
|
||||
./package/package_rpm.sh arm64
|
||||
|
||||
### Packaging as RPM - all
|
||||
victoria-metrics-package-rpm-all: \
|
||||
victoria-metrics-package-rpm \
|
||||
victoria-metrics-package-rpm-arm64
|
||||
|
||||
### Packaging as both DEB and RPM - all
|
||||
victoria-metrics-package-deb-rpm-all: \
|
||||
victoria-metrics-package-deb \
|
||||
victoria-metrics-package-deb-arm64 \
|
||||
victoria-metrics-package-rpm \
|
||||
victoria-metrics-package-rpm-arm64
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
ARG certs_image
|
||||
FROM $certs_image AS certs
|
||||
FROM scratch
|
||||
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ARG src_binary
COPY $src_binary ./victoria-metrics-prod
|
||||
EXPOSE 8428
|
||||
ENTRYPOINT ["/victoria-metrics-prod"]
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
@@ -20,7 +21,7 @@ func main() {
|
||||
flag.Parse()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
logger.Infof("starting VictoraMetrics at %q...", *httpListenAddr)
|
||||
logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr)
|
||||
startTime := time.Now()
|
||||
vmstorage.Init()
|
||||
vmselect.Init()
|
||||
@@ -43,6 +44,8 @@ func main() {
|
||||
vmstorage.Stop()
|
||||
vmselect.Stop()
|
||||
|
||||
fs.MustStopDirRemover()
|
||||
|
||||
logger.Infof("the VictoriaMetrics has been stopped in %s", time.Since(startTime))
|
||||
}
|
||||
|
||||
|
||||
494 app/victoria-metrics/main_test.go (Normal file)
@@ -0,0 +1,494 @@
|
||||
// +build integration
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
testFixturesDir = "testdata"
|
||||
testStorageSuffix = "vm-test-storage"
|
||||
testHTTPListenAddr = ":7654"
|
||||
testStatsDListenAddr = ":2003"
|
||||
testOpenTSDBListenAddr = ":4242"
|
||||
testOpenTSDBHTTPListenAddr = ":4243"
|
||||
testLogLevel = "INFO"
|
||||
)
|
||||
|
||||
const (
|
||||
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
|
||||
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
|
||||
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
|
||||
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
|
||||
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
|
||||
)
|
||||
|
||||
const (
|
||||
testStorageInitTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
storagePath string
|
||||
insertionTime = time.Now().UTC()
|
||||
)
|
||||
|
||||
type test struct {
|
||||
Name string `json:"name"`
|
||||
Data []string `json:"data"`
|
||||
Query []string `json:"query"`
|
||||
ResultMetrics []Metric `json:"result_metrics"`
|
||||
ResultSeries Series `json:"result_series"`
|
||||
ResultQuery Query `json:"result_query"`
|
||||
ResultQueryRange QueryRange `json:"result_query_range"`
|
||||
Issue string `json:"issue"`
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Values []float64 `json:"values"`
|
||||
Timestamps []int64 `json:"timestamps"`
|
||||
}
|
||||
|
||||
func (r *Metric) UnmarshalJSON(b []byte) error {
|
||||
type plain Metric
|
||||
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
|
||||
}
|
||||
|
||||
type Series struct {
|
||||
Status string `json:"status"`
|
||||
Data []map[string]string `json:"data"`
|
||||
}
|
||||
type Query struct {
|
||||
Status string `json:"status"`
|
||||
Data QueryData `json:"data"`
|
||||
}
|
||||
type QueryData struct {
|
||||
ResultType string `json:"resultType"`
|
||||
Result []QueryDataResult `json:"result"`
|
||||
}
|
||||
|
||||
type QueryDataResult struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Value []interface{} `json:"value"`
|
||||
}
|
||||
|
||||
func (r *QueryDataResult) UnmarshalJSON(b []byte) error {
|
||||
type plain QueryDataResult
|
||||
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
|
||||
}
|
||||
|
||||
type QueryRange struct {
|
||||
Status string `json:"status"`
|
||||
Data QueryRangeData `json:"data"`
|
||||
}
|
||||
type QueryRangeData struct {
|
||||
ResultType string `json:"resultType"`
|
||||
Result []QueryRangeDataResult `json:"result"`
|
||||
}
|
||||
|
||||
type QueryRangeDataResult struct {
|
||||
Metric map[string]string `json:"metric"`
|
||||
Values [][]interface{} `json:"values"`
|
||||
}
|
||||
|
||||
func (r *QueryRangeDataResult) UnmarshalJSON(b []byte) error {
|
||||
type plain QueryRangeDataResult
|
||||
return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
setUp()
|
||||
code := m.Run()
|
||||
tearDown()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func setUp() {
|
||||
storagePath = filepath.Join(os.TempDir(), testStorageSuffix)
|
||||
processFlags()
|
||||
logger.Init()
|
||||
vmstorage.InitWithoutMetrics()
|
||||
vmselect.Init()
|
||||
vminsert.Init()
|
||||
go httpserver.Serve(*httpListenAddr, requestHandler)
|
||||
readyStorageCheckFunc := func() bool {
|
||||
resp, err := http.Get(testHealthHTTPPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
return resp.StatusCode == 200
|
||||
}
|
||||
if err := waitFor(testStorageInitTimeout, readyStorageCheckFunc); err != nil {
|
||||
log.Fatalf("http server can't start for %s seconds, err %s", testStorageInitTimeout, err)
|
||||
}
|
||||
}
|
||||
|
||||
func processFlags() {
|
||||
flag.Parse()
|
||||
for _, fv := range []struct {
|
||||
flag string
|
||||
value string
|
||||
}{
|
||||
{flag: "storageDataPath", value: storagePath},
|
||||
{flag: "httpListenAddr", value: testHTTPListenAddr},
|
||||
{flag: "graphiteListenAddr", value: testStatsDListenAddr},
|
||||
{flag: "opentsdbListenAddr", value: testOpenTSDBListenAddr},
|
||||
{flag: "loggerLevel", value: testLogLevel},
|
||||
{flag: "opentsdbHTTPListenAddr", value: testOpenTSDBHTTPListenAddr},
|
||||
} {
|
||||
// panics if flag doesn't exist
|
||||
if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
|
||||
log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitFor(timeout time.Duration, f func() bool) error {
|
||||
fraction := timeout / 10
|
||||
for i := fraction; i < timeout; i += fraction {
|
||||
if f() {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(fraction)
|
||||
}
|
||||
return fmt.Errorf("timeout")
|
||||
}
|
||||
|
||||
func tearDown() {
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
log.Printf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
vminsert.Stop()
|
||||
vmstorage.Stop()
|
||||
vmselect.Stop()
|
||||
fs.MustRemoveAll(storagePath)
|
||||
}
|
||||
|
||||
func TestWriteRead(t *testing.T) {
|
||||
t.Run("write", testWrite)
|
||||
time.Sleep(1 * time.Second)
|
||||
vmstorage.Stop()
|
||||
// open storage after stop in write
|
||||
vmstorage.InitWithoutMetrics()
|
||||
t.Run("read", testRead)
|
||||
}
|
||||
|
||||
func testWrite(t *testing.T) {
|
||||
t.Run("prometheus", func(t *testing.T) {
|
||||
for _, test := range readIn("prometheus", t, insertionTime) {
|
||||
s := newSuite(t)
|
||||
r := testutil.WriteRequest{}
|
||||
s.noError(json.Unmarshal([]byte(strings.Join(test.Data, "\n")), &r.Timeseries))
|
||||
data, err := testutil.Compress(r)
|
||||
s.greaterThan(len(r.Timeseries), 0)
|
||||
if err != nil {
|
||||
t.Errorf("error compressing %v %s", r, err)
|
||||
t.Fail()
|
||||
}
|
||||
httpWrite(t, testPromWriteHTTPPath, bytes.NewBuffer(data))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("influxdb", func(t *testing.T) {
|
||||
for _, x := range readIn("influxdb", t, insertionTime) {
|
||||
test := x
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
httpWrite(t, testWriteHTTPPath, bytes.NewBufferString(strings.Join(test.Data, "\n")))
|
||||
})
|
||||
}
|
||||
})
|
||||
t.Run("graphite", func(t *testing.T) {
|
||||
for _, x := range readIn("graphite", t, insertionTime) {
|
||||
test := x
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tcpWrite(t, "127.0.0.1"+testStatsDListenAddr, strings.Join(test.Data, "\n"))
|
||||
})
|
||||
}
|
||||
})
|
||||
t.Run("opentsdb", func(t *testing.T) {
|
||||
for _, x := range readIn("opentsdb", t, insertionTime) {
|
||||
test := x
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tcpWrite(t, "127.0.0.1"+testOpenTSDBListenAddr, strings.Join(test.Data, "\n"))
|
||||
})
|
||||
}
|
||||
})
|
||||
t.Run("opentsdbhttp", func(t *testing.T) {
|
||||
for _, x := range readIn("opentsdbhttp", t, insertionTime) {
|
||||
test := x
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger.Infof("writing %s", test.Data)
|
||||
httpWrite(t, testOpenTSDBWriteHTTPPath, bytes.NewBufferString(strings.Join(test.Data, "\n")))
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func testRead(t *testing.T) {
|
||||
for _, engine := range []string{"prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
|
||||
t.Run(engine, func(t *testing.T) {
|
||||
for _, x := range readIn(engine, t, insertionTime) {
|
||||
test := x
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, q := range test.Query {
|
||||
q = testutil.PopulateTimeTplString(q, insertionTime)
|
||||
if test.Issue != "" {
|
||||
test.Issue = "Regression in " + test.Issue
|
||||
}
|
||||
switch true {
|
||||
case strings.HasPrefix(q, "/api/v1/export"):
|
||||
if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
|
||||
t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/series"):
|
||||
s := Series{}
|
||||
httpReadStruct(t, testReadHTTPPath, q, &s)
|
||||
if err := checkSeriesResult(s, test.ResultSeries); err != nil {
|
||||
t.Fatalf("Series. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/query_range"):
|
||||
queryResult := QueryRange{}
|
||||
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
|
||||
if err := checkQueryRangeResult(queryResult, test.ResultQueryRange); err != nil {
|
||||
t.Fatalf("Query Range. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/query"):
|
||||
queryResult := Query{}
|
||||
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
|
||||
if err := checkQueryResult(queryResult, test.ResultQuery); err != nil {
|
||||
t.Fatalf("Query. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unsupported read query %s", q)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func readIn(readFor string, t *testing.T, insertTime time.Time) []test {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
var tt []test
|
||||
s.noError(filepath.Walk(filepath.Join(testFixturesDir, readFor), func(path string, info os.FileInfo, err error) error {
|
||||
if filepath.Ext(path) != ".json" {
|
||||
return nil
|
||||
}
|
||||
b, err := ioutil.ReadFile(path)
|
||||
s.noError(err)
|
||||
item := test{}
|
||||
s.noError(json.Unmarshal(b, &item))
|
||||
for i := range item.Data {
|
||||
item.Data[i] = testutil.PopulateTimeTplString(item.Data[i], insertTime)
|
||||
}
|
||||
tt = append(tt, item)
|
||||
return nil
|
||||
}))
|
||||
if len(tt) == 0 {
|
||||
t.Fatalf("no test found in %s", filepath.Join(testFixturesDir, readFor))
|
||||
}
|
||||
return tt
|
||||
}
|
||||
|
||||
func httpWrite(t *testing.T, address string, r io.Reader) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Post(address, "", r)
|
||||
s.noError(err)
|
||||
s.noError(resp.Body.Close())
|
||||
s.equalInt(resp.StatusCode, 204)
|
||||
}
|
||||
|
||||
func tcpWrite(t *testing.T, address string, data string) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
conn, err := net.Dial("tcp", address)
|
||||
s.noError(err)
|
||||
defer conn.Close()
|
||||
n, err := conn.Write([]byte(data))
|
||||
s.noError(err)
|
||||
s.equalInt(n, len(data))
|
||||
}
|
||||
|
||||
func httpReadMetrics(t *testing.T, address, query string) []Metric {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Get(address + query)
|
||||
s.noError(err)
|
||||
defer resp.Body.Close()
|
||||
s.equalInt(resp.StatusCode, 200)
|
||||
var rows []Metric
|
||||
for dec := json.NewDecoder(resp.Body); dec.More(); {
|
||||
var row Metric
|
||||
s.noError(dec.Decode(&row))
|
||||
rows = append(rows, row)
|
||||
}
|
||||
return rows
|
||||
}
|
||||
func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Get(address + query)
|
||||
s.noError(err)
|
||||
defer resp.Body.Close()
|
||||
s.equalInt(resp.StatusCode, 200)
|
||||
s.noError(json.NewDecoder(resp.Body).Decode(dst))
|
||||
}
|
||||
|
||||
func checkMetricsResult(got, want []Metric) error {
|
||||
for _, r := range append([]Metric(nil), got...) {
|
||||
want = removeIfFoundMetrics(r, want)
|
||||
}
|
||||
if len(want) > 0 {
|
||||
return fmt.Errorf("exptected metrics %+v not found in %+v", want, got)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeIfFoundMetrics(r Metric, contains []Metric) []Metric {
|
||||
for i, item := range contains {
|
||||
if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) &&
|
||||
reflect.DeepEqual(r.Timestamps, item.Timestamps) {
|
||||
contains[i] = contains[len(contains)-1]
|
||||
return contains[:len(contains)-1]
|
||||
}
|
||||
}
|
||||
return contains
|
||||
}
|
||||
|
||||
func checkSeriesResult(got, want Series) error {
|
||||
if got.Status != want.Status {
|
||||
return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
|
||||
}
|
||||
wantData := append([]map[string]string(nil), want.Data...)
|
||||
for _, r := range got.Data {
|
||||
wantData = removeIfFoundSeries(r, wantData)
|
||||
}
|
||||
if len(wantData) > 0 {
|
||||
return fmt.Errorf("expected seria(s) %+v not found in %+v", wantData, got.Data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeIfFoundSeries(r map[string]string, contains []map[string]string) []map[string]string {
|
||||
for i, item := range contains {
|
||||
if reflect.DeepEqual(r, item) {
|
||||
contains[i] = contains[len(contains)-1]
|
||||
return contains[:len(contains)-1]
|
||||
}
|
||||
}
|
||||
return contains
|
||||
}
|
||||
|
||||
func checkQueryResult(got, want Query) error {
|
||||
if got.Status != want.Status {
|
||||
return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
|
||||
}
|
||||
if got.Data.ResultType != want.Data.ResultType {
|
||||
return fmt.Errorf("result type mismatch %q - %q", want.Data.ResultType, got.Data.ResultType)
|
||||
}
|
||||
wantData := append([]QueryDataResult(nil), want.Data.Result...)
|
||||
for _, r := range got.Data.Result {
|
||||
wantData = removeIfFoundQueryData(r, wantData)
|
||||
}
|
||||
if len(wantData) > 0 {
|
||||
return fmt.Errorf("expected query result %+v not found in %+v", wantData, got.Data.Result)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeIfFoundQueryData(r QueryDataResult, contains []QueryDataResult) []QueryDataResult {
|
||||
for i, item := range contains {
|
||||
if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Value[0], item.Value[0]) && reflect.DeepEqual(r.Value[1], item.Value[1]) {
|
||||
contains[i] = contains[len(contains)-1]
|
||||
return contains[:len(contains)-1]
|
||||
}
|
||||
}
|
||||
return contains
|
||||
}
|
||||
|
||||
func checkQueryRangeResult(got, want QueryRange) error {
|
||||
if got.Status != want.Status {
|
||||
return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
|
||||
}
|
||||
if got.Data.ResultType != want.Data.ResultType {
|
||||
return fmt.Errorf("result type mismatch %q - %q", want.Data.ResultType, got.Data.ResultType)
|
||||
}
|
||||
wantData := append([]QueryRangeDataResult(nil), want.Data.Result...)
|
||||
for _, r := range got.Data.Result {
|
||||
wantData = removeIfFoundQueryRangeData(r, wantData)
|
||||
}
|
||||
if len(wantData) > 0 {
|
||||
return fmt.Errorf("expected query range result %+v not found in %+v", wantData, got.Data.Result)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeIfFoundQueryRangeData(r QueryRangeDataResult, contains []QueryRangeDataResult) []QueryRangeDataResult {
|
||||
for i, item := range contains {
|
||||
if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) {
|
||||
contains[i] = contains[len(contains)-1]
|
||||
return contains[:len(contains)-1]
|
||||
}
|
||||
}
|
||||
return contains
|
||||
}
|
||||
|
||||
type suite struct{ t *testing.T }
|
||||
|
||||
func newSuite(t *testing.T) *suite { return &suite{t: t} }
|
||||
|
||||
func (s *suite) noError(err error) {
|
||||
s.t.Helper()
|
||||
if err != nil {
|
||||
s.t.Errorf("unexpected error %v", err)
|
||||
s.t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *suite) equalInt(a, b int) {
|
||||
s.t.Helper()
|
||||
if a != b {
|
||||
s.t.Errorf("%d not equal %d", a, b)
|
||||
s.t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *suite) greaterThan(a, b int) {
|
||||
s.t.Helper()
|
||||
if a <= b {
|
||||
s.t.Errorf("%d less or equal then %d", a, b)
|
||||
s.t.FailNow()
|
||||
}
|
||||
}
|
||||
52 app/victoria-metrics/test/parser.go (Normal file)
@@ -0,0 +1,52 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
parseTimeExpRegex = regexp.MustCompile(`"?{TIME[^}]*}"?`)
|
||||
extractRegex = regexp.MustCompile(`"?{([^}]*)}"?`)
|
||||
)
|
||||
|
||||
// PopulateTimeTplString substitutes {TIME_*} with t in s and returns the result.
|
||||
func PopulateTimeTplString(s string, t time.Time) string {
|
||||
return string(PopulateTimeTpl([]byte(s), t))
|
||||
}
|
||||
|
||||
// PopulateTimeTpl substitutes {TIME_*} with tGlobal in b and returns the result.
|
||||
func PopulateTimeTpl(b []byte, tGlobal time.Time) []byte {
|
||||
return parseTimeExpRegex.ReplaceAllFunc(b, func(repl []byte) []byte {
|
||||
t := tGlobal
|
||||
repl = extractRegex.FindSubmatch(repl)[1]
|
||||
parts := strings.SplitN(string(repl), "-", 2)
|
||||
if len(parts) == 2 {
|
||||
duration, err := time.ParseDuration(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
log.Fatalf("error %s parsing duration %s in %s", err, parts[1], repl)
|
||||
}
|
||||
t = t.Add(-duration)
|
||||
}
|
||||
switch strings.TrimSpace(parts[0]) {
|
||||
case `TIME_S`:
|
||||
return []byte(fmt.Sprintf("%d", t.Unix()))
|
||||
case `TIME_MSZ`:
|
||||
return []byte(fmt.Sprintf("%d", t.Unix()*1e3))
|
||||
case `TIME_MS`:
|
||||
return []byte(fmt.Sprintf("%d", timeToMillis(t)))
|
||||
case `TIME_NS`:
|
||||
return []byte(fmt.Sprintf("%d", t.UnixNano()))
|
||||
default:
|
||||
log.Fatalf("unknown time pattern %s in %s", parts[0], repl)
|
||||
}
|
||||
return repl
|
||||
})
|
||||
}
|
||||
|
||||
func timeToMillis(t time.Time) int64 {
|
||||
return t.UnixNano() / 1e6
|
||||
}
|
||||
24 app/victoria-metrics/test/parser_test.go (Normal file)
@@ -0,0 +1,24 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPopulateTimeTplString(t *testing.T) {
|
||||
now, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when parsing time: %s", err)
|
||||
}
|
||||
f := func(s, resultExpected string) {
|
||||
t.Helper()
|
||||
result := PopulateTimeTplString(s, now)
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result; got %q; want %q", result, resultExpected)
|
||||
}
|
||||
}
|
||||
f("", "")
|
||||
f("{TIME_S}", "1136214245")
|
||||
f("now: {TIME_S}, past 30s: {TIME_MS-30s}, now: {TIME_S}", "now: 1136214245, past 30s: 1136214215000, now: 1136214245")
|
||||
f("now: {TIME_MS}, past 30m: {TIME_MSZ-30m}, past 2h: {TIME_NS-2h}", "now: 1136214245000, past 30m: 1136212445000, past 2h: 1136207045000000000")
|
||||
}
|
||||
338
app/victoria-metrics/test/prom_types.go
Normal file
@@ -0,0 +1,338 @@
|
||||
// +build integration
|
||||
|
||||
// Source: https://github.com/prometheus/prometheus/blob/master/prompb/remote.pb.go . The code is copy-pasted and cleaned up.
|
||||
package test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
type WriteRequest struct {
|
||||
Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
|
||||
}
|
||||
|
||||
func (m *WriteRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Timeseries) > 0 {
|
||||
for _, e := range m.Timeseries {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovRemote(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sovRemote(x uint64) (n int) {
|
||||
return (bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
|
||||
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
if len(m.Timeseries) > 0 {
|
||||
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintRemote(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintRemote(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovRemote(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
|
||||
type Sample struct {
|
||||
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Sample) Reset() { *m = Sample{} }
|
||||
|
||||
// TimeSeries represents samples and labels for a single time series.
|
||||
type TimeSeries struct {
|
||||
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
|
||||
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
|
||||
}
|
||||
|
||||
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
|
||||
|
||||
type Label struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Label) Reset() { *m = Label{} }
|
||||
|
||||
type Labels struct {
|
||||
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
|
||||
}
|
||||
|
||||
func (m *Labels) Reset() { *m = Labels{} }
|
||||
|
||||
func (m *Sample) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
if m.Timestamp != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.Value != 0 {
|
||||
i -= 8
|
||||
binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
|
||||
i--
|
||||
dAtA[i] = 0x9
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
if len(m.Samples) > 0 {
|
||||
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
if len(m.Labels) > 0 {
|
||||
for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Label) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Label) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Value) > 0 {
|
||||
i -= len(m.Value)
|
||||
copy(dAtA[i:], m.Value)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Value)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if len(m.Name) > 0 {
|
||||
i -= len(m.Name)
|
||||
copy(dAtA[i:], m.Name)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Name)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Labels) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Labels) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
if len(m.Labels) > 0 {
|
||||
for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovTypes(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
|
||||
func (m *Sample) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
if m.Value != 0 {
|
||||
n += 9
|
||||
}
|
||||
if m.Timestamp != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Timestamp))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *TimeSeries) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Labels) > 0 {
|
||||
for _, e := range m.Labels {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Samples) > 0 {
|
||||
for _, e := range m.Samples {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Label) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = len(m.Value)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Labels) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Labels) > 0 {
|
||||
for _, e := range m.Labels {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTypes(x uint64) (n int) {
|
||||
return (bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
13
app/victoria-metrics/test/prom_writter.go
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build integration
|
||||
|
||||
package test
|
||||
|
||||
import "github.com/golang/snappy"
|
||||
|
||||
func Compress(wr WriteRequest) ([]byte, error) {
|
||||
data, err := wr.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, data), nil
|
||||
}
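
For readers wiring up integration tests, here is a minimal sketch of how the generated types above and `Compress` can be combined into a snappy-compressed remote-write payload; the helper name and the label values are illustrative only, not part of this file:

```go
package test

import "time"

// buildRemoteWritePayload is a hypothetical helper: it assembles a single
// time series using the types defined above and returns the snappy-compressed
// protobuf body expected by the Prometheus remote-write endpoint.
func buildRemoteWritePayload(now time.Time) ([]byte, error) {
	wr := WriteRequest{
		Timeseries: []TimeSeries{
			{
				Labels: []Label{
					{Name: "__name__", Value: "prometheus.bar"},
					{Name: "baz", Value: "qux"},
				},
				Samples: []Sample{
					{Value: 100000, Timestamp: now.UnixNano() / 1e6},
				},
			},
		},
	}
	return Compress(wr)
}
```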
|
||||
8
app/victoria-metrics/testdata/graphite/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["graphite.foo.bar.baz;tag1=value1;tag2=value2 123 {TIME_S}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"graphite.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
16
app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"name": "comparison-not-inf-not-nan",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150",
|
||||
"data": [
|
||||
"not_nan_not_inf;item=x 1 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=x 1 {TIME_S-2m}",
|
||||
"not_nan_not_inf;item=y 3 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=y 1 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query_range?query=1/(not_nan_not_inf-1)!=inf!=nan&start={TIME_S-3m}&end={TIME_S}&step=60"],
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
{"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"],["{TIME_S}","0.5"]]}
|
||||
]}}
|
||||
}
|
||||
24
app/victoria-metrics/testdata/graphite/max_lookback_set.json
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"name": "max_lookback_set",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
|
||||
"data": [
|
||||
"max_lookback_set 1 {TIME_S-30s}",
|
||||
"max_lookback_set 2 {TIME_S-60s}",
|
||||
"max_lookback_set 3 {TIME_S-120s}",
|
||||
"max_lookback_set 4 {TIME_S-150s}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=max_lookback_set&start={TIME_S-150s}&end={TIME_S}&step=10s&max_lookback=1s"],
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"max_lookback_set"},"values":[
|
||||
["{TIME_S-150s}","4"],
|
||||
["{TIME_S-140s}","4"],
|
||||
["{TIME_S-120s}","3"],
|
||||
["{TIME_S-110s}","3"],
|
||||
["{TIME_S-60s}","2"],
|
||||
["{TIME_S-50s}","2"],
|
||||
["{TIME_S-30s}","1"],
|
||||
["{TIME_S-20s}","1"]
|
||||
]}]}}
|
||||
}
|
||||
32
app/victoria-metrics/testdata/graphite/max_lookback_unset.json
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"name": "max_lookback_unset",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
|
||||
"data": [
|
||||
"max_lookback_unset 1 {TIME_S-30s}",
|
||||
"max_lookback_unset 2 {TIME_S-60s}",
|
||||
"max_lookback_unset 3 {TIME_S-120s}",
|
||||
"max_lookback_unset 4 {TIME_S-150s}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=max_lookback_unset&start={TIME_S-150s}&end={TIME_S}&step=10s"],
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"max_lookback_unset"},"values":[
|
||||
["{TIME_S-150s}","4"],
|
||||
["{TIME_S-140s}","4"],
|
||||
["{TIME_S-130s}","4"],
|
||||
["{TIME_S-120s}","3"],
|
||||
["{TIME_S-110s}","3"],
|
||||
["{TIME_S-100s}","3"],
|
||||
["{TIME_S-90s}","3"],
|
||||
["{TIME_S-80s}","3"],
|
||||
["{TIME_S-70s}","3"],
|
||||
["{TIME_S-60s}","2"],
|
||||
["{TIME_S-50s}","2"],
|
||||
["{TIME_S-40s}","2"],
|
||||
["{TIME_S-30s}","1"],
|
||||
["{TIME_S-20s}","1"],
|
||||
["{TIME_S-10s}","1"],
|
||||
["{TIME_S}","1"]
|
||||
]}]}}
|
||||
}
|
||||
18
app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"name": "not-nan-as-missing-data",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153",
|
||||
"data": [
|
||||
"not_nan_as_missing_data;item=x 2 {TIME_S-2m}",
|
||||
"not_nan_as_missing_data;item=x 1 {TIME_S-1m}",
|
||||
"not_nan_as_missing_data;item=y 4 {TIME_S-2m}",
|
||||
"not_nan_as_missing_data;item=y 3 {TIME_S-1m}"
|
||||
],
|
||||
"query": ["/api/v1/query_range?query=not_nan_as_missing_data>1&start={TIME_S-2m}&end={TIME_S}&step=60"],
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[
|
||||
{"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]},
|
||||
{"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"],["{TIME_S}","3"]]}
|
||||
]}}
|
||||
}
|
||||
14
app/victoria-metrics/testdata/graphite/subquery-aggregation.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "subquery-aggregation",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
|
||||
"data": [
|
||||
"forms_daily_count;item=x 1 {TIME_S-1m}",
|
||||
"forms_daily_count;item=x 2 {TIME_S-2m}",
|
||||
"forms_daily_count;item=y 3 {TIME_S-1m}",
|
||||
"forms_daily_count;item=y 4 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"vector","result":[{"metric":{"item":"x"},"value":["{TIME_S-1m}","1"]},{"metric":{"item":"y"},"value":["{TIME_S-1m}","3"]}]}
|
||||
}
|
||||
}
|
||||
9
app/victoria-metrics/testdata/influxdb/basic.json
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME_NS}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MS}"]},
|
||||
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
8
app/victoria-metrics/testdata/opentsdb/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["put openstdb.foo.bar.baz {TIME_S} 123 tag1=value1 tag2=value2"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"openstdb.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
8
app/victoria-metrics/testdata/opentsdbhttp/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["{\"metric\": \"opentsdbhttp.foo\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"opentsdbhttp.foo","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
9
app/victoria-metrics/testdata/opentsdbhttp/multi_line.json
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "multiline",
|
||||
"data": ["[{\"metric\": \"opentsdbhttp.multiline1\", \"value\": 1001, \"timestamp\": \"{TIME_S}\", \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}, {\"metric\": \"opentsdbhttp.multiline2\", \"value\": 1002, \"timestamp\": {TIME_S}}]"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"opentsdbhttp.multiline1","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]},
|
||||
{"metric":{"__name__":"opentsdbhttp.multiline2"},"values":[1002], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
8
app/victoria-metrics/testdata/prometheus/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.bar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"prometheus.bar","baz":"qux"},"values":[100000], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
10
app/victoria-metrics/testdata/prometheus/case-sensitive-regex.json
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"name": "case-sensitive-regex",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/161",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"sensitiveRegex\"}],\"samples\":[{\"value\":2,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"SensitiveRegex\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/export?match={label=~'(?i)sensitiveregex'}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"sensitiveRegex"},"values":[2], "timestamps": ["{TIME_MS}"]},
|
||||
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"SensitiveRegex"},"values":[1], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
9
app/victoria-metrics/testdata/prometheus/duplicate-label.json
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "duplicate_label",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/172",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.duplicate_label\"},{\"name\":\"duplicate\",\"value\":\"label\"},{\"name\":\"duplicate\",\"value\":\"label\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"prometheus.duplicate_label","duplicate":"label"},"values":[1], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
15
app/victoria-metrics/testdata/prometheus/match-series.json
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"name": "match_series",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/155",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"1\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"2\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"3\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"4\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/series?match[]={__name__='MatchSeries'}", "/api/v1/series?match[]={__name__=~'MatchSeries.*'}"],
|
||||
"result_series": {
|
||||
"status": "success",
|
||||
"data": [
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"1","TurbineType":"V112"},
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"2","TurbineType":"V112"},
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"3","TurbineType":"V112"},
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"4","TurbineType":"V112"}
|
||||
]
|
||||
}
|
||||
}
|
||||
67
app/vmbackup/Makefile
Normal file
@@ -0,0 +1,67 @@
|
||||
# All these commands must run from the repository root.
|
||||
|
||||
vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) app-local
|
||||
|
||||
vmbackup-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker
|
||||
|
||||
vmbackup-pure-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-pure
|
||||
|
||||
vmbackup-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-amd64
|
||||
|
||||
vmbackup-arm-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-arm
|
||||
|
||||
vmbackup-arm64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-arm64
|
||||
|
||||
vmbackup-ppc64le-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-ppc64le
|
||||
|
||||
vmbackup-386-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-386
|
||||
|
||||
package-vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker
|
||||
|
||||
package-vmbackup-pure:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-pure
|
||||
|
||||
package-vmbackup-amd64:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-vmbackup-arm:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-arm
|
||||
|
||||
package-vmbackup-arm64:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-vmbackup-ppc64le:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-vmbackup-386:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-386
|
||||
|
||||
publish-vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) publish-via-docker
|
||||
|
||||
vmbackup-pure:
|
||||
APP_NAME=vmbackup $(MAKE) app-local-pure
|
||||
|
||||
vmbackup-amd64:
|
||||
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmbackup-amd64 ./app/vmbackup
|
||||
|
||||
vmbackup-arm:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmbackup-arm ./app/vmbackup
|
||||
|
||||
vmbackup-arm64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmbackup-arm64 ./app/vmbackup
|
||||
|
||||
vmbackup-ppc64le:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmbackup-ppc64le ./app/vmbackup
|
||||
|
||||
vmbackup-386:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=386 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmbackup-386 ./app/vmbackup
|
||||
181
app/vmbackup/README.md
Normal file
@@ -0,0 +1,181 @@
|
||||
## vmbackup
|
||||
|
||||
`vmbackup` creates VictoriaMetrics data backups from [instant snapshots](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots).
|
||||
|
||||
Supported storage systems for backups:
|
||||
|
||||
* [GCS](https://cloud.google.com/storage/). Example: `gcs://<bucket>/<path/to/backup>`
|
||||
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
|
||||
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/docs/mimic/radosgw/s3/) or [Swift](https://www.swiftstack.com/docs/admin/middleware/s3_middleware.html). See `-customS3Endpoint` command-line flag.
|
||||
* Local filesystem. Example: `fs://</absolute/path/to/backup>`
|
||||
|
||||
Incremental backups and full backups are supported. Incremental backups are created automatically if the destination path already contains data from the previous backup.
|
||||
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes a server-side copy of the
|
||||
data shared between the existing backup and the new backup. This saves time and costs on data transfer.
|
||||
|
||||
The backup process can be interrupted at any time. It is automatically resumed from the interruption point when `vmbackup` is restarted with the same args.
|
||||
|
||||
Backed up data can be restored with [vmrestore](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmrestore/README.md).
|
||||
|
||||
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
|
||||
|
||||
|
||||
### Use cases
|
||||
|
||||
#### Regular backups
|
||||
|
||||
A regular backup can be performed with the following command:
|
||||
|
||||
```
|
||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot> -dst=gcs://<bucket>/<path/to/new/backup>
|
||||
```
|
||||
|
||||
* `</path/to/victoria-metrics-data>` - path to VictoriaMetrics data pointed to by the `-storageDataPath` command-line flag in single-node VictoriaMetrics or in cluster `vmstorage`.
|
||||
There is no need to stop VictoriaMetrics for creating backups, since they are performed from immutable [instant snapshots](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots).
|
||||
* `<local-snapshot>` is the snapshot to back up. See [how to create instant snapshots](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots).
|
||||
* `<bucket>` is an already existing name for a [GCS bucket](https://cloud.google.com/storage/docs/creating-buckets).
|
||||
* `<path/to/new/backup>` is the destination path where the new backup will be placed.
|
||||
|
||||
|
||||
#### Regular backups with server-side copy from existing backup
|
||||
|
||||
If the destination GCS bucket already contains the previous backup at the `-origin` path, then the new backup can be sped up
|
||||
with the following command:
|
||||
|
||||
```
|
||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot> -dst=gcs://<bucket>/<path/to/new/backup> -origin=gcs://<bucket>/<path/to/existing/backup>
|
||||
```
|
||||
|
||||
This saves time and network bandwidth costs by performing a server-side copy of the shared data from `-origin` to `-dst`.
|
||||
|
||||
|
||||
#### Incremental backups
|
||||
|
||||
Incremental backups are performed if `-dst` points to an already existing backup. In this case only new data is uploaded to the remote storage.
|
||||
This saves time and network bandwidth costs when working with big backups:
|
||||
|
||||
```
|
||||
vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot> -dst=gcs://<bucket>/<path/to/existing/backup>
|
||||
```
|
||||
|
||||
|
||||
#### Smart backups
|
||||
|
||||
Smart backups mean storing full daily backups in `YYYYMMDD` folders and creating an incremental hourly backup in the `latest` folder:
|
||||
|
||||
* Run the following command every hour:
|
||||
|
||||
```
|
||||
vmbackup -snapshotName=<latest-snapshot> -dst=gcs://<bucket>/latest
|
||||
```
|
||||
|
||||
Where `<latest-snapshot>` is the latest [snapshot](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots).
|
||||
The command will upload only changed data to `gcs://<bucket>/latest`.
|
||||
|
||||
* Run the following command once a day:
|
||||
|
||||
```
|
||||
vmbackup -snapshotName=<daily-snapshot> -dst=gcs://<bucket>/<YYYYMMDD> -origin=gcs://<bucket>/latest
|
||||
```
|
||||
|
||||
Where `<daily-snapshot>` is the snapshot for the last day `<YYYYMMDD>`.
|
||||
|
||||
|
||||
This approach saves network bandwidth costs on hourly backups (since they are incremental) and allows recovering data from either the last hour (the `latest` backup)
|
||||
or from any day (the `YYYYMMDD` backups). Note that the hourly backup shouldn't run while the daily backup is being created.
|
||||
|
||||
Do not forget to remove old snapshots and backups when they are no longer needed in order to save storage costs.
|
||||
|
||||
|
||||
### How does it work?
|
||||
|
||||
The backup algorithm is the following:
|
||||
|
||||
1. Collect information about files in the `-snapshotName`, in the `-dst` and in the `-origin`.
|
||||
2. Determine files in `-dst`, which are missing in `-snapshotName`, and delete them. These are usually small files, which are already merged into bigger files in the snapshot.
|
||||
3. Determine files from `-snapshotName`, which are missing in `-dst`. These are usually small new files and bigger merged files.
|
||||
4. Determine files from step 3, which exist in the `-origin`, and perform server-side copy of these files from `-origin` to `-dst`.
|
||||
These are usually the biggest and the oldest files, which are shared between backups.
|
||||
5. Upload the remaining files from step 3 from `-snapshotName` to `-dst`.
|
||||
|
||||
The algorithm splits source files into 100MB chunks in the backup. Each chunk is stored as a separate file in the backup.
|
||||
Such splitting minimizes the amount of data that needs to be re-transferred after temporary errors.
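
To make steps 2-5 of the algorithm concrete, here is a rough, self-contained sketch of the set logic; every name in it is a hypothetical placeholder, while the real implementation lives in `lib/backup/actions`:

```go
package backupsketch

// partSet is a hypothetical stand-in for the set of file parts found in the
// snapshot, in -dst and in -origin.
type partSet map[string]bool

// syncBackup illustrates steps 2-5 of the algorithm above; the callbacks are
// placeholders for the real delete/copy/upload operations.
func syncBackup(snapshot, dst, origin partSet,
	deleteFromDst, copyFromOrigin, uploadFromSnapshot func(part string)) {

	// Step 2: delete parts that exist in dst but are missing in the snapshot.
	for p := range dst {
		if !snapshot[p] {
			deleteFromDst(p)
		}
	}
	// Steps 3-5: handle parts from the snapshot that are missing in dst.
	for p := range snapshot {
		if dst[p] {
			continue // already backed up
		}
		if origin[p] {
			copyFromOrigin(p) // step 4: cheap server-side copy
		} else {
			uploadFromSnapshot(p) // step 5: upload from the local snapshot
		}
	}
}
```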
|
||||
|
||||
`vmbackup` relies on [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) properties:
|
||||
|
||||
- All the files in the snapshot are immutable.
|
||||
- Old files are periodically merged into new files.
|
||||
- Smaller files have a higher probability of being merged.
|
||||
- Consecutive snapshots share many identical files.
|
||||
|
||||
These properties allow performing fast and cheap incremental backups and server-side copying from `-origin` paths.
|
||||
See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
|
||||
`vmbackup` can work improperly or slowly when these properties are violated.
|
||||
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
* If the backup is slow, then try setting a higher value for the `-concurrency` flag. This will increase the number of concurrent workers that upload data to the backup storage.
|
||||
* If `vmbackup` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.
|
||||
* If `vmbackup` has been interrupted due to a temporary error, then just restart it with the same args. It will resume the backup process.
|
||||
|
||||
|
||||
### Advanced usage
|
||||
|
||||
Run `vmbackup -help` in order to see all the available options:
|
||||
|
||||
```
|
||||
-concurrency int
|
||||
The number of concurrent workers. Higher concurrency may reduce backup duration (default 10)
|
||||
-configFilePath string
|
||||
Path to file with S3 configs. Configs are loaded from default location if not set.
|
||||
See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
|
||||
-configProfile string
|
||||
Profile name for S3 configs (default "default")
|
||||
-credsFilePath string
|
||||
Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
|
||||
See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
|
||||
-customS3Endpoint string
|
||||
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
|
||||
-dst string
|
||||
Where to put the backup on the remote storage. Example: gcs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
|
||||
-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
|
||||
-loggerLevel string
|
||||
Minimum level of errors to log. Possible values: INFO, ERROR, FATAL, PANIC (default "INFO")
|
||||
-maxBytesPerSecond int
|
||||
The maximum upload speed. There is no limit if it is set to 0
|
||||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy (default 60)
|
||||
-origin string
|
||||
Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups
|
||||
-snapshotName string
|
||||
Name for the snapshot to backup. See https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots
|
||||
-storageDataPath string
|
||||
Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage (default "victoria-metrics-data")
|
||||
-version
|
||||
Show VictoriaMetrics version
|
||||
```
|
||||
|
||||
|
||||
### How to build from sources
|
||||
|
||||
It is recommended to use [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see the `vmutils-*` archives there.
|
||||
|
||||
|
||||
#### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make vmbackup` from the root folder of the repository.
|
||||
It builds the `vmbackup` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Production build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make vmbackup-prod` from the root folder of the repository.
|
||||
It builds the `vmbackup-prod` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Building docker images
|
||||
|
||||
Run `make package-vmbackup`. It builds the `victoriametrics/vmbackup:<PKG_TAG>` docker image locally.
|
||||
`<PKG_TAG>` is an auto-generated image tag, which depends on the source code in the repository.
|
||||
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmbackup`.
|
||||
7
app/vmbackup/deployment/Dockerfile
Normal file
@@ -0,0 +1,7 @@
|
||||
ARG certs_image
|
||||
FROM $certs_image AS certs
|
||||
FROM scratch
|
||||
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
|
||||
ARG src_binary
|
||||
COPY $src_binary ./vmbackup-prod
|
||||
ENTRYPOINT ["/vmbackup-prod"]
|
||||
114
app/vmbackup/main.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/actions"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fslocal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
storageDataPath = flag.String("storageDataPath", "victoria-metrics-data", "Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage")
|
||||
snapshotName = flag.String("snapshotName", "", "Name for the snapshot to backup. See https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-work-with-snapshots")
|
||||
dst = flag.String("dst", "", "Where to put the backup on the remote storage. "+
|
||||
"Example: gcs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir\n"+
|
||||
"-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded")
|
||||
origin = flag.String("origin", "", "Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups")
|
||||
concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce backup duration")
|
||||
maxBytesPerSecond = flag.Int("maxBytesPerSecond", 0, "The maximum upload speed. There is no limit if it is set to 0")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
buildinfo.Init()
|
||||
|
||||
srcFS, err := newSrcFS()
|
||||
if err != nil {
|
||||
logger.Fatalf("%s", err)
|
||||
}
|
||||
dstFS, err := newDstFS()
|
||||
if err != nil {
|
||||
logger.Fatalf("%s", err)
|
||||
}
|
||||
originFS, err := newOriginFS()
|
||||
if err != nil {
|
||||
logger.Fatalf("%s", err)
|
||||
}
|
||||
a := &actions.Backup{
|
||||
Concurrency: *concurrency,
|
||||
Src: srcFS,
|
||||
Dst: dstFS,
|
||||
Origin: originFS,
|
||||
}
|
||||
if err := a.Run(); err != nil {
|
||||
logger.Fatalf("cannot create backup: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func usage() {
|
||||
const s = `
|
||||
vmbackup performs backups for VictoriaMetrics data from instant snapshots to gcs, s3
|
||||
or local filesystem. Backed up data can be restored with vmrestore.
|
||||
|
||||
See the docs at https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md .
|
||||
`
|
||||
|
||||
f := flag.CommandLine.Output()
|
||||
fmt.Fprintf(f, "%s\n", s)
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func newSrcFS() (*fslocal.FS, error) {
|
||||
if len(*snapshotName) == 0 {
|
||||
return nil, fmt.Errorf("`-snapshotName` cannot be empty")
|
||||
}
|
||||
snapshotPath := *storageDataPath + "/snapshots/" + *snapshotName
|
||||
|
||||
// Verify the snapshot exists.
|
||||
f, err := os.Open(snapshotPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot open snapshot at %q: %s", snapshotPath, err)
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
_ = f.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot stat %q: %s", snapshotPath, err)
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return nil, fmt.Errorf("snapshot %q must be a directory", snapshotPath)
|
||||
}
|
||||
|
||||
fs := &fslocal.FS{
|
||||
Dir: snapshotPath,
|
||||
MaxBytesPerSecond: *maxBytesPerSecond,
|
||||
}
|
||||
if err := fs.Init(); err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize fs: %s", err)
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func newDstFS() (common.RemoteFS, error) {
|
||||
fs, err := actions.NewRemoteFS(*dst)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse `-dst`=%q: %s", *dst, err)
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func newOriginFS() (common.RemoteFS, error) {
|
||||
if len(*origin) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
fs, err := actions.NewRemoteFS(*origin)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse `-origin`=%q: %s", *origin, err)
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
30
app/vminsert/common/gzip_reader.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// GetGzipReader returns new gzip reader from the pool.
|
||||
//
|
||||
// Return the gzip reader to the pool via PutGzipReader when it is no longer needed.
|
||||
func GetGzipReader(r io.Reader) (*gzip.Reader, error) {
|
||||
v := gzipReaderPool.Get()
|
||||
if v == nil {
|
||||
return gzip.NewReader(r)
|
||||
}
|
||||
zr := v.(*gzip.Reader)
|
||||
if err := zr.Reset(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return zr, nil
|
||||
}
|
||||
|
||||
// PutGzipReader returns the gzip reader obtained via GetGzipReader back to the pool.
|
||||
func PutGzipReader(zr *gzip.Reader) {
|
||||
_ = zr.Close()
|
||||
gzipReaderPool.Put(zr)
|
||||
}
|
||||
|
||||
var gzipReaderPool sync.Pool
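
As a usage illustration, a hypothetical ingestion handler could pair `GetGzipReader` and `PutGzipReader` like this; the helper below is a sketch and is not part of this file:

```go
package common

import (
	"io"
	"io/ioutil"
	"net/http"
)

// readRequestBody is a hypothetical helper showing the intended Get/Put
// pairing: take a gzip reader from the pool, consume the request body,
// then return the reader to the pool.
func readRequestBody(req *http.Request) ([]byte, error) {
	r := io.Reader(req.Body)
	if req.Header.Get("Content-Encoding") == "gzip" {
		zr, err := GetGzipReader(r)
		if err != nil {
			return nil, err
		}
		defer PutGzipReader(zr)
		r = zr
	}
	return ioutil.ReadAll(r)
}
```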
|
||||
@@ -2,9 +2,11 @@ package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
@@ -30,11 +32,10 @@ func (ctx *InsertCtx) Reset(rowsLen int) {
|
||||
mr.MetricNameRaw = nil
|
||||
}
|
||||
ctx.mrs = ctx.mrs[:0]
|
||||
|
||||
if n := rowsLen - cap(ctx.mrs); n > 0 {
|
||||
ctx.mrs = append(ctx.mrs[:cap(ctx.mrs)], make([]storage.MetricRow, n)...)
|
||||
}
|
||||
ctx.mrs = ctx.mrs[:rowsLen]
|
||||
ctx.mrs = ctx.mrs[:0]
|
||||
ctx.metricNamesBuf = ctx.metricNamesBuf[:0]
|
||||
}
|
||||
|
||||
@@ -46,7 +47,7 @@ func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompb.Label)
|
||||
return metricNameRaw[:len(metricNameRaw):len(metricNameRaw)]
|
||||
}
|
||||
|
||||
// WriteDataPoint writes (timestamp, value) with the given prefix and lables into ctx buffer.
|
||||
// WriteDataPoint writes (timestamp, value) with the given prefix and labels into ctx buffer.
|
||||
func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompb.Label, timestamp int64, value float64) {
|
||||
metricNameRaw := ctx.marshalMetricNameRaw(prefix, labels)
|
||||
ctx.addRow(metricNameRaw, timestamp, value)
|
||||
@@ -77,6 +78,26 @@ func (ctx *InsertCtx) addRow(metricNameRaw []byte, timestamp int64, value float6
|
||||
mr.Value = value
|
||||
}
|
||||
|
||||
// AddLabelBytes adds (name, value) label to ctx.Labels.
|
||||
//
|
||||
// name and value must exist until ctx.Labels is used.
|
||||
func (ctx *InsertCtx) AddLabelBytes(name, value []byte) {
|
||||
labels := ctx.Labels
|
||||
if cap(labels) > len(labels) {
|
||||
labels = labels[:len(labels)+1]
|
||||
} else {
|
||||
labels = append(labels, prompb.Label{})
|
||||
}
|
||||
label := &labels[len(labels)-1]
|
||||
|
||||
// Do not copy name and value contents for performance reasons.
|
||||
// This reduces GC overhead on the number of objects and allocations.
|
||||
label.Name = name
|
||||
label.Value = value
|
||||
|
||||
ctx.Labels = labels
|
||||
}
|
||||
|
||||
// AddLabel adds (name, value) label to ctx.Labels.
|
||||
//
|
||||
// name and value must exist until ctx.Labels is used.
|
||||
@@ -100,7 +121,10 @@ func (ctx *InsertCtx) AddLabel(name, value string) {
|
||||
// FlushBufs flushes buffered rows to the underlying storage.
|
||||
func (ctx *InsertCtx) FlushBufs() error {
|
||||
if err := vmstorage.AddRows(ctx.mrs); err != nil {
|
||||
return fmt.Errorf("cannot store metrics: %s", err)
|
||||
return &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("cannot store metrics: %s", err),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
79
app/vminsert/common/lines_reader.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
)
|
||||
|
||||
// The maximum size of a single line returned by ReadLinesBlock.
|
||||
const maxLineSize = 256 * 1024
|
||||
|
||||
// Default size in bytes of a single block returned by ReadLinesBlock.
|
||||
const defaultBlockSize = 64 * 1024
|
||||
|
||||
// ReadLinesBlock reads a block of lines delimited by '\n' from tailBuf and r into dstBuf.
|
||||
//
|
||||
// Trailing chars after the last newline are put into tailBuf.
|
||||
//
|
||||
// Returns (dstBuf, tailBuf).
|
||||
func ReadLinesBlock(r io.Reader, dstBuf, tailBuf []byte) ([]byte, []byte, error) {
|
||||
return ReadLinesBlockExt(r, dstBuf, tailBuf, maxLineSize)
|
||||
}
|
||||
|
||||
// ReadLinesBlockExt reads a block of lines delimited by '\n' from tailBuf and r into dstBuf.
|
||||
//
|
||||
// Trailing chars after the last newline are put into tailBuf.
|
||||
//
|
||||
// Returns (dstBuf, tailBuf).
|
||||
//
|
||||
// maxLineLen limits the maximum length of a single line.
|
||||
func ReadLinesBlockExt(r io.Reader, dstBuf, tailBuf []byte, maxLineLen int) ([]byte, []byte, error) {
|
||||
if cap(dstBuf) < defaultBlockSize {
|
||||
dstBuf = bytesutil.Resize(dstBuf, defaultBlockSize)
|
||||
}
|
||||
dstBuf = append(dstBuf[:0], tailBuf...)
|
||||
tailBuf = tailBuf[:0]
|
||||
again:
|
||||
n, err := r.Read(dstBuf[len(dstBuf):cap(dstBuf)])
|
||||
// Check for error only if zero bytes read from r, i.e. no forward progress made.
|
||||
// Otherwise process the read data.
|
||||
if n == 0 {
|
||||
if err == nil {
|
||||
return dstBuf, tailBuf, fmt.Errorf("no forward progress made")
|
||||
}
|
||||
if err == io.EOF && len(dstBuf) > 0 {
|
||||
// Missing newline at the end of the stream. This is OK,
|
||||
// so suppress io.EOF for now. It will be returned during the next
|
||||
// call to ReadLinesBlock.
|
||||
// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/60 .
|
||||
return dstBuf, tailBuf, nil
|
||||
}
|
||||
return dstBuf, tailBuf, err
|
||||
}
|
||||
dstBuf = dstBuf[:len(dstBuf)+n]
|
||||
|
||||
// Search for the last newline in dstBuf and put the rest into tailBuf.
|
||||
nn := bytes.LastIndexByte(dstBuf[len(dstBuf)-n:], '\n')
|
||||
if nn < 0 {
|
||||
// Didn't find a single complete line.
|
||||
if len(dstBuf) > maxLineLen {
|
||||
return dstBuf, tailBuf, fmt.Errorf("too long line: more than %d bytes", maxLineLen)
|
||||
}
|
||||
if cap(dstBuf) < 2*len(dstBuf) {
|
||||
// Increase dstBuf capacity, so more data can be read into it.
|
||||
dstBufLen := len(dstBuf)
|
||||
dstBuf = bytesutil.Resize(dstBuf, 2*cap(dstBuf))
|
||||
dstBuf = dstBuf[:dstBufLen]
|
||||
}
|
||||
goto again
|
||||
}
|
||||
|
||||
// Found at least a single line. Return it.
|
||||
nn += len(dstBuf) - n
|
||||
tailBuf = append(tailBuf[:0], dstBuf[nn+1:]...)
|
||||
dstBuf = dstBuf[:nn]
|
||||
return dstBuf, tailBuf, nil
|
||||
}
|
||||
213
app/vminsert/common/lines_reader_test.go
Normal file
@@ -0,0 +1,213 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadLinesBlockFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
r := bytes.NewBufferString(s)
|
||||
if _, _, err := ReadLinesBlock(r, nil, nil); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
sbr := &singleByteReader{
|
||||
b: []byte(s),
|
||||
}
|
||||
if _, _, err := ReadLinesBlock(sbr, nil, nil); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
fr := &failureReader{}
|
||||
if _, _, err := ReadLinesBlock(fr, nil, nil); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// empty string
|
||||
f("")
|
||||
|
||||
// too long string
|
||||
b := make([]byte, maxLineSize+1)
|
||||
f(string(b))
|
||||
}
|
||||
|
||||
type failureReader struct{}
|
||||
|
||||
func (fr *failureReader) Read(p []byte) (int, error) {
|
||||
return 0, fmt.Errorf("some error")
|
||||
}
|
||||
|
||||
func TestReadLinesBlockMultiLinesSingleByteReader(t *testing.T) {
|
||||
f := func(s string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := &singleByteReader{
|
||||
b: []byte(s),
|
||||
}
|
||||
var err error
|
||||
var dstBuf, tailBuf []byte
|
||||
var lines []string
|
||||
for {
|
||||
dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
t.Fatalf("unexpected error in ReadLinesBlock(%q): %s", s, err)
|
||||
}
|
||||
lines = append(lines, string(dstBuf))
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines after reading %q: got %q; want %q", s, lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("foo", []string{"foo"})
|
||||
f("foo\n", []string{"foo"})
|
||||
f("foo\nbar", []string{"foo", "bar"})
|
||||
f("\nfoo\nbar", []string{"", "foo", "bar"})
|
||||
f("\nfoo\nbar\n", []string{"", "foo", "bar"})
|
||||
f("\nfoo\nbar\n\n", []string{"", "foo", "bar", ""})
|
||||
}
|
||||
|
||||
func TestReadLinesBlockMultiLinesBytesBuffer(t *testing.T) {
|
||||
f := func(s string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(s)
|
||||
var err error
|
||||
var dstBuf, tailBuf []byte
|
||||
var lines []string
|
||||
for {
|
||||
dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
t.Fatalf("unexpected error in ReadLinesBlock(%q): %s", s, err)
|
||||
}
|
||||
lines = append(lines, string(dstBuf))
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines after reading %q: got %q; want %q", s, lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("foo", []string{"foo"})
|
||||
f("foo\n", []string{"foo"})
|
||||
f("foo\nbar", []string{"foo", "bar"})
|
||||
f("\nfoo\nbar", []string{"\nfoo", "bar"})
|
||||
f("\nfoo\nbar\n", []string{"\nfoo\nbar"})
|
||||
f("\nfoo\nbar\n\n", []string{"\nfoo\nbar\n"})
|
||||
}
|
||||
|
||||
func TestReadLinesBlockSuccessSingleByteReader(t *testing.T) {
|
||||
f := func(s, dstBufExpected, tailBufExpected string) {
|
||||
t.Helper()
|
||||
|
||||
r := &singleByteReader{
|
||||
b: []byte(s),
|
||||
}
|
||||
dstBuf, tailBuf, err := ReadLinesBlock(r, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if string(dstBuf) != dstBufExpected {
|
||||
t.Fatalf("unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
|
||||
}
|
||||
if string(tailBuf) != tailBufExpected {
|
||||
t.Fatalf("unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
|
||||
}
|
||||
|
||||
// Verify the same with non-empty dstBuf and tailBuf
|
||||
r = &singleByteReader{
|
||||
b: []byte(s),
|
||||
}
|
||||
dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf[:0])
|
||||
if err != nil {
|
||||
t.Fatalf("non-empty bufs: unexpected error: %s", err)
|
||||
}
|
||||
if string(dstBuf) != dstBufExpected {
|
||||
t.Fatalf("non-empty bufs: unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
|
||||
}
|
||||
if string(tailBuf) != tailBufExpected {
|
||||
t.Fatalf("non-empty bufs: unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
|
||||
}
|
||||
}
|
||||
|
||||
f("\n", "", "")
|
||||
f("foo\n", "foo", "")
|
||||
f("\nfoo", "", "")
|
||||
f("foo\nbar", "foo", "")
|
||||
f("foo\nbar\nbaz", "foo", "")
|
||||
f("foo", "foo", "")
|
||||
|
||||
// The maximum line size
|
||||
b := make([]byte, maxLineSize+10)
|
||||
b[maxLineSize] = '\n'
|
||||
f(string(b), string(b[:maxLineSize]), "")
|
||||
}
|
||||
|
||||
func TestReadLinesBlockSuccessBytesBuffer(t *testing.T) {
|
||||
f := func(s, dstBufExpected, tailBufExpected string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(s)
|
||||
dstBuf, tailBuf, err := ReadLinesBlock(r, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if string(dstBuf) != dstBufExpected {
|
||||
t.Fatalf("unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
|
||||
}
|
||||
if string(tailBuf) != tailBufExpected {
|
||||
t.Fatalf("unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
|
||||
}
|
||||
|
||||
// Verify the same with non-empty dstBuf and tailBuf
|
||||
r = bytes.NewBufferString(s)
|
||||
dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf[:0])
|
||||
if err != nil {
|
||||
t.Fatalf("non-empty bufs: unexpected error: %s", err)
|
||||
}
|
||||
if string(dstBuf) != dstBufExpected {
|
||||
t.Fatalf("non-empty bufs: unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
|
||||
}
|
||||
if string(tailBuf) != tailBufExpected {
|
||||
t.Fatalf("non-empty bufs: unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
|
||||
}
|
||||
}
|
||||
|
||||
f("\n", "", "")
|
||||
f("foo\n", "foo", "")
|
||||
f("\nfoo", "", "foo")
|
||||
f("foo\nbar", "foo", "bar")
|
||||
f("foo\nbar\nbaz", "foo\nbar", "baz")
|
||||
|
||||
// The maximum line size
|
||||
b := make([]byte, maxLineSize+10)
|
||||
b[maxLineSize] = '\n'
|
||||
f(string(b), string(b[:maxLineSize]), string(b[maxLineSize+1:]))
|
||||
}
|
||||
|
||||
type singleByteReader struct {
|
||||
b []byte
|
||||
}
|
||||
|
||||
func (sbr *singleByteReader) Read(p []byte) (int, error) {
|
||||
if len(sbr.b) == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := copy(p, sbr.b[:1])
|
||||
sbr.b = sbr.b[n:]
|
||||
if len(sbr.b) == 0 {
|
||||
return n, io.EOF
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
@@ -1,34 +1,75 @@
|
||||
package concurrencylimiter
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts")
|
||||
|
||||
var (
|
||||
// ch is the channel for limiting concurrent inserts.
|
||||
// Put an item into it before performing an insert and remove
|
||||
// the item after the insert is complete.
|
||||
ch = make(chan struct{}, runtime.GOMAXPROCS(-1)*2)
|
||||
// ch is the channel for limiting concurrent calls to Do.
|
||||
ch chan struct{}
|
||||
|
||||
// waitDuration is the amount of time to wait until at least a single
|
||||
// concurrent insert out of cap(Ch) inserts is complete.
|
||||
// concurrent Do call out of cap(ch) inserts is complete.
|
||||
waitDuration = time.Second * 30
|
||||
)
|
||||
|
||||
// Init initializes concurrencylimiter.
|
||||
//
|
||||
// Init must be called after flag.Parse call.
|
||||
func Init() {
|
||||
ch = make(chan struct{}, *maxConcurrentInserts)
|
||||
}
|
||||
|
||||
// Do calls f with the limited concurrency.
|
||||
func Do(f func() error) error {
|
||||
// Limit the number of conurrent inserts in order to prevent from excess
|
||||
// Limit the number of concurrent f calls in order to prevent excess
|
||||
// memory usage and CPU thrashing.
|
||||
t := time.NewTimer(waitDuration)
|
||||
select {
|
||||
case ch <- struct{}{}:
|
||||
t.Stop()
|
||||
err := f()
|
||||
<-ch
|
||||
return err
|
||||
default:
|
||||
}
|
||||
|
||||
// All the workers are busy.
|
||||
// Sleep for up to waitDuration.
|
||||
concurrencyLimitReached.Inc()
|
||||
t := timerpool.Get(waitDuration)
|
||||
select {
|
||||
case ch <- struct{}{}:
|
||||
timerpool.Put(t)
|
||||
err := f()
|
||||
<-ch
|
||||
return err
|
||||
case <-t.C:
|
||||
return fmt.Errorf("the server is overloaded with %d concurrent inserts; either increase the number of CPUs or reduce the load", cap(ch))
|
||||
timerpool.Put(t)
|
||||
concurrencyLimitTimeout.Inc()
|
||||
return &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("the server is overloaded with %d concurrent inserts; either increase -maxConcurrentInserts or reduce the load", cap(ch)),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
concurrencyLimitReached = metrics.NewCounter(`vm_concurrent_insert_limit_reached_total`)
|
||||
concurrencyLimitTimeout = metrics.NewCounter(`vm_concurrent_insert_limit_timeout_total`)
|
||||
|
||||
_ = metrics.NewGauge(`vm_concurrent_insert_capacity`, func() float64 {
|
||||
return float64(cap(ch))
|
||||
})
|
||||
_ = metrics.NewGauge(`vm_concurrent_insert_current`, func() float64 {
|
||||
return float64(len(ch))
|
||||
})
|
||||
)
|
||||
|
||||
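As a quick illustration of the limiter defined above (a minimal sketch, not part of the diff): Init is called once after flag.Parse, and each insert is wrapped in Do so that at most -maxConcurrentInserts callbacks run at once. The doInsert helper is a hypothetical placeholder.

package main

import (
	"flag"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
)

func main() {
	flag.Parse()
	// Init must be called after flag.Parse, as noted in the doc comment above.
	concurrencylimiter.Init()

	// Do blocks for up to waitDuration when all slots are busy and then
	// returns a 503 ErrorWithStatusCode, as implemented above.
	if err := concurrencylimiter.Do(doInsert); err != nil {
		log.Printf("insert failed: %s", err)
	}
}

// doInsert stands in for the real insert work.
func doInsert() error {
	return nil
}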
@@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
@@ -34,13 +36,8 @@ func (rs *Rows) Reset() {
|
||||
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
|
||||
//
|
||||
// s must not be changed while rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) error {
|
||||
var err error
|
||||
rs.Rows, rs.tagsPool, err = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
func (rs *Rows) Unmarshal(s string) {
|
||||
rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
|
||||
}
|
||||
|
||||
// Row is a single graphite row.
|
||||
@@ -83,49 +80,61 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||
tags := tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
}
|
||||
if len(r.Metric) == 0 {
|
||||
return tagsPool, fmt.Errorf("metric cannot be empty")
|
||||
}
|
||||
|
||||
n = strings.IndexByte(tail, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between value and timestamp in %q", s)
|
||||
// There is no timestamp. Use default timestamp instead.
|
||||
r.Value = fastfloat.ParseBestEffort(tail)
|
||||
return tagsPool, nil
|
||||
}
|
||||
r.Value = fastfloat.ParseBestEffort(tail[:n])
|
||||
r.Timestamp = fastfloat.ParseInt64BestEffort(tail[n+1:])
|
||||
return tagsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag, error) {
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n == 0 {
|
||||
// Skip empty line
|
||||
s = s[1:]
|
||||
continue
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s, tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
}
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s[:n], tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
return unmarshalRow(dst, s, tagsPool)
|
||||
}
|
||||
dst, tagsPool = unmarshalRow(dst, s[:n], tagsPool)
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
func unmarshalRow(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
|
||||
if len(s) > 0 && s[len(s)-1] == '\r' {
|
||||
s = s[:len(s)-1]
|
||||
}
|
||||
if len(s) == 0 {
|
||||
// Skip empty line
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s, tagsPool)
|
||||
if err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal Graphite line %q: %s", s, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="graphite"}`)
|
||||
|
||||
func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
@@ -141,12 +150,20 @@ func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
|
||||
if err := tag.unmarshal(s); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
if len(tag.Key) == 0 || len(tag.Value) == 0 {
|
||||
// Skip empty tag
|
||||
dst = dst[:len(dst)-1]
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := tag.unmarshal(s[:n]); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
s = s[n+1:]
|
||||
if len(tag.Key) == 0 || len(tag.Value) == 0 {
|
||||
// Skip empty tag
|
||||
dst = dst[:len(dst)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -168,9 +185,6 @@ func (t *Tag) unmarshal(s string) error {
|
||||
return fmt.Errorf("missing tag value for %q", s)
|
||||
}
|
||||
t.Key = s[:n]
|
||||
if len(t.Key) == 0 {
|
||||
return fmt.Errorf("tag key cannot be empty for %q", s)
|
||||
}
|
||||
t.Value = s[n+1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
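To illustrate the new contract (a hypothetical snippet mirroring the "Multi lines with invalid line" test below): Unmarshal no longer reports an error; a malformed line is dropped, logged, and counted in vm_rows_invalid_total{type="graphite"}, and the caller simply inspects rows.Rows.

package graphite

// exampleUnmarshal is a hypothetical helper showing the error-free Unmarshal.
func exampleUnmarshal() []Row {
	var rows Rows
	rows.Unmarshal("foo 0.3 2\naaa\nbar.baz 0.34 43\n")
	// The invalid middle line "aaa" was skipped; only "foo" and "bar.baz" remain.
	return rows.Rows
}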
@@ -9,48 +9,42 @@ func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
|
||||
}
|
||||
|
||||
// Try again
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
|
||||
}
|
||||
}
|
||||
|
||||
// Missing metric
|
||||
f(" 123 455")
|
||||
|
||||
// Missing value
|
||||
f("aaa")
|
||||
|
||||
// Missing timestamp
|
||||
f("aaa 1123")
|
||||
|
||||
// Invalid multiline
|
||||
f("aaa\nbbb 123 34")
|
||||
|
||||
// missing tag
|
||||
f("aa; 12 34")
|
||||
|
||||
// missing tag value
|
||||
f("aa;bb 23 34")
|
||||
f("aa;=dsd 234 45")
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
@@ -63,7 +57,9 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\r", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
f("\n\r\n", &Rows{})
|
||||
|
||||
// Single line
|
||||
f("foobar -123.456 789", &Rows{
|
||||
@@ -81,6 +77,23 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
}},
|
||||
})
|
||||
|
||||
// Missing timestamp
|
||||
f("aaa 1123", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "aaa",
|
||||
Value: 1123,
|
||||
}},
|
||||
})
|
||||
|
||||
// Timestamp bigger than 1<<31
|
||||
f("aaa 1123 429496729600", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "aaa",
|
||||
Value: 1123,
|
||||
Timestamp: 429496729600,
|
||||
}},
|
||||
})
|
||||
|
||||
// Tags
|
||||
f("foo;bar=baz 1 2", &Rows{
|
||||
Rows: []Row{{
|
||||
@@ -93,7 +106,8 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
f("foo;bar=baz;aa=;x=y 1 2", &Rows{
|
||||
// Empty tags
|
||||
f("foo;bar=baz;aa=;x=y;=z 1 2", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{
|
||||
@@ -101,10 +115,6 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
Key: "bar",
|
||||
Value: "baz",
|
||||
},
|
||||
{
|
||||
Key: "aa",
|
||||
Value: "",
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: "y",
|
||||
@@ -116,7 +126,27 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
})
|
||||
|
||||
// Multi lines
|
||||
f("foo 0.3 2\nbar.baz 0.34 43\n", &Rows{
|
||||
f("foo 0.3 2\naaa 3\nbar.baz 0.34 43\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Metric: "foo",
|
||||
Value: 0.3,
|
||||
Timestamp: 2,
|
||||
},
|
||||
{
|
||||
Metric: "aaa",
|
||||
Value: 3,
|
||||
},
|
||||
{
|
||||
Metric: "bar.baz",
|
||||
Value: 0.34,
|
||||
Timestamp: 43,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Multi lines with invalid line
|
||||
f("foo 0.3 2\naaa\nbar.baz 0.34 43\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Metric: "foo",
|
||||
|
||||
@@ -16,8 +16,9 @@ cpu.usage_irq 0.34432 1234556768
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal %q: %s", s, err))
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 4 {
|
||||
panic(fmt.Errorf("unexpected number of rows unmarshaled: got %d; want 4", len(rows.Rows)))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -15,7 +14,10 @@ import (
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="graphite"}`)
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="graphite"}`)
|
||||
rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="graphite"}`)
|
||||
)
|
||||
|
||||
// insertHandler processes remote write for graphite plaintext protocol.
|
||||
//
|
||||
@@ -52,59 +54,54 @@ func (ctx *pushCtx) InsertRows() error {
|
||||
ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(rows))
|
||||
rowsPerInsert.Update(float64(len(rows)))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
const maxReadPacketSize = 4 * 1024 * 1024
|
||||
|
||||
const flushTimeout = 3 * time.Second
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader) bool {
|
||||
graphiteReadCalls.Inc()
|
||||
readCalls.Inc()
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
if c, ok := r.(net.Conn); ok {
|
||||
if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil {
|
||||
graphiteReadErrors.Inc()
|
||||
readErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot set read deadline: %s", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
lr := io.LimitReader(r, maxReadPacketSize)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf.B = append(ctx.reqBuf.B[:0], ctx.tailBuf...)
|
||||
n, err := io.CopyBuffer(&ctx.reqBuf, lr, ctx.copyBuf[:])
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf)
|
||||
if ctx.err != nil {
|
||||
if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() {
|
||||
// Flush the read data on timeout and try reading again.
|
||||
ctx.err = nil
|
||||
} else {
|
||||
graphiteReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", err)
|
||||
if ctx.err != io.EOF {
|
||||
readErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", ctx.err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
} else if n < maxReadPacketSize {
|
||||
// Mark the end of stream.
|
||||
ctx.err = io.EOF
|
||||
}
|
||||
ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))
|
||||
|
||||
// Fill missing timestamps with the current timestamp rounded to seconds.
|
||||
currentTimestamp := time.Now().Unix()
|
||||
rows := ctx.Rows.Rows
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp == 0 {
|
||||
r.Timestamp = currentTimestamp
|
||||
}
|
||||
}
|
||||
|
||||
// Parse all the rows until the last newline in ctx.reqBuf.B
|
||||
nn := bytes.LastIndexByte(ctx.reqBuf.B, '\n')
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
if nn >= 0 {
|
||||
ctx.tailBuf = append(ctx.tailBuf[:0], ctx.reqBuf.B[nn+1:]...)
|
||||
ctx.reqBuf.B = ctx.reqBuf.B[:nn]
|
||||
}
|
||||
if err = ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf.B)); err != nil {
|
||||
graphiteUnmarshalErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot unmarshal graphite plaintext protocol data with size %d: %s", len(ctx.reqBuf.B), err)
|
||||
return false
|
||||
// Convert timestamps from seconds to milliseconds.
|
||||
for i := range rows {
|
||||
rows[i].Timestamp *= 1e3
|
||||
}
|
||||
|
||||
// Convert timestamps from seconds to milliseconds
|
||||
for i := range ctx.Rows.Rows {
|
||||
ctx.Rows.Rows[i].Timestamp *= 1e3
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -112,9 +109,8 @@ type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
reqBuf []byte
|
||||
tailBuf []byte
|
||||
copyBuf [16 * 1024]byte
|
||||
|
||||
err error
|
||||
}
|
||||
@@ -129,16 +125,15 @@ func (ctx *pushCtx) Error() error {
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf = ctx.reqBuf[:0]
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
var (
|
||||
graphiteReadCalls = metrics.NewCounter(`vm_read_calls_total{name="graphite"}`)
|
||||
graphiteReadErrors = metrics.NewCounter(`vm_read_errors_total{name="graphite"}`)
|
||||
graphiteUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="graphite"}`)
|
||||
readCalls = metrics.NewCounter(`vm_read_calls_total{name="graphite"}`)
|
||||
readErrors = metrics.NewCounter(`vm_read_errors_total{name="graphite"}`)
|
||||
)
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
|
||||
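A rough sketch of the read loop that the new Read method relies on, assuming common.ReadLinesBlock has the signature implied by the calls above - ReadLinesBlock(r io.Reader, dstBuf, tailBuf []byte) ([]byte, []byte, error) - and that the common package lives at app/vminsert/common. The readAllRows name is hypothetical.

package graphite

import (
	"fmt"
	"io"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

// readAllRows drains r block by block until EOF. Each returned reqBuf holds
// whole lines only; the incomplete trailing line is carried over in tailBuf
// and prepended to the next block by ReadLinesBlock.
func readAllRows(r io.Reader) error {
	var rows Rows
	var reqBuf, tailBuf []byte
	for {
		var err error
		reqBuf, tailBuf, err = common.ReadLinesBlock(r, reqBuf, tailBuf)
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return fmt.Errorf("cannot read graphite plaintext data: %s", err)
		}
		rows.Unmarshal(bytesutil.ToUnsafeString(reqBuf))
		// ... convert rows.Rows into data points here ...
	}
}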
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
@@ -20,36 +21,62 @@ var (
|
||||
writeErrorsUDP = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="udp"}`)
|
||||
)
|
||||
|
||||
// Serve starts graphite server on the given addr.
|
||||
func Serve(addr string) {
|
||||
// Server accepts Graphite plaintext lines over TCP and UDP.
|
||||
type Server struct {
|
||||
addr string
|
||||
lnTCP net.Listener
|
||||
lnUDP net.PacketConn
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// MustStart starts graphite server on the given addr.
|
||||
//
|
||||
// MustStop must be called on the returned server when it is no longer needed.
|
||||
func MustStart(addr string) *Server {
|
||||
logger.Infof("starting TCP Graphite server at %q", addr)
|
||||
lnTCP, err := net.Listen("tcp4", addr)
|
||||
lnTCP, err := netutil.NewTCPListener("graphite", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start TCP Graphite server at %q: %s", addr, err)
|
||||
}
|
||||
listenerTCP = lnTCP
|
||||
|
||||
logger.Infof("starting UDP Graphite server at %q", addr)
|
||||
lnUDP, err := net.ListenPacket("udp4", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start UDP Graphite server at %q: %s", addr, err)
|
||||
}
|
||||
listenerUDP = lnUDP
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
s := &Server{
|
||||
addr: addr,
|
||||
lnTCP: lnTCP,
|
||||
lnUDP: lnUDP,
|
||||
}
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveTCP(listenerTCP)
|
||||
defer s.wg.Done()
|
||||
serveTCP(lnTCP)
|
||||
logger.Infof("stopped TCP Graphite server at %q", addr)
|
||||
}()
|
||||
wg.Add(1)
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveUDP(listenerUDP)
|
||||
defer s.wg.Done()
|
||||
serveUDP(lnUDP)
|
||||
logger.Infof("stopped UDP Graphite server at %q", addr)
|
||||
}()
|
||||
wg.Wait()
|
||||
return s
|
||||
}
|
||||
|
||||
// MustStop stops the server.
|
||||
func (s *Server) MustStop() {
|
||||
logger.Infof("stopping TCP Graphite server at %q...", s.addr)
|
||||
if err := s.lnTCP.Close(); err != nil {
|
||||
logger.Errorf("cannot close TCP Graphite server: %s", err)
|
||||
}
|
||||
logger.Infof("stopping UDP Graphite server at %q...", s.addr)
|
||||
if err := s.lnUDP.Close(); err != nil {
|
||||
logger.Errorf("cannot close UDP Graphite server: %s", err)
|
||||
}
|
||||
s.wg.Wait()
|
||||
logger.Infof("TCP and UDP Graphite servers at %q have been stopped", s.addr)
|
||||
}
|
||||
|
||||
func serveTCP(ln net.Listener) {
|
||||
@@ -58,6 +85,7 @@ func serveTCP(ln net.Listener) {
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("graphite: temporary error when listening for TCP addr %q: %s", ln.Addr(), err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
@@ -96,6 +124,7 @@ func serveUDP(ln net.PacketConn) {
|
||||
writeErrorsUDP.Inc()
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("graphite: temporary error when listening for UDP addr %q: %s", ln.LocalAddr(), err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
@@ -118,20 +147,3 @@ func serveUDP(ln net.PacketConn) {
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var (
|
||||
listenerTCP net.Listener
|
||||
listenerUDP net.PacketConn
|
||||
)
|
||||
|
||||
// Stop stops the server.
|
||||
func Stop() {
|
||||
logger.Infof("stopping TCP Graphite server at %q...", listenerTCP.Addr())
|
||||
if err := listenerTCP.Close(); err != nil {
|
||||
logger.Errorf("cannot close TCP Graphite server: %s", err)
|
||||
}
|
||||
logger.Infof("stopping UDP Graphite server at %q...", listenerUDP.LocalAddr())
|
||||
if err := listenerUDP.Close(); err != nil {
|
||||
logger.Errorf("cannot close UDP Graphite server: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
@@ -41,13 +43,8 @@ func (rs *Rows) Reset() {
|
||||
// See https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/
|
||||
//
|
||||
// s must not be changed while rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) error {
|
||||
var err error
|
||||
rs.Rows, rs.tagsPool, rs.fieldsPool, err = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0], rs.fieldsPool[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
func (rs *Rows) Unmarshal(s string) {
|
||||
rs.Rows, rs.tagsPool, rs.fieldsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0], rs.fieldsPool[:0])
|
||||
}
|
||||
|
||||
// Row is a single influx row.
|
||||
@@ -65,9 +62,8 @@ func (r *Row) reset() {
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
func (r *Row) unmarshal(s string, tagsPool []Tag, fieldsPool []Field) ([]Tag, []Field, error) {
|
||||
func (r *Row) unmarshal(s string, tagsPool []Tag, fieldsPool []Field, noEscapeChars bool) ([]Tag, []Field, error) {
|
||||
r.reset()
|
||||
noEscapeChars := strings.IndexByte(s, '\\') < 0
|
||||
n := nextUnescapedChar(s, ' ', noEscapeChars)
|
||||
if n < 0 {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("cannot find Whitespace I in %q", s)
|
||||
@@ -89,9 +85,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag, fieldsPool []Field) ([]Tag, []
|
||||
measurementTags = measurementTags[:n]
|
||||
}
|
||||
r.Measurement = unescapeTagValue(measurementTags, noEscapeChars)
|
||||
if len(r.Measurement) == 0 {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("measurement cannot be empty. measurementTags=%q", s)
|
||||
}
|
||||
// Allow empty r.Measurement. In this case metric name is constructed directly from field keys.
|
||||
|
||||
// Parse fields
|
||||
fieldsStart := len(fieldsPool)
|
||||
@@ -141,9 +135,6 @@ func (tag *Tag) unmarshal(s string, noEscapeChars bool) error {
|
||||
return fmt.Errorf("missing tag value for %q", s)
|
||||
}
|
||||
tag.Key = unescapeTagValue(s[:n], noEscapeChars)
|
||||
if len(tag.Key) == 0 {
|
||||
return fmt.Errorf("tag key cannot be empty")
|
||||
}
|
||||
tag.Value = unescapeTagValue(s[n+1:], noEscapeChars)
|
||||
return nil
|
||||
}
|
||||
@@ -177,39 +168,51 @@ func (f *Field) unmarshal(s string, noEscapeChars, hasQuotedFields bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag, fieldsPool []Field) ([]Row, []Tag, []Field, error) {
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag, fieldsPool []Field) ([]Row, []Tag, []Field) {
|
||||
noEscapeChars := strings.IndexByte(s, '\\') < 0
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n == 0 {
|
||||
// Skip empty line
|
||||
s = s[1:]
|
||||
continue
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
var err error
|
||||
tagsPool, fieldsPool, err = r.unmarshal(s, tagsPool, fieldsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, fieldsPool, err
|
||||
}
|
||||
return dst, tagsPool, fieldsPool, nil
|
||||
}
|
||||
var err error
|
||||
tagsPool, fieldsPool, err = r.unmarshal(s[:n], tagsPool, fieldsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, fieldsPool, err
|
||||
return unmarshalRow(dst, s, tagsPool, fieldsPool, noEscapeChars)
|
||||
}
|
||||
dst, tagsPool, fieldsPool = unmarshalRow(dst, s[:n], tagsPool, fieldsPool, noEscapeChars)
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool, fieldsPool, nil
|
||||
return dst, tagsPool, fieldsPool
|
||||
}
|
||||
|
||||
func unmarshalRow(dst []Row, s string, tagsPool []Tag, fieldsPool []Field, noEscapeChars bool) ([]Row, []Tag, []Field) {
|
||||
if len(s) > 0 && s[len(s)-1] == '\r' {
|
||||
s = s[:len(s)-1]
|
||||
}
|
||||
if len(s) == 0 {
|
||||
// Skip empty line
|
||||
return dst, tagsPool, fieldsPool
|
||||
}
|
||||
if s[0] == '#' {
|
||||
// Skip comment
|
||||
return dst, tagsPool, fieldsPool
|
||||
}
|
||||
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
var err error
|
||||
tagsPool, fieldsPool, err = r.unmarshal(s, tagsPool, fieldsPool, noEscapeChars)
|
||||
if err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal Influx line %q: %s; skipping it", s, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst, tagsPool, fieldsPool
|
||||
}
|
||||
|
||||
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="influx"}`)
|
||||
|
||||
func unmarshalTags(dst []Tag, s string, noEscapeChars bool) ([]Tag, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
@@ -221,14 +224,22 @@ func unmarshalTags(dst []Tag, s string, noEscapeChars bool) ([]Tag, error) {
|
||||
n := nextUnescapedChar(s, ',', noEscapeChars)
|
||||
if n < 0 {
|
||||
if err := tag.unmarshal(s, noEscapeChars); err != nil {
|
||||
return dst, err
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
if len(tag.Key) == 0 || len(tag.Value) == 0 {
|
||||
// Skip empty tag
|
||||
dst = dst[:len(dst)-1]
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := tag.unmarshal(s[:n], noEscapeChars); err != nil {
|
||||
return dst, err
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
s = s[n+1:]
|
||||
if len(tag.Key) == 0 || len(tag.Value) == 0 {
|
||||
// Skip empty tag
|
||||
dst = dst[:len(dst)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -293,8 +304,10 @@ func parseFieldValue(s string, hasQuotedFields bool) (float64, error) {
|
||||
if len(s) < 2 || s[len(s)-1] != '"' {
|
||||
return 0, fmt.Errorf("missing closing quote for quoted field value %s", s)
|
||||
}
|
||||
// Quoted string is translated to empty value.
|
||||
return 0, nil
|
||||
// Try converting quoted string to number, since sometimes Influx agents
|
||||
// send numbers as strings.
|
||||
s = s[1 : len(s)-1]
|
||||
return fastfloat.ParseBestEffort(s), nil
|
||||
}
|
||||
ch := s[len(s)-1]
|
||||
if ch == 'i' {
|
||||
|
||||
@@ -74,19 +74,18 @@ func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("expecting zero rows; got %d rows", len(rows.Rows))
|
||||
}
|
||||
|
||||
// Try again
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("expecting zero rows; got %d rows", len(rows.Rows))
|
||||
}
|
||||
}
|
||||
|
||||
// Missing measurement
|
||||
f(",foo=bar baz=123")
|
||||
|
||||
// No fields
|
||||
f("foo")
|
||||
f("foo,bar=baz 1234")
|
||||
@@ -94,12 +93,8 @@ func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
// Missing tag value
|
||||
f("foo,bar")
|
||||
f("foo,bar baz")
|
||||
f("foo,bar= baz")
|
||||
f("foo,bar=123, 123")
|
||||
|
||||
// Missing tag name
|
||||
f("foo,=bar baz=234")
|
||||
|
||||
// Missing field value
|
||||
f("foo bar")
|
||||
f("foo bar=")
|
||||
@@ -122,17 +117,13 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
@@ -146,6 +137,36 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
f("\n\r\n", &Rows{})
|
||||
|
||||
// Comment
|
||||
f("\n# foobar\n", &Rows{})
|
||||
f("#foobar baz", &Rows{})
|
||||
f("#foobar baz\n#sss", &Rows{})
|
||||
|
||||
// Missing measurement
|
||||
f(" baz=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "",
|
||||
Fields: []Field{{
|
||||
Key: "baz",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f(",foo=bar baz=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "",
|
||||
Tags: []Tag{{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "baz",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Minimal line without tags and timestamp
|
||||
f("foo bar=123", &Rows{
|
||||
@@ -157,6 +178,15 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f("# comment\nfoo bar=123\r\n#comment2 sdsf dsf", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f("foo bar=123\n", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
@@ -216,7 +246,7 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
})
|
||||
|
||||
// Line with empty tag values
|
||||
f("foo,tag1=xyz,tagN=,tag2=43as bar=123", &Rows{
|
||||
f("foo,tag1=xyz,tagN=,tag2=43as,=xxx bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
@@ -224,10 +254,6 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tagN",
|
||||
Value: "",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
@@ -241,17 +267,27 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
})
|
||||
|
||||
// Line with multiple tags, multiple fields and timestamp
|
||||
f(`system,host=ip-172-16-10-144 uptime_format="3 days, 21:01" 1557761040000000000`, &Rows{
|
||||
f(`system,host=ip-172-16-10-144 uptime_format="3 days, 21:01",quoted_float="-1.23",quoted_int="123" 1557761040000000000`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "system",
|
||||
Tags: []Tag{{
|
||||
Key: "host",
|
||||
Value: "ip-172-16-10-144",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "uptime_format",
|
||||
Value: 0,
|
||||
}},
|
||||
Fields: []Field{
|
||||
{
|
||||
Key: "uptime_format",
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Key: "quoted_float",
|
||||
Value: -1.23,
|
||||
},
|
||||
{
|
||||
Key: "quoted_int",
|
||||
Value: 123,
|
||||
},
|
||||
},
|
||||
Timestamp: 1557761040000000000,
|
||||
}},
|
||||
})
|
||||
@@ -299,11 +335,11 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
})
|
||||
|
||||
// Escape chars
|
||||
f(`fo\,bar\=baz,x\==\\a\,\=\q\ \\\a\=\,=4.34`, &Rows{
|
||||
f(`fo\,bar\=baz,x\=\b=\\a\,\=\q\ \\\a\=\,=4.34`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: `fo,bar=baz`,
|
||||
Tags: []Tag{{
|
||||
Key: `x=`,
|
||||
Key: `x=\b`,
|
||||
Value: `\a,=\q `,
|
||||
}},
|
||||
Fields: []Field{{
|
||||
@@ -312,6 +348,36 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Test case from https://community.librenms.org/t/integration-with-victoriametrics/9689
|
||||
f("ports,foo=a,bar=et\\ +\\ V,baz=ype INDISCARDS=245333676,OUTDISCARDS=1798680", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "ports",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "foo",
|
||||
Value: "a",
|
||||
},
|
||||
{
|
||||
Key: "bar",
|
||||
Value: "et + V",
|
||||
},
|
||||
{
|
||||
Key: "baz",
|
||||
Value: "ype",
|
||||
},
|
||||
},
|
||||
Fields: []Field{
|
||||
{
|
||||
Key: "INDISCARDS",
|
||||
Value: 245333676,
|
||||
},
|
||||
{
|
||||
Key: "OUTDISCARDS",
|
||||
Value: 1798680,
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
|
||||
// Multiple lines
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
@@ -338,4 +404,78 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Multiple lines with invalid line in the middle.
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
"invalid line\n"+
|
||||
"bar x=-1i\n\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "field",
|
||||
Value: 1.23,
|
||||
}},
|
||||
Timestamp: 48934,
|
||||
},
|
||||
{
|
||||
Measurement: "bar",
|
||||
Fields: []Field{{
|
||||
Key: "x",
|
||||
Value: -1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// No newline after the second line.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/82
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
"bar x=-1i", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "field",
|
||||
Value: 1.23,
|
||||
}},
|
||||
Timestamp: 48934,
|
||||
},
|
||||
{
|
||||
Measurement: "bar",
|
||||
Fields: []Field{{
|
||||
Key: "x",
|
||||
Value: -1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f("x,y=z,g=p:\\ \\ 5432\\,\\ gp\\ mon\\ [lol]\\ con10\\ cmd5\\ SELECT f=1", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "x",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "y",
|
||||
Value: "z",
|
||||
},
|
||||
{
|
||||
Key: "g",
|
||||
Value: "p: 5432, gp mon [lol] con10 cmd5 SELECT",
|
||||
},
|
||||
},
|
||||
Fields: []Field{{
|
||||
Key: "f",
|
||||
Value: 1,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -6,14 +6,19 @@ import (
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `cpu usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 1234556768`
|
||||
s := `cpu usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 1234556768
|
||||
cpu usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 123455676344
|
||||
aaa usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 123455676344
|
||||
bbb usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 123455676344
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal %q: %s", s, err))
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 4 {
|
||||
panic(fmt.Errorf("unexpected number of rows parsed; got %d; want 4", len(rows.Rows)))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
package influx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -17,7 +16,15 @@ import (
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="influx"}`)
|
||||
var (
|
||||
measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for `{measurement}{separator}{field_name}` metric name when inserted via Influx line protocol")
|
||||
skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses `{measurement}` instead of `{measurement}{separator}{field_name}` for metric name if Influx line contains only a single field")
|
||||
)
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="influx"}`)
|
||||
rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="influx"}`)
|
||||
)
|
||||
|
||||
// InsertHandler processes remote write for influx line protocol.
|
||||
//
|
||||
@@ -29,15 +36,15 @@ func InsertHandler(req *http.Request) error {
|
||||
}
|
||||
|
||||
func insertHandlerInternal(req *http.Request) error {
|
||||
influxReadCalls.Inc()
|
||||
readCalls.Inc()
|
||||
|
||||
r := req.Body
|
||||
if req.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := getGzipReader(r)
|
||||
zr, err := common.GetGzipReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err)
|
||||
}
|
||||
defer putGzipReader(zr)
|
||||
defer common.PutGzipReader(zr)
|
||||
r = zr
|
||||
}
|
||||
|
||||
@@ -75,86 +82,62 @@ func (ctx *pushCtx) InsertRows(db string) error {
|
||||
rows := ctx.Rows.Rows
|
||||
rowsLen := 0
|
||||
for i := range rows {
|
||||
rowsLen += len(rows[i].Tags)
|
||||
rowsLen += len(rows[i].Fields)
|
||||
}
|
||||
ic := &ctx.Common
|
||||
ic.Reset(rowsLen)
|
||||
rowsTotal := 0
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("db", db)
|
||||
hasDBLabel := false
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
if tag.Key == "db" {
|
||||
hasDBLabel = true
|
||||
}
|
||||
ic.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
if len(db) > 0 && !hasDBLabel {
|
||||
ic.AddLabel("db", db)
|
||||
}
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf[:0], r.Measurement...)
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf, '.')
|
||||
skipFieldKey := len(r.Fields) == 1 && *skipSingleField
|
||||
if len(ctx.metricGroupBuf) > 0 && !skipFieldKey {
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf, *measurementFieldSeparator...)
|
||||
}
|
||||
metricGroupPrefixLen := len(ctx.metricGroupBuf)
|
||||
for j := range r.Fields {
|
||||
f := &r.Fields[j]
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf[:metricGroupPrefixLen], f.Key...)
|
||||
if !skipFieldKey {
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf[:metricGroupPrefixLen], f.Key...)
|
||||
}
|
||||
metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("", metricGroup)
|
||||
ic.WriteDataPoint(ctx.metricNameBuf, ic.Labels[:1], r.Timestamp, f.Value)
|
||||
}
|
||||
rowsInserted.Add(len(r.Fields))
|
||||
rowsTotal += len(r.Fields)
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
func getGzipReader(r io.Reader) (*gzip.Reader, error) {
|
||||
v := gzipReaderPool.Get()
|
||||
if v == nil {
|
||||
return gzip.NewReader(r)
|
||||
}
|
||||
zr := v.(*gzip.Reader)
|
||||
if err := zr.Reset(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return zr, nil
|
||||
}
|
||||
|
||||
func putGzipReader(zr *gzip.Reader) {
|
||||
_ = zr.Close()
|
||||
gzipReaderPool.Put(zr)
|
||||
}
|
||||
|
||||
var gzipReaderPool sync.Pool
|
||||
|
||||
const maxReadPacketSize = 4 * 1024 * 1024
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader, tsMultiplier int64) bool {
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
lr := io.LimitReader(r, maxReadPacketSize)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf.B = append(ctx.reqBuf.B[:0], ctx.tailBuf...)
|
||||
n, err := io.CopyBuffer(&ctx.reqBuf, lr, ctx.copyBuf[:])
|
||||
if err != nil {
|
||||
influxReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", err)
|
||||
return false
|
||||
}
|
||||
if n < maxReadPacketSize {
|
||||
// Mark the end of stream.
|
||||
ctx.err = io.EOF
|
||||
}
|
||||
|
||||
// Parse all the rows until the last newline in ctx.reqBuf.B
|
||||
nn := bytes.LastIndexByte(ctx.reqBuf.B, '\n')
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
if nn >= 0 {
|
||||
ctx.tailBuf = append(ctx.tailBuf[:0], ctx.reqBuf.B[nn+1:]...)
|
||||
ctx.reqBuf.B = ctx.reqBuf.B[:nn]
|
||||
}
|
||||
if err = ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf.B)); err != nil {
|
||||
influxUnmarshalErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot unmarshal influx line protocol data with size %d: %s", len(ctx.reqBuf.B), err)
|
||||
ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf)
|
||||
if ctx.err != nil {
|
||||
if ctx.err != io.EOF {
|
||||
readErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", ctx.err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))
|
||||
|
||||
// Adjust timestamps according to tsMultiplier
|
||||
currentTs := time.Now().UnixNano() / 1e6
|
||||
@@ -169,6 +152,7 @@ func (ctx *pushCtx) Read(r io.Reader, tsMultiplier int64) bool {
|
||||
}
|
||||
} else if tsMultiplier < 0 {
|
||||
tsMultiplier = -tsMultiplier
|
||||
currentTs -= currentTs % tsMultiplier
|
||||
for i := range ctx.Rows.Rows {
|
||||
row := &ctx.Rows.Rows[i]
|
||||
if row.Timestamp == 0 {
|
||||
@@ -182,18 +166,16 @@ func (ctx *pushCtx) Read(r io.Reader, tsMultiplier int64) bool {
|
||||
}
|
||||
|
||||
var (
|
||||
influxReadCalls = metrics.NewCounter(`vm_read_calls_total{name="influx"}`)
|
||||
influxReadErrors = metrics.NewCounter(`vm_read_errors_total{name="influx"}`)
|
||||
influxUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="influx"}`)
|
||||
readCalls = metrics.NewCounter(`vm_read_calls_total{name="influx"}`)
|
||||
readErrors = metrics.NewCounter(`vm_read_errors_total{name="influx"}`)
|
||||
)
|
||||
|
||||
type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
reqBuf []byte
|
||||
tailBuf []byte
|
||||
copyBuf [16 * 1024]byte
|
||||
metricNameBuf []byte
|
||||
metricGroupBuf []byte
|
||||
|
||||
@@ -211,7 +193,7 @@ func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf = ctx.reqBuf[:0]
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
ctx.metricNameBuf = ctx.metricNameBuf[:0]
|
||||
ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
|
||||
|
||||
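To make the naming and labeling logic above concrete (an illustration, not taken from the diff): with the default -influxMeasurementFieldSeparator of "_", the line `cpu,host=a usage_user=1.23,usage_idle=97.5` produces the two series cpu_usage_user{host="a"} and cpu_usage_idle{host="a"}; a db label is added as well when the db query arg is set and the line carries no db tag. With -influxSkipSingleField, a line holding exactly one field, e.g. `mem used=10`, is stored under the bare measurement name mem.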
@@ -6,37 +6,60 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty")
|
||||
opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpentTSDB put messages. Usually :4242 must be set. Doesn't work if empty")
|
||||
maxInsertRequestSize = flag.Int("maxInsertRequestSize", 32*1024*1024, "The maximum size of a single insert request in bytes")
|
||||
graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty")
|
||||
opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpenTSDB metrics. "+
|
||||
"Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+
|
||||
"Usually :4242 must be set. Doesn't work if empty")
|
||||
opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpenTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
|
||||
maxInsertRequestSize = flag.Int("maxInsertRequestSize", 32*1024*1024, "The maximum size of a single insert request in bytes")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped")
|
||||
)
|
||||
|
||||
var (
|
||||
graphiteServer *graphite.Server
|
||||
opentsdbServer *opentsdb.Server
|
||||
opentsdbhttpServer *opentsdbhttp.Server
|
||||
)
|
||||
|
||||
// Init initializes vminsert.
|
||||
func Init() {
|
||||
storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)
|
||||
|
||||
concurrencylimiter.Init()
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
go graphite.Serve(*graphiteListenAddr)
|
||||
graphiteServer = graphite.MustStart(*graphiteListenAddr)
|
||||
}
|
||||
if len(*opentsdbListenAddr) > 0 {
|
||||
go opentsdb.Serve(*opentsdbListenAddr)
|
||||
opentsdbServer = opentsdb.MustStart(*opentsdbListenAddr, int64(*maxInsertRequestSize))
|
||||
}
|
||||
if len(*opentsdbHTTPListenAddr) > 0 {
|
||||
opentsdbhttpServer = opentsdbhttp.MustStart(*opentsdbHTTPListenAddr, int64(*maxInsertRequestSize))
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops vminsert.
|
||||
func Stop() {
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
graphite.Stop()
|
||||
graphiteServer.MustStop()
|
||||
}
|
||||
if len(*opentsdbListenAddr) > 0 {
|
||||
opentsdb.Stop()
|
||||
opentsdbServer.MustStop()
|
||||
}
|
||||
if len(*opentsdbHTTPListenAddr) > 0 {
|
||||
opentsdbhttpServer.MustStop()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,6 +76,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/api/v1/import":
|
||||
vmimportRequests.Inc()
|
||||
if err := vmimport.InsertHandler(r); err != nil {
|
||||
vmimportErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/write", "/api/v2/write":
|
||||
influxWriteRequests.Inc()
|
||||
if err := influx.InsertHandler(r); err != nil {
|
||||
@@ -63,7 +95,8 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/query":
|
||||
// Emulate fake response for influx query
|
||||
// Emulate fake response for influx query.
|
||||
// This is required for TSBS benchmark.
|
||||
influxQueryRequests.Inc()
|
||||
fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`)
|
||||
return true
|
||||
@@ -77,6 +110,9 @@ var (
|
||||
prometheusWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/write", protocol="prometheus"}`)
|
||||
prometheusWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/write", protocol="prometheus"}`)
|
||||
|
||||
vmimportRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/import", protocol="vm"}`)
|
||||
vmimportErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/import", protocol="vm"}`)
|
||||
|
||||
influxWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/write", protocol="influx"}`)
|
||||
influxWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/write", protocol="influx"}`)
|
||||
|
||||
|
||||
159
app/vminsert/opentsdb/listener_switch.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
// listenerSwitch listens for incoming connections and multiplexes them to OpenTSDB http or telnet listeners
|
||||
// depending on the first byte in the accepted connection.
|
||||
//
|
||||
// It is expected that both listeners - http and telnet - consume incoming connections as soon as possible.
|
||||
type listenerSwitch struct {
|
||||
ln net.Listener
|
||||
wg sync.WaitGroup
|
||||
|
||||
telnetConnsCh chan net.Conn
|
||||
httpConnsCh chan net.Conn
|
||||
|
||||
closeLock sync.Mutex
|
||||
closed bool
|
||||
acceptErr error
|
||||
closeErr error
|
||||
}
|
||||
|
||||
func newListenerSwitch(ln net.Listener) *listenerSwitch {
|
||||
ls := &listenerSwitch{
|
||||
ln: ln,
|
||||
}
|
||||
ls.telnetConnsCh = make(chan net.Conn)
|
||||
ls.httpConnsCh = make(chan net.Conn)
|
||||
ls.wg.Add(1)
|
||||
go func() {
|
||||
ls.worker()
|
||||
close(ls.telnetConnsCh)
|
||||
close(ls.httpConnsCh)
|
||||
ls.wg.Done()
|
||||
}()
|
||||
return ls
|
||||
}
|
||||
|
||||
func (ls *listenerSwitch) stop() error {
|
||||
var err error
|
||||
ls.closeLock.Lock()
|
||||
if !ls.closed {
|
||||
err = ls.ln.Close()
|
||||
ls.closeErr = err
|
||||
ls.closed = true
|
||||
}
|
||||
ls.closeLock.Unlock()
|
||||
|
||||
if err == nil {
|
||||
// Wait until worker detects the closed ls.ln and exits.
|
||||
ls.wg.Wait()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ls *listenerSwitch) worker() {
|
||||
var buf [1]byte
|
||||
for {
|
||||
c, err := ls.ln.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
logger.Infof("listenerSwitch: temporary error at %q: %s; sleeping for a second...", ls.ln.Addr(), err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
ls.closeLock.Lock()
|
||||
ls.acceptErr = err
|
||||
ls.closeLock.Unlock()
|
||||
return
|
||||
}
|
||||
if _, err := io.ReadFull(c, buf[:]); err != nil {
|
||||
logger.Errorf("listenerSwitch: cannot read one byte from the underlying connection for %q: %s", ls.ln.Addr(), err)
|
||||
_ = c.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// It is expected that both listeners - http and telnet - consume incoming connections as soon as possible,
|
||||
// so the below code shouldn't block for extended periods of time.
|
||||
pc := &peekedConn{
|
||||
Conn: c,
|
||||
firstChar: buf[0],
|
||||
}
|
||||
if buf[0] == 'p' {
|
||||
// Assume the request starts with `put`.
|
||||
ls.telnetConnsCh <- pc
|
||||
} else {
|
||||
// Assume the request starts with `POST`.
|
||||
ls.httpConnsCh <- pc
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type peekedConn struct {
|
||||
net.Conn
|
||||
firstChar byte
|
||||
firstCharRead bool
|
||||
}
|
||||
|
||||
func (pc *peekedConn) Read(p []byte) (int, error) {
|
||||
// It is assumed that pc is not read from concurrent goroutines.
|
||||
if pc.firstCharRead {
|
||||
// Fast path - first char already read.
|
||||
return pc.Conn.Read(p)
|
||||
}
|
||||
|
||||
// Slow path - read the first char.
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
p[0] = pc.firstChar
|
||||
pc.firstCharRead = true
|
||||
n, err := pc.Conn.Read(p[1:])
|
||||
return n + 1, err
|
||||
}
|
||||
|
||||
func (ls *listenerSwitch) newTelnetListener() *chanListener {
|
||||
return &chanListener{
|
||||
ls: ls,
|
||||
ch: ls.telnetConnsCh,
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *listenerSwitch) newHTTPListener() *chanListener {
|
||||
return &chanListener{
|
||||
ls: ls,
|
||||
ch: ls.httpConnsCh,
|
||||
}
|
||||
}
|
||||
|
||||
type chanListener struct {
|
||||
ls *listenerSwitch
|
||||
ch chan net.Conn
|
||||
}
|
||||
|
||||
func (cl *chanListener) Accept() (net.Conn, error) {
|
||||
c, ok := <-cl.ch
|
||||
if ok {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
cl.ls.closeLock.Lock()
|
||||
err := cl.ls.acceptErr
|
||||
cl.ls.closeLock.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (cl *chanListener) Close() error {
|
||||
return cl.ls.stop()
|
||||
}
|
||||
|
||||
func (cl *chanListener) Addr() net.Addr {
|
||||
return cl.ls.ln.Addr()
|
||||
}
|
||||
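A minimal sketch of how the multiplexer above could be wired up (the actual server wiring is not part of this file): one TCP listener feeds both a telnet listener and an HTTP listener, each drained by its own goroutine so the switch never blocks for long. The exampleStartListeners and exampleServeTelnet names are hypothetical.

package opentsdb

import (
	"net"
	"net/http"
)

// exampleStartListeners splits a single TCP port between telnet `put` lines
// and HTTP /api/put requests using the listenerSwitch above.
func exampleStartListeners(ln net.Listener, httpHandler http.Handler) *listenerSwitch {
	ls := newListenerSwitch(ln)

	// Connections whose first byte is 'p' (i.e. `put ...`) arrive here.
	go exampleServeTelnet(ls.newTelnetListener())

	// Everything else is assumed to be HTTP (e.g. POST /api/put).
	go func() {
		srv := &http.Server{Handler: httpHandler}
		_ = srv.Serve(ls.newHTTPListener())
	}()
	return ls
}

// exampleServeTelnet is a placeholder for the telnet put handler.
func exampleServeTelnet(ln net.Listener) {
	for {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		// ... read `put` lines from c and close it when done ...
		_ = c.Close()
	}
}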
@@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
@@ -34,13 +36,8 @@ func (rs *Rows) Reset() {
|
||||
// See http://opentsdb.net/docs/build/html/api_telnet/put.html
|
||||
//
|
||||
// s must not be changed while rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) error {
|
||||
var err error
|
||||
rs.Rows, rs.tagsPool, err = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
func (rs *Rows) Unmarshal(s string) {
|
||||
rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
|
||||
}
|
||||
|
||||
// Row is a single OpenTSDB row.
|
||||
@@ -69,6 +66,9 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between metric and timestamp in %q", s)
|
||||
}
|
||||
r.Metric = s[:n]
|
||||
if len(r.Metric) == 0 {
|
||||
return tagsPool, fmt.Errorf("metric cannot be empty")
|
||||
}
|
||||
tail := s[n+1:]
|
||||
n = strings.IndexByte(tail, ' ')
|
||||
if n < 0 {
|
||||
@@ -92,39 +92,46 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||
return tagsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag, error) {
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n == 0 {
|
||||
// Skip empty line
|
||||
s = s[1:]
|
||||
continue
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s, tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
}
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s[:n], tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
return unmarshalRow(dst, s, tagsPool)
|
||||
}
|
||||
dst, tagsPool = unmarshalRow(dst, s[:n], tagsPool)
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
func unmarshalRow(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
|
||||
if len(s) > 0 && s[len(s)-1] == '\r' {
|
||||
s = s[:len(s)-1]
|
||||
}
|
||||
if len(s) == 0 {
|
||||
// Skip empty line
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s, tagsPool)
|
||||
if err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal OpenTSDB line %q: %s", s, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="opentsdb"}`)
|
||||
|
||||
func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
@@ -140,12 +147,20 @@ func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
|
||||
if err := tag.unmarshal(s); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
if len(tag.Key) == 0 || len(tag.Value) == 0 {
|
||||
// Skip empty tag
|
||||
dst = dst[:len(dst)-1]
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := tag.unmarshal(s[:n]); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
s = s[n+1:]
|
||||
if len(tag.Key) == 0 || len(tag.Value) == 0 {
|
||||
// Skip empty tag
|
||||
dst = dst[:len(dst)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -167,9 +182,6 @@ func (t *Tag) unmarshal(s string) error {
|
||||
return fmt.Errorf("missing tag value for %q", s)
|
||||
}
|
||||
t.Key = s[:n]
|
||||
if len(t.Key) == 0 {
|
||||
return fmt.Errorf("tag key cannot be empty for %q", s)
|
||||
}
|
||||
t.Value = s[n+1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,19 +9,24 @@ func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
|
||||
}
|
||||
|
||||
// Try again
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
|
||||
}
|
||||
}
|
||||
|
||||
// Missing put prefix
|
||||
f("xx")
|
||||
|
||||
// Missing metric
|
||||
f("put 111 34")
|
||||
|
||||
// Missing timestamp
|
||||
f("put aaa")
|
||||
|
||||
@@ -42,26 +47,19 @@ func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
|
||||
// Invalid tag
|
||||
f("put aaa 123 4.5 foo")
|
||||
f("put aaa 123 4.5 =")
|
||||
f("put aaa 123 4.5 =foo")
|
||||
f("put aaa 123 4.5 =foo a=b")
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
@@ -74,7 +72,9 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\r", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
f("\n\r\n", &Rows{})
|
||||
|
||||
// Single line
|
||||
f("put foobar 789 -123.456 a=b", &Rows{
|
||||
@@ -88,17 +88,13 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Empty tag value
|
||||
f("put foobar 789 -123.456 a= b=c", &Rows{
|
||||
// Empty tag
|
||||
f("put foobar 789 -123.456 a= b=c =d", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "",
|
||||
},
|
||||
{
|
||||
Key: "b",
|
||||
Value: "c",
|
||||
@@ -200,4 +196,27 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
// Multi lines with invalid line
|
||||
f("put foo 2 0.3 a=b\naaa bbb\nput bar.baz 43 0.34 a=b\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Metric: "foo",
|
||||
Value: 0.3,
|
||||
Timestamp: 2,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Metric: "bar.baz",
|
||||
Value: 0.34,
|
||||
Timestamp: 43,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -6,18 +6,19 @@ import (
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `cpu.usage_user 1234556768 1.23 a=b
|
||||
cpu.usage_system 1234556768 23.344 a=b
|
||||
cpu.usage_iowait 1234556769 3.3443 a=b
|
||||
cpu.usage_irq 1234556768 0.34432 a=b
|
||||
s := `put cpu.usage_user 1234556768 1.23 a=b
|
||||
put cpu.usage_system 1234556768 23.344 a=b
|
||||
put cpu.usage_iowait 1234556769 3.3443 a=b
|
||||
put cpu.usage_irq 1234556768 0.34432 a=b
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal %q: %s", s, err))
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 4 {
|
||||
panic(fmt.Errorf("unexpected number of parsed rows; got %d; want 4", len(rows.Rows)))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -15,7 +14,10 @@ import (
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb"}`)
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb"}`)
|
||||
rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="opentsdb"}`)
|
||||
)
|
||||
|
||||
// insertHandler processes remote write for OpenTSDB put protocol.
|
||||
//
|
||||
@@ -52,58 +54,52 @@ func (ctx *pushCtx) InsertRows() error {
|
||||
ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(rows))
|
||||
rowsPerInsert.Update(float64(len(rows)))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
const maxReadPacketSize = 4 * 1024 * 1024
|
||||
|
||||
const flushTimeout = 3 * time.Second
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader) bool {
|
||||
opentsdbReadCalls.Inc()
|
||||
readCalls.Inc()
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
if c, ok := r.(net.Conn); ok {
|
||||
if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil {
|
||||
opentsdbReadErrors.Inc()
|
||||
readErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot set read deadline: %s", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
lr := io.LimitReader(r, maxReadPacketSize)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf.B = append(ctx.reqBuf.B[:0], ctx.tailBuf...)
|
||||
n, err := io.CopyBuffer(&ctx.reqBuf, lr, ctx.copyBuf[:])
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf)
|
||||
if ctx.err != nil {
|
||||
if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() {
|
||||
// Flush the read data on timeout and try reading again.
|
||||
ctx.err = nil
|
||||
} else {
|
||||
opentsdbReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", err)
|
||||
if ctx.err != io.EOF {
|
||||
readErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", ctx.err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
} else if n < maxReadPacketSize {
|
||||
// Mark the end of stream.
|
||||
ctx.err = io.EOF
|
||||
}
|
||||
ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))
|
||||
|
||||
// Parse all the rows until the last newline in ctx.reqBuf.B
|
||||
nn := bytes.LastIndexByte(ctx.reqBuf.B, '\n')
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
if nn >= 0 {
|
||||
ctx.tailBuf = append(ctx.tailBuf[:0], ctx.reqBuf.B[nn+1:]...)
|
||||
ctx.reqBuf.B = ctx.reqBuf.B[:nn]
|
||||
}
|
||||
if err = ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf.B)); err != nil {
|
||||
opentsdbUnmarshalErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot unmarshal OpenTSDB put protocol data with size %d: %s", len(ctx.reqBuf.B), err)
|
||||
return false
|
||||
// Fill in missing timestamps
|
||||
currentTimestamp := time.Now().Unix()
|
||||
rows := ctx.Rows.Rows
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp == 0 {
|
||||
r.Timestamp = currentTimestamp
|
||||
}
|
||||
}
|
||||
|
||||
// Convert timestamps from seconds to milliseconds
|
||||
for i := range ctx.Rows.Rows {
|
||||
ctx.Rows.Rows[i].Timestamp *= 1e3
|
||||
for i := range rows {
|
||||
rows[i].Timestamp *= 1e3
|
||||
}
|
||||
return true
|
||||
}
|
||||
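The hunk above replaces the hand-rolled buffering in `Read` with `common.ReadLinesBlock`, but the idea in the removed lines is worth keeping in mind: only the bytes up to the last `'\n'` are parsed, and the trailing partial line is carried over as a tail buffer for the next read. A minimal standalone sketch of that split (illustrative only, not the `common.ReadLinesBlock` implementation):

```go
package main

import (
	"bytes"
	"fmt"
)

// splitLines returns the bytes that form complete lines (everything up to the
// last '\n') and the trailing partial line, which the caller keeps as a tail
// buffer and prepends to the next read.
func splitLines(buf []byte) (complete, tail []byte) {
	n := bytes.LastIndexByte(buf, '\n')
	if n < 0 {
		// No complete line yet; keep everything as the tail.
		return nil, buf
	}
	return buf[:n], buf[n+1:]
}

func main() {
	complete, tail := splitLines([]byte("put a 1 2\nput b 3 4\nput c 5"))
	fmt.Printf("complete=%q tail=%q\n", complete, tail)
}
```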
@@ -112,9 +108,8 @@ type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
reqBuf []byte
|
||||
tailBuf []byte
|
||||
copyBuf [16 * 1024]byte
|
||||
|
||||
err error
|
||||
}
|
||||
@@ -129,16 +124,15 @@ func (ctx *pushCtx) Error() error {
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf = ctx.reqBuf[:0]
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
var (
|
||||
opentsdbReadCalls = metrics.NewCounter(`vm_read_calls_total{name="opentsdb"}`)
|
||||
opentsdbReadErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb"}`)
|
||||
opentsdbUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="opentsdb"}`)
|
||||
readCalls = metrics.NewCounter(`vm_read_calls_total{name="opentsdb"}`)
|
||||
readErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb"}`)
|
||||
)
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
|
||||
@@ -7,8 +7,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
@@ -20,44 +22,91 @@ var (
|
||||
writeErrorsUDP = metrics.NewCounter(`vm_opentsdb_request_errors_total{name="write", net="udp"}`)
|
||||
)
|
||||
|
||||
// Serve starts OpenTSDB collector on the given addr.
|
||||
func Serve(addr string) {
|
||||
// Server is a server for collecting OpenTSDB TCP and UDP metrics.
|
||||
//
|
||||
// It accepts simultaneously Telnet put requests and HTTP put requests over TCP.
|
||||
type Server struct {
|
||||
addr string
|
||||
ls *listenerSwitch
|
||||
httpServer *opentsdbhttp.Server
|
||||
lnUDP net.PacketConn
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// MustStart starts OpenTSDB collector on the given addr.
|
||||
//
|
||||
// MustStop must be called on the returned server when it is no longer needed.
|
||||
func MustStart(addr string, maxRequestSize int64) *Server {
|
||||
logger.Infof("starting TCP OpenTSDB collector at %q", addr)
|
||||
lnTCP, err := net.Listen("tcp4", addr)
|
||||
lnTCP, err := netutil.NewTCPListener("opentsdb", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start TCP OpenTSDB collector at %q: %s", addr, err)
|
||||
}
|
||||
listenerTCP = lnTCP
|
||||
ls := newListenerSwitch(lnTCP)
|
||||
lnHTTP := ls.newHTTPListener()
|
||||
lnTelnet := ls.newTelnetListener()
|
||||
httpServer := opentsdbhttp.MustServe(lnHTTP, maxRequestSize)
|
||||
|
||||
logger.Infof("starting UDP OpenTSDB collector at %q", addr)
|
||||
lnUDP, err := net.ListenPacket("udp4", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start UDP OpenTSDB collector at %q: %s", addr, err)
|
||||
}
|
||||
listenerUDP = lnUDP
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
s := &Server{
|
||||
addr: addr,
|
||||
ls: ls,
|
||||
httpServer: httpServer,
|
||||
lnUDP: lnUDP,
|
||||
}
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveTCP(listenerTCP)
|
||||
logger.Infof("stopped TCP OpenTSDB collector at %q", addr)
|
||||
defer s.wg.Done()
|
||||
serveTelnet(lnTelnet)
|
||||
logger.Infof("stopped TCP telnet OpenTSDB server at %q", addr)
|
||||
}()
|
||||
wg.Add(1)
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveUDP(listenerUDP)
|
||||
logger.Infof("stopped UDP OpenTSDB collector at %q", addr)
|
||||
defer s.wg.Done()
|
||||
httpServer.Wait()
|
||||
// Do not log when httpServer is stopped, since this is logged by the server itself.
|
||||
}()
|
||||
wg.Wait()
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
serveUDP(lnUDP)
|
||||
logger.Infof("stopped UDP OpenTSDB server at %q", addr)
|
||||
}()
|
||||
return s
|
||||
}
|
||||
|
||||
func serveTCP(ln net.Listener) {
|
||||
// MustStop stops the server.
|
||||
func (s *Server) MustStop() {
|
||||
// Stop HTTP server. Do not emit log message, since it is emitted by the httpServer.
|
||||
s.httpServer.MustStop()
|
||||
|
||||
logger.Infof("stopping TCP telnet OpenTSDB server at %q...", s.addr)
|
||||
if err := s.ls.stop(); err != nil {
|
||||
logger.Errorf("cannot stop TCP telnet OpenTSDB server: %s", err)
|
||||
}
|
||||
|
||||
logger.Infof("stopping UDP OpenTSDB server at %q...", s.addr)
|
||||
if err := s.lnUDP.Close(); err != nil {
|
||||
logger.Errorf("cannot stop UDP OpenTSDB server: %s", err)
|
||||
}
|
||||
|
||||
// Wait until all the servers are stopped.
|
||||
s.wg.Wait()
|
||||
logger.Infof("TCP and UDP OpenTSDB servers at %q have been stopped", s.addr)
|
||||
}
|
||||
|
||||
func serveTelnet(ln net.Listener) {
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("opentsdb: temporary error when listening for TCP addr %q: %s", ln.Addr(), err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
@@ -96,6 +145,7 @@ func serveUDP(ln net.PacketConn) {
|
||||
writeErrorsUDP.Inc()
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
logger.Errorf("opentsdb: temporary error when listening for UDP addr %q: %s", ln.LocalAddr(), err)
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
@@ -118,20 +168,3 @@ func serveUDP(ln net.PacketConn) {
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var (
|
||||
listenerTCP net.Listener
|
||||
listenerUDP net.PacketConn
|
||||
)
|
||||
|
||||
// Stop stops the server.
|
||||
func Stop() {
|
||||
logger.Infof("stopping TCP OpenTSDB server at %q...", listenerTCP.Addr())
|
||||
if err := listenerTCP.Close(); err != nil {
|
||||
logger.Errorf("cannot close TCP OpenTSDB server: %s", err)
|
||||
}
|
||||
logger.Infof("stopping UDP OpenTSDB server at %q...", listenerUDP.LocalAddr())
|
||||
if err := listenerUDP.Close(); err != nil {
|
||||
logger.Errorf("cannot close UDP OpenTSDB server: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
app/vminsert/opentsdbhttp/parser.go (new file, 198 lines)
@@ -0,0 +1,198 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed OpenTSDB rows.
|
||||
type Rows struct {
|
||||
Rows []Row
|
||||
|
||||
tagsPool []Tag
|
||||
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
// Release references to objects, so they can be GC'ed.
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
for i := range rs.tagsPool {
|
||||
rs.tagsPool[i].reset()
|
||||
}
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals OpenTSDB rows from av.
|
||||
//
|
||||
// See http://opentsdb.net/docs/build/html/api_http/put.html
|
||||
//
|
||||
// av must be unchanged until rs is in use.
|
||||
func (rs *Rows) Unmarshal(av *fastjson.Value) {
|
||||
rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], av, rs.tagsPool[:0])
|
||||
}
|
||||
|
||||
// Row is a single OpenTSDB row.
|
||||
type Row struct {
|
||||
Metric string
|
||||
Tags []Tag
|
||||
Value float64
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
func (r *Row) reset() {
|
||||
r.Metric = ""
|
||||
r.Tags = nil
|
||||
r.Value = 0
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
func (r *Row) unmarshal(o *fastjson.Value, tagsPool []Tag) ([]Tag, error) {
|
||||
r.reset()
|
||||
m := o.GetStringBytes("metric")
|
||||
if len(m) == 0 {
|
||||
return tagsPool, fmt.Errorf("missing `metric` in %s", o)
|
||||
}
|
||||
r.Metric = bytesutil.ToUnsafeString(m)
|
||||
|
||||
rawTs := o.Get("timestamp")
|
||||
if rawTs != nil {
|
||||
ts, err := getFloat64(rawTs)
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("invalid `timestamp` in %s: %s", o, err)
|
||||
}
|
||||
r.Timestamp = int64(ts)
|
||||
} else {
|
||||
// Allow missing timestamp. It is automatically populated
|
||||
// with the current time in this case.
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
rawV := o.Get("value")
|
||||
if rawV == nil {
|
||||
return tagsPool, fmt.Errorf("missing `value` in %s", o)
|
||||
}
|
||||
v, err := getFloat64(rawV)
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("invalid `value` in %s: %s", o, err)
|
||||
}
|
||||
r.Value = v
|
||||
|
||||
vt := o.Get("tags")
|
||||
if vt == nil {
|
||||
// Allow empty tags.
|
||||
return tagsPool, nil
|
||||
}
|
||||
rawTags, err := vt.Object()
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("invalid `tags` in %s: %s", o, err)
|
||||
}
|
||||
|
||||
tagsStart := len(tagsPool)
|
||||
tagsPool, err = unmarshalTags(tagsPool, rawTags)
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("cannot parse tags %s: %s", rawTags, err)
|
||||
}
|
||||
tags := tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
return tagsPool, nil
|
||||
}
|
||||
|
||||
func getFloat64(v *fastjson.Value) (float64, error) {
|
||||
switch v.Type() {
|
||||
case fastjson.TypeNumber:
|
||||
return v.Float64()
|
||||
case fastjson.TypeString:
|
||||
vStr, _ := v.StringBytes()
|
||||
vFloat := fastfloat.ParseBestEffort(bytesutil.ToUnsafeString(vStr))
|
||||
if vFloat == 0 && string(vStr) != "0" && string(vStr) != "0.0" {
|
||||
return 0, fmt.Errorf("invalid float64 value: %q", vStr)
|
||||
}
|
||||
return vFloat, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("value doesn't contain float64; it contains %s", v.Type())
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, av *fastjson.Value, tagsPool []Tag) ([]Row, []Tag) {
|
||||
switch av.Type() {
|
||||
case fastjson.TypeObject:
|
||||
return unmarshalRow(dst, av, tagsPool)
|
||||
case fastjson.TypeArray:
|
||||
a, _ := av.Array()
|
||||
for _, o := range a {
|
||||
dst, tagsPool = unmarshalRow(dst, o, tagsPool)
|
||||
}
|
||||
return dst, tagsPool
|
||||
default:
|
||||
logger.Errorf("OpenTSDB JSON must be either object or array; got %s; body=%s", av.Type(), av)
|
||||
invalidLines.Inc()
|
||||
return dst, tagsPool
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalRow(dst []Row, o *fastjson.Value, tagsPool []Tag) ([]Row, []Tag) {
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(o, tagsPool)
|
||||
if err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal OpenTSDB object %s: %s", o, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="opentsdb-http"}`)
|
||||
|
||||
func unmarshalTags(dst []Tag, o *fastjson.Object) ([]Tag, error) {
|
||||
var err error
|
||||
o.Visit(func(k []byte, v *fastjson.Value) {
|
||||
if v.Type() != fastjson.TypeString {
|
||||
err = fmt.Errorf("tag value must be string; got %s; value=%s", v.Type(), v)
|
||||
return
|
||||
}
|
||||
if len(k) == 0 {
|
||||
// Skip empty tags
|
||||
return
|
||||
}
|
||||
vStr, _ := v.StringBytes()
|
||||
if len(vStr) == 0 {
|
||||
// Skip empty tags
|
||||
return
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Tag{})
|
||||
}
|
||||
tag := &dst[len(dst)-1]
|
||||
tag.Key = bytesutil.ToUnsafeString(k)
|
||||
tag.Value = bytesutil.ToUnsafeString(vStr)
|
||||
})
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// Tag is an OpenTSDB tag.
|
||||
type Tag struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (t *Tag) reset() {
|
||||
t.Key = ""
|
||||
t.Value = ""
|
||||
}
|
||||
app/vminsert/opentsdbhttp/parser_test.go (new file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.Parse(s)
|
||||
if err != nil {
|
||||
// Expected JSON parser error
|
||||
return
|
||||
}
|
||||
// Verify OpenTSDB body parsing error
|
||||
rows.Unmarshal(v)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
|
||||
}
|
||||
// Try again
|
||||
rows.Unmarshal(v)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
|
||||
}
|
||||
}
|
||||
|
||||
// invalid json
|
||||
f("{g")
|
||||
|
||||
// Invalid json type
|
||||
f(`1`)
|
||||
f(`"foo"`)
|
||||
f(`[1,2]`)
|
||||
f(`null`)
|
||||
|
||||
// Incomplete object
|
||||
f(`{}`)
|
||||
f(`{"metric": "aaa"}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122}`)
|
||||
f(`{"metric": "aaa", "timestamp": "tststs"}`)
|
||||
f(`{"timestamp": 1122, "value": 33}`)
|
||||
f(`{"value": 33}`)
|
||||
f(`{"value": 33, "tags": {"fooo":"bar"}}`)
|
||||
|
||||
// Invalid value
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": "0.0.0"}`)
|
||||
|
||||
// Invalid metric type
|
||||
f(`{"metric": "", "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
f(`{"metric": ["aaa"], "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
f(`{"metric": {"aaa":1}, "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
f(`{"metric": 1, "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
|
||||
// Invalid timestamp type
|
||||
f(`{"metric": "aaa", "timestamp": "foobar", "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
f(`{"metric": "aaa", "timestamp": [1,2], "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
f(`{"metric": "aaa", "timestamp": {"a":1}, "value": 0.45, "tags": {"foo": "bar"}}`)
|
||||
|
||||
// Invalid value type
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": [0,1], "tags": {"foo":"bar"}}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": {"a":1}, "tags": {"foo":"bar"}}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": "foobar", "tags": {"foo":"bar"}}`)
|
||||
|
||||
// Invalid tags type
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": 1}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": [1,2]}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": "foo"}`)
|
||||
|
||||
// Invalid tag value type
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": {"foo": ["bar"]}}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": {"foo": {"bar":"baz"}}}`)
|
||||
f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": {"foo": 1}}`)
|
||||
|
||||
// Invalid multiline
|
||||
f(`[{"metric": "aaa", "timestamp": 1122, "value": "trt", "tags":{"foo":"bar"}}, {"metric": "aaa", "timestamp": [1122], "value": 111}]`)
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.Parse(s)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot parse json %s: %s", s, err)
|
||||
}
|
||||
rows.Unmarshal(v)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
rows.Unmarshal(v)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
rows.Reset()
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
|
||||
}
|
||||
}
|
||||
|
||||
// Normal line
|
||||
f(`{"metric": "foobar", "timestamp": 789, "value": -123.456, "tags": {"a":"b"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Timestamp as string
|
||||
f(`{"metric": "foobar", "timestamp": "1789", "value": -123.456, "tags": {"a":"b"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 1789,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Timestamp as float64 (it is truncated to integer)
|
||||
f(`{"metric": "foobar", "timestamp": 17.89, "value": -123.456, "tags": {"a":"b"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 17,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Empty tags
|
||||
f(`{"metric": "foobar", "timestamp": 789, "value": -123.456, "tags": {}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: nil,
|
||||
}},
|
||||
})
|
||||
// Missing tags
|
||||
f(`{"metric": "foobar", "timestamp": 789, "value": -123.456}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: nil,
|
||||
}},
|
||||
})
|
||||
// Empty tag value
|
||||
f(`{"metric": "foobar", "timestamp": 123, "value": -123.456, "tags": {"a":"", "b":"c", "": "d"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 123,
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "b",
|
||||
Value: "c",
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
// Value as string
|
||||
f(`{"metric": "foobar", "timestamp": 789, "value": "-12.456", "tags": {"a":"b"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -12.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Missing timestamp
|
||||
f(`{"metric": "foobar", "value": "-12.456", "tags": {"a":"b"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -12.456,
|
||||
Timestamp: 0,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Multiple tags
|
||||
f(`{"metric": "foo", "value": 1, "timestamp": 2, "tags": {"bar":"baz", "x": "y"}}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "bar",
|
||||
Value: "baz",
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: "y",
|
||||
},
|
||||
},
|
||||
Value: 1,
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
|
||||
// Multi lines
|
||||
f(`[{"metric": "foo", "value": "0.3", "timestamp": 2, "tags": {"a":"b"}},
|
||||
{"metric": "bar.baz", "value": 0.34, "timestamp": 43, "tags": {"a":"b"}}]`, &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Metric: "foo",
|
||||
Value: 0.3,
|
||||
Timestamp: 2,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Metric: "bar.baz",
|
||||
Value: 0.34,
|
||||
Timestamp: 43,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
app/vminsert/opentsdbhttp/parser_timing_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `[{"metric": "cpu.usage_user", "timestamp": 1234556768, "value": 1.23, "tags": {"a":"b", "x": "y"}},
|
||||
{"metric": "cpu.usage_system", "timestamp": 1234556768, "value": 23.344, "tags": {"a":"b"}},
|
||||
{"metric": "cpu.usage_iowait", "timestamp": 1234556769, "value":3.3443, "tags": {"a":"b"}},
|
||||
{"metric": "cpu.usage_irq", "timestamp": 1234556768, "value": 0.34432, "tags": {"a":"b"}}
|
||||
]
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
var p fastjson.Parser
|
||||
for pb.Next() {
|
||||
v, err := p.Parse(s)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot parse %q: %s", s, err))
|
||||
}
|
||||
rows.Unmarshal(v)
|
||||
if len(rows.Rows) != 4 {
|
||||
panic(fmt.Errorf("unexpected number of rows unmarshaled; got %d; want 4", len(rows.Rows)))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
app/vminsert/opentsdbhttp/request_handler.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb-http"}`)
|
||||
rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="opentsdb-http"}`)
|
||||
|
||||
readCalls = metrics.NewCounter(`vm_read_calls_total{name="opentsdb-http"}`)
|
||||
readErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb-http"}`)
|
||||
unmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="opentsdb-http"}`)
|
||||
)
|
||||
|
||||
// insertHandler processes HTTP OpenTSDB put requests.
|
||||
// See http://opentsdb.net/docs/build/html/api_http/put.html
|
||||
func insertHandler(req *http.Request, maxSize int64) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(req, maxSize)
|
||||
})
|
||||
}
|
||||
|
||||
func insertHandlerInternal(req *http.Request, maxSize int64) error {
|
||||
readCalls.Inc()
|
||||
|
||||
r := req.Body
|
||||
if req.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := common.GetGzipReader(r)
|
||||
if err != nil {
|
||||
readErrors.Inc()
|
||||
return fmt.Errorf("cannot read gzipped http protocol data: %s", err)
|
||||
}
|
||||
defer common.PutGzipReader(zr)
|
||||
r = zr
|
||||
}
|
||||
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
|
||||
// Read the request in ctx.reqBuf
|
||||
lr := io.LimitReader(r, maxSize+1)
|
||||
reqLen, err := ctx.reqBuf.ReadFrom(lr)
|
||||
if err != nil {
|
||||
readErrors.Inc()
|
||||
return fmt.Errorf("cannot read HTTP OpenTSDB request: %s", err)
|
||||
}
|
||||
if reqLen > maxSize {
|
||||
readErrors.Inc()
|
||||
return fmt.Errorf("too big HTTP OpenTSDB request; mustn't exceed %d bytes", maxSize)
|
||||
}
|
||||
|
||||
// Unmarshal the request to ctx.Rows
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(ctx.reqBuf.B)
|
||||
if err != nil {
|
||||
unmarshalErrors.Inc()
|
||||
return fmt.Errorf("cannot parse HTTP OpenTSDB json: %s", err)
|
||||
}
|
||||
ctx.Rows.Unmarshal(v)
|
||||
|
||||
// Fill in missing timestamps
|
||||
currentTimestamp := time.Now().Unix()
|
||||
rows := ctx.Rows.Rows
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp == 0 {
|
||||
r.Timestamp = currentTimestamp
|
||||
}
|
||||
}
|
||||
|
||||
// Convert timestamps in seconds to milliseconds if needed.
|
||||
// See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp&secondMask == 0 {
|
||||
r.Timestamp *= 1e3
|
||||
}
|
||||
}
|
||||
|
||||
// Insert ctx.Rows to db.
|
||||
ic := &ctx.Common
|
||||
ic.Reset(len(rows))
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("", r.Metric)
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
ic.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(rows))
|
||||
rowsPerInsert.Update(float64(len(rows)))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
const secondMask int64 = 0x7FFFFFFF00000000
|
||||
|
||||
var parserPool fastjson.ParserPool
|
||||
|
||||
type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
ctx.reqBuf.Reset()
|
||||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
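The request handler above converts second-precision timestamps to milliseconds by testing them against OpenTSDB's `SECOND_MASK` constant: a Unix timestamp in seconds sets none of the high bits covered by `0x7FFFFFFF00000000`, while a millisecond timestamp does. A small self-contained illustration of the same check (a sketch, not the handler itself):

```go
package main

import "fmt"

// secondMask matches OpenTSDB's Const.SECOND_MASK: second-precision Unix
// timestamps never set these high bits, millisecond timestamps do.
const secondMask int64 = 0x7FFFFFFF00000000

func toMillis(ts int64) int64 {
	if ts&secondMask == 0 {
		// Timestamp is in seconds; convert to milliseconds.
		return ts * 1e3
	}
	return ts
}

func main() {
	fmt.Println(toMillis(1549891472))    // seconds -> 1549891472000
	fmt.Println(toMillis(1549891472010)) // already milliseconds -> unchanged
}
```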
app/vminsert/opentsdbhttp/server.go (new file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
writeRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/put", protocol="opentsdb-http"}`)
|
||||
writeErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/put", protocol="opentsdb-http"}`)
|
||||
)
|
||||
|
||||
// Server represents HTTP OpenTSDB server.
|
||||
type Server struct {
|
||||
s *http.Server
|
||||
ln net.Listener
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// MustStart starts HTTP OpenTSDB server on the given addr.
|
||||
//
|
||||
// MustStop must be called on the returned server when it is no longer needed.
|
||||
func MustStart(addr string, maxRequestSize int64) *Server {
|
||||
logger.Infof("starting HTTP OpenTSDB server at %q", addr)
|
||||
lnTCP, err := netutil.NewTCPListener("opentsdbhttp", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start HTTP OpenTSDB collector at %q: %s", addr, err)
|
||||
}
|
||||
return MustServe(lnTCP, maxRequestSize)
|
||||
}
|
||||
|
||||
// MustServe serves OpenTSDB HTTP put requests from ln with up to maxRequestSize size.
|
||||
//
|
||||
// MustStop must be called on the returned server when it is no longer needed.
|
||||
func MustServe(ln net.Listener, maxRequestSize int64) *Server {
|
||||
h := newRequestHandler(maxRequestSize)
|
||||
hs := &http.Server{
|
||||
Handler: h,
|
||||
ReadTimeout: 30 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
s := &Server{
|
||||
s: hs,
|
||||
ln: ln,
|
||||
}
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
err := s.s.Serve(s.ln)
|
||||
if err == http.ErrServerClosed {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
logger.Fatalf("error serving HTTP OpenTSDB at %q: %s", s.ln.Addr(), err)
|
||||
}
|
||||
}()
|
||||
return s
|
||||
}
|
||||
|
||||
// Wait waits until the server is stopped with MustStop.
|
||||
func (s *Server) Wait() {
|
||||
s.wg.Wait()
|
||||
}
|
||||
|
||||
// MustStop stops HTTP OpenTSDB server.
|
||||
func (s *Server) MustStop() {
|
||||
logger.Infof("stopping HTTP OpenTSDB server at %q...", s.ln.Addr())
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
if err := s.s.Shutdown(ctx); err != nil {
|
||||
logger.Fatalf("cannot close HTTP OpenTSDB server at %q: %s", s.ln.Addr(), err)
|
||||
}
|
||||
s.wg.Wait()
|
||||
logger.Infof("OpenTSDB HTTP server at %q has been stopped", s.ln.Addr())
|
||||
}
|
||||
|
||||
func newRequestHandler(maxRequestSize int64) http.Handler {
|
||||
rh := func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/put":
|
||||
writeRequests.Inc()
|
||||
if err := insertHandler(r, maxRequestSize); err != nil {
|
||||
writeErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
default:
|
||||
httpserver.Errorf(w, "unexpected path requested on HTTP OpenTSDB server: %q", r.URL.Path)
|
||||
}
|
||||
}
|
||||
return http.HandlerFunc(rh)
|
||||
}
|
||||
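The server above routes `POST /api/put` to `insertHandler` and answers `204 No Content` on success. As a rough usage illustration, a single data point in the JSON format accepted by the parser could be sent like this (the host and port are hypothetical and depend on how the listener is configured):

```
curl -X POST -H 'Content-Type: application/json' \
  --data '{"metric":"sys.cpu.user","timestamp":1549891472,"value":42.5,"tags":{"host":"web01"}}' \
  http://localhost:4242/api/put
```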
@@ -12,7 +12,10 @@ import (
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
|
||||
rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="prometheus"}`)
|
||||
)
|
||||
|
||||
// InsertHandler processes remote write for prometheus.
|
||||
func InsertHandler(r *http.Request, maxSize int64) error {
|
||||
@@ -34,6 +37,7 @@ func insertHandlerInternal(r *http.Request, maxSize int64) error {
|
||||
}
|
||||
ic := &ctx.Common
|
||||
ic.Reset(rowsLen)
|
||||
rowsTotal := 0
|
||||
for i := range timeseries {
|
||||
ts := &timeseries[i]
|
||||
var metricNameRaw []byte
|
||||
@@ -41,8 +45,10 @@ func insertHandlerInternal(r *http.Request, maxSize int64) error {
|
||||
r := &ts.Samples[i]
|
||||
metricNameRaw = ic.WriteDataPointExt(metricNameRaw, ts.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(ts.Samples))
|
||||
rowsTotal += len(ts.Samples)
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
|
||||
app/vminsert/vmimport/parser.go (new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
package vmimport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
|
||||
// Rows contains parsed rows from `/api/v1/import` request.
|
||||
type Rows struct {
|
||||
Rows []Row
|
||||
|
||||
tu tagsUnmarshaler
|
||||
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
rs.tu.reset()
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals `/api/v1/import` rows from s.
|
||||
//
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6
|
||||
//
|
||||
// s must be unchanged until rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) {
|
||||
rs.tu.reset()
|
||||
rs.Rows = unmarshalRows(rs.Rows[:0], s, &rs.tu)
|
||||
}
|
||||
|
||||
// Row is a single row from `/api/v1/import` request.
|
||||
type Row struct {
|
||||
Tags []Tag
|
||||
Values []float64
|
||||
Timestamps []int64
|
||||
}
|
||||
|
||||
func (r *Row) reset() {
|
||||
r.Tags = nil
|
||||
r.Values = r.Values[:0]
|
||||
r.Timestamps = r.Timestamps[:0]
|
||||
}
|
||||
|
||||
func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error {
|
||||
r.reset()
|
||||
v, err := tu.p.Parse(s)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse json line: %s", err)
|
||||
}
|
||||
|
||||
// Unmarshal tags
|
||||
metric := v.GetObject("metric")
|
||||
if metric == nil {
|
||||
return fmt.Errorf("missing `metric` object")
|
||||
}
|
||||
tagsStart := len(tu.tagsPool)
|
||||
if err := tu.unmarshalTags(metric); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal `metric`: %s", err)
|
||||
}
|
||||
tags := tu.tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
if len(r.Tags) == 0 {
|
||||
return fmt.Errorf("missing tags")
|
||||
}
|
||||
|
||||
// Unmarshal values
|
||||
values := v.GetArray("values")
|
||||
if len(values) == 0 {
|
||||
return fmt.Errorf("missing `values` array")
|
||||
}
|
||||
for i, v := range values {
|
||||
f, err := v.Float64()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot unmarshal value at position %d: %s", i, err)
|
||||
}
|
||||
r.Values = append(r.Values, f)
|
||||
}
|
||||
|
||||
// Unmarshal timestamps
|
||||
timestamps := v.GetArray("timestamps")
|
||||
if len(timestamps) == 0 {
|
||||
return fmt.Errorf("missing `timestamps` array")
|
||||
}
|
||||
for i, v := range timestamps {
|
||||
ts, err := v.Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot unmarshal timestamp at position %d: %s", i, err)
|
||||
}
|
||||
r.Timestamps = append(r.Timestamps, ts)
|
||||
}
|
||||
|
||||
if len(r.Timestamps) != len(r.Values) {
|
||||
return fmt.Errorf("`timestamps` array size must match `values` array size; got %d; want %d", len(r.Timestamps), len(r.Values))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tag represents `/api/v1/import` tag.
|
||||
type Tag struct {
|
||||
Key []byte
|
||||
Value []byte
|
||||
}
|
||||
|
||||
func (tag *Tag) reset() {
|
||||
// tag.Key and tag.Value point to tu.bytesPool, so there is no need in keeping these byte slices here.
|
||||
tag.Key = nil
|
||||
tag.Value = nil
|
||||
}
|
||||
|
||||
type tagsUnmarshaler struct {
|
||||
p fastjson.Parser
|
||||
tagsPool []Tag
|
||||
bytesPool []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (tu *tagsUnmarshaler) reset() {
|
||||
for i := range tu.tagsPool {
|
||||
tu.tagsPool[i].reset()
|
||||
}
|
||||
tu.tagsPool = tu.tagsPool[:0]
|
||||
|
||||
tu.bytesPool = tu.bytesPool[:0]
|
||||
tu.err = nil
|
||||
}
|
||||
|
||||
func (tu *tagsUnmarshaler) addTag() *Tag {
|
||||
dst := tu.tagsPool
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Tag{})
|
||||
}
|
||||
tag := &dst[len(dst)-1]
|
||||
tu.tagsPool = dst
|
||||
return tag
|
||||
}
|
||||
|
||||
func (tu *tagsUnmarshaler) addBytes(b []byte) []byte {
|
||||
bytesPoolLen := len(tu.bytesPool)
|
||||
tu.bytesPool = append(tu.bytesPool, b...)
|
||||
bCopy := tu.bytesPool[bytesPoolLen:]
|
||||
return bCopy[:len(bCopy):len(bCopy)]
|
||||
}
|
||||
|
||||
func (tu *tagsUnmarshaler) unmarshalTags(o *fastjson.Object) error {
|
||||
tu.err = nil
|
||||
o.Visit(func(key []byte, v *fastjson.Value) {
|
||||
tag := tu.addTag()
|
||||
tag.Key = tu.addBytes(key)
|
||||
sb, err := v.StringBytes()
|
||||
if err != nil && tu.err == nil {
|
||||
tu.err = fmt.Errorf("cannot parse value for tag %q: %s", tag.Key, err)
|
||||
}
|
||||
tag.Value = tu.addBytes(sb)
|
||||
})
|
||||
return tu.err
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tu *tagsUnmarshaler) []Row {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
return unmarshalRow(dst, s, tu)
|
||||
}
|
||||
dst = unmarshalRow(dst, s[:n], tu)
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func unmarshalRow(dst []Row, s string, tu *tagsUnmarshaler) []Row {
|
||||
if len(s) > 0 && s[len(s)-1] == '\r' {
|
||||
s = s[:len(s)-1]
|
||||
}
|
||||
if len(s) == 0 {
|
||||
return dst
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if err := r.unmarshal(s, tu); err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal json line %q: %s; skipping it", s, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="vmimport"}`)
|
||||
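Each line handled by this parser is an independent JSON object with a `metric` object plus parallel `values` and `timestamps` arrays, for example the sample line used in the benchmark later in this diff:

```
{"metric":{"__name__":"up","job":"node_exporter","instance":"localhost:9100"},"values":[0,0,0],"timestamps":[1549891472010,1549891487724,1549891503438]}
```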
app/vminsert/vmimport/parser_test.go (new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
package vmimport
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("expecting zero rows; got %d rows", len(rows.Rows))
|
||||
}
|
||||
|
||||
// Try again
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("expecting zero rows; got %d rows", len(rows.Rows))
|
||||
}
|
||||
}
|
||||
|
||||
// Invalid json line
|
||||
f("")
|
||||
f("\n")
|
||||
f("foo\n")
|
||||
f("123")
|
||||
f("[1,3]")
|
||||
f("{}")
|
||||
f("[]")
|
||||
f(`{"foo":"bar"}`)
|
||||
|
||||
// Invalid metric
|
||||
f(`{"metric":123,"values":[1,2],"timestamps":[3,4]}`)
|
||||
f(`{"metric":[123],"values":[1,2],"timestamps":[3,4]}`)
|
||||
f(`{"metric":[],"values":[1,2],"timestamps":[3,4]}`)
|
||||
f(`{"metric":{},"values":[1,2],"timestamps":[3,4]}`)
|
||||
f(`{"metric":null,"values":[1,2],"timestamps":[3,4]}`)
|
||||
f(`{"values":[1,2],"timestamps":[3,4]}`)
|
||||
|
||||
// Invalid values
|
||||
f(`{"metric":{"foo":"bar"},"values":1,"timestamps":[3,4]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":{"x":1},"timestamps":[3,4]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":{"x":1},"timestamps":[3,4]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":null,"timestamps":[3,4]}`)
|
||||
f(`{"metric":{"foo":"bar"},"timestamps":[3,4]}`)
|
||||
|
||||
// Invalid timestamps
|
||||
f(`{"metric":{"foo":"bar"},"values":[1,2],"timestamps":3}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[1,2],"timestamps":false}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[1,2],"timestamps":{}}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[1,2]}`)
|
||||
|
||||
// values and timestamps count mismatch
|
||||
f(`{"metric":{"foo":"bar"},"values":[],"timestamps":[]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[],"timestamps":[1]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[2],"timestamps":[]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[2],"timestamps":[3,4]}`)
|
||||
f(`{"metric":{"foo":"bar"},"values":[2,3],"timestamps":[4]}`)
|
||||
|
||||
// Garbage after the line
|
||||
f(`{"metric":{"foo":"bar"},"values":[2],"timestamps":[4]}{}`)
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
rows.Reset()
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
f("\n\r\n", &Rows{})
|
||||
|
||||
// Single line with a single tag
|
||||
f(`{"metric":{"foo":"bar"},"values":[1.23],"timestamps":[456]}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Tags: []Tag{{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
}},
|
||||
Values: []float64{1.23},
|
||||
Timestamps: []int64{456},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with multiple tags
|
||||
f(`{"metric":{"foo":"bar","baz":"xx"},"values":[1.23, -3.21],"timestamps" : [456,789]}`, &Rows{
|
||||
Rows: []Row{{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
},
|
||||
{
|
||||
Key: []byte("baz"),
|
||||
Value: []byte("xx"),
|
||||
},
|
||||
},
|
||||
Values: []float64{1.23, -3.21},
|
||||
Timestamps: []int64{456, 789},
|
||||
}},
|
||||
})
|
||||
|
||||
// Multiple lines
|
||||
f(`{"metric":{"foo":"bar","baz":"xx"},"values":[1.23, -3.21],"timestamps" : [456,789]}
|
||||
{"metric":{"__name__":"xx"},"values":[34],"timestamps" : [11]}
|
||||
`, &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
},
|
||||
{
|
||||
Key: []byte("baz"),
|
||||
Value: []byte("xx"),
|
||||
},
|
||||
},
|
||||
Values: []float64{1.23, -3.21},
|
||||
Timestamps: []int64{456, 789},
|
||||
},
|
||||
{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("__name__"),
|
||||
Value: []byte("xx"),
|
||||
},
|
||||
},
|
||||
Values: []float64{34},
|
||||
Timestamps: []int64{11},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Multiple lines with invalid line in the middle.
|
||||
f(`{"metric":{"xfoo":"bar","baz":"xx"},"values":[1.232, -3.21],"timestamps" : [456,7890]}
|
||||
garbage here
|
||||
{"metric":{"__name__":"xxy"},"values":[34],"timestamps" : [111]}`, &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("xfoo"),
|
||||
Value: []byte("bar"),
|
||||
},
|
||||
{
|
||||
Key: []byte("baz"),
|
||||
Value: []byte("xx"),
|
||||
},
|
||||
},
|
||||
Values: []float64{1.232, -3.21},
|
||||
Timestamps: []int64{456, 7890},
|
||||
},
|
||||
{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("__name__"),
|
||||
Value: []byte("xxy"),
|
||||
},
|
||||
},
|
||||
Values: []float64{34},
|
||||
Timestamps: []int64{111},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// No newline after the second line.
|
||||
f(`{"metric":{"foo":"bar","baz":"xx"},"values":[1.23, -3.21],"timestamps" : [456,789]}
|
||||
{"metric":{"__name__":"xx"},"values":[34],"timestamps" : [11]}`, &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
},
|
||||
{
|
||||
Key: []byte("baz"),
|
||||
Value: []byte("xx"),
|
||||
},
|
||||
},
|
||||
Values: []float64{1.23, -3.21},
|
||||
Timestamps: []int64{456, 789},
|
||||
},
|
||||
{
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: []byte("__name__"),
|
||||
Value: []byte("xx"),
|
||||
},
|
||||
},
|
||||
Values: []float64{34},
|
||||
Timestamps: []int64{11},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
app/vminsert/vmimport/parser_timing_test.go (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
package vmimport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `{"metric":{"__name__":"up","job":"node_exporter","instance":"localhost:9100"},"values":[0,0,0],"timestamps":[1549891472010,1549891487724,1549891503438]}
|
||||
{"metric":{"__name__":"up","job":"prometheus","instance":"localhost:9090"},"values":[1,1,1],"timestamps":[1549891461511,1549891476511,1549891491511]}
|
||||
{"metric":{"__name__":"up","job":"node_exporter","instance":"foobar.com:9100"},"values":[0,0,0],"timestamps":[1549891472010,1549891487724,1549891503438]}
|
||||
{"metric":{"__name__":"up","job":"prometheus","instance":"xxx.yyy.zzz:9090"},"values":[1,1,1],"timestamps":[1549891461511,1549891476511,1549891491511]}
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 4 {
|
||||
panic(fmt.Errorf("unexpected number of rows parsed; got %d; want 4", len(rows.Rows)))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
app/vminsert/vmimport/request_handler.go (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
package vmimport
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var maxLineLen = flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by `/api/v1/import`")
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="vmimport"}`)
|
||||
rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="vmimport"}`)
|
||||
)
|
||||
|
||||
// InsertHandler processes `/api/v1/import` request.
|
||||
//
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6
|
||||
func InsertHandler(req *http.Request) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(req)
|
||||
})
|
||||
}
|
||||
|
||||
func insertHandlerInternal(req *http.Request) error {
|
||||
readCalls.Inc()
|
||||
|
||||
r := req.Body
|
||||
if req.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := common.GetGzipReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read gzipped vmimport data: %s", err)
|
||||
}
|
||||
defer common.PutGzipReader(zr)
|
||||
r = zr
|
||||
}
|
||||
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
for ctx.Read(r) {
|
||||
if err := ctx.InsertRows(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ctx.Error()
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) InsertRows() error {
|
||||
rows := ctx.Rows.Rows
|
||||
rowsLen := 0
|
||||
for i := range rows {
|
||||
rowsLen += len(rows[i].Values)
|
||||
}
|
||||
ic := &ctx.Common
|
||||
ic.Reset(rowsLen)
|
||||
rowsTotal := 0
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
ic.AddLabelBytes(tag.Key, tag.Value)
|
||||
}
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
values := r.Values
|
||||
timestamps := r.Timestamps
|
||||
_ = timestamps[len(values)-1]
|
||||
for j, value := range values {
|
||||
timestamp := timestamps[j]
|
||||
ic.WriteDataPoint(ctx.metricNameBuf, nil, timestamp, value)
|
||||
}
|
||||
rowsTotal += len(values)
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
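The `_ = timestamps[len(values)-1]` line in `InsertRows` above is a bounds-check-elimination hint: indexing the slice at its highest needed position once up front lets the compiler drop per-iteration bounds checks inside the loop, and it panics early if the two slices have mismatched lengths. A standalone illustration of the same idiom (not the handler itself):

```go
package main

import "fmt"

// sumPairs adds values[i]*weights[i]. The blank assignment proves to the
// compiler that weights has at least len(values) elements, so the indexing
// in the loop needs no per-iteration bounds check; it also panics early on
// mismatched slice lengths.
func sumPairs(values, weights []float64) float64 {
	_ = weights[len(values)-1]
	s := 0.0
	for i, v := range values {
		s += v * weights[i]
	}
	return s
}

func main() {
	fmt.Println(sumPairs([]float64{1, 2, 3}, []float64{0.5, 0.25, 0.25}))
}
```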
func (ctx *pushCtx) Read(r io.Reader) bool {
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlockExt(r, ctx.reqBuf, ctx.tailBuf, *maxLineLen)
|
||||
if ctx.err != nil {
|
||||
if ctx.err != io.EOF {
|
||||
readErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read vmimport data: %s", ctx.err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
readCalls = metrics.NewCounter(`vm_read_calls_total{name="vmimport"}`)
|
||||
readErrors = metrics.NewCounter(`vm_read_errors_total{name="vmimport"}`)
|
||||
)
|
||||
|
||||
type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf []byte
|
||||
tailBuf []byte
|
||||
metricNameBuf []byte
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) Error() error {
|
||||
if ctx.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return ctx.err
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
|
||||
ctx.reqBuf = ctx.reqBuf[:0]
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
ctx.metricNameBuf = ctx.metricNameBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
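`insertHandlerInternal` above accepts an optional gzip-compressed request body on `/api/v1/import`. A hedged usage sketch (the file name is hypothetical; adjust host and port to the instance's HTTP listen address):

```
# data.jsonl contains one JSON object per line in the format shown above
gzip -k data.jsonl
curl -X POST -H 'Content-Encoding: gzip' --data-binary @data.jsonl.gz http://localhost:8428/api/v1/import
```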
app/vmrestore/Makefile (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
# All these commands must run from repository root.
|
||||
|
||||
vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) app-local
|
||||
|
||||
vmrestore-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker
|
||||
|
||||
vmrestore-pure-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-pure
|
||||
|
||||
vmrestore-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-amd64
|
||||
|
||||
vmrestore-arm-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-arm
|
||||
|
||||
vmrestore-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-arm64
|
||||
|
||||
vmrestore-ppc64le-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-ppc64le
|
||||
|
||||
vmrestore-386-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-386
|
||||
|
||||
package-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker
|
||||
|
||||
package-vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-pure
|
||||
|
||||
package-vmrestore-amd64:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-vmrestore-arm:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-arm
|
||||
|
||||
package-vmrestore-arm64:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-vmrestore-ppc64le:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-vmrestore-386:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-386
|
||||
|
||||
publish-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) publish-via-docker
|
||||
|
||||
vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) app-local-pure
|
||||
|
||||
vmrestore-amd64:
|
||||
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmrestore-amd64 ./app/vmrestore
|
||||
|
||||
vmrestore-arm:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmrestore-arm ./app/vmrestore
|
||||
|
||||
vmrestore-arm64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmrestore-arm64 ./app/vmrestore
|
||||
|
||||
vmrestore-ppc64le:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmrestore-ppc64le ./app/vmrestore
|
||||
|
||||
vmrestore-386:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=386 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmrestore-386 ./app/vmrestore
|
||||
86
app/vmrestore/README.md
Normal file
86
app/vmrestore/README.md
Normal file
@@ -0,0 +1,86 @@
|
||||
## vmrestore
|
||||
|
||||
`vmrestore` restores data from backups created by [vmbackup](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md).
|
||||
VictoriaMetrics `v1.29.0` and newer versions must be used for working with the restored data.
|
||||
|
||||
Restore process can be interrupted at any time. It is automatically resumed from the inerruption point
|
||||
when restarting `vmrestore` with the same args.
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
VictoriaMetrics must be stopped during the restore process.
|
||||
|
||||
```
|
||||
vmrestore -src=gcs://<bucket>/<path/to/backup> -storageDataPath=<local/path/to/restore>
|
||||
|
||||
```
|
||||
|
||||
* `<bucket>` is [GCS bucket](https://cloud.google.com/storage/docs/creating-buckets) name.
|
||||
* `<path/to/backup>` is the path to backup made with [vmbackup](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md) on GCS bucket.
|
||||
* `<local/path/to/restore>` is the path to folder where data will be restored. This folder must be passed
|
||||
to VictoriaMetrics in `-storageDataPath` command-line flag after the restore process is complete.
|
||||
|
||||
The original `-storageDataPath` directory may contain old files. They will be susbstituted by the files from backup.
|
||||
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
* If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.
|
||||
* If `vmrestore` has been interrupted due to temporary error, then just restart it with the same args. It will resume the restore process.
|
||||
|
||||
|
||||
### Advanced usage
|
||||
|
||||
Run `vmrestore -help` in order to see all the available options:
|
||||
|
||||
```
|
||||
-concurrency int
|
||||
The number of concurrent workers. Higher concurrency may reduce restore duration (default 10)
|
||||
-configFilePath string
|
||||
Path to file with S3 configs. Configs are loaded from default location if not set.
|
||||
See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
|
||||
-configProfile string
|
||||
Profile name for S3 configs (default "default")
|
||||
-credsFilePath string
|
||||
Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
|
||||
See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
|
||||
-customS3Endpoint string
|
||||
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
|
||||
-loggerLevel string
|
||||
Minimum level of errors to log. Possible values: INFO, ERROR, FATAL, PANIC (default "INFO")
|
||||
-maxBytesPerSecond int
|
||||
The maximum download speed. There is no limit if it is set to 0
|
||||
-memory.allowedPercent float
|
||||
Allowed percent of system memory VictoriaMetrics caches may occupy (default 60)
|
||||
-src string
|
||||
Source path with backup on the remote storage. Example: gcs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
|
||||
-storageDataPath string
|
||||
Destination path where backup must be restored. VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case only missing data is downloaded from backup (default "victoria-metrics-data")
|
||||
-version
|
||||
Show VictoriaMetrics version
|
||||
```
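For instance, restoring from an S3-compatible storage such as MinIO may combine several of the flags above. The endpoint, credentials file, bucket and paths below are placeholders:

```
vmrestore -customS3Endpoint=http://minio:9000 -credsFilePath=/path/to/credentials \
  -src=s3://<bucket>/<path/to/backup> -storageDataPath=<local/path/to/restore>
```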
|
||||
|
||||
|
||||
### How to build from sources
|
||||
|
||||
It is recommended to use [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there.
|
||||
|
||||
|
||||
#### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make vmrestore` from the root folder of the repository.
|
||||
It builds `vmrestore` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Production build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make vmrestore-prod` from the root folder of the repository.
|
||||
It builds `vmrestore-prod` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Building docker images
|
||||
|
||||
Run `make package-vmrestore`. It builds `victoriametrics/vmrestore:<PKG_TAG>` docker image locally.
|
||||
`<PKG_TAG>` is an auto-generated image tag, which depends on the source code in the repository.
|
||||
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmrestore`.
|
||||
7
app/vmrestore/deployment/Dockerfile
Normal file
@@ -0,0 +1,7 @@
|
||||
ARG certs_image
|
||||
FROM $certs_image AS certs
|
||||
FROM scratch
|
||||
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
|
||||
ARG src_binary
|
||||
COPY $src_binary ./vmrestore-prod
|
||||
ENTRYPOINT ["/vmrestore-prod"]
|
||||
78
app/vmrestore/main.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/actions"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fslocal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
src = flag.String("src", "", "Source path with backup on the remote storage. "+
|
||||
"Example: gcs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir")
|
||||
storageDataPath = flag.String("storageDataPath", "victoria-metrics-data", "Destination path where backup must be restored. "+
|
||||
"VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case only missing data is downloaded from backup")
|
||||
concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce restore duration")
|
||||
maxBytesPerSecond = flag.Int("maxBytesPerSecond", 0, "The maximum download speed. There is no limit if it is set to 0")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
buildinfo.Init()
|
||||
|
||||
srcFS, err := newSrcFS()
|
||||
if err != nil {
|
||||
logger.Fatalf("%s", err)
|
||||
}
|
||||
dstFS, err := newDstFS()
|
||||
if err != nil {
|
||||
logger.Fatalf("%s", err)
|
||||
}
|
||||
a := &actions.Restore{
|
||||
Concurrency: *concurrency,
|
||||
Src: srcFS,
|
||||
Dst: dstFS,
|
||||
}
|
||||
if err := a.Run(); err != nil {
|
||||
logger.Fatalf("cannot restore from backup: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func usage() {
|
||||
const s = `
|
||||
vmrestore restores VictoriaMetrics data from backups made by vmbackup.
|
||||
|
||||
See the docs at https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmrestore/README.md .
|
||||
`
|
||||
|
||||
f := flag.CommandLine.Output()
|
||||
fmt.Fprintf(f, "%s\n", s)
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func newDstFS() (*fslocal.FS, error) {
|
||||
if len(*storageDataPath) == 0 {
|
||||
return nil, fmt.Errorf("`-storageDataPath` cannot be empty")
|
||||
}
|
||||
fs := &fslocal.FS{
|
||||
Dir: *storageDataPath,
|
||||
MaxBytesPerSecond: *maxBytesPerSecond,
|
||||
}
|
||||
if err := fs.Init(); err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize local fs: %s", err)
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func newSrcFS() (common.RemoteFS, error) {
|
||||
fs, err := actions.NewRemoteFS(*src)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse `-src`=%q: %s", *src, err)
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package vmselect
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
@@ -29,28 +31,53 @@ func Init() {
|
||||
fs.RemoveDirContents(tmpDirPath)
|
||||
netstorage.InitTmpBlocksDir(tmpDirPath)
|
||||
promql.InitRollupResultCache(*vmstorage.DataPath + "/cache/rollupResult")
|
||||
|
||||
concurrencyCh = make(chan struct{}, *maxConcurrentRequests)
|
||||
}
|
||||
|
||||
var concurrencyCh chan struct{}
|
||||
|
||||
// Stop stops vmselect
|
||||
func Stop() {
|
||||
promql.StopRollupResultCache()
|
||||
}
|
||||
|
||||
var concurrencyCh chan struct{}
|
||||
|
||||
var (
|
||||
concurrencyLimitReached = metrics.NewCounter(`vm_concurrent_select_limit_reached_total`)
|
||||
concurrencyLimitTimeout = metrics.NewCounter(`vm_concurrent_select_limit_timeout_total`)
|
||||
|
||||
_ = metrics.NewGauge(`vm_concurrent_select_capacity`, func() float64 {
|
||||
return float64(cap(concurrencyCh))
|
||||
})
|
||||
_ = metrics.NewGauge(`vm_concurrent_select_current`, func() float64 {
|
||||
return float64(len(concurrencyCh))
|
||||
})
|
||||
)
|
||||
|
||||
// RequestHandler handles remote read API requests for Prometheus
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
// Limit the number of concurrent queries.
|
||||
// Sleep for a second until giving up. This should resolve short bursts in requests.
|
||||
t := time.NewTimer(*maxQueueDuration)
|
||||
select {
|
||||
case concurrencyCh <- struct{}{}:
|
||||
t.Stop()
|
||||
defer func() { <-concurrencyCh }()
|
||||
case <-t.C:
|
||||
httpserver.Errorf(w, "cannot handle more than %d concurrent requests", cap(concurrencyCh))
|
||||
return true
|
||||
default:
|
||||
// Sleep for a while until giving up. This should resolve short bursts in requests.
|
||||
concurrencyLimitReached.Inc()
|
||||
t := timerpool.Get(*maxQueueDuration)
|
||||
select {
|
||||
case concurrencyCh <- struct{}{}:
|
||||
timerpool.Put(t)
|
||||
defer func() { <-concurrencyCh }()
|
||||
case <-t.C:
|
||||
timerpool.Put(t)
|
||||
concurrencyLimitTimeout.Inc()
|
||||
err := &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("cannot handle more than %d concurrent requests", cap(concurrencyCh)),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
httpserver.Errorf(w, "%s", err)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
path := strings.Replace(r.URL.Path, "//", "/", -1)
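The hunk above limits query concurrency with a buffered channel used as a semaphore: a request proceeds immediately if a slot is free, otherwise it waits up to the queue duration before being rejected with 503. Below is a minimal, self-contained sketch of that pattern; the handler, channel capacity and timeout are illustrative values, and the actual vmselect code additionally uses a timer pool and Prometheus-style counters:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// concurrencyCh acts as a semaphore: its capacity bounds the number of
// requests processed at the same time. The values here are illustrative.
var concurrencyCh = make(chan struct{}, 8)

const maxQueueDuration = 10 * time.Second

func limitedHandler(w http.ResponseWriter, r *http.Request) {
	select {
	case concurrencyCh <- struct{}{}:
		// A slot is free - proceed immediately.
	default:
		// All slots are busy - wait up to maxQueueDuration for a free slot.
		t := time.NewTimer(maxQueueDuration)
		select {
		case concurrencyCh <- struct{}{}:
			t.Stop()
		case <-t.C:
			http.Error(w, fmt.Sprintf("cannot handle more than %d concurrent requests", cap(concurrencyCh)),
				http.StatusServiceUnavailable)
			return
		}
	}
	defer func() { <-concurrencyCh }() // release the slot when done

	// ... handle the request here ...
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/", limitedHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```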
|
||||
@@ -115,6 +142,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/labels/count":
|
||||
labelsCountRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.LabelsCountHandler(w, r); err != nil {
|
||||
labelsCountErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/export":
|
||||
exportRequests.Inc()
|
||||
if err := prometheus.ExportHandler(w, r); err != nil {
|
||||
@@ -131,6 +167,18 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/rules":
|
||||
// Return dumb placeholder
|
||||
rulesRequests.Inc()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
fmt.Fprintf(w, "%s", `{"status":"success","data":{"groups":[]}}`)
|
||||
return true
|
||||
case "/api/v1/alerts":
|
||||
// Return dumb placeholder
|
||||
alertsRequests.Inc()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
fmt.Fprintf(w, "%s", `{"status":"success","data":{"alerts":[]}}`)
|
||||
return true
|
||||
case "/api/v1/admin/tsdb/delete_series":
|
||||
deleteRequests.Inc()
|
||||
authKey := r.FormValue("authKey")
|
||||
@@ -154,7 +202,10 @@ func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
|
||||
logger.Errorf("error in %q: %s", r.URL.Path, err)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
statusCode := 422
|
||||
statusCode := http.StatusUnprocessableEntity
|
||||
if esc, ok := err.(*httpserver.ErrorWithStatusCode); ok {
|
||||
statusCode = esc.StatusCode
|
||||
}
|
||||
w.WriteHeader(statusCode)
|
||||
prometheus.WriteErrorResponse(w, statusCode, err)
|
||||
}
|
||||
@@ -178,6 +229,9 @@ var (
|
||||
labelsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/labels"}`)
|
||||
labelsErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/labels"}`)
|
||||
|
||||
labelsCountRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/labels/count"}`)
|
||||
labelsCountErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/labels/count"}`)
|
||||
|
||||
deleteRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/admin/tsdb/delete_series"}`)
|
||||
deleteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/admin/tsdb/delete_series"}`)
|
||||
|
||||
@@ -186,4 +240,7 @@ var (
|
||||
|
||||
federateRequests = metrics.NewCounter(`vm_http_requests_total{path="/federate"}`)
|
||||
federateErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/federate"}`)
|
||||
|
||||
rulesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`)
|
||||
alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`)
|
||||
)
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func mustFadviseRandomRead(f *os.File) {
|
||||
// Do nothing :)
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func mustFadviseRandomRead(f *os.File) {
|
||||
fd := int(f.Fd())
|
||||
if err := unix.Fadvise(int(fd), 0, 0, unix.FADV_RANDOM|unix.FADV_WILLNEED); err != nil {
|
||||
logger.Panicf("FATAL: error returned from unix.Fadvise(RANDOM|WILLNEED): %s", err)
|
||||
}
|
||||
}
|
||||
@@ -19,9 +19,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 10e3, "The maximum number of tag keys returned per search")
|
||||
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 10e3, "The maximum number of tag values returned per search")
|
||||
maxMetricsPerSearch = flag.Int("search.maxUniqueTimeseries", 100e3, "The maximum number of unique time series each search can scan")
|
||||
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned per search")
|
||||
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned per search")
|
||||
maxMetricsPerSearch = flag.Int("search.maxUniqueTimeseries", 300e3, "The maximum number of unique time series each search can scan")
|
||||
)
|
||||
|
||||
// Result is a single timeseries result.
|
||||
@@ -49,8 +49,9 @@ func (r *Result) reset() {
|
||||
|
||||
// Results holds results returned from ProcessSearchQuery.
|
||||
type Results struct {
|
||||
tr storage.TimeRange
|
||||
deadline Deadline
|
||||
tr storage.TimeRange
|
||||
fetchData bool
|
||||
deadline Deadline
|
||||
|
||||
tbf *tmpBlocksFile
|
||||
|
||||
@@ -71,9 +72,10 @@ func (rss *Results) Cancel() {
|
||||
// RunParallel runs in parallel f for all the results from rss.
|
||||
//
|
||||
// f shouldn't hold references to rs after returning.
|
||||
// workerID is the id of the worker goroutine that calls f.
|
||||
//
|
||||
// rss becomes unusable after the call to RunParallel.
|
||||
func (rss *Results) RunParallel(f func(rs *Result)) error {
|
||||
func (rss *Results) RunParallel(f func(rs *Result, workerID uint)) error {
|
||||
defer func() {
|
||||
putTmpBlocksFile(rss.tbf)
|
||||
rss.tbf = nil
|
||||
@@ -90,38 +92,43 @@ func (rss *Results) RunParallel(f func(rs *Result)) error {
|
||||
doneCh := make(chan error)
|
||||
|
||||
// Start workers.
|
||||
rowsProcessedTotal := uint64(0)
|
||||
for i := 0; i < workersCount; i++ {
|
||||
go func() {
|
||||
go func(workerID uint) {
|
||||
rs := getResult()
|
||||
defer putResult(rs)
|
||||
maxWorkersCount := gomaxprocs / workersCount
|
||||
|
||||
var err error
|
||||
rowsProcessed := 0
|
||||
for pts := range workCh {
|
||||
if time.Until(rss.deadline.Deadline) < 0 {
|
||||
err = fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.Timeout)
|
||||
break
|
||||
}
|
||||
if err = pts.Unpack(rss.tbf, rs, rss.tr, maxWorkersCount); err != nil {
|
||||
if err = pts.Unpack(rss.tbf, rs, rss.tr, rss.fetchData, maxWorkersCount); err != nil {
|
||||
break
|
||||
}
|
||||
if len(rs.Timestamps) == 0 {
|
||||
if len(rs.Timestamps) == 0 && rss.fetchData {
|
||||
// Skip empty blocks.
|
||||
continue
|
||||
}
|
||||
f(rs)
|
||||
rowsProcessed += len(rs.Values)
|
||||
f(rs, workerID)
|
||||
}
|
||||
atomic.AddUint64(&rowsProcessedTotal, uint64(rowsProcessed))
|
||||
// Drain the remaining work
|
||||
for range workCh {
|
||||
}
|
||||
doneCh <- err
|
||||
}()
|
||||
}(uint(i))
|
||||
}
|
||||
|
||||
// Feed workers with work.
|
||||
for i := range rss.packedTimeseries {
|
||||
workCh <- &rss.packedTimeseries[i]
|
||||
}
|
||||
seriesProcessedTotal := len(rss.packedTimeseries)
|
||||
rss.packedTimeseries = rss.packedTimeseries[:0]
|
||||
close(workCh)
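RunParallel above fans the unpacking work out to a fixed set of worker goroutines: each worker drains `workCh`, remembers its first error while continuing to drain so the feeder never blocks, and finally reports that error on `doneCh`. The following is a minimal standalone sketch of the same pattern; the item type and processing callback are illustrative, not the actual netstorage code:

```go
package main

import (
	"fmt"
	"runtime"
)

// runParallel starts workersCount goroutines that drain workCh, collects the
// first error seen by each worker on doneCh, and returns the first error.
func runParallel(items []int, f func(item int, workerID uint) error) error {
	workersCount := runtime.GOMAXPROCS(-1)
	workCh := make(chan int)
	doneCh := make(chan error, workersCount)

	// Start workers.
	for i := 0; i < workersCount; i++ {
		go func(workerID uint) {
			var err error
			for item := range workCh {
				if err != nil {
					continue // drain the remaining work after the first error
				}
				err = f(item, workerID)
			}
			doneCh <- err
		}(uint(i))
	}

	// Feed workers with work.
	for _, item := range items {
		workCh <- item
	}
	close(workCh)

	// Collect the results; return just the first error.
	var firstErr error
	for i := 0; i < workersCount; i++ {
		if err := <-doneCh; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	err := runParallel([]int{1, 2, 3, 4, 5}, func(item int, workerID uint) error {
		fmt.Printf("worker %d processed %d\n", workerID, item)
		return nil
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}
```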
|
||||
|
||||
@@ -132,6 +139,8 @@ func (rss *Results) RunParallel(f func(rs *Result)) error {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
perQueryRowsProcessed.Update(float64(rowsProcessedTotal))
|
||||
perQuerySeriesProcessed.Update(float64(seriesProcessedTotal))
|
||||
if len(errors) > 0 {
|
||||
// Return just the first error, since other errors
|
||||
// are likely to duplicate the first error.
|
||||
@@ -140,6 +149,9 @@ func (rss *Results) RunParallel(f func(rs *Result)) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var perQueryRowsProcessed = metrics.NewHistogram(`vm_per_query_rows_processed_count`)
|
||||
var perQuerySeriesProcessed = metrics.NewHistogram(`vm_per_query_series_processed_count`)
|
||||
|
||||
var gomaxprocs = runtime.GOMAXPROCS(-1)
|
||||
|
||||
type packedTimeseries struct {
|
||||
@@ -148,7 +160,7 @@ type packedTimeseries struct {
|
||||
}
|
||||
|
||||
// Unpack unpacks pts to dst.
|
||||
func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, maxWorkersCount int) error {
|
||||
func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, fetchData bool, maxWorkersCount int) error {
|
||||
dst.reset()
|
||||
|
||||
if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
|
||||
@@ -175,7 +187,7 @@ func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.
|
||||
var err error
|
||||
for addr := range workCh {
|
||||
sb := getSortBlock()
|
||||
if err = sb.unpackFrom(tbf, addr, tr); err != nil {
|
||||
if err = sb.unpackFrom(tbf, addr, tr, fetchData); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -294,10 +306,12 @@ func (sb *sortBlock) reset() {
|
||||
sb.NextIdx = 0
|
||||
}
|
||||
|
||||
func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storage.TimeRange) error {
|
||||
func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storage.TimeRange, fetchData bool) error {
|
||||
tbf.MustReadBlockAt(&sb.b, addr)
|
||||
if err := sb.b.UnmarshalData(); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal block: %s", err)
|
||||
if fetchData {
|
||||
if err := sb.b.UnmarshalData(); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal block: %s", err)
|
||||
}
|
||||
}
|
||||
timestamps := sb.b.Timestamps()
|
||||
|
||||
@@ -400,6 +414,33 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
|
||||
return labelValues, nil
|
||||
}
|
||||
|
||||
// GetLabelEntries returns all the label entries until the given deadline.
|
||||
func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
|
||||
labelEntries, err := vmstorage.SearchTagEntries(*maxTagKeysPerSearch, *maxTagValuesPerSearch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during label entries request: %s", err)
|
||||
}
|
||||
|
||||
// Substitute "" with "__name__"
|
||||
for i := range labelEntries {
|
||||
e := &labelEntries[i]
|
||||
if e.Key == "" {
|
||||
e.Key = "__name__"
|
||||
}
|
||||
}
|
||||
|
||||
// Sort labelEntries by the number of label values in each entry.
|
||||
sort.Slice(labelEntries, func(i, j int) bool {
|
||||
a, b := labelEntries[i].Values, labelEntries[j].Values
|
||||
if len(a) != len(b) {
|
||||
return len(a) > len(b)
|
||||
}
|
||||
return labelEntries[i].Key > labelEntries[j].Key
|
||||
})
|
||||
|
||||
return labelEntries, nil
|
||||
}
|
||||
|
||||
// GetSeriesCount returns the number of unique series.
|
||||
func GetSeriesCount(deadline Deadline) (uint64, error) {
|
||||
n, err := vmstorage.GetSeriesCount()
|
||||
@@ -418,18 +459,14 @@ func getStorageSearch() *storage.Search {
|
||||
}
|
||||
|
||||
func putStorageSearch(sr *storage.Search) {
|
||||
n := atomic.LoadUint64(&sr.MissingMetricNamesForMetricID)
|
||||
missingMetricNamesForMetricID.Add(int(n))
|
||||
sr.MustClose()
|
||||
ssPool.Put(sr)
|
||||
}
|
||||
|
||||
var ssPool sync.Pool
|
||||
|
||||
var missingMetricNamesForMetricID = metrics.NewCounter(`vm_missing_metric_names_for_metric_id_total`)
|
||||
|
||||
// ProcessSearchQuery performs sq on storage nodes until the given deadline.
|
||||
func ProcessSearchQuery(sq *storage.SearchQuery, deadline Deadline) (*Results, error) {
|
||||
func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline Deadline) (*Results, error) {
|
||||
// Setup search.
|
||||
tfss, err := setupTfss(sq.TagFilterss)
|
||||
if err != nil {
|
||||
@@ -445,35 +482,41 @@ func ProcessSearchQuery(sq *storage.SearchQuery, deadline Deadline) (*Results, e
|
||||
|
||||
sr := getStorageSearch()
|
||||
defer putStorageSearch(sr)
|
||||
sr.Init(vmstorage.Storage, tfss, tr, *maxMetricsPerSearch)
|
||||
sr.Init(vmstorage.Storage, tfss, tr, fetchData, *maxMetricsPerSearch)
|
||||
|
||||
tbf := getTmpBlocksFile()
|
||||
m := make(map[string][]tmpBlockAddr)
|
||||
blocksRead := 0
|
||||
bb := tmpBufPool.Get()
|
||||
defer tmpBufPool.Put(bb)
|
||||
for sr.NextMetricBlock() {
|
||||
addr, err := tbf.WriteBlock(sr.MetricBlock.Block)
|
||||
blocksRead++
|
||||
bb.B = storage.MarshalBlock(bb.B[:0], sr.MetricBlock.Block)
|
||||
addr, err := tbf.WriteBlockData(bb.B)
|
||||
if err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("cannot write data to temporary blocks file: %s", err)
|
||||
return nil, fmt.Errorf("cannot write data block #%d to temporary blocks file: %s", blocksRead, err)
|
||||
}
|
||||
if time.Until(deadline.Deadline) < 0 {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("timeout exceeded while fetching data from storage: %s", deadline.Timeout)
|
||||
return nil, fmt.Errorf("timeout exceeded while fetching data block #%d from storage: %s", blocksRead, deadline.Timeout)
|
||||
}
|
||||
metricName := sr.MetricBlock.MetricName
|
||||
m[string(metricName)] = append(m[string(metricName)], addr)
|
||||
}
|
||||
if err := sr.Error(); err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("search error: %s", err)
|
||||
return nil, fmt.Errorf("search error after reading %d data blocks: %s", blocksRead, err)
|
||||
}
|
||||
if err := tbf.Finalize(); err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("cannot finalize temporary blocks file: %s", err)
|
||||
return nil, fmt.Errorf("cannot finalize temporary blocks file with %d blocks: %s", blocksRead, err)
|
||||
}
|
||||
|
||||
var rss Results
|
||||
rss.packedTimeseries = make([]packedTimeseries, len(m))
|
||||
rss.tr = tr
|
||||
rss.fetchData = fetchData
|
||||
rss.deadline = deadline
|
||||
rss.tbf = tbf
|
||||
i := 0
|
||||
@@ -483,6 +526,15 @@ func ProcessSearchQuery(sq *storage.SearchQuery, deadline Deadline) (*Results, e
|
||||
pts.metricName = metricName
|
||||
pts.addrs = addrs
|
||||
}
|
||||
|
||||
// Sort rss.packedTimeseries by the first addr offset in order
|
||||
// to reduce the number of disk seeks during unpacking in RunParallel.
|
||||
// In this case tmpBlocksFile must be read almost sequentially.
|
||||
sort.Slice(rss.packedTimeseries, func(i, j int) bool {
|
||||
pts := rss.packedTimeseries
|
||||
return pts[i].addrs[0].offset < pts[j].addrs[0].offset
|
||||
})
|
||||
|
||||
return &rss, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -10,6 +9,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
@@ -22,9 +22,7 @@ func InitTmpBlocksDir(tmpDirPath string) {
|
||||
tmpDirPath = os.TempDir()
|
||||
}
|
||||
tmpBlocksDir = tmpDirPath + "/searchResults"
|
||||
if err := os.RemoveAll(tmpBlocksDir); err != nil {
|
||||
logger.Panicf("FATAL: cannot remove %q: %s", tmpBlocksDir, err)
|
||||
}
|
||||
fs.MustRemoveAll(tmpBlocksDir)
|
||||
if err := fs.MkdirAllIfNotExist(tmpBlocksDir); err != nil {
|
||||
logger.Panicf("FATAL: cannot create %q: %s", tmpBlocksDir, err)
|
||||
}
|
||||
@@ -32,13 +30,23 @@ func InitTmpBlocksDir(tmpDirPath string) {
|
||||
|
||||
var tmpBlocksDir string
|
||||
|
||||
const maxInmemoryTmpBlocksFile = 512 * 1024
|
||||
func maxInmemoryTmpBlocksFile() int {
|
||||
mem := memory.Allowed()
|
||||
maxLen := mem / 1024
|
||||
if maxLen < 64*1024 {
|
||||
return 64 * 1024
|
||||
}
|
||||
return maxLen
|
||||
}
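As a worked illustration of the formula above: an instance allowed to use 4 GiB of memory keeps up to 4 MiB (4 GiB / 1024) of search results per temporary blocks file in memory before spilling to disk, while anything below roughly 64 MiB of allowed memory falls back to the 64 KiB floor. The figures are illustrative; the actual limit depends on `memory.Allowed()` at runtime.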
|
||||
|
||||
var _ = metrics.NewGauge(`vm_tmp_blocks_max_inmemory_file_size_bytes`, func() float64 {
|
||||
return float64(maxInmemoryTmpBlocksFile())
|
||||
})
|
||||
|
||||
type tmpBlocksFile struct {
|
||||
buf []byte
|
||||
|
||||
f *os.File
|
||||
bw *bufio.Writer
|
||||
f *os.File
|
||||
|
||||
offset uint64
|
||||
}
|
||||
@@ -46,7 +54,9 @@ type tmpBlocksFile struct {
|
||||
func getTmpBlocksFile() *tmpBlocksFile {
|
||||
v := tmpBlocksFilePool.Get()
|
||||
if v == nil {
|
||||
return &tmpBlocksFile{}
|
||||
return &tmpBlocksFile{
|
||||
buf: make([]byte, 0, maxInmemoryTmpBlocksFile()),
|
||||
}
|
||||
}
|
||||
return v.(*tmpBlocksFile)
|
||||
}
|
||||
@@ -55,7 +65,6 @@ func putTmpBlocksFile(tbf *tmpBlocksFile) {
|
||||
tbf.MustClose()
|
||||
tbf.buf = tbf.buf[:0]
|
||||
tbf.f = nil
|
||||
tbf.bw = nil
|
||||
tbf.offset = 0
|
||||
tmpBlocksFilePool.Put(tbf)
|
||||
}
|
||||
@@ -71,51 +80,34 @@ func (addr tmpBlockAddr) String() string {
|
||||
return fmt.Sprintf("offset %d, size %d", addr.offset, addr.size)
|
||||
}
|
||||
|
||||
func getBufioWriter(f *os.File) *bufio.Writer {
|
||||
v := bufioWriterPool.Get()
|
||||
if v == nil {
|
||||
return bufio.NewWriterSize(f, maxInmemoryTmpBlocksFile*2)
|
||||
}
|
||||
bw := v.(*bufio.Writer)
|
||||
bw.Reset(f)
|
||||
return bw
|
||||
}
|
||||
|
||||
func putBufioWriter(bw *bufio.Writer) {
|
||||
bufioWriterPool.Put(bw)
|
||||
}
|
||||
|
||||
var bufioWriterPool sync.Pool
|
||||
|
||||
var tmpBlocksFilesCreated = metrics.NewCounter(`vm_tmp_blocks_files_created_total`)
|
||||
|
||||
// WriteBlock writes b to tbf.
|
||||
// WriteBlockData writes b to tbf.
|
||||
//
|
||||
// It returns errors since the operation may fail on space shortage
|
||||
// and this must be handled.
|
||||
func (tbf *tmpBlocksFile) WriteBlock(b *storage.Block) (tmpBlockAddr, error) {
|
||||
func (tbf *tmpBlocksFile) WriteBlockData(b []byte) (tmpBlockAddr, error) {
|
||||
var addr tmpBlockAddr
|
||||
addr.offset = tbf.offset
|
||||
|
||||
tbfBufLen := len(tbf.buf)
|
||||
tbf.buf = storage.MarshalBlock(tbf.buf, b)
|
||||
addr.size = len(tbf.buf) - tbfBufLen
|
||||
addr.size = len(b)
|
||||
tbf.offset += uint64(addr.size)
|
||||
if tbf.offset <= maxInmemoryTmpBlocksFile {
|
||||
if len(tbf.buf)+len(b) <= cap(tbf.buf) {
|
||||
// Fast path - the data fits tbf.buf
|
||||
tbf.buf = append(tbf.buf, b...)
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// Slow path: flush the data from tbf.buf to file.
|
||||
if tbf.f == nil {
|
||||
f, err := ioutil.TempFile(tmpBlocksDir, "")
|
||||
if err != nil {
|
||||
return addr, err
|
||||
}
|
||||
tbf.f = f
|
||||
tbf.bw = getBufioWriter(f)
|
||||
tmpBlocksFilesCreated.Inc()
|
||||
}
|
||||
_, err := tbf.bw.Write(tbf.buf)
|
||||
tbf.buf = tbf.buf[:0]
|
||||
_, err := tbf.f.Write(tbf.buf)
|
||||
tbf.buf = append(tbf.buf[:0], b...)
|
||||
if err != nil {
|
||||
return addr, fmt.Errorf("cannot write block to %q: %s", tbf.f.Name(), err)
|
||||
}
|
||||
@@ -126,15 +118,18 @@ func (tbf *tmpBlocksFile) Finalize() error {
|
||||
if tbf.f == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := tbf.bw.Flush()
|
||||
putBufioWriter(tbf.bw)
|
||||
tbf.bw = nil
|
||||
if _, err := tbf.f.Write(tbf.buf); err != nil {
|
||||
return fmt.Errorf("cannot flush the remaining %d bytes to tmpBlocksFile: %s", len(tbf.buf), err)
|
||||
}
|
||||
tbf.buf = tbf.buf[:0]
|
||||
if _, err := tbf.f.Seek(0, 0); err != nil {
|
||||
logger.Panicf("FATAL: cannot seek to the start of file: %s", err)
|
||||
}
|
||||
mustFadviseRandomRead(tbf.f)
|
||||
return err
|
||||
// Hint the OS that the file is read almost sequentially.
|
||||
// This should reduce the number of disk seeks, which is important
|
||||
// for HDDs.
|
||||
fs.MustFadviseSequentialRead(tbf.f, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tbf *tmpBlocksFile) MustReadBlockAt(dst *storage.Block, addr tmpBlockAddr) {
|
||||
@@ -169,10 +164,6 @@ func (tbf *tmpBlocksFile) MustClose() {
|
||||
if tbf.f == nil {
|
||||
return
|
||||
}
|
||||
if tbf.bw != nil {
|
||||
putBufioWriter(tbf.bw)
|
||||
tbf.bw = nil
|
||||
}
|
||||
fname := tbf.f.Name()
|
||||
|
||||
// Remove the file at first, then close it.
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestTmpBlocksFileSerial(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTmpBlocksFileConcurrent(t *testing.T) {
|
||||
concurrency := 4
|
||||
concurrency := 3
|
||||
ch := make(chan error, concurrency)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
@@ -69,7 +69,7 @@ func testTmpBlocksFile() error {
|
||||
_, _, _ = b.MarshalData(0, 0)
|
||||
return &b
|
||||
}
|
||||
for _, size := range []int{1024, 16 * 1024, maxInmemoryTmpBlocksFile / 2, 2 * maxInmemoryTmpBlocksFile} {
|
||||
for _, size := range []int{1024, 16 * 1024, maxInmemoryTmpBlocksFile() / 2, 2 * maxInmemoryTmpBlocksFile()} {
|
||||
err := func() error {
|
||||
tbf := getTmpBlocksFile()
|
||||
defer putTmpBlocksFile(tbf)
|
||||
@@ -77,9 +77,12 @@ func testTmpBlocksFile() error {
|
||||
// Write blocks until their summary size exceeds `size`.
|
||||
var addrs []tmpBlockAddr
|
||||
var blocks []*storage.Block
|
||||
bb := tmpBufPool.Get()
|
||||
defer tmpBufPool.Put(bb)
|
||||
for tbf.offset < uint64(size) {
|
||||
b := createBlock()
|
||||
addr, err := tbf.WriteBlock(b)
|
||||
bb.B = storage.MarshalBlock(bb.B[:0], b)
|
||||
addr, err := tbf.WriteBlockData(bb.B)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot write block at offset %d: %s", tbf.offset, err)
|
||||
}
|
||||
@@ -94,7 +97,7 @@ func testTmpBlocksFile() error {
|
||||
}
|
||||
|
||||
// Read blocks in parallel and verify them
|
||||
concurrency := 3
|
||||
concurrency := 2
|
||||
workCh := make(chan int)
|
||||
doneCh := make(chan error)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
{% for i, ts := range rs.Timestamps %}
|
||||
{%z= bb.B %}{% space %}
|
||||
{%f= rs.Values[i] %}{% space %}
|
||||
{%d= int(ts) %}{% newline %}
|
||||
{%dl= ts %}{% newline %}
|
||||
{% endfor %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfunc %}
|
||||
@@ -35,10 +35,10 @@
|
||||
"timestamps":[
|
||||
{% if len(rs.Timestamps) > 0 %}
|
||||
{% code timestamps := rs.Timestamps %}
|
||||
{%d= int(timestamps[0]) %}
|
||||
{%dl= timestamps[0] %}
|
||||
{% code timestamps = timestamps[1:] %}
|
||||
{% for _, ts := range timestamps %}
|
||||
,{%d= int(ts) %}
|
||||
,{%dl= ts %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
|
||||
@@ -49,7 +49,7 @@ func StreamExportPrometheusLine(qw422016 *qt422016.Writer, rs *netstorage.Result
|
||||
//line app/vmselect/prometheus/export.qtpl:15
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:16
|
||||
qw422016.N().D(int(ts))
|
||||
qw422016.N().DL(ts)
|
||||
//line app/vmselect/prometheus/export.qtpl:16
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
@@ -129,7 +129,7 @@ func StreamExportJSONLine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
timestamps := rs.Timestamps
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:38
|
||||
qw422016.N().D(int(timestamps[0]))
|
||||
qw422016.N().DL(timestamps[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:39
|
||||
timestamps = timestamps[1:]
|
||||
|
||||
@@ -138,7 +138,7 @@ func StreamExportJSONLine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:40
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:41
|
||||
qw422016.N().D(int(ts))
|
||||
qw422016.N().DL(ts)
|
||||
//line app/vmselect/prometheus/export.qtpl:42
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:43
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
{% if len(rs.Timestamps) == 0 || len(rs.Values) == 0 %}{% return %}{% endif %}
|
||||
{%= prometheusMetricName(&rs.MetricName) %}{% space %}
|
||||
{%f= rs.Values[len(rs.Values)-1] %}{% space %}
|
||||
{%d= int(rs.Timestamps[len(rs.Timestamps)-1]) %}{% newline %}
|
||||
{%dl= rs.Timestamps[len(rs.Timestamps)-1] %}{% newline %}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
|
||||
@@ -41,7 +41,7 @@ func StreamFederate(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().D(int(rs.Timestamps[len(rs.Timestamps)-1]))
|
||||
qw422016.N().DL(rs.Timestamps[len(rs.Timestamps)-1])
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
|
||||
17
app/vmselect/prometheus/labels_count_response.qtpl
Normal file
@@ -0,0 +1,17 @@
|
||||
{% import "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" %}
|
||||
|
||||
{% stripspace %}
|
||||
LabelsCountResponse generates response for /api/v1/labels/count .
|
||||
{% func LabelsCountResponse(labelEntries []storage.TagEntry) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":{
|
||||
{% for i, e := range labelEntries %}
|
||||
{%q= e.Key %}:{%d= len(e.Values) %}
|
||||
{% if i+1 < len(labelEntries) %},{% endif %}
|
||||
{% endfor %}
|
||||
}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
74
app/vmselect/prometheus/labels_count_response.qtpl.go
Normal file
@@ -0,0 +1,74 @@
|
||||
// Code generated by qtc from "labels_count_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:1
|
||||
import "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
|
||||
// LabelsCountResponse generates response for /api/v1/labels/count .
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
func StreamLabelsCountResponse(qw422016 *qt422016.Writer, labelEntries []storage.TagEntry) {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
qw422016.N().S(`{"status":"success","data":{`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:9
|
||||
for i, e := range labelEntries {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:10
|
||||
qw422016.N().Q(e.Key)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:10
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:10
|
||||
qw422016.N().D(len(e.Values))
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:11
|
||||
if i+1 < len(labelEntries) {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:11
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:12
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:12
|
||||
qw422016.N().S(`}}`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
func WriteLabelsCountResponse(qq422016 qtio422016.Writer, labelEntries []storage.TagEntry) {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
StreamLabelsCountResponse(qw422016, labelEntries)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
func LabelsCountResponse(labelEntries []storage.TagEntry) string {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
WriteLabelsCountResponse(qb422016, labelEntries)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
}
|
||||
@@ -6,29 +6,34 @@ import (
|
||||
"math"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
var (
|
||||
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum time for search query execution")
|
||||
maxQueryLen = flag.Int("search.maxQueryLen", 16*1024, "The maximum search query length in bytes")
|
||||
latencyOffset = flag.Duration("search.latencyOffset", time.Second*30, "The time when data points become visible in query results after the collection. "+
|
||||
"Too small value can result in incomplete last points for query results")
|
||||
maxExportDuration = flag.Duration("search.maxExportDuration", 10*time.Minute, "The maximum duration for `/api/v1/export` call")
|
||||
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for search query execution")
|
||||
maxQueryLen = flag.Int("search.maxQueryLen", 16*1024, "The maximum search query length in bytes")
|
||||
maxLookback = flag.Duration("search.maxLookback", 0, "Synonym to `-search.lookback-delta` from Prometheus. "+
|
||||
"The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via `max_lookback` arg")
|
||||
)
|
||||
|
||||
// Default step used if not set.
|
||||
const defaultStep = 5 * 60 * 1000
|
||||
|
||||
// Latency for data processing pipeline, i.e. the time between data is ingested
|
||||
// into the system and the time it becomes visible to search.
|
||||
const latencyOffset = 60 * 1000
|
||||
|
||||
// FederateHandler implements /federate . See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
func FederateHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
@@ -37,10 +42,25 @@ func FederateHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
maxLookback := getDuration(r, "max_lookback", defaultStep)
|
||||
start := getTime(r, "start", ct-maxLookback)
|
||||
end := getTime(r, "end", ct)
|
||||
deadline := getDeadline(r)
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("missing `match[]` arg")
|
||||
}
|
||||
lookbackDelta, err := getMaxLookback(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if lookbackDelta <= 0 {
|
||||
lookbackDelta = defaultStep
|
||||
}
|
||||
start, err := getTime(r, "start", ct-lookbackDelta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
end, err := getTime(r, "end", ct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
deadline := getDeadlineForQuery(r)
|
||||
if start >= end {
|
||||
start = end - defaultStep
|
||||
}
|
||||
@@ -53,7 +73,7 @@ func FederateHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, deadline)
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
@@ -61,7 +81,7 @@ func FederateHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
||||
doneCh := make(chan error)
|
||||
go func() {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result) {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
WriteFederate(bb, rs)
|
||||
resultsCh <- bb
|
||||
@@ -97,14 +117,23 @@ func ExportHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
if len(matches) == 0 {
|
||||
// Maintain backwards compatibility
|
||||
match := r.FormValue("match")
|
||||
if len(match) == 0 {
|
||||
return fmt.Errorf("missing `match[]` arg")
|
||||
}
|
||||
matches = []string{match}
|
||||
}
|
||||
start := getTime(r, "start", 0)
|
||||
end := getTime(r, "end", ct)
|
||||
start, err := getTime(r, "start", 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
end, err := getTime(r, "end", ct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
format := r.FormValue("format")
|
||||
deadline := getDeadline(r)
|
||||
deadline := getDeadlineForExport(r)
|
||||
if start >= end {
|
||||
start = end - defaultStep
|
||||
end = start + defaultStep
|
||||
}
|
||||
if err := exportHandler(w, matches, start, end, format, deadline); err != nil {
|
||||
return err
|
||||
@@ -118,7 +147,7 @@ var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
|
||||
func exportHandler(w http.ResponseWriter, matches []string, start, end int64, format string, deadline netstorage.Deadline) error {
|
||||
writeResponseFunc := WriteExportStdResponse
|
||||
writeLineFunc := WriteExportJSONLine
|
||||
contentType := "application/json"
|
||||
contentType := "application/stream+json"
|
||||
if format == "prometheus" {
|
||||
contentType = "text/plain"
|
||||
writeLineFunc = WriteExportPrometheusLine
|
||||
@@ -136,7 +165,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, deadline)
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
@@ -144,7 +173,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))
|
||||
doneCh := make(chan error)
|
||||
go func() {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result) {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
writeLineFunc(bb, rs)
|
||||
resultsCh <- bb
|
||||
@@ -156,6 +185,11 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
writeResponseFunc(w, resultsCh)
|
||||
|
||||
// Consume all the data from resultsCh in the event writeResponseFunc
|
||||
// fails to consume all the data.
|
||||
for bb := range resultsCh {
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
}
|
||||
err = <-doneCh
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during data fetching: %s", err)
|
||||
@@ -175,6 +209,9 @@ func DeleteHandler(r *http.Request) error {
|
||||
return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("missing `match[]` arg")
|
||||
}
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -200,10 +237,40 @@ var deleteDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||
func LabelValuesHandler(labelName string, w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadline(r)
|
||||
labelValues, err := netstorage.GetLabelValues(labelName, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
|
||||
deadline := getDeadlineForQuery(r)
|
||||
|
||||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse form values: %s", err)
|
||||
}
|
||||
var labelValues []string
|
||||
if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||
var err error
|
||||
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
|
||||
}
|
||||
} else {
|
||||
// Extended functionality that allows filtering by label filters and time range
|
||||
// i.e. /api/v1/label/foo/values?match[]=foobar{baz="abc"}&start=...&end=...
|
||||
// is equivalent to `label_values(foobar{baz="abc"}, foo)` call on the selected
|
||||
// time range in Grafana templating.
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) == 0 {
|
||||
matches = []string{fmt.Sprintf("{%s!=''}", labelName)}
|
||||
}
|
||||
ct := currentTime()
|
||||
end, err := getTime(r, "end", ct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
start, err := getTime(r, "start", end-defaultStep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %s", labelName, matches, start, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -212,17 +279,113 @@ func LabelValuesHandler(labelName string, w http.ResponseWriter, r *http.Request
|
||||
return nil
|
||||
}
|
||||
|
||||
func labelValuesWithMatches(labelName string, matches []string, start, end int64, deadline netstorage.Deadline) ([]string, error) {
|
||||
if len(matches) == 0 {
|
||||
logger.Panicf("BUG: matches must be non-empty")
|
||||
}
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i, tfs := range tagFilterss {
|
||||
// Add `labelName!=''` tag filter in order to filter out series without the labelName.
|
||||
tagFilterss[i] = append(tfs, storage.TagFilter{
|
||||
Key: []byte(labelName),
|
||||
IsNegative: true,
|
||||
})
|
||||
}
|
||||
if start >= end {
|
||||
end = start + defaultStep
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start,
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
|
||||
m := make(map[string]struct{})
|
||||
var mLock sync.Mutex
|
||||
err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
labelValue := rs.MetricName.GetTagValue(labelName)
|
||||
if len(labelValue) == 0 {
|
||||
return
|
||||
}
|
||||
mLock.Lock()
|
||||
m[string(labelValue)] = struct{}{}
|
||||
mLock.Unlock()
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error when data fetching: %s", err)
|
||||
}
|
||||
|
||||
labelValues := make([]string, 0, len(m))
|
||||
for labelValue := range m {
|
||||
labelValues = append(labelValues, labelValue)
|
||||
}
|
||||
sort.Strings(labelValues)
|
||||
return labelValues, nil
|
||||
}
|
||||
|
||||
var labelValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/label/{}/values"}`)
|
||||
|
||||
// LabelsCountHandler processes /api/v1/labels/count request.
|
||||
func LabelsCountHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadlineForQuery(r)
|
||||
labelEntries, err := netstorage.GetLabelEntries(deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`cannot obtain label entries: %s`, err)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteLabelsCountResponse(w, labelEntries)
|
||||
labelsCountDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var labelsCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/labels/count"}`)
|
||||
|
||||
// LabelsHandler processes /api/v1/labels request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
|
||||
func LabelsHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadline(r)
|
||||
labels, err := netstorage.GetLabels(deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain labels: %s", err)
|
||||
deadline := getDeadlineForQuery(r)
|
||||
|
||||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse form values: %s", err)
|
||||
}
|
||||
var labels []string
|
||||
if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||
var err error
|
||||
labels, err = netstorage.GetLabels(deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain labels: %s", err)
|
||||
}
|
||||
} else {
|
||||
// Extended functionality that allows filtering by label filters and time range
|
||||
// i.e. /api/v1/labels?match[]=foobar{baz="abc"}&start=...&end=...
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) == 0 {
|
||||
matches = []string{"{__name__!=''}"}
|
||||
}
|
||||
ct := currentTime()
|
||||
end, err := getTime(r, "end", ct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
start, err := getTime(r, "start", end-defaultStep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
labels, err = labelsWithMatches(matches, start, end, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %s", matches, start, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -231,12 +394,57 @@ func LabelsHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func labelsWithMatches(matches []string, start, end int64, deadline netstorage.Deadline) ([]string, error) {
|
||||
if len(matches) == 0 {
|
||||
logger.Panicf("BUG: matches must be non-empty")
|
||||
}
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if start >= end {
|
||||
end = start + defaultStep
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start,
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
|
||||
m := make(map[string]struct{})
|
||||
var mLock sync.Mutex
|
||||
err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
mLock.Lock()
|
||||
tags := rs.MetricName.Tags
|
||||
for i := range tags {
|
||||
t := &tags[i]
|
||||
m[string(t.Key)] = struct{}{}
|
||||
}
|
||||
m["__name__"] = struct{}{}
|
||||
mLock.Unlock()
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error when data fetching: %s", err)
|
||||
}
|
||||
|
||||
labels := make([]string, 0, len(m))
|
||||
for label := range m {
|
||||
labels = append(labels, label)
|
||||
}
|
||||
sort.Strings(labels)
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
var labelsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/labels"}`)
|
||||
|
||||
// SeriesCountHandler processes /api/v1/series/count request.
|
||||
func SeriesCountHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadline(r)
|
||||
deadline := getDeadlineForQuery(r)
|
||||
n, err := netstorage.GetSeriesCount(deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain series count: %s", err)
|
||||
@@ -260,23 +468,37 @@ func SeriesHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
return fmt.Errorf("cannot parse form values: %s", err)
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
start := getTime(r, "start", ct-defaultStep)
|
||||
end := getTime(r, "end", ct)
|
||||
deadline := getDeadline(r)
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("missing `match[]` arg")
|
||||
}
|
||||
end, err := getTime(r, "end", ct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Do not set start to minTimeMsecs by default as Prometheus does,
|
||||
// since this leads to fetching and scanning all the data from the storage,
|
||||
// which can take a lot of time for big storages.
|
||||
// It is better to set start to end-defaultStep by default.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/91
|
||||
start, err := getTime(r, "start", end-defaultStep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
deadline := getDeadlineForQuery(r)
|
||||
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if start >= end {
|
||||
start = end - defaultStep
|
||||
end = start + defaultStep
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start,
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, deadline)
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
@@ -284,7 +506,7 @@ func SeriesHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
||||
doneCh := make(chan error)
|
||||
go func() {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result) {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
writemetricNameObject(bb, &rs.MetricName)
|
||||
resultsCh <- bb
|
||||
@@ -297,11 +519,10 @@ func SeriesHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
WriteSeriesResponse(w, resultsCh)
|
||||
|
||||
// Consume all the data from resultsCh in the event WriteSeriesResponse
|
||||
// fail to consume all the data.
|
||||
// fails to consume all the data.
|
||||
for bb := range resultsCh {
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
}
|
||||
|
||||
err = <-doneCh
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during data fetching: %s", err)
|
||||
@@ -320,32 +541,40 @@ func QueryHandler(w http.ResponseWriter, r *http.Request) error {
ct := currentTime()

query := r.FormValue("query")
start := getTime(r, "time", ct)
step := getDuration(r, "step", latencyOffset)
deadline := getDeadline(r)
if len(query) == 0 {
return fmt.Errorf("missing `query` arg")
}
start, err := getTime(r, "time", ct)
if err != nil {
return err
}
queryOffset := getLatencyOffsetMilliseconds()
step, err := getDuration(r, "step", queryOffset)
if err != nil {
return err
}
deadline := getDeadlineForQuery(r)
lookbackDelta, err := getMaxLookback(r)
if err != nil {
return err
}

if len(query) > *maxQueryLen {
return fmt.Errorf(`too long query; got %d bytes; mustn't exceed %d bytes`, len(query), *maxQueryLen)
}
if ct-start < latencyOffset {
start -= latencyOffset
if !getBool(r, "nocache") && ct-start < queryOffset {
// Adjust start time only if `nocache` arg isn't set.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/241
start = ct - queryOffset
}
if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
var window int64
if len(windowStr) > 0 {
var err error
window, err = promql.DurationValue(windowStr, step)
if err != nil {
return err
}
window, err := parsePositiveDuration(windowStr, step)
if err != nil {
return fmt.Errorf("cannot parse window: %s", err)
}
var offset int64
if len(offsetStr) > 0 {
var err error
offset, err = promql.DurationValue(offsetStr, step)
if err != nil {
return err
}
offset, err := parseDuration(offsetStr, step)
if err != nil {
return fmt.Errorf("cannot parse offset: %s", err)
}
start -= offset
end := start
@@ -356,14 +585,40 @@ func QueryHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
queryDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
if childQuery, windowStr, stepStr, offsetStr := promql.IsRollup(query); childQuery != "" {
|
||||
newStep, err := parsePositiveDuration(stepStr, step)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse step: %s", err)
|
||||
}
|
||||
if newStep > 0 {
|
||||
step = newStep
|
||||
}
|
||||
window, err := parsePositiveDuration(windowStr, step)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse window: %s", err)
|
||||
}
|
||||
offset, err := parseDuration(offsetStr, step)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse offset: %s", err)
|
||||
}
|
||||
start -= offset
|
||||
end := start
|
||||
start = end - window
|
||||
if err := queryRangeHandler(w, childQuery, start, end, step, r, ct); err != nil {
|
||||
return err
|
||||
}
|
||||
queryDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
ec := promql.EvalConfig{
|
||||
Start: start,
|
||||
End: start,
|
||||
Step: step,
|
||||
Deadline: deadline,
|
||||
Start: start,
|
||||
End: start,
|
||||
Step: step,
|
||||
Deadline: deadline,
|
||||
LookbackDelta: lookbackDelta,
|
||||
}
|
||||
result, err := promql.Exec(&ec, query)
|
||||
result, err := promql.Exec(&ec, query, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot execute %q: %s", query, err)
|
||||
}
|
||||
@@ -376,6 +631,20 @@ func QueryHandler(w http.ResponseWriter, r *http.Request) error {

var queryDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/query"}`)

func parseDuration(s string, step int64) (int64, error) {
if len(s) == 0 {
return 0, nil
}
return metricsql.DurationValue(s, step)
}

func parsePositiveDuration(s string, step int64) (int64, error) {
if len(s) == 0 {
return 0, nil
}
return metricsql.PositiveDurationValue(s, step)
}

// QueryRangeHandler processes /api/v1/query_range request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
@@ -384,60 +653,122 @@ func QueryRangeHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
ct := currentTime()
|
||||
|
||||
query := r.FormValue("query")
|
||||
start := getTime(r, "start", ct-defaultStep)
|
||||
end := getTime(r, "end", ct)
|
||||
step := getDuration(r, "step", defaultStep)
|
||||
deadline := getDeadline(r)
|
||||
if len(query) == 0 {
|
||||
return fmt.Errorf("missing `query` arg")
|
||||
}
|
||||
start, err := getTime(r, "start", ct-defaultStep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
end, err := getTime(r, "end", ct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
step, err := getDuration(r, "step", defaultStep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := queryRangeHandler(w, query, start, end, step, r, ct); err != nil {
|
||||
return err
|
||||
}
|
||||
queryRangeDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
func queryRangeHandler(w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64) error {
|
||||
deadline := getDeadlineForQuery(r)
|
||||
mayCache := !getBool(r, "nocache")
|
||||
lookbackDelta, err := getMaxLookback(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate input args.
|
||||
if len(query) > *maxQueryLen {
|
||||
return fmt.Errorf(`too long query; got %d bytes; mustn't exceed %d bytes`, len(query), *maxQueryLen)
|
||||
}
|
||||
if start > end {
|
||||
start = end
|
||||
end = start + defaultStep
|
||||
}
|
||||
if err := promql.ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
|
||||
return err
|
||||
}
|
||||
start, end = promql.AdjustStartEnd(start, end, step)
|
||||
if mayCache {
|
||||
start, end = promql.AdjustStartEnd(start, end, step)
|
||||
}
|
||||
|
||||
ec := promql.EvalConfig{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Deadline: deadline,
|
||||
MayCache: mayCache,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Deadline: deadline,
|
||||
MayCache: mayCache,
|
||||
LookbackDelta: lookbackDelta,
|
||||
}
|
||||
result, err := promql.Exec(&ec, query)
|
||||
result, err := promql.Exec(&ec, query, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot execute %q: %s", query, err)
|
||||
}
|
||||
if ct-end < latencyOffset {
|
||||
adjustLastPoints(result)
|
||||
queryOffset := getLatencyOffsetMilliseconds()
|
||||
if ct-end < queryOffset {
|
||||
result = adjustLastPoints(result)
|
||||
}
|
||||
|
||||
// Remove NaN values as Prometheus does.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153
|
||||
removeNaNValuesInplace(result)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteQueryRangeResponse(w, result)
|
||||
queryRangeDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeNaNValuesInplace(tss []netstorage.Result) {
|
||||
for i := range tss {
|
||||
ts := &tss[i]
|
||||
hasNaNs := false
|
||||
for _, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
hasNaNs = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasNaNs {
|
||||
// Fast path: nothing to remove.
|
||||
continue
|
||||
}
|
||||
|
||||
// Slow path: remove NaNs.
|
||||
srcTimestamps := ts.Timestamps
|
||||
dstValues := ts.Values[:0]
|
||||
dstTimestamps := ts.Timestamps[:0]
|
||||
for j, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues = append(dstValues, v)
|
||||
dstTimestamps = append(dstTimestamps, srcTimestamps[j])
|
||||
}
|
||||
ts.Values = dstValues
|
||||
ts.Timestamps = dstTimestamps
|
||||
}
|
||||
}
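The in-place filtering used by removeNaNValuesInplace above, which reuses the existing backing arrays via the s[:0] idiom, is a general Go pattern. A minimal standalone sketch follows; dropNaNs is a hypothetical name used only for illustration, not part of the upstream code:

package main

import (
	"fmt"
	"math"
)

// dropNaNs removes NaN values and their timestamps in place,
// reusing the existing backing arrays via the s[:0] trick.
func dropNaNs(timestamps []int64, values []float64) ([]int64, []float64) {
	dstTimestamps := timestamps[:0]
	dstValues := values[:0]
	for i, v := range values {
		if math.IsNaN(v) {
			continue
		}
		dstTimestamps = append(dstTimestamps, timestamps[i])
		dstValues = append(dstValues, v)
	}
	return dstTimestamps, dstValues
}

func main() {
	ts := []int64{100, 200, 300, 400}
	vs := []float64{math.NaN(), 1, math.NaN(), 2}
	ts, vs = dropNaNs(ts, vs)
	fmt.Println(ts, vs) // [200 400] [1 2]
}

Because the destination index never outruns the read index, writing through the s[:0] slices is safe and allocates nothing.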
|
||||
|
||||
var queryRangeDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/query_range"}`)
|
||||
|
||||
// adjustLastPoints substitutes the last point values with the previous
|
||||
// point values, since the last points may contain garbage.
|
||||
func adjustLastPoints(tss []netstorage.Result) {
|
||||
func adjustLastPoints(tss []netstorage.Result) []netstorage.Result {
|
||||
if len(tss) == 0 {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search for the last non-NaN value across all the timeseries.
|
||||
lastNonNaNIdx := -1
|
||||
for i := range tss {
|
||||
r := &tss[i]
|
||||
j := len(r.Values) - 1
|
||||
for j >= 0 && math.IsNaN(r.Values[j]) {
|
||||
values := tss[i].Values
|
||||
j := len(values) - 1
|
||||
for j >= 0 && math.IsNaN(values[j]) {
|
||||
j--
|
||||
}
|
||||
if j > lastNonNaNIdx {
|
||||
@@ -446,76 +777,112 @@ func adjustLastPoints(tss []netstorage.Result) {
|
||||
}
|
||||
if lastNonNaNIdx == -1 {
|
||||
// All timeseries contain only NaNs.
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
// Substitute last three values starting from lastNonNaNIdx
|
||||
// Substitute the last two values starting from lastNonNaNIdx
|
||||
// with the previous values for each timeseries.
|
||||
for i := range tss {
|
||||
r := &tss[i]
|
||||
for j := 0; j < 3; j++ {
|
||||
values := tss[i].Values
|
||||
for j := 0; j < 2; j++ {
|
||||
idx := lastNonNaNIdx + j
|
||||
if idx <= 0 || idx >= len(r.Values) {
|
||||
if idx <= 0 || idx >= len(values) || math.IsNaN(values[idx-1]) {
|
||||
continue
|
||||
}
|
||||
r.Values[idx] = r.Values[idx-1]
|
||||
values[idx] = values[idx-1]
|
||||
}
|
||||
}
|
||||
return tss
|
||||
}
|
||||
|
||||
func getTime(r *http.Request, argKey string, defaultValue int64) int64 {
|
||||
func getTime(r *http.Request, argKey string, defaultValue int64) (int64, error) {
|
||||
argValue := r.FormValue(argKey)
|
||||
if len(argValue) == 0 {
|
||||
return defaultValue
|
||||
return defaultValue, nil
|
||||
}
|
||||
secs, err := strconv.ParseFloat(argValue, 64)
|
||||
if err != nil {
|
||||
// Try parsing string format
|
||||
t, err := time.Parse(time.RFC3339, argValue)
|
||||
if err != nil {
|
||||
return defaultValue
|
||||
// Handle Prometheus'-provided minTime and maxTime.
|
||||
// See https://github.com/prometheus/client_golang/issues/614
|
||||
switch argValue {
|
||||
case prometheusMinTimeFormatted:
|
||||
return minTimeMsecs, nil
|
||||
case prometheusMaxTimeFormatted:
|
||||
return maxTimeMsecs, nil
|
||||
}
|
||||
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
|
||||
}
|
||||
secs = float64(t.UnixNano()) / 1e9
|
||||
}
|
||||
msecs := int64(secs * 1e3)
|
||||
if msecs < minTimeMsecs || msecs > maxTimeMsecs {
|
||||
return defaultValue
|
||||
if msecs < minTimeMsecs {
|
||||
msecs = 0
|
||||
}
|
||||
return msecs
|
||||
if msecs > maxTimeMsecs {
|
||||
msecs = maxTimeMsecs
|
||||
}
|
||||
return msecs, nil
|
||||
}
|
||||
|
||||
var (
|
||||
// These constants were obtained from https://github.com/prometheus/prometheus/blob/91d7175eaac18b00e370965f3a8186cc40bf9f55/web/api/v1/api.go#L442
|
||||
// See https://github.com/prometheus/client_golang/issues/614 for details.
|
||||
prometheusMinTimeFormatted = time.Unix(math.MinInt64/1000+62135596801, 0).UTC().Format(time.RFC3339Nano)
|
||||
prometheusMaxTimeFormatted = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC().Format(time.RFC3339Nano)
|
||||
)
|
||||
|
||||
const (
|
||||
// These values prevent from overflow when storing msec-precision time in int64.
|
||||
minTimeMsecs = int64(-1<<63) / 1e6
|
||||
minTimeMsecs = 0 // use 0 instead of `int64(-1<<63) / 1e6` because the storage engine doesn't actually support negative time
|
||||
maxTimeMsecs = int64(1<<63-1) / 1e6
|
||||
)
|
||||
|
||||
func getDuration(r *http.Request, argKey string, defaultValue int64) int64 {
|
||||
func getDuration(r *http.Request, argKey string, defaultValue int64) (int64, error) {
|
||||
argValue := r.FormValue(argKey)
|
||||
if len(argValue) == 0 {
|
||||
return defaultValue
|
||||
return defaultValue, nil
|
||||
}
|
||||
secs, err := strconv.ParseFloat(argValue, 64)
|
||||
if err != nil {
|
||||
// Try parsing string format
|
||||
d, err := time.ParseDuration(argValue)
|
||||
if err != nil {
|
||||
return defaultValue
|
||||
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
|
||||
}
|
||||
secs = d.Seconds()
|
||||
}
|
||||
msecs := int64(secs * 1e3)
|
||||
if msecs <= 0 || msecs > maxDurationMsecs {
|
||||
return defaultValue
|
||||
return 0, fmt.Errorf("%q=%dms is out of allowed range [%d ... %d]", argKey, msecs, 0, int64(maxDurationMsecs))
|
||||
}
|
||||
return msecs
|
||||
return msecs, nil
|
||||
}
|
||||
|
||||
const maxDurationMsecs = 100 * 365 * 24 * 3600 * 1000
|
||||
|
||||
func getDeadline(r *http.Request) netstorage.Deadline {
|
||||
d := getDuration(r, "timeout", 0)
|
||||
func getMaxLookback(r *http.Request) (int64, error) {
|
||||
d := int64(*maxLookback / time.Millisecond)
|
||||
return getDuration(r, "max_lookback", d)
|
||||
}
|
||||
|
||||
func getDeadlineForQuery(r *http.Request) netstorage.Deadline {
|
||||
dMax := int64(maxQueryDuration.Seconds() * 1e3)
|
||||
return getDeadlineWithMaxDuration(r, dMax)
|
||||
}
|
||||
|
||||
func getDeadlineForExport(r *http.Request) netstorage.Deadline {
|
||||
dMax := int64(maxExportDuration.Seconds() * 1e3)
|
||||
return getDeadlineWithMaxDuration(r, dMax)
|
||||
}
|
||||
|
||||
func getDeadlineWithMaxDuration(r *http.Request, dMax int64) netstorage.Deadline {
|
||||
d, err := getDuration(r, "timeout", 0)
|
||||
if err != nil {
|
||||
d = 0
|
||||
}
|
||||
if d <= 0 || d > dMax {
|
||||
d = dMax
|
||||
}
|
||||
@@ -548,3 +915,11 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
}
return tagFilterss, nil
}

func getLatencyOffsetMilliseconds() int64 {
d := int64(*latencyOffset / time.Millisecond)
if d <= 1000 {
d = 1000
}
return d
}
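As a rough illustration of the timestamp parsing performed by the new getTime above (accept either fractional unix seconds or an RFC3339 string, then clamp to the supported millisecond range), here is a self-contained sketch. parseTimeMsecs and its bounds are illustrative names for this example only, not part of the VictoriaMetrics API:

package main

import (
	"fmt"
	"strconv"
	"time"
)

const (
	minTimeMs = 0
	maxTimeMs = int64(1<<63-1) / 1e6
)

// parseTimeMsecs accepts either fractional unix seconds ("1562529662.324")
// or an RFC3339 timestamp ("2019-07-07T20:01:02Z") and returns milliseconds
// clamped to [minTimeMs ... maxTimeMs].
func parseTimeMsecs(s string) (int64, error) {
	secs, err := strconv.ParseFloat(s, 64)
	if err != nil {
		t, err := time.Parse(time.RFC3339, s)
		if err != nil {
			return 0, fmt.Errorf("cannot parse %q: %s", s, err)
		}
		secs = float64(t.UnixNano()) / 1e9
	}
	msecs := int64(secs * 1e3)
	if msecs < minTimeMs {
		msecs = minTimeMs
	}
	if msecs > maxTimeMs {
		msecs = maxTimeMs
	}
	return msecs, nil
}

func main() {
	for _, s := range []string{"2019-07-07T20:01:02Z", "1562529662.324"} {
		ms, err := parseTimeMsecs(s)
		fmt.Println(s, ms, err)
	}
}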
app/vmselect/prometheus/prometheus_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
func TestRemoveNaNValuesInplace(t *testing.T) {
|
||||
f := func(tss []netstorage.Result, tssExpected []netstorage.Result) {
|
||||
t.Helper()
|
||||
removeNaNValuesInplace(tss)
|
||||
if !reflect.DeepEqual(tss, tssExpected) {
|
||||
t.Fatalf("unexpected result; got %v; want %v", tss, tssExpected)
|
||||
}
|
||||
}
|
||||
|
||||
nan := math.NaN()
|
||||
|
||||
f(nil, nil)
|
||||
f([]netstorage.Result{
|
||||
{
|
||||
Timestamps: []int64{100, 200, 300},
|
||||
Values: []float64{1, 2, 3},
|
||||
},
|
||||
{
|
||||
Timestamps: []int64{100, 200, 300, 400},
|
||||
Values: []float64{nan, nan, 3, nan},
|
||||
},
|
||||
}, []netstorage.Result{
|
||||
{
|
||||
Timestamps: []int64{100, 200, 300},
|
||||
Values: []float64{1, 2, 3},
|
||||
},
|
||||
{
|
||||
Timestamps: []int64{300},
|
||||
Values: []float64{3},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetTimeSuccess(t *testing.T) {
|
||||
f := func(s string, timestampExpected int64) {
|
||||
t.Helper()
|
||||
urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
|
||||
r, err := http.NewRequest("GET", urlStr, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in NewRequest: %s", err)
|
||||
}
|
||||
|
||||
// Verify defaultValue
|
||||
ts, err := getTime(r, "foo", 123)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when obtaining default time from getTime(%q): %s", s, err)
|
||||
}
|
||||
if ts != 123 {
|
||||
t.Fatalf("unexpected default value for getTime(%q); got %d; want %d", s, ts, 123)
|
||||
}
|
||||
|
||||
// Verify timestampExpected
|
||||
ts, err = getTime(r, "s", 123)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in getTime(%q): %s", s, err)
|
||||
}
|
||||
if ts != timestampExpected {
|
||||
t.Fatalf("unexpected timestamp for getTime(%q); got %d; want %d", s, ts, timestampExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("2019-07-07T20:01:02Z", 1562529662000)
|
||||
f("2019-07-07T20:47:40+03:00", 1562521660000)
|
||||
f("-292273086-05-16T16:47:06Z", minTimeMsecs)
|
||||
f("292277025-08-18T07:12:54.999999999Z", maxTimeMsecs)
|
||||
f("1562529662.324", 1562529662324)
|
||||
f("-9223372036.854", minTimeMsecs)
|
||||
f("-9223372036.855", minTimeMsecs)
|
||||
f("9223372036.855", maxTimeMsecs)
|
||||
}
|
||||
|
||||
func TestGetTimeError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
|
||||
r, err := http.NewRequest("GET", urlStr, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in NewRequest: %s", err)
|
||||
}
|
||||
|
||||
// Verify defaultValue
|
||||
ts, err := getTime(r, "foo", 123)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when obtaining default time from getTime(%q): %s", s, err)
|
||||
}
|
||||
if ts != 123 {
|
||||
t.Fatalf("unexpected default value for getTime(%q); got %d; want %d", s, ts, 123)
|
||||
}
|
||||
|
||||
// Verify timestampExpected
|
||||
_, err = getTime(r, "s", 123)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error in getTime(%q)", s)
|
||||
}
|
||||
}
|
||||
|
||||
f("foo")
|
||||
f("2019-07-07T20:01:02Zisdf")
|
||||
f("2019-07-07T20:47:40+03:00123")
|
||||
f("-292273086-05-16T16:47:07Z")
|
||||
f("292277025-08-18T07:12:54.999999998Z")
|
||||
}
|
||||
@@ -3,7 +3,7 @@ SeriesCountResponse generates response for /api/v1/series/count .
|
||||
{% func SeriesCountResponse(n uint64) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":[{%d int(n) %}]
|
||||
"data":[{%dl int64(n) %}]
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
|
||||
@@ -24,7 +24,7 @@ func StreamSeriesCountResponse(qw422016 *qt422016.Writer, n uint64) {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:6
|
||||
qw422016.N().D(int(n))
|
||||
qw422016.N().DL(int64(n))
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:6
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
|
||||
@@ -6,6 +6,12 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/histogram"
|
||||
)
|
||||
|
||||
var aggrFuncs = map[string]aggrFunc{
|
||||
@@ -22,17 +28,28 @@ var aggrFuncs = map[string]aggrFunc{
|
||||
"topk": newAggrFuncTopK(false),
|
||||
"quantile": aggrFuncQuantile,
|
||||
|
||||
// Extended PromQL funcs
|
||||
"median": aggrFuncMedian,
|
||||
"limitk": aggrFuncLimitK,
|
||||
"distinct": newAggrFunc(aggrFuncDistinct),
|
||||
// PromQL extension funcs
|
||||
"median": aggrFuncMedian,
|
||||
"limitk": aggrFuncLimitK,
|
||||
"distinct": newAggrFunc(aggrFuncDistinct),
|
||||
"sum2": newAggrFunc(aggrFuncSum2),
|
||||
"geomean": newAggrFunc(aggrFuncGeomean),
|
||||
"histogram": newAggrFunc(aggrFuncHistogram),
|
||||
"topk_min": newAggrFuncRangeTopK(minValue, false),
|
||||
"topk_max": newAggrFuncRangeTopK(maxValue, false),
|
||||
"topk_avg": newAggrFuncRangeTopK(avgValue, false),
|
||||
"topk_median": newAggrFuncRangeTopK(medianValue, false),
|
||||
"bottomk_min": newAggrFuncRangeTopK(minValue, true),
|
||||
"bottomk_max": newAggrFuncRangeTopK(maxValue, true),
|
||||
"bottomk_avg": newAggrFuncRangeTopK(avgValue, true),
|
||||
"bottomk_median": newAggrFuncRangeTopK(medianValue, true),
|
||||
}
|
||||
|
||||
type aggrFunc func(afa *aggrFuncArg) ([]*timeseries, error)
|
||||
|
||||
type aggrFuncArg struct {
|
||||
args [][]*timeseries
|
||||
ae *aggrFuncExpr
|
||||
ae *metricsql.AggrFuncExpr
|
||||
ec *EvalConfig
|
||||
}
|
||||
|
||||
@@ -41,20 +58,6 @@ func getAggrFunc(s string) aggrFunc {
|
||||
return aggrFuncs[s]
|
||||
}
|
||||
|
||||
func isAggrFunc(s string) bool {
|
||||
return getAggrFunc(s) != nil
|
||||
}
|
||||
|
||||
func isAggrFuncModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "by", "without":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func newAggrFunc(afe func(tss []*timeseries) []*timeseries) aggrFunc {
|
||||
return func(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
@@ -65,33 +68,26 @@ func newAggrFunc(afe func(tss []*timeseries) []*timeseries) aggrFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func aggrFuncExt(afe func(tss []*timeseries) []*timeseries, argOrig []*timeseries, modifier *modifierExpr, keepOriginal bool) ([]*timeseries, error) {
|
||||
arg := copyTimeseriesMetricNames(argOrig)
|
||||
|
||||
// Filter out superfluous tags.
|
||||
var groupTags []string
|
||||
groupOp := "by"
|
||||
if modifier.Op != "" {
|
||||
groupTags = modifier.Args
|
||||
groupOp = strings.ToLower(modifier.Op)
|
||||
}
|
||||
func removeGroupTags(metricName *storage.MetricName, modifier *metricsql.ModifierExpr) {
|
||||
groupOp := strings.ToLower(modifier.Op)
|
||||
switch groupOp {
|
||||
case "by":
|
||||
for _, ts := range arg {
|
||||
ts.MetricName.RemoveTagsOn(groupTags)
|
||||
}
|
||||
case "", "by":
|
||||
metricName.RemoveTagsOn(modifier.Args)
|
||||
case "without":
|
||||
for _, ts := range arg {
|
||||
ts.MetricName.RemoveTagsIgnoring(groupTags)
|
||||
}
|
||||
metricName.RemoveTagsIgnoring(modifier.Args)
|
||||
default:
|
||||
return nil, fmt.Errorf(`unknown modifier: %q`, groupOp)
|
||||
logger.Panicf("BUG: unknown group modifier: %q", groupOp)
|
||||
}
|
||||
}
|
||||
|
||||
func aggrFuncExt(afe func(tss []*timeseries) []*timeseries, argOrig []*timeseries, modifier *metricsql.ModifierExpr, keepOriginal bool) ([]*timeseries, error) {
|
||||
arg := copyTimeseriesMetricNames(argOrig)
|
||||
|
||||
// Perform grouping.
|
||||
m := make(map[string][]*timeseries)
|
||||
bb := bbPool.Get()
|
||||
for i, ts := range arg {
|
||||
removeGroupTags(&ts.MetricName, modifier)
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
if keepOriginal {
|
||||
ts = argOrig[i]
|
||||
@@ -100,10 +96,18 @@ func aggrFuncExt(afe func(tss []*timeseries) []*timeseries, argOrig []*timeserie
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
|
||||
srcTssCount := 0
|
||||
dstTssCount := 0
|
||||
rvs := make([]*timeseries, 0, len(m))
|
||||
for _, tss := range m {
|
||||
rv := afe(tss)
|
||||
rvs = append(rvs, rv...)
|
||||
srcTssCount += len(tss)
|
||||
dstTssCount += len(rv)
|
||||
if dstTssCount > 2000 && dstTssCount > 16*srcTssCount {
|
||||
// This looks like count_values explosion.
|
||||
return nil, fmt.Errorf(`too many timeseries after aggragation; got %d; want less than %d`, dstTssCount, 16*srcTssCount)
|
||||
}
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
@@ -132,6 +136,84 @@ func aggrFuncSum(tss []*timeseries) []*timeseries {
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncSum2(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
sum2 := float64(0)
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
sum2 += v * v
|
||||
count++
|
||||
}
|
||||
if count == 0 {
|
||||
sum2 = nan
|
||||
}
|
||||
dst.Values[i] = sum2
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncGeomean(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to geomean.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
p := 1.0
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
p *= v
|
||||
count++
|
||||
}
|
||||
if count == 0 {
|
||||
p = nan
|
||||
}
|
||||
dst.Values[i] = math.Pow(p, 1/float64(count))
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncHistogram(tss []*timeseries) []*timeseries {
|
||||
var h metrics.Histogram
|
||||
m := make(map[string]*timeseries)
|
||||
for i := range tss[0].Values {
|
||||
h.Reset()
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
h.Update(v)
|
||||
}
|
||||
h.VisitNonZeroBuckets(func(vmrange string, count uint64) {
|
||||
ts := m[vmrange]
|
||||
if ts == nil {
|
||||
ts = &timeseries{}
|
||||
ts.CopyFromShallowTimestamps(tss[0])
|
||||
ts.MetricName.RemoveTag("vmrange")
|
||||
ts.MetricName.AddTag("vmrange", vmrange)
|
||||
values := ts.Values
|
||||
for k := range values {
|
||||
values[k] = 0
|
||||
}
|
||||
m[vmrange] = ts
|
||||
}
|
||||
ts.Values[i] = float64(count)
|
||||
})
|
||||
}
|
||||
rvs := make([]*timeseries, 0, len(m))
|
||||
for _, ts := range m {
|
||||
rvs = append(rvs, ts)
|
||||
}
|
||||
return vmrangeBucketsToLE(rvs)
|
||||
}
|
||||
|
||||
func aggrFuncMin(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to min.
|
||||
@@ -260,7 +342,11 @@ func aggrFuncCount(tss []*timeseries) []*timeseries {
|
||||
}
|
||||
count++
|
||||
}
|
||||
dst.Values[i] = float64(count)
|
||||
v := float64(count)
|
||||
if count == 0 {
|
||||
v = nan
|
||||
}
|
||||
dst.Values[i] = v
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
@@ -297,10 +383,32 @@ func aggrFuncCountValues(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove dstLabel from grouping like Prometheus does.
|
||||
modifier := &afa.ae.Modifier
|
||||
switch strings.ToLower(modifier.Op) {
|
||||
case "without":
|
||||
modifier.Args = append(modifier.Args, dstLabel)
|
||||
case "by":
|
||||
dstArgs := modifier.Args[:0]
|
||||
for _, arg := range modifier.Args {
|
||||
if arg == dstLabel {
|
||||
continue
|
||||
}
|
||||
dstArgs = append(dstArgs, arg)
|
||||
}
|
||||
modifier.Args = dstArgs
|
||||
default:
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
afe := func(tss []*timeseries) []*timeseries {
|
||||
m := make(map[float64]bool)
|
||||
for _, ts := range tss {
|
||||
for _, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
m[v] = true
|
||||
}
|
||||
}
|
||||
@@ -313,7 +421,7 @@ func aggrFuncCountValues(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
var rvs []*timeseries
|
||||
for _, v := range values {
|
||||
var dst timeseries
|
||||
dst.CopyFrom(tss[0])
|
||||
dst.CopyFromShallowTimestamps(tss[0])
|
||||
dst.MetricName.RemoveTag(dstLabel)
|
||||
dst.MetricName.AddTag(dstLabel, strconv.FormatFloat(v, 'g', -1, 64))
|
||||
for i := range dst.Values {
|
||||
@@ -347,37 +455,138 @@ func newAggrFuncTopK(isReverse bool) aggrFunc {
|
||||
return nil, err
|
||||
}
|
||||
afe := func(tss []*timeseries) []*timeseries {
|
||||
rvs := tss
|
||||
for n := range rvs[0].Values {
|
||||
sort.Slice(rvs, func(i, j int) bool {
|
||||
a := rvs[i].Values[n]
|
||||
b := rvs[j].Values[n]
|
||||
cmp := lessWithNaNs(a, b)
|
||||
for n := range tss[0].Values {
|
||||
sort.Slice(tss, func(i, j int) bool {
|
||||
a := tss[i].Values[n]
|
||||
b := tss[j].Values[n]
|
||||
if isReverse {
|
||||
cmp = !cmp
|
||||
a, b = b, a
|
||||
}
|
||||
return cmp
|
||||
return lessWithNaNs(a, b)
|
||||
})
|
||||
if math.IsNaN(ks[n]) {
|
||||
ks[n] = 0
|
||||
}
|
||||
k := int(ks[n])
|
||||
if k < 0 {
|
||||
k = 0
|
||||
}
|
||||
if k > len(rvs) {
|
||||
k = len(rvs)
|
||||
}
|
||||
for _, ts := range rvs[:len(rvs)-k] {
|
||||
ts.Values[n] = nan
|
||||
}
|
||||
fillNaNsAtIdx(n, ks[n], tss)
|
||||
}
|
||||
return rvs
|
||||
return removeNaNs(tss)
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, true)
|
||||
}
|
||||
}
|
||||
|
||||
type tsWithValue struct {
|
||||
ts *timeseries
|
||||
value float64
|
||||
}
|
||||
|
||||
func newAggrFuncRangeTopK(f func(values []float64) float64, isReverse bool) aggrFunc {
|
||||
return func(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ks, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
afe := func(tss []*timeseries) []*timeseries {
|
||||
maxs := make([]tsWithValue, len(tss))
|
||||
for i, ts := range tss {
|
||||
value := f(ts.Values)
|
||||
maxs[i] = tsWithValue{
|
||||
ts: ts,
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
sort.Slice(maxs, func(i, j int) bool {
|
||||
a := maxs[i].value
|
||||
b := maxs[j].value
|
||||
if isReverse {
|
||||
a, b = b, a
|
||||
}
|
||||
return lessWithNaNs(a, b)
|
||||
})
|
||||
for i := range maxs {
|
||||
tss[i] = maxs[i].ts
|
||||
}
|
||||
for i, k := range ks {
|
||||
fillNaNsAtIdx(i, k, tss)
|
||||
}
|
||||
return removeNaNs(tss)
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, true)
|
||||
}
|
||||
}
|
||||
|
||||
func fillNaNsAtIdx(idx int, k float64, tss []*timeseries) {
|
||||
if math.IsNaN(k) {
|
||||
k = 0
|
||||
}
|
||||
kn := int(k)
|
||||
if kn < 0 {
|
||||
kn = 0
|
||||
}
|
||||
if kn > len(tss) {
|
||||
kn = len(tss)
|
||||
}
|
||||
for _, ts := range tss[:len(tss)-kn] {
|
||||
ts.Values[idx] = nan
|
||||
}
|
||||
}
|
||||
|
||||
func minValue(values []float64) float64 {
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
min := values[0]
|
||||
for _, v := range values[1:] {
|
||||
if v < min {
|
||||
min = v
|
||||
}
|
||||
}
|
||||
return min
|
||||
}
|
||||
|
||||
func maxValue(values []float64) float64 {
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
max := values[0]
|
||||
for _, v := range values[1:] {
|
||||
if v > max {
|
||||
max = v
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
func avgValue(values []float64) float64 {
|
||||
sum := float64(0)
|
||||
count := 0
|
||||
for _, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
sum += v
|
||||
}
|
||||
if count == 0 {
|
||||
return nan
|
||||
}
|
||||
return sum / float64(count)
|
||||
}
|
||||
|
||||
func medianValue(values []float64) float64 {
|
||||
h := histogram.GetFast()
|
||||
for _, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
h.Update(v)
|
||||
}
|
||||
value := h.Quantile(0.5)
|
||||
histogram.PutFast(h)
|
||||
return value
|
||||
}
|
||||
|
||||
func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
@@ -457,6 +666,7 @@ func newAggrQuantileFunc(phis []float64) func(tss []*timeseries) []*timeseries {
|
||||
idx := int(math.Round(float64(len(tss)-1) * phi))
|
||||
dst.Values[n] = tss[idx].Values[n]
|
||||
}
|
||||
tss[0] = dst
|
||||
return tss[:1]
|
||||
}
|
||||
}
|
||||
|
||||
app/vmselect/promql/aggr_incremental.go (new file, 452 lines)
@@ -0,0 +1,452 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
)
|
||||
|
||||
// callbacks for optimized incremental calculations for aggregate functions
|
||||
// over rollups over metricsql.MetricExpr.
|
||||
//
|
||||
// These calculations save RAM for aggregates over big number of time series.
|
||||
var incrementalAggrFuncCallbacksMap = map[string]*incrementalAggrFuncCallbacks{
|
||||
"sum": {
|
||||
updateAggrFunc: updateAggrSum,
|
||||
mergeAggrFunc: mergeAggrSum,
|
||||
finalizeAggrFunc: finalizeAggrCommon,
|
||||
},
|
||||
"min": {
|
||||
updateAggrFunc: updateAggrMin,
|
||||
mergeAggrFunc: mergeAggrMin,
|
||||
finalizeAggrFunc: finalizeAggrCommon,
|
||||
},
|
||||
"max": {
|
||||
updateAggrFunc: updateAggrMax,
|
||||
mergeAggrFunc: mergeAggrMax,
|
||||
finalizeAggrFunc: finalizeAggrCommon,
|
||||
},
|
||||
"avg": {
|
||||
updateAggrFunc: updateAggrAvg,
|
||||
mergeAggrFunc: mergeAggrAvg,
|
||||
finalizeAggrFunc: finalizeAggrAvg,
|
||||
},
|
||||
"count": {
|
||||
updateAggrFunc: updateAggrCount,
|
||||
mergeAggrFunc: mergeAggrCount,
|
||||
finalizeAggrFunc: finalizeAggrCount,
|
||||
},
|
||||
"sum2": {
|
||||
updateAggrFunc: updateAggrSum2,
|
||||
mergeAggrFunc: mergeAggrSum2,
|
||||
finalizeAggrFunc: finalizeAggrCommon,
|
||||
},
|
||||
"geomean": {
|
||||
updateAggrFunc: updateAggrGeomean,
|
||||
mergeAggrFunc: mergeAggrGeomean,
|
||||
finalizeAggrFunc: finalizeAggrGeomean,
|
||||
},
|
||||
}
|
||||
|
||||
type incrementalAggrFuncContext struct {
|
||||
ae *metricsql.AggrFuncExpr
|
||||
|
||||
mLock sync.Mutex
|
||||
m map[uint]map[string]*incrementalAggrContext
|
||||
|
||||
callbacks *incrementalAggrFuncCallbacks
|
||||
}
|
||||
|
||||
func newIncrementalAggrFuncContext(ae *metricsql.AggrFuncExpr, callbacks *incrementalAggrFuncCallbacks) *incrementalAggrFuncContext {
|
||||
return &incrementalAggrFuncContext{
|
||||
ae: ae,
|
||||
m: make(map[uint]map[string]*incrementalAggrContext),
|
||||
callbacks: callbacks,
|
||||
}
|
||||
}
|
||||
|
||||
func (iafc *incrementalAggrFuncContext) updateTimeseries(ts *timeseries, workerID uint) {
|
||||
iafc.mLock.Lock()
|
||||
m := iafc.m[workerID]
|
||||
if m == nil {
|
||||
m = make(map[string]*incrementalAggrContext, 1)
|
||||
iafc.m[workerID] = m
|
||||
}
|
||||
iafc.mLock.Unlock()
|
||||
|
||||
removeGroupTags(&ts.MetricName, &iafc.ae.Modifier)
|
||||
bb := bbPool.Get()
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
iac := m[string(bb.B)]
|
||||
if iac == nil {
|
||||
tsAggr := &timeseries{
|
||||
Values: make([]float64, len(ts.Values)),
|
||||
Timestamps: ts.Timestamps,
|
||||
denyReuse: true,
|
||||
}
|
||||
tsAggr.MetricName.CopyFrom(&ts.MetricName)
|
||||
iac = &incrementalAggrContext{
|
||||
ts: tsAggr,
|
||||
values: make([]float64, len(ts.Values)),
|
||||
}
|
||||
m[string(bb.B)] = iac
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
iafc.callbacks.updateAggrFunc(iac, ts.Values)
|
||||
}
|
||||
|
||||
func (iafc *incrementalAggrFuncContext) finalizeTimeseries() []*timeseries {
|
||||
// There is no need in iafc.mLock.Lock here, since finalizeTimeseries must be called
|
||||
// without concurrent goroutines touching iafc.
|
||||
mGlobal := make(map[string]*incrementalAggrContext)
|
||||
mergeAggrFunc := iafc.callbacks.mergeAggrFunc
|
||||
for _, m := range iafc.m {
|
||||
for k, iac := range m {
|
||||
iacGlobal := mGlobal[k]
|
||||
if iacGlobal == nil {
|
||||
mGlobal[k] = iac
|
||||
continue
|
||||
}
|
||||
mergeAggrFunc(iacGlobal, iac)
|
||||
}
|
||||
}
|
||||
tss := make([]*timeseries, 0, len(mGlobal))
|
||||
finalizeAggrFunc := iafc.callbacks.finalizeAggrFunc
|
||||
for _, iac := range mGlobal {
|
||||
finalizeAggrFunc(iac)
|
||||
tss = append(tss, iac.ts)
|
||||
}
|
||||
return tss
|
||||
}
|
||||
|
||||
type incrementalAggrFuncCallbacks struct {
|
||||
updateAggrFunc func(iac *incrementalAggrContext, values []float64)
|
||||
mergeAggrFunc func(dst, src *incrementalAggrContext)
|
||||
finalizeAggrFunc func(iac *incrementalAggrContext)
|
||||
}
|
||||
|
||||
func getIncrementalAggrFuncCallbacks(name string) *incrementalAggrFuncCallbacks {
|
||||
name = strings.ToLower(name)
|
||||
return incrementalAggrFuncCallbacksMap[name]
|
||||
}
|
||||
|
||||
type incrementalAggrContext struct {
|
||||
ts *timeseries
|
||||
values []float64
|
||||
}
|
||||
|
||||
func finalizeAggrCommon(iac *incrementalAggrContext) {
|
||||
counts := iac.values
|
||||
dstValues := iac.ts.Values
|
||||
_ = dstValues[len(counts)-1]
|
||||
for i, v := range counts {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrSum(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrSum(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrMin(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v < dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrMin(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v < dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrMax(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v > dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrMax(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v > dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrAvg(iac *incrementalAggrContext, values []float64) {
|
||||
// Do not use `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation,
|
||||
// since it is slower and has no obvious benefits in increased precision.
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
dstCounts[i]++
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrAvg(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = srcCounts[i]
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
dstCounts[i] += srcCounts[i]
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeAggrAvg(iac *incrementalAggrContext) {
|
||||
dstValues := iac.ts.Values
|
||||
counts := iac.values
|
||||
_ = dstValues[len(counts)-1]
|
||||
for i, v := range counts {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
continue
|
||||
}
|
||||
dstValues[i] /= v
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrCount(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
_ = dstValues[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues[i]++
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrCount(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeAggrCount(iac *incrementalAggrContext) {
|
||||
dstValues := iac.ts.Values
|
||||
for i, v := range dstValues {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrSum2(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v * v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v * v
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrSum2(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrGeomean(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] *= v
|
||||
dstCounts[i]++
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrGeomean(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = srcCounts[i]
|
||||
continue
|
||||
}
|
||||
dstValues[i] *= v
|
||||
dstCounts[i] += srcCounts[i]
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeAggrGeomean(iac *incrementalAggrContext) {
|
||||
dstValues := iac.ts.Values
|
||||
counts := iac.values
|
||||
_ = dstValues[len(counts)-1]
|
||||
for i, v := range counts {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
continue
|
||||
}
|
||||
dstValues[i] = math.Pow(dstValues[i], 1/v)
|
||||
}
|
||||
}
|
||||
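The per-worker maps plus a single-threaded merge/finalize step used by incrementalAggrFuncContext above can be illustrated with a much smaller sketch. incrSum below is a toy "sum by key" aggregator with hypothetical names, not the upstream API, but it follows the same idea: the lock only guards creation of a worker's private bucket, and all merging happens after the workers have stopped:

package main

import (
	"fmt"
	"sync"
)

// incrSum accumulates partial sums per worker, so updates after bucket
// creation need no locking: each workerID map is touched by one goroutine.
type incrSum struct {
	mu sync.Mutex
	m  map[int]map[string]float64 // workerID -> key -> partial sum
}

func newIncrSum() *incrSum {
	return &incrSum{m: make(map[int]map[string]float64)}
}

func (s *incrSum) update(workerID int, key string, v float64) {
	s.mu.Lock()
	wm := s.m[workerID]
	if wm == nil {
		wm = make(map[string]float64)
		s.m[workerID] = wm
	}
	s.mu.Unlock()
	wm[key] += v // only this worker writes wm, so no lock is needed here
}

// finalize must be called after all workers have stopped.
func (s *incrSum) finalize() map[string]float64 {
	total := make(map[string]float64)
	for _, wm := range s.m {
		for k, v := range wm {
			total[k] += v
		}
	}
	return total
}

func main() {
	s := newIncrSum()
	var wg sync.WaitGroup
	for w := 0; w < 3; w++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				s.update(id, "metric", 1)
			}
		}(w)
	}
	wg.Wait()
	fmt.Println(s.finalize()) // map[metric:300]
}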
app/vmselect/promql/aggr_incremental_test.go (new file, 190 lines)
@@ -0,0 +1,190 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
)
|
||||
|
||||
func TestIncrementalAggr(t *testing.T) {
|
||||
defaultTimestamps := []int64{100e3, 200e3, 300e3, 400e3}
|
||||
values := [][]float64{
|
||||
{1, nan, 2, nan},
|
||||
{3, nan, nan, 4},
|
||||
{nan, nan, 5, 6},
|
||||
{7, nan, 8, 9},
|
||||
{4, nan, nan, nan},
|
||||
{2, nan, 3, 2},
|
||||
{0, nan, 1, 1},
|
||||
}
|
||||
tssSrc := make([]*timeseries, len(values))
|
||||
for i, vs := range values {
|
||||
ts := &timeseries{
|
||||
Timestamps: defaultTimestamps,
|
||||
Values: vs,
|
||||
}
|
||||
tssSrc[i] = ts
|
||||
}
|
||||
|
||||
copyTimeseries := func(tssSrc []*timeseries) []*timeseries {
|
||||
tssDst := make([]*timeseries, len(tssSrc))
|
||||
for i, tsSrc := range tssSrc {
|
||||
var tsDst timeseries
|
||||
tsDst.CopyFromShallowTimestamps(tsSrc)
|
||||
tssDst[i] = &tsDst
|
||||
}
|
||||
return tssDst
|
||||
}
|
||||
|
||||
f := func(name string, valuesExpected []float64) {
|
||||
t.Helper()
|
||||
callbacks := getIncrementalAggrFuncCallbacks(name)
|
||||
ae := &metricsql.AggrFuncExpr{
|
||||
Name: name,
|
||||
}
|
||||
tssExpected := []*timeseries{{
|
||||
Timestamps: defaultTimestamps,
|
||||
Values: valuesExpected,
|
||||
}}
|
||||
// run the test multiple times to make sure there are no side effects on concurrency
|
||||
for i := 0; i < 10; i++ {
|
||||
iafc := newIncrementalAggrFuncContext(ae, callbacks)
|
||||
tssSrcCopy := copyTimeseries(tssSrc)
|
||||
if err := testIncrementalParallelAggr(iafc, tssSrcCopy, tssExpected); err != nil {
|
||||
t.Fatalf("unexpected error on iteration %d: %s", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("sum", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{17, nan, 19, 22}
|
||||
f("sum", valuesExpected)
|
||||
})
|
||||
t.Run("min", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{0, nan, 1, 1}
|
||||
f("min", valuesExpected)
|
||||
})
|
||||
t.Run("max", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{7, nan, 8, 9}
|
||||
f("max", valuesExpected)
|
||||
})
|
||||
t.Run("avg", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{2.8333333333333335, nan, 3.8, 4.4}
|
||||
f("avg", valuesExpected)
|
||||
})
|
||||
t.Run("count", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{6, nan, 5, 5}
|
||||
f("count", valuesExpected)
|
||||
})
|
||||
t.Run("sum2", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{79, nan, 103, 138}
|
||||
f("sum2", valuesExpected)
|
||||
})
|
||||
t.Run("geomean", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
valuesExpected := []float64{0, nan, 2.9925557394776896, 3.365865436338599}
|
||||
f("geomean", valuesExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssExpected []*timeseries) error {
|
||||
const workersCount = 3
|
||||
tsCh := make(chan *timeseries)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(workersCount)
|
||||
for i := 0; i < workersCount; i++ {
|
||||
go func(workerID uint) {
|
||||
defer wg.Done()
|
||||
for ts := range tsCh {
|
||||
runtime.Gosched() // allow other goroutines to perform the work
|
||||
iafc.updateTimeseries(ts, workerID)
|
||||
}
|
||||
}(uint(i))
|
||||
}
|
||||
for _, ts := range tssSrc {
|
||||
tsCh <- ts
|
||||
}
|
||||
close(tsCh)
|
||||
wg.Wait()
|
||||
tssActual := iafc.finalizeTimeseries()
|
||||
if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
|
||||
return fmt.Errorf("%s; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func expectTimeseriesEqual(actual, expected []*timeseries) error {
|
||||
if len(actual) != len(expected) {
|
||||
return fmt.Errorf("unexpected number of time series; got %d; want %d", len(actual), len(expected))
|
||||
}
|
||||
mActual := timeseriesToMap(actual)
|
||||
mExpected := timeseriesToMap(expected)
|
||||
if len(mActual) != len(mExpected) {
|
||||
return fmt.Errorf("unexpected number of time series after converting to map; got %d; want %d", len(mActual), len(mExpected))
|
||||
}
|
||||
for k, tsExpected := range mExpected {
|
||||
tsActual := mActual[k]
|
||||
if tsActual == nil {
|
||||
return fmt.Errorf("missing time series for key=%q", k)
|
||||
}
|
||||
if err := expectTsEqual(tsActual, tsExpected); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeseriesToMap(tss []*timeseries) map[string]*timeseries {
|
||||
m := make(map[string]*timeseries, len(tss))
|
||||
for _, ts := range tss {
|
||||
k := ts.MetricName.Marshal(nil)
|
||||
m[string(k)] = ts
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func expectTsEqual(actual, expected *timeseries) error {
|
||||
mnActual := actual.MetricName.Marshal(nil)
|
||||
mnExpected := expected.MetricName.Marshal(nil)
|
||||
if string(mnActual) != string(mnExpected) {
|
||||
return fmt.Errorf("unexpected metric name; got %q; want %q", mnActual, mnExpected)
|
||||
}
|
||||
if !reflect.DeepEqual(actual.Timestamps, expected.Timestamps) {
|
||||
return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
|
||||
}
|
||||
if err := compareValues(actual.Values, expected.Values); err != nil {
|
||||
return fmt.Errorf("%s; actual %v; expected %v", err, actual.Values, expected.Values)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func compareValues(vs1, vs2 []float64) error {
|
||||
if len(vs1) != len(vs2) {
|
||||
return fmt.Errorf("unexpected number of values; got %d; want %d", len(vs1), len(vs2))
|
||||
}
|
||||
for i, v1 := range vs1 {
|
||||
v2 := vs2[i]
|
||||
if math.IsNaN(v1) {
|
||||
if !math.IsNaN(v2) {
|
||||
return fmt.Errorf("unexpected value; got %v; want %v", v1, v2)
|
||||
}
|
||||
continue
|
||||
}
|
||||
eps := math.Abs(v1 - v2)
|
||||
if eps > 1e-14 {
|
||||
return fmt.Errorf("unexpected value; got %v; want %v", v1, v2)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
app/vmselect/promql/arch.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package promql

import "unsafe"

const maxByteSliceLen = 1<<(31+9*(unsafe.Sizeof(int(0))/8)) - 1
@@ -1,3 +0,0 @@
package promql

const maxByteSliceLen = 1 << 40
@@ -1,3 +0,0 @@
package promql

const maxByteSliceLen = 1<<31 - 1
@@ -6,24 +6,26 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql/binaryop"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
var binaryOpFuncs = map[string]binaryOpFunc{
|
||||
"+": newBinaryOpArithFunc(binaryOpPlus),
|
||||
"-": newBinaryOpArithFunc(binaryOpMinus),
|
||||
"*": newBinaryOpArithFunc(binaryOpMul),
|
||||
"/": newBinaryOpArithFunc(binaryOpDiv),
|
||||
"%": newBinaryOpArithFunc(binaryOpMod),
|
||||
"^": newBinaryOpArithFunc(binaryOpPow),
|
||||
"+": newBinaryOpArithFunc(binaryop.Plus),
|
||||
"-": newBinaryOpArithFunc(binaryop.Minus),
|
||||
"*": newBinaryOpArithFunc(binaryop.Mul),
|
||||
"/": newBinaryOpArithFunc(binaryop.Div),
|
||||
"%": newBinaryOpArithFunc(binaryop.Mod),
|
||||
"^": newBinaryOpArithFunc(binaryop.Pow),
|
||||
|
||||
// cmp ops
|
||||
"==": newBinaryOpCmpFunc(binaryOpEq),
|
||||
"!=": newBinaryOpCmpFunc(binaryOpNeq),
|
||||
">": newBinaryOpCmpFunc(binaryOpGt),
|
||||
"<": newBinaryOpCmpFunc(binaryOpLt),
|
||||
">=": newBinaryOpCmpFunc(binaryOpGte),
|
||||
"<=": newBinaryOpCmpFunc(binaryOpLte),
|
||||
"==": newBinaryOpCmpFunc(binaryop.Eq),
|
||||
"!=": newBinaryOpCmpFunc(binaryop.Neq),
|
||||
">": newBinaryOpCmpFunc(binaryop.Gt),
|
||||
"<": newBinaryOpCmpFunc(binaryop.Lt),
|
||||
">=": newBinaryOpCmpFunc(binaryop.Gte),
|
||||
"<=": newBinaryOpCmpFunc(binaryop.Lte),
|
||||
|
||||
// logical set ops
|
||||
"and": binaryOpAnd,
|
||||
@@ -31,38 +33,9 @@ var binaryOpFuncs = map[string]binaryOpFunc{
|
||||
"unless": binaryOpUnless,
|
||||
|
||||
// New op
|
||||
"if": newBinaryOpArithFunc(binaryOpIf),
|
||||
"ifnot": newBinaryOpArithFunc(binaryOpIfnot),
|
||||
"default": newBinaryOpArithFunc(binaryOpDefault),
|
||||
}
|
||||
|
||||
var binaryOpPriorities = map[string]int{
|
||||
"default": -1,
|
||||
|
||||
"if": 0,
|
||||
"ifnot": 0,
|
||||
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operator-precedence
|
||||
"or": 1,
|
||||
|
||||
"and": 2,
|
||||
"unless": 2,
|
||||
|
||||
"==": 3,
|
||||
"!=": 3,
|
||||
"<": 3,
|
||||
">": 3,
|
||||
"<=": 3,
|
||||
">=": 3,
|
||||
|
||||
"+": 4,
|
||||
"-": 4,
|
||||
|
||||
"*": 5,
|
||||
"/": 5,
|
||||
"%": 5,
|
||||
|
||||
"^": 6,
|
||||
"if": newBinaryOpArithFunc(binaryop.If),
|
||||
"ifnot": newBinaryOpArithFunc(binaryop.Ifnot),
|
||||
"default": newBinaryOpArithFunc(binaryop.Default),
|
||||
}
|
||||
|
||||
func getBinaryOpFunc(op string) binaryOpFunc {
|
||||
@@ -70,144 +43,8 @@ func getBinaryOpFunc(op string) binaryOpFunc {
|
||||
return binaryOpFuncs[op]
|
||||
}
|
||||
|
||||
func isBinaryOp(op string) bool {
|
||||
return getBinaryOpFunc(op) != nil
|
||||
}
|
||||
|
||||
func binaryOpPriority(op string) int {
|
||||
op = strings.ToLower(op)
|
||||
return binaryOpPriorities[op]
|
||||
}
|
||||
|
||||
func scanBinaryOpPrefix(s string) int {
|
||||
n := 0
|
||||
for op := range binaryOpFuncs {
|
||||
if len(s) < len(op) {
|
||||
continue
|
||||
}
|
||||
ss := strings.ToLower(s[:len(op)])
|
||||
if ss == op && len(op) > n {
|
||||
n = len(op)
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func isRightAssociativeBinaryOp(op string) bool {
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operator-precedence
|
||||
return op == "^"
|
||||
}
|
||||
|
||||
func isBinaryOpGroupModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching
|
||||
case "on", "ignoring":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isBinaryOpJoinModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "group_left", "group_right":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isBinaryOpBoolModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
return s == "bool"
|
||||
}
|
||||
|
||||
func isBinaryOpCmp(op string) bool {
|
||||
switch op {
|
||||
case "==", "!=", ">", "<", ">=", "<=":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isBinaryOpLogicalSet(op string) bool {
|
||||
op = strings.ToLower(op)
|
||||
switch op {
|
||||
case "and", "or", "unless":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func binaryOpConstants(op string, left, right float64, isBool bool) float64 {
|
||||
if isBinaryOpCmp(op) {
|
||||
evalCmp := func(cf func(left, right float64) bool) float64 {
|
||||
if isBool {
|
||||
if cf(left, right) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if cf(left, right) {
|
||||
return left
|
||||
}
|
||||
return nan
|
||||
}
|
||||
switch op {
|
||||
case "==":
|
||||
left = evalCmp(binaryOpEq)
|
||||
case "!=":
|
||||
left = evalCmp(binaryOpNeq)
|
||||
case ">":
|
||||
left = evalCmp(binaryOpGt)
|
||||
case "<":
|
||||
left = evalCmp(binaryOpLt)
|
||||
case ">=":
|
||||
left = evalCmp(binaryOpGte)
|
||||
case "<=":
|
||||
left = evalCmp(binaryOpLte)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected comparison binaryOp: %q", op)
|
||||
}
|
||||
} else {
|
||||
switch op {
|
||||
case "+":
|
||||
left = binaryOpPlus(left, right)
|
||||
case "-":
|
||||
left = binaryOpMinus(left, right)
|
||||
case "*":
|
||||
left = binaryOpMul(left, right)
|
||||
case "/":
|
||||
left = binaryOpDiv(left, right)
|
||||
case "%":
|
||||
left = binaryOpMod(left, right)
|
||||
case "^":
|
||||
left = binaryOpPow(left, right)
|
||||
case "and":
|
||||
// Nothing to do
|
||||
case "or":
|
||||
// Nothing to do
|
||||
case "unless":
|
||||
left = nan
|
||||
case "default":
|
||||
left = binaryOpDefault(left, right)
|
||||
case "if":
|
||||
left = binaryOpIf(left, right)
|
||||
case "ifnot":
|
||||
left = binaryOpIfnot(left, right)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected non-comparison binaryOp: %q", op)
|
||||
}
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
type binaryOpFuncArg struct {
|
||||
be *binaryOpExpr
|
||||
be *metricsql.BinaryOpExpr
|
||||
left []*timeseries
|
||||
right []*timeseries
|
||||
}
|
||||
@@ -260,18 +97,21 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp
|
||||
dstValues[j] = bf(a, b, isBool)
|
||||
}
|
||||
}
|
||||
// Optimization: remove time series containing only NaNs.
|
||||
// This is quite common after applying filters like `q > 0`.
|
||||
dst = removeNaNs(dst)
|
||||
return dst, nil
|
||||
}
|
||||
}
|
||||
|
||||
func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
|
||||
func adjustBinaryOpTags(be *metricsql.BinaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
|
||||
if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 {
|
||||
if isScalar(left) {
|
||||
// Fast path: `scalar op vector`
|
||||
rvsLeft := make([]*timeseries, len(right))
|
||||
tsLeft := left[0]
|
||||
for i, tsRight := range right {
|
||||
tsRight.MetricName.ResetMetricGroup()
|
||||
resetMetricGroupIfRequired(be, tsRight)
|
||||
rvsLeft[i] = tsLeft
|
||||
}
|
||||
return rvsLeft, right, right, nil
|
||||
@@ -281,7 +121,7 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
|
||||
rvsRight := make([]*timeseries, len(left))
|
||||
tsRight := right[0]
|
||||
for i, tsLeft := range left {
|
||||
tsLeft.MetricName.ResetMetricGroup()
|
||||
resetMetricGroupIfRequired(be, tsLeft)
|
||||
rvsRight[i] = tsRight
|
||||
}
|
||||
return left, rvsRight, left, nil
|
||||
@@ -289,19 +129,14 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
|
||||
}
|
||||
|
||||
// Slow path: `vector op vector` or `a op {on|ignoring} {group_left|group_right} b`
|
||||
ensureOneX := func(side string, tss []*timeseries) error {
|
||||
if len(tss) == 0 {
|
||||
logger.Panicf("BUG: tss must contain at least one value")
|
||||
}
|
||||
if len(tss) == 1 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf(`duplicate timeseries on the %s side of %q: %s %s`, side, be.Op, stringMetricTags(&tss[0].MetricName), be.GroupModifier.AppendString(nil))
|
||||
}
|
||||
var rvsLeft, rvsRight []*timeseries
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(be, left, right)
|
||||
joinOp := strings.ToLower(be.JoinModifier.Op)
|
||||
joinTags := be.JoinModifier.Args
|
||||
groupOp := strings.ToLower(be.GroupModifier.Op)
|
||||
if len(groupOp) == 0 {
|
||||
groupOp = "ignoring"
|
||||
}
|
||||
groupTags := be.GroupModifier.Args
|
||||
for k, tssLeft := range mLeft {
|
||||
tssRight := mRight[k]
|
||||
if len(tssRight) == 0 {
|
||||
@@ -309,37 +144,38 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
|
||||
}
|
||||
switch joinOp {
|
||||
case "group_left":
|
||||
if err := ensureOneX("right", tssRight); err != nil {
|
||||
var err error
|
||||
rvsLeft, rvsRight, err = groupJoin("right", be, rvsLeft, rvsRight, tssLeft, tssRight)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
src := tssRight[0]
|
||||
for _, ts := range tssLeft {
|
||||
ts.MetricName.AddMissingTags(joinTags, &src.MetricName)
|
||||
rvsLeft = append(rvsLeft, ts)
|
||||
rvsRight = append(rvsRight, src)
|
||||
}
|
||||
case "group_right":
|
||||
if err := ensureOneX("left", tssLeft); err != nil {
|
||||
var err error
|
||||
rvsRight, rvsLeft, err = groupJoin("left", be, rvsRight, rvsLeft, tssRight, tssLeft)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
src := tssLeft[0]
|
||||
for _, ts := range tssRight {
|
||||
ts.MetricName.AddMissingTags(joinTags, &src.MetricName)
|
||||
rvsLeft = append(rvsLeft, src)
|
||||
rvsRight = append(rvsRight, ts)
|
||||
}
|
||||
case "":
|
||||
if err := ensureOneX("left", tssLeft); err != nil {
|
||||
if err := ensureSingleTimeseries("left", be, tssLeft); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := ensureOneX("right", tssRight); err != nil {
|
||||
if err := ensureSingleTimeseries("right", be, tssRight); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
tssLeft[0].MetricName.ResetMetricGroup()
|
||||
rvsLeft = append(rvsLeft, tssLeft[0])
|
||||
tsLeft := tssLeft[0]
|
||||
resetMetricGroupIfRequired(be, tsLeft)
|
||||
switch groupOp {
|
||||
case "on":
|
||||
tsLeft.MetricName.RemoveTagsOn(groupTags)
|
||||
case "ignoring":
|
||||
tsLeft.MetricName.RemoveTagsIgnoring(groupTags)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected binary op modifier %q", groupOp)
|
||||
}
|
||||
rvsLeft = append(rvsLeft, tsLeft)
|
||||
rvsRight = append(rvsRight, tssRight[0])
|
||||
default:
|
||||
return nil, nil, nil, fmt.Errorf(`unexpected join modifier %q`, joinOp)
|
||||
logger.Panicf("BUG: unexpected join modifier %q", joinOp)
|
||||
}
|
||||
}
|
||||
dst := rvsLeft
|
||||
@@ -349,73 +185,101 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
|
||||
return rvsLeft, rvsRight, dst, nil
|
||||
}
|
||||
|
||||
func binaryOpPlus(left, right float64) float64 {
|
||||
return left + right
|
||||
}
|
||||
|
||||
func binaryOpMinus(left, right float64) float64 {
|
||||
return left - right
|
||||
}
|
||||
|
||||
func binaryOpMul(left, right float64) float64 {
|
||||
return left * right
|
||||
}
|
||||
|
||||
func binaryOpDiv(left, right float64) float64 {
|
||||
return left / right
|
||||
}
|
||||
|
||||
func binaryOpMod(left, right float64) float64 {
|
||||
return math.Mod(left, right)
|
||||
}
|
||||
|
||||
func binaryOpPow(left, right float64) float64 {
|
||||
return math.Pow(left, right)
|
||||
}
|
||||
|
||||
func binaryOpDefault(left, right float64) float64 {
|
||||
if math.IsNaN(left) {
|
||||
return right
|
||||
func ensureSingleTimeseries(side string, be *metricsql.BinaryOpExpr, tss []*timeseries) error {
|
||||
if len(tss) == 0 {
|
||||
logger.Panicf("BUG: tss must contain at least one value")
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
func binaryOpIf(left, right float64) float64 {
|
||||
if math.IsNaN(right) {
|
||||
return nan
|
||||
for len(tss) > 1 {
|
||||
if !mergeNonOverlappingTimeseries(tss[0], tss[len(tss)-1]) {
|
||||
return fmt.Errorf(`duplicate time series on the %s side of %s %s: %s and %s`, side, be.Op, be.GroupModifier.AppendString(nil),
|
||||
stringMetricTags(&tss[0].MetricName), stringMetricTags(&tss[len(tss)-1].MetricName))
|
||||
}
|
||||
tss = tss[:len(tss)-1]
|
||||
}
|
||||
return left
|
||||
return nil
|
||||
}
|
||||
|
||||
func binaryOpIfnot(left, right float64) float64 {
|
||||
if math.IsNaN(right) {
|
||||
return left
|
||||
func groupJoin(singleTimeseriesSide string, be *metricsql.BinaryOpExpr, rvsLeft, rvsRight, tssLeft, tssRight []*timeseries) ([]*timeseries, []*timeseries, error) {
|
||||
joinTags := be.JoinModifier.Args
|
||||
var m map[string]*timeseries
|
||||
for _, tsLeft := range tssLeft {
|
||||
resetMetricGroupIfRequired(be, tsLeft)
|
||||
if len(tssRight) == 1 {
|
||||
// Easy case - right part contains only a single matching time series.
|
||||
tsLeft.MetricName.AddMissingTags(joinTags, &tssRight[0].MetricName)
|
||||
rvsLeft = append(rvsLeft, tsLeft)
|
||||
rvsRight = append(rvsRight, tssRight[0])
|
||||
continue
|
||||
}
|
||||
|
||||
// Hard case - right part contains multiple matching time series.
|
||||
// Verify it doesn't result in duplicate MetricName values after adding missing tags.
|
||||
if m == nil {
|
||||
m = make(map[string]*timeseries, len(tssRight))
|
||||
} else {
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
bb := bbPool.Get()
|
||||
for _, tsRight := range tssRight {
|
||||
var tsCopy timeseries
|
||||
tsCopy.CopyFromShallowTimestamps(tsLeft)
|
||||
tsCopy.MetricName.AddMissingTags(joinTags, &tsRight.MetricName)
|
||||
bb.B = marshalMetricTagsSorted(bb.B[:0], &tsCopy.MetricName)
|
||||
if tsExisting := m[string(bb.B)]; tsExisting != nil {
|
||||
// Try merging tsExisting with tsRight if they don't overlap.
|
||||
if mergeNonOverlappingTimeseries(tsExisting, tsRight) {
|
||||
continue
|
||||
}
|
||||
return nil, nil, fmt.Errorf("duplicate time series on the %s side of `%s %s %s`: %s and %s",
|
||||
singleTimeseriesSide, be.Op, be.GroupModifier.AppendString(nil), be.JoinModifier.AppendString(nil),
|
||||
stringMetricTags(&tsExisting.MetricName), stringMetricTags(&tsRight.MetricName))
|
||||
}
|
||||
m[string(bb.B)] = tsRight
|
||||
rvsLeft = append(rvsLeft, &tsCopy)
|
||||
rvsRight = append(rvsRight, tsRight)
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
}
|
||||
return nan
|
||||
return rvsLeft, rvsRight, nil
|
||||
}
|
||||
|
||||
func binaryOpEq(left, right float64) bool {
|
||||
return left == right
|
||||
func mergeNonOverlappingTimeseries(dst, src *timeseries) bool {
|
||||
// Verify whether the time series can be merged.
|
||||
srcValues := src.Values
|
||||
dstValues := dst.Values
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if !math.IsNaN(dstValues[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Time series can be merged. Merge them.
|
||||
for i, v := range srcValues {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues[i] = v
|
||||
}
|
||||
return true
|
||||
}
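
A minimal standalone sketch (not from the repository; the helper name and sample values are illustrative, and plain float64 slices stand in for timeseries values) of the merge rule used by mergeNonOverlappingTimeseries above: src may only be folded into dst if the two series never both hold a real value at the same index.

package main

import (
	"fmt"
	"math"
)

// mergeIfNonOverlapping copies non-NaN values from src into dst, but only if
// dst holds NaN at every index where src has a value; otherwise nothing is merged.
func mergeIfNonOverlapping(dst, src []float64) bool {
	for i, v := range src {
		if !math.IsNaN(v) && !math.IsNaN(dst[i]) {
			return false // the series overlap - cannot merge
		}
	}
	for i, v := range src {
		if !math.IsNaN(v) {
			dst[i] = v
		}
	}
	return true
}

func main() {
	nan := math.NaN()
	a := []float64{1, nan, 3, nan}
	b := []float64{nan, 2, nan, 4}
	fmt.Println(mergeIfNonOverlapping(a, b), a) // true [1 2 3 4]
}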
|
||||
|
||||
func binaryOpNeq(left, right float64) bool {
|
||||
return left != right
|
||||
}
|
||||
|
||||
func binaryOpGt(left, right float64) bool {
|
||||
return left > right
|
||||
}
|
||||
|
||||
func binaryOpLt(left, right float64) bool {
|
||||
return left < right
|
||||
}
|
||||
|
||||
func binaryOpGte(left, right float64) bool {
|
||||
return left >= right
|
||||
}
|
||||
|
||||
func binaryOpLte(left, right float64) bool {
|
||||
return left <= right
|
||||
func resetMetricGroupIfRequired(be *metricsql.BinaryOpExpr, ts *timeseries) {
|
||||
if metricsql.IsBinaryOpCmp(be.Op) && !be.Bool {
|
||||
// Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does.
|
||||
return
|
||||
}
|
||||
switch be.Op {
|
||||
case "default", "if", "ifnot":
|
||||
// Do not reset MetricGroup for these ops.
|
||||
return
|
||||
}
|
||||
ts.MetricName.ResetMetricGroup()
|
||||
}
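
A small standalone sketch of the rule encoded by resetMetricGroupIfRequired above (illustrative only; the function name below is made up): non-boolean comparisons and the default/if/ifnot operators keep the metric name, while everything else resets it.

package main

import "fmt"

// keepsMetricName mirrors the logic shown above: comparison operators without
// the `bool` modifier and the ops default/if/ifnot keep the metric name;
// all other binary operators reset it.
func keepsMetricName(op string, isBool bool) bool {
	switch op {
	case "==", "!=", ">", "<", ">=", "<=":
		return !isBool
	case "default", "if", "ifnot":
		return true
	}
	return false
}

func main() {
	fmt.Println(keepsMetricName(">", false)) // true: `up > 0` keeps the name
	fmt.Println(keepsMetricName(">", true))  // false: `up > bool 0` resets it
	fmt.Println(keepsMetricName("+", false)) // false: `up + 1` resets it
}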
|
||||
|
||||
func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
@@ -454,7 +318,7 @@ func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func createTimeseriesMapByTagSet(be *binaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
|
||||
func createTimeseriesMapByTagSet(be *metricsql.BinaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
|
||||
groupTags := be.GroupModifier.Args
|
||||
groupOp := strings.ToLower(be.GroupModifier.Op)
|
||||
if len(groupOp) == 0 {
|
||||
|
||||
@@ -11,15 +11,16 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 10e3, "The maximum points per a single timeseries returned from the search")
|
||||
maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 30e3, "The maximum points per a single timeseries returned from the search")
|
||||
)
|
||||
|
||||
// The minumum number of points per timeseries for enabling time rounding.
|
||||
// The minimum number of points per timeseries for enabling time rounding.
|
||||
// This improves cache hit ratio for frequently requested queries over
|
||||
// big time ranges.
|
||||
const minTimeseriesPointsForTimeRounding = 50
|
||||
@@ -31,7 +32,7 @@ const minTimeseriesPointsForTimeRounding = 50
|
||||
func ValidateMaxPointsPerTimeseries(start, end, step int64) error {
|
||||
points := (end-start)/step + 1
|
||||
if uint64(points) > uint64(*maxPointsPerTimeseries) {
|
||||
return fmt.Errorf(`too many points for the given step=%d, start=%d and end=%d: %d; cannot exceed %d points`,
|
||||
return fmt.Errorf(`too many points for the given step=%d, start=%d and end=%d: %d; cannot exceed -search.maxPointsPerTimeseries=%d`,
|
||||
step, start, end, uint64(points), *maxPointsPerTimeseries)
|
||||
}
|
||||
return nil
|
||||
@@ -57,6 +58,14 @@ func AdjustStartEnd(start, end, step int64) (int64, int64) {
|
||||
if adjust > 0 {
|
||||
end += step - adjust
|
||||
}
|
||||
|
||||
// Make sure that the new number of points is the same as the initial number of points.
|
||||
newPoints := (end-start)/step + 1
|
||||
for newPoints > points {
|
||||
end -= step
|
||||
newPoints--
|
||||
}
|
||||
|
||||
return start, end
|
||||
}
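
A standalone sketch of the point-count guard added to AdjustStartEnd above (the helper name and numbers are illustrative; the real function also rounds start and end to the step before this loop runs):

package main

import "fmt"

// trimEndToPointCount steps end back until (end-start)/step+1 again equals the
// original point count, mirroring the loop added in AdjustStartEnd above.
func trimEndToPointCount(start, end, step, points int64) int64 {
	for (end-start)/step+1 > points {
		end -= step
	}
	return end
}

func main() {
	// 4 points originally; end was rounded up past the last point.
	fmt.Println(trimEndToPointCount(0, 12000, 3000, 4)) // 9000
}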
|
||||
|
||||
@@ -70,6 +79,9 @@ type EvalConfig struct {
|
||||
|
||||
MayCache bool
|
||||
|
||||
// LookbackDelta is analog to `-query.lookback-delta` from Prometheus.
|
||||
LookbackDelta int64
|
||||
|
||||
timestamps []int64
|
||||
timestampsOnce sync.Once
|
||||
}
|
||||
@@ -82,6 +94,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig {
|
||||
ec.Step = src.Step
|
||||
ec.Deadline = src.Deadline
|
||||
ec.MayCache = src.MayCache
|
||||
ec.LookbackDelta = src.LookbackDelta
|
||||
|
||||
// do not copy src.timestamps - they must be generated again.
|
||||
return &ec
|
||||
@@ -140,25 +153,25 @@ func getTimestamps(start, end, step int64) []int64 {
|
||||
return timestamps
|
||||
}
|
||||
|
||||
func evalExpr(ec *EvalConfig, e expr) ([]*timeseries, error) {
|
||||
if me, ok := e.(*metricExpr); ok {
|
||||
re := &rollupExpr{
|
||||
func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
|
||||
if me, ok := e.(*metricsql.MetricExpr); ok {
|
||||
re := &metricsql.RollupExpr{
|
||||
Expr: me,
|
||||
}
|
||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re)
|
||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if re, ok := e.(*rollupExpr); ok {
|
||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re)
|
||||
if re, ok := e.(*metricsql.RollupExpr); ok {
|
||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if fe, ok := e.(*funcExpr); ok {
|
||||
if fe, ok := e.(*metricsql.FuncExpr); ok {
|
||||
nrf := getRollupFunc(fe.Name)
|
||||
if nrf == nil {
|
||||
args, err := evalExprs(ec, fe.Args)
|
||||
@@ -188,13 +201,30 @@ func evalExpr(ec *EvalConfig, e expr) ([]*timeseries, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rv, err := evalRollupFunc(ec, fe.Name, rf, re)
|
||||
rv, err := evalRollupFunc(ec, fe.Name, rf, re, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if ae, ok := e.(*aggrFuncExpr); ok {
|
||||
if ae, ok := e.(*metricsql.AggrFuncExpr); ok {
|
||||
if callbacks := getIncrementalAggrFuncCallbacks(ae.Name); callbacks != nil {
|
||||
fe, nrf := tryGetArgRollupFuncWithMetricExpr(ae)
|
||||
if fe != nil {
|
||||
// There is an optimized path for calculating metricsql.AggrFuncExpr over rollupFunc over metricsql.MetricExpr.
|
||||
// The optimized path saves RAM for aggregates over big number of time series.
|
||||
args, re, err := evalRollupFuncArgs(ec, fe)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rf, err := nrf(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iafc := newIncrementalAggrFuncContext(ae, callbacks)
|
||||
return evalRollupFunc(ec, fe.Name, rf, re, iafc)
|
||||
}
|
||||
}
|
||||
args, err := evalExprs(ec, ae.Args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -214,7 +244,7 @@ func evalExpr(ec *EvalConfig, e expr) ([]*timeseries, error) {
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if be, ok := e.(*binaryOpExpr); ok {
|
||||
if be, ok := e.(*metricsql.BinaryOpExpr); ok {
|
||||
left, err := evalExpr(ec, be.Left)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -238,18 +268,83 @@ func evalExpr(ec *EvalConfig, e expr) ([]*timeseries, error) {
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if ne, ok := e.(*numberExpr); ok {
|
||||
if ne, ok := e.(*metricsql.NumberExpr); ok {
|
||||
rv := evalNumber(ec, ne.N)
|
||||
return rv, nil
|
||||
}
|
||||
if se, ok := e.(*stringExpr); ok {
|
||||
if se, ok := e.(*metricsql.StringExpr); ok {
|
||||
rv := evalString(ec, se.S)
|
||||
return rv, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected expression %q", e.AppendString(nil))
|
||||
}
|
||||
|
||||
func evalExprs(ec *EvalConfig, es []expr) ([][]*timeseries, error) {
|
||||
func tryGetArgRollupFuncWithMetricExpr(ae *metricsql.AggrFuncExpr) (*metricsql.FuncExpr, newRollupFunc) {
|
||||
if len(ae.Args) != 1 {
|
||||
return nil, nil
|
||||
}
|
||||
e := ae.Args[0]
|
||||
// Make sure e contains one of the following:
|
||||
// - metricExpr
|
||||
// - metricExpr[d]
|
||||
// - rollupFunc(metricExpr)
|
||||
// - rollupFunc(metricExpr[d])
|
||||
|
||||
if me, ok := e.(*metricsql.MetricExpr); ok {
|
||||
// e = metricExpr
|
||||
if me.IsEmpty() {
|
||||
return nil, nil
|
||||
}
|
||||
fe := &metricsql.FuncExpr{
|
||||
Name: "default_rollup",
|
||||
Args: []metricsql.Expr{me},
|
||||
}
|
||||
nrf := getRollupFunc(fe.Name)
|
||||
return fe, nrf
|
||||
}
|
||||
if re, ok := e.(*metricsql.RollupExpr); ok {
|
||||
if me, ok := re.Expr.(*metricsql.MetricExpr); !ok || me.IsEmpty() || re.ForSubquery() {
|
||||
return nil, nil
|
||||
}
|
||||
// e = metricExpr[d]
|
||||
fe := &metricsql.FuncExpr{
|
||||
Name: "default_rollup",
|
||||
Args: []metricsql.Expr{re},
|
||||
}
|
||||
nrf := getRollupFunc(fe.Name)
|
||||
return fe, nrf
|
||||
}
|
||||
fe, ok := e.(*metricsql.FuncExpr)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
nrf := getRollupFunc(fe.Name)
|
||||
if nrf == nil {
|
||||
return nil, nil
|
||||
}
|
||||
rollupArgIdx := getRollupArgIdx(fe.Name)
|
||||
arg := fe.Args[rollupArgIdx]
|
||||
if me, ok := arg.(*metricsql.MetricExpr); ok {
|
||||
if me.IsEmpty() {
|
||||
return nil, nil
|
||||
}
|
||||
// e = rollupFunc(metricExpr)
|
||||
return &metricsql.FuncExpr{
|
||||
Name: fe.Name,
|
||||
Args: []metricsql.Expr{me},
|
||||
}, nrf
|
||||
}
|
||||
if re, ok := arg.(*metricsql.RollupExpr); ok {
|
||||
if me, ok := re.Expr.(*metricsql.MetricExpr); !ok || me.IsEmpty() || re.ForSubquery() {
|
||||
return nil, nil
|
||||
}
|
||||
// e = rollupFunc(metricExpr[d])
|
||||
return fe, nrf
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evalExprs(ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
|
||||
var rvs [][]*timeseries
|
||||
for _, e := range es {
|
||||
rv, err := evalExpr(ec, e)
|
||||
@@ -261,8 +356,8 @@ func evalExprs(ec *EvalConfig, es []expr) ([][]*timeseries, error) {
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func evalRollupFuncArgs(ec *EvalConfig, fe *funcExpr) ([]interface{}, *rollupExpr, error) {
|
||||
var re *rollupExpr
|
||||
func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) {
|
||||
var re *metricsql.RollupExpr
|
||||
rollupArgIdx := getRollupArgIdx(fe.Name)
|
||||
args := make([]interface{}, len(fe.Args))
|
||||
for i, arg := range fe.Args {
|
||||
@@ -280,64 +375,61 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *funcExpr) ([]interface{}, *rollupExp
|
||||
return args, re, nil
|
||||
}
|
||||
|
||||
func getRollupExprArg(arg expr) *rollupExpr {
|
||||
re, ok := arg.(*rollupExpr)
|
||||
func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
|
||||
re, ok := arg.(*metricsql.RollupExpr)
|
||||
if !ok {
|
||||
// Wrap non-rollup arg into rollupExpr.
|
||||
return &rollupExpr{
|
||||
// Wrap non-rollup arg into metricsql.RollupExpr.
|
||||
return &metricsql.RollupExpr{
|
||||
Expr: arg,
|
||||
}
|
||||
}
|
||||
if len(re.Step) == 0 && !re.InheritStep {
|
||||
// Return standard rollup if it doesn't set step.
|
||||
if !re.ForSubquery() {
|
||||
// Return standard rollup if it doesn't contain subquery.
|
||||
return re
|
||||
}
|
||||
me, ok := re.Expr.(*metricExpr)
|
||||
me, ok := re.Expr.(*metricsql.MetricExpr)
|
||||
if !ok {
|
||||
// arg contains subquery.
|
||||
return re
|
||||
}
|
||||
// Convert me[w:step] -> default_rollup(me)[w:step]
|
||||
reNew := *re
|
||||
reNew.Expr = &funcExpr{
|
||||
reNew.Expr = &metricsql.FuncExpr{
|
||||
Name: "default_rollup",
|
||||
Args: []expr{
|
||||
&rollupExpr{Expr: me},
|
||||
Args: []metricsql.Expr{
|
||||
&metricsql.RollupExpr{Expr: me},
|
||||
},
|
||||
}
|
||||
return &reNew
|
||||
}
|
||||
|
||||
func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr) ([]*timeseries, error) {
|
||||
func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
|
||||
ecNew := ec
|
||||
var offset int64
|
||||
if len(re.Offset) > 0 {
|
||||
var err error
|
||||
offset, err = DurationValue(re.Offset, ec.Step)
|
||||
offset, err = metricsql.DurationValue(re.Offset, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ecNew = newEvalConfig(ec)
|
||||
ecNew.Start -= offset
|
||||
ecNew.End -= offset
|
||||
ecNew.Start, ecNew.End = AdjustStartEnd(ecNew.Start, ecNew.End, ecNew.Step)
|
||||
if ecNew.MayCache {
|
||||
start, end := AdjustStartEnd(ecNew.Start, ecNew.End, ecNew.Step)
|
||||
offset += ecNew.Start - start
|
||||
ecNew.Start = start
|
||||
ecNew.End = end
|
||||
}
|
||||
}
|
||||
var rvs []*timeseries
|
||||
var err error
|
||||
if me, ok := re.Expr.(*metricExpr); ok {
|
||||
if me.IsEmpty() {
|
||||
rvs = evalNumber(ecNew, nan)
|
||||
} else {
|
||||
var window int64
|
||||
if len(re.Window) > 0 {
|
||||
window, err = DurationValue(re.Window, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
rvs, err = evalRollupFuncWithMetricExpr(ecNew, name, rf, me, window)
|
||||
}
|
||||
if me, ok := re.Expr.(*metricsql.MetricExpr); ok {
|
||||
rvs, err = evalRollupFuncWithMetricExpr(ecNew, name, rf, me, iafc, re.Window)
|
||||
} else {
|
||||
if iafc != nil {
|
||||
logger.Panicf("BUG: iafc must be nil for rollup %q over subquery %q", name, re.AppendString(nil))
|
||||
}
|
||||
rvs, err = evalRollupFuncWithSubquery(ecNew, name, rf, re)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -357,12 +449,12 @@ func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr)
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr) ([]*timeseries, error) {
|
||||
// Do not use rollupResultCacheV here, since it works only with metricExpr.
|
||||
func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, re *metricsql.RollupExpr) ([]*timeseries, error) {
|
||||
// Do not use rollupResultCacheV here, since it works only with metricsql.MetricExpr.
|
||||
var step int64
|
||||
if len(re.Step) > 0 {
|
||||
var err error
|
||||
step, err = DurationValue(re.Step, ec.Step)
|
||||
step, err = metricsql.PositiveDurationValue(re.Step, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -372,15 +464,14 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, re *
|
||||
var window int64
|
||||
if len(re.Window) > 0 {
|
||||
var err error
|
||||
window, err = DurationValue(re.Window, ec.Step)
|
||||
window, err = metricsql.PositiveDurationValue(re.Window, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
ecSQ := newEvalConfig(ec)
|
||||
ecSQ.Start -= window + maxSilenceInterval
|
||||
ecSQ.End += step
|
||||
ecSQ.Start -= window + maxSilenceInterval + step
|
||||
ecSQ.Step = step
|
||||
if err := ValidateMaxPointsPerTimeseries(ecSQ.Start, ecSQ.End, ecSQ.Step); err != nil {
|
||||
return nil, err
|
||||
@@ -392,33 +483,22 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, re *
|
||||
}
|
||||
|
||||
sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step)
|
||||
preFunc, rcs := getRollupConfigs(name, rf, ec.Start, ec.End, ec.Step, window, sharedTimestamps)
|
||||
preFunc, rcs := getRollupConfigs(name, rf, ec.Start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
|
||||
tss := make([]*timeseries, 0, len(tssSQ)*len(rcs))
|
||||
var tssLock sync.Mutex
|
||||
removeMetricGroup := !rollupFuncsKeepMetricGroup[name]
|
||||
doParallel(tssSQ, func(tsSQ *timeseries, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||
values, timestamps = removeNanValues(values[:0], timestamps[:0], tsSQ.Values, tsSQ.Timestamps)
|
||||
preFunc(values, timestamps)
|
||||
for _, rc := range rcs {
|
||||
var ts timeseries
|
||||
ts.MetricName.CopyFrom(&tsSQ.MetricName)
|
||||
if len(rc.TagValue) > 0 {
|
||||
ts.MetricName.AddTag("rollup", rc.TagValue)
|
||||
}
|
||||
ts.Values = rc.Do(ts.Values[:0], values, timestamps)
|
||||
ts.Timestamps = sharedTimestamps
|
||||
ts.denyReuse = true
|
||||
doRollupForTimeseries(rc, &ts, &tsSQ.MetricName, values, timestamps, sharedTimestamps, removeMetricGroup)
|
||||
tssLock.Lock()
|
||||
tss = append(tss, &ts)
|
||||
tssLock.Unlock()
|
||||
}
|
||||
return values, timestamps
|
||||
})
|
||||
if !rollupFuncsKeepMetricGroup[name] {
|
||||
tss = copyTimeseriesMetricNames(tss)
|
||||
for _, ts := range tss {
|
||||
ts.MetricName.ResetMetricGroup()
|
||||
}
|
||||
}
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
@@ -472,31 +552,27 @@ func removeNanValues(dstValues []float64, dstTimestamps []int64, values []float6
|
||||
return dstValues, dstTimestamps
|
||||
}
|
||||
|
||||
func getMaxPointsPerRollup() int {
|
||||
maxPointsPerRollupOnce.Do(func() {
|
||||
n := memory.Allowed() / 16 / 8
|
||||
if n <= 16 {
|
||||
n = 16
|
||||
}
|
||||
maxPointsPerRollup = n
|
||||
})
|
||||
return maxPointsPerRollup
|
||||
}
|
||||
|
||||
var (
|
||||
maxPointsPerRollup int
|
||||
maxPointsPerRollupOnce sync.Once
|
||||
)
|
||||
|
||||
var (
|
||||
rollupResultCacheFullHits = metrics.NewCounter(`vm_rollup_result_cache_full_hits_total`)
|
||||
rollupResultCachePartialHits = metrics.NewCounter(`vm_rollup_result_cache_partial_hits_total`)
|
||||
rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
|
||||
)
|
||||
|
||||
func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me *metricExpr, window int64) ([]*timeseries, error) {
|
||||
func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowStr string) ([]*timeseries, error) {
|
||||
if me.IsEmpty() {
|
||||
return evalNumber(ec, nan), nil
|
||||
}
|
||||
var window int64
|
||||
if len(windowStr) > 0 {
|
||||
var err error
|
||||
window, err = metricsql.PositiveDurationValue(windowStr, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Search for partial results in cache.
|
||||
tssCached, start := rollupResultCacheV.Get(name, ec, me, window)
|
||||
tssCached, start := rollupResultCacheV.Get(name, ec, me, iafc, window)
|
||||
if start > ec.End {
|
||||
// The result is fully cached.
|
||||
rollupResultCacheFullHits.Inc()
|
||||
@@ -509,12 +585,13 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me
|
||||
}
|
||||
|
||||
// Fetch the remaining part of the result.
|
||||
tfs := toTagFilters(me.LabelFilters)
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start - window - maxSilenceInterval,
|
||||
MaxTimestamp: ec.End + ec.Step,
|
||||
TagFilterss: [][]storage.TagFilter{me.TagFilters},
|
||||
TagFilterss: [][]storage.TagFilter{tfs},
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, ec.Deadline)
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -528,33 +605,95 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me
|
||||
return tss, nil
|
||||
}
|
||||
sharedTimestamps := getTimestamps(start, ec.End, ec.Step)
|
||||
preFunc, rcs := getRollupConfigs(name, rf, start, ec.End, ec.Step, window, sharedTimestamps)
|
||||
preFunc, rcs := getRollupConfigs(name, rf, start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
|
||||
|
||||
// Verify timeseries fit available memory after the rollup.
|
||||
// Take into account points from tssCached.
|
||||
pointsPerTimeseries := 1 + (ec.End-ec.Start)/ec.Step
|
||||
if uint64(pointsPerTimeseries) > uint64(getMaxPointsPerRollup()/rssLen/len(rcs)) {
|
||||
rss.Cancel()
|
||||
return nil, fmt.Errorf("cannot process more than %d data points for %d time series with %d points in each time series; "+
|
||||
"possible solutions are: reducing the number of matching time series; switching to node with more RAM; increasing `step` query arg (%gs)",
|
||||
getMaxPointsPerRollup(), rssLen*len(rcs), pointsPerTimeseries, float64(ec.Step)/1e3)
|
||||
timeseriesLen := rssLen
|
||||
if iafc != nil {
|
||||
// Incremental aggregates require hold only GOMAXPROCS timeseries in memory.
|
||||
timeseriesLen = runtime.GOMAXPROCS(-1)
|
||||
if iafc.ae.Modifier.Op != "" {
|
||||
// Increase the number of timeseries for non-empty group list: `aggr() by (something)`,
|
||||
// since each group can have own set of time series in memory.
|
||||
// Estimate the number of such groups is lower than 100 :)
|
||||
timeseriesLen *= 100
|
||||
}
|
||||
}
|
||||
rollupPoints := mulNoOverflow(pointsPerTimeseries, int64(timeseriesLen*len(rcs)))
|
||||
rollupMemorySize := mulNoOverflow(rollupPoints, 16)
|
||||
rml := getRollupMemoryLimiter()
|
||||
if !rml.Get(uint64(rollupMemorySize)) {
|
||||
rss.Cancel()
|
||||
return nil, fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series; "+
|
||||
"possible solutions are: reducing the number of matching time series; switching to node with more RAM; "+
|
||||
"increasing -memory.allowedPercent; increasing `step` query arg (%gs)",
|
||||
rollupPoints, rssLen*len(rcs), pointsPerTimeseries, float64(ec.Step)/1e3)
|
||||
}
|
||||
defer rml.Put(uint64(rollupMemorySize))
|
||||
|
||||
// Evaluate rollup
|
||||
tss := make([]*timeseries, 0, rssLen*len(rcs))
|
||||
removeMetricGroup := !rollupFuncsKeepMetricGroup[name]
|
||||
var tss []*timeseries
|
||||
if iafc != nil {
|
||||
tss, err = evalRollupWithIncrementalAggregate(iafc, rss, rcs, preFunc, sharedTimestamps, removeMetricGroup)
|
||||
} else {
|
||||
tss, err = evalRollupNoIncrementalAggregate(rss, rcs, preFunc, sharedTimestamps, removeMetricGroup)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tss = mergeTimeseries(tssCached, tss, start, ec)
|
||||
rollupResultCacheV.Put(name, ec, me, iafc, window, tss)
|
||||
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
var (
|
||||
rollupMemoryLimiter memoryLimiter
|
||||
rollupMemoryLimiterOnce sync.Once
|
||||
)
|
||||
|
||||
func getRollupMemoryLimiter() *memoryLimiter {
|
||||
rollupMemoryLimiterOnce.Do(func() {
|
||||
rollupMemoryLimiter.MaxSize = uint64(memory.Allowed()) / 4
|
||||
})
|
||||
return &rollupMemoryLimiter
|
||||
}
|
||||
|
||||
func evalRollupWithIncrementalAggregate(iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
|
||||
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64, removeMetricGroup bool) ([]*timeseries, error) {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
preFunc(rs.Values, rs.Timestamps)
|
||||
ts := getTimeseries()
|
||||
defer putTimeseries(ts)
|
||||
for _, rc := range rcs {
|
||||
ts.Reset()
|
||||
doRollupForTimeseries(rc, ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps, removeMetricGroup)
|
||||
iafc.updateTimeseries(ts, workerID)
|
||||
|
||||
// ts.Timestamps points to sharedTimestamps. Zero it, so it can be re-used.
|
||||
ts.Timestamps = nil
|
||||
ts.denyReuse = false
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tss := iafc.finalizeTimeseries()
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func evalRollupNoIncrementalAggregate(rss *netstorage.Results, rcs []*rollupConfig,
|
||||
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64, removeMetricGroup bool) ([]*timeseries, error) {
|
||||
tss := make([]*timeseries, 0, rss.Len()*len(rcs))
|
||||
var tssLock sync.Mutex
|
||||
err = rss.RunParallel(func(rs *netstorage.Result) {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
|
||||
preFunc(rs.Values, rs.Timestamps)
|
||||
for _, rc := range rcs {
|
||||
var ts timeseries
|
||||
ts.MetricName.CopyFrom(&rs.MetricName)
|
||||
if len(rc.TagValue) > 0 {
|
||||
ts.MetricName.AddTag("rollup", rc.TagValue)
|
||||
}
|
||||
ts.Values = rc.Do(ts.Values[:0], rs.Values, rs.Timestamps)
|
||||
ts.Timestamps = sharedTimestamps
|
||||
ts.denyReuse = true
|
||||
|
||||
doRollupForTimeseries(rc, &ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps, removeMetricGroup)
|
||||
tssLock.Lock()
|
||||
tss = append(tss, &ts)
|
||||
tssLock.Unlock()
|
||||
@@ -563,19 +702,25 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !rollupFuncsKeepMetricGroup[name] {
|
||||
tss = copyTimeseriesMetricNames(tss)
|
||||
for _, ts := range tss {
|
||||
ts.MetricName.ResetMetricGroup()
|
||||
}
|
||||
}
|
||||
tss = mergeTimeseries(tssCached, tss, start, ec)
|
||||
rollupResultCacheV.Put(name, ec, me, window, tss)
|
||||
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func getRollupConfigs(name string, rf rollupFunc, start, end, step, window int64, sharedTimestamps []int64) (func(values []float64, timestamps []int64), []*rollupConfig) {
|
||||
func doRollupForTimeseries(rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName, valuesSrc []float64, timestampsSrc []int64,
|
||||
sharedTimestamps []int64, removeMetricGroup bool) {
|
||||
tsDst.MetricName.CopyFrom(mnSrc)
|
||||
if len(rc.TagValue) > 0 {
|
||||
tsDst.MetricName.AddTag("rollup", rc.TagValue)
|
||||
}
|
||||
if removeMetricGroup {
|
||||
tsDst.MetricName.ResetMetricGroup()
|
||||
}
|
||||
tsDst.Values = rc.Do(tsDst.Values[:0], valuesSrc, timestampsSrc)
|
||||
tsDst.Timestamps = sharedTimestamps
|
||||
tsDst.denyReuse = true
|
||||
}
|
||||
|
||||
func getRollupConfigs(name string, rf rollupFunc, start, end, step, window int64, lookbackDelta int64, sharedTimestamps []int64) (
|
||||
func(values []float64, timestamps []int64), []*rollupConfig) {
|
||||
preFunc := func(values []float64, timestamps []int64) {}
|
||||
if rollupFuncsRemoveCounterResets[name] {
|
||||
preFunc = func(values []float64, timestamps []int64) {
|
||||
@@ -584,13 +729,15 @@ func getRollupConfigs(name string, rf rollupFunc, start, end, step, window int64
|
||||
}
|
||||
newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
|
||||
return &rollupConfig{
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
Timestamps: sharedTimestamps,
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
MayAdjustWindow: rollupFuncsMayAdjustWindow[name],
|
||||
LookbackDelta: lookbackDelta,
|
||||
Timestamps: sharedTimestamps,
|
||||
}
|
||||
}
|
||||
appendRollupConfigs := func(dst []*rollupConfig) []*rollupConfig {
|
||||
@@ -617,6 +764,11 @@ func getRollupConfigs(name string, rf rollupFunc, start, end, step, window int64
|
||||
deltaValues(values)
|
||||
}
|
||||
rcs = appendRollupConfigs(rcs)
|
||||
case "rollup_candlestick":
|
||||
rcs = append(rcs, newRollupConfig(rollupFirst, "open"))
|
||||
rcs = append(rcs, newRollupConfig(rollupLast, "close"))
|
||||
rcs = append(rcs, newRollupConfig(rollupMin, "low"))
|
||||
rcs = append(rcs, newRollupConfig(rollupMax, "high"))
|
||||
default:
|
||||
rcs = append(rcs, newRollupConfig(rf, ""))
|
||||
}
|
||||
@@ -653,3 +805,31 @@ func evalTime(ec *EvalConfig) []*timeseries {
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
func mulNoOverflow(a, b int64) int64 {
|
||||
if math.MaxInt64/b < a {
|
||||
// Overflow
|
||||
return math.MaxInt64
|
||||
}
|
||||
return a * b
|
||||
}
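
An illustrative, self-contained check of the saturating multiplication above (assumes b > 0, as in the callers where b is a series or point count):

package main

import (
	"fmt"
	"math"
)

// mulNoOverflow saturates to math.MaxInt64 instead of wrapping around.
func mulNoOverflow(a, b int64) int64 {
	if math.MaxInt64/b < a {
		// Overflow
		return math.MaxInt64
	}
	return a * b
}

func main() {
	fmt.Println(mulNoOverflow(1<<40, 1<<40)) // saturates to math.MaxInt64
	fmt.Println(mulNoOverflow(3, 7))         // 21
}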
|
||||
|
||||
func toTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
|
||||
tfs := make([]storage.TagFilter, len(lfs))
|
||||
for i := range lfs {
|
||||
toTagFilter(&tfs[i], &lfs[i])
|
||||
}
|
||||
return tfs
|
||||
}
|
||||
|
||||
func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
|
||||
if src.Label != "__name__" {
|
||||
dst.Key = []byte(src.Label)
|
||||
} else {
|
||||
// This is required for storage.Search.
|
||||
dst.Key = nil
|
||||
}
|
||||
dst.Value = []byte(src.Value)
|
||||
dst.IsRegexp = src.IsRegexp
|
||||
dst.IsNegative = src.IsNegative
|
||||
}
|
||||
|
||||
@@ -1,29 +1,38 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/metricsql"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// ExpandWithExprs expands WITH expressions inside q and returns the resulting
|
||||
// PromQL without WITH expressions.
|
||||
func ExpandWithExprs(q string) (string, error) {
|
||||
e, err := parsePromQLWithCache(q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf := e.AppendString(nil)
|
||||
return string(buf), nil
|
||||
}
|
||||
var logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging")
|
||||
|
||||
var slowQueries = metrics.NewCounter(`vm_slow_queries_total`)
|
||||
|
||||
// Exec executes q for the given ec.
|
||||
func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, error) {
|
||||
if *logSlowQueryDuration > 0 {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
d := time.Since(startTime)
|
||||
if d >= *logSlowQueryDuration {
|
||||
logger.Infof("slow query according to -search.logSlowQueryDuration=%s: duration=%s, start=%d, end=%d, step=%d, query=%q",
|
||||
*logSlowQueryDuration, d, ec.Start/1000, ec.End/1000, ec.Step/1000, q)
|
||||
slowQueries.Inc()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Exec executes q for the given ec until the deadline.
|
||||
func Exec(ec *EvalConfig, q string) ([]netstorage.Result, error) {
|
||||
ec.validate()
|
||||
|
||||
e, err := parsePromQLWithCache(q)
|
||||
@@ -50,6 +59,14 @@ func Exec(ec *EvalConfig, q string) ([]netstorage.Result, error) {
|
||||
}
|
||||
ec.End -= ec.Step
|
||||
|
||||
if isFirstPointOnly {
|
||||
// Remove all the points except the first one from every time series.
|
||||
for _, ts := range rv {
|
||||
ts.Values = ts.Values[:1]
|
||||
ts.Timestamps = ts.Timestamps[:1]
|
||||
}
|
||||
}
|
||||
|
||||
maySort := maySortResults(e, rv)
|
||||
result, err := timeseriesToResult(rv, maySort)
|
||||
if err != nil {
|
||||
@@ -58,12 +75,12 @@ func Exec(ec *EvalConfig, q string) ([]netstorage.Result, error) {
|
||||
return result, err
|
||||
}
|
||||
|
||||
func maySortResults(e expr, tss []*timeseries) bool {
|
||||
func maySortResults(e metricsql.Expr, tss []*timeseries) bool {
|
||||
if len(tss) > 100 {
|
||||
// There is no sense in sorting a lot of results
|
||||
return false
|
||||
}
|
||||
fe, ok := e.(*funcExpr)
|
||||
fe, ok := e.(*metricsql.FuncExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
@@ -78,14 +95,14 @@ func maySortResults(e expr, tss []*timeseries) bool {
|
||||
func timeseriesToResult(tss []*timeseries, maySort bool) ([]netstorage.Result, error) {
|
||||
tss = removeNaNs(tss)
|
||||
result := make([]netstorage.Result, len(tss))
|
||||
m := make(map[string]bool)
|
||||
m := make(map[string]struct{}, len(tss))
|
||||
bb := bbPool.Get()
|
||||
for i, ts := range tss {
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
if m[string(bb.B)] {
|
||||
return nil, fmt.Errorf(`duplicate output timeseries: %s%s`, ts.MetricName.MetricGroup, stringMetricName(&ts.MetricName))
|
||||
if _, ok := m[string(bb.B)]; ok {
|
||||
return nil, fmt.Errorf(`duplicate output timeseries: %s`, stringMetricName(&ts.MetricName))
|
||||
}
|
||||
m[string(bb.B)] = true
|
||||
m[string(bb.B)] = struct{}{}
|
||||
|
||||
rs := &result[i]
|
||||
rs.MetricNameMarshaled = append(rs.MetricNameMarshaled[:0], bb.B...)
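
A tiny standalone illustration of the map[string]struct{} set used above for duplicate detection (an empty struct value occupies no memory, so the map stores only the keys):

package main

import "fmt"

func main() {
	seen := make(map[string]struct{})
	for _, name := range []string{"a", "b", "a"} {
		if _, ok := seen[name]; ok {
			fmt.Println("duplicate:", name)
			continue
		}
		seen[name] = struct{}{}
	}
}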
|
||||
@@ -107,25 +124,30 @@ func timeseriesToResult(tss []*timeseries, maySort bool) ([]netstorage.Result, e
|
||||
func removeNaNs(tss []*timeseries) []*timeseries {
|
||||
rvs := tss[:0]
|
||||
for _, ts := range tss {
|
||||
nans := 0
|
||||
allNans := true
|
||||
for _, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
nans++
|
||||
if !math.IsNaN(v) {
|
||||
allNans = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if nans == len(ts.Values) {
|
||||
if allNans {
|
||||
// Skip timeseries with all NaNs.
|
||||
continue
|
||||
}
|
||||
rvs = append(rvs, ts)
|
||||
}
|
||||
for i := len(rvs); i < len(tss); i++ {
|
||||
// Zero unused time series, so GC could reclaim them.
|
||||
tss[i] = nil
|
||||
}
|
||||
return rvs
|
||||
}
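
A simplified standalone sketch of the all-NaN filtering in removeNaNs above (plain float64 slices stand in for timeseries; the helper name is made up): series whose values are all NaN are dropped, the backing array is reused, and the unused tail is nil-ed so the GC can reclaim it.

package main

import (
	"fmt"
	"math"
)

func dropAllNaN(tss [][]float64) [][]float64 {
	rvs := tss[:0]
	for _, vs := range tss {
		keep := false
		for _, v := range vs {
			if !math.IsNaN(v) {
				keep = true
				break
			}
		}
		if keep {
			rvs = append(rvs, vs)
		}
	}
	for i := len(rvs); i < len(tss); i++ {
		tss[i] = nil
	}
	return rvs
}

func main() {
	nan := math.NaN()
	fmt.Println(len(dropAllNaN([][]float64{{nan, nan}, {1, nan}}))) // 1
}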
|
||||
|
||||
func parsePromQLWithCache(q string) (expr, error) {
|
||||
func parsePromQLWithCache(q string) (metricsql.Expr, error) {
|
||||
pcv := parseCacheV.Get(q)
|
||||
if pcv == nil {
|
||||
e, err := parsePromQL(q)
|
||||
e, err := metricsql.Parse(q)
|
||||
pcv = &parseCacheValue{
|
||||
e: e,
|
||||
err: err,
|
||||
@@ -157,16 +179,19 @@ var parseCacheV = func() *parseCache {
|
||||
const parseCacheMaxLen = 10e3
|
||||
|
||||
type parseCacheValue struct {
|
||||
e expr
|
||||
e metricsql.Expr
|
||||
err error
|
||||
}
|
||||
|
||||
type parseCache struct {
|
||||
m map[string]*parseCacheValue
|
||||
mu sync.RWMutex
|
||||
// Move atomic counters to the top of struct for 8-byte alignment on 32-bit arch.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212
|
||||
|
||||
requests uint64
|
||||
misses uint64
|
||||
|
||||
m map[string]*parseCacheValue
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (pc *parseCache) Requests() uint64 {
|
||||
|
||||
(File diff suppressed because it is too large.)

app/vmselect/promql/memory_limiter.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package promql

import (
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

type memoryLimiter struct {
	MaxSize uint64

	mu    sync.Mutex
	usage uint64
}

func (ml *memoryLimiter) Get(n uint64) bool {
	ml.mu.Lock()
	ok := n <= ml.MaxSize && ml.MaxSize-n >= ml.usage
	if ok {
		ml.usage += n
	}
	ml.mu.Unlock()
	return ok
}

func (ml *memoryLimiter) Put(n uint64) {
	ml.mu.Lock()
	if n > ml.usage {
		logger.Panicf("BUG: n=%d cannot exceed %d", n, ml.usage)
	}
	ml.usage -= n
	ml.mu.Unlock()
}
app/vmselect/promql/memory_limiter_test.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package promql

import (
	"testing"
)

func TestMemoryLimiter(t *testing.T) {
	var ml memoryLimiter
	ml.MaxSize = 100

	// Allocate memory
	if !ml.Get(10) {
		t.Fatalf("cannot get 10 out of %d bytes", ml.MaxSize)
	}
	if ml.usage != 10 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 10)
	}
	if !ml.Get(20) {
		t.Fatalf("cannot get 20 out of 90 bytes")
	}
	if ml.usage != 30 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 30)
	}
	if ml.Get(1000) {
		t.Fatalf("unexpected get for 1000 bytes")
	}
	if ml.usage != 30 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 30)
	}
	if ml.Get(71) {
		t.Fatalf("unexpected get for 71 bytes")
	}
	if ml.usage != 30 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 30)
	}
	if !ml.Get(70) {
		t.Fatalf("cannot get 70 bytes")
	}
	if ml.usage != 100 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 100)
	}

	// Return memory back
	ml.Put(10)
	ml.Put(70)
	if ml.usage != 20 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 20)
	}
	if !ml.Get(30) {
		t.Fatalf("cannot get 30 bytes")
	}
	ml.Put(50)
	if ml.usage != 0 {
		t.Fatalf("unexpected usage; got %d; want %d", ml.usage, 0)
	}
}
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.