mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
Compare commits
408 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6334bffa1 | ||
|
|
2abd5154e0 | ||
|
|
c1cf7d9f93 | ||
|
|
956fdd89d3 | ||
|
|
1bc6377863 | ||
|
|
1e2c511747 | ||
|
|
0eeffb910f | ||
|
|
4ba86f501a | ||
|
|
fdc5cfd838 | ||
|
|
a116f5e7c1 | ||
|
|
4e9e1ca0f7 | ||
|
|
c1d3705be0 | ||
|
|
b7ee2e7af2 | ||
|
|
67d44b0845 | ||
|
|
1e6ae9eff4 | ||
|
|
fa81f82714 | ||
|
|
0fa6df94a2 | ||
|
|
c39355921e | ||
|
|
cf4786f34a | ||
|
|
3e67862676 | ||
|
|
0db9fcedd5 | ||
|
|
391530bb74 | ||
|
|
60c5b368bc | ||
|
|
26dc21cf64 | ||
|
|
2444433d83 | ||
|
|
ea4c828bae | ||
|
|
aebc45ad26 | ||
|
|
2cb811b42f | ||
|
|
b986516fbe | ||
|
|
ef2296e420 | ||
|
|
a6086cde78 | ||
|
|
c9063ece66 | ||
|
|
4e26ad869b | ||
|
|
0772191975 | ||
|
|
48999e5396 | ||
|
|
0adebae1f8 | ||
|
|
267efde5ae | ||
|
|
0686ac52c3 | ||
|
|
68722c3c74 | ||
|
|
a544f49c2b | ||
|
|
d32f88c378 | ||
|
|
00cfb2d2b9 | ||
|
|
37dc223e25 | ||
|
|
a84fe76677 | ||
|
|
3a697a935a | ||
|
|
51a21c7d4b | ||
|
|
3d83f5d334 | ||
|
|
6f3b2fd600 | ||
|
|
8d35718dc6 | ||
|
|
33975513d0 | ||
|
|
63f2b539df | ||
|
|
9428ec9c9f | ||
|
|
0c8057924f | ||
|
|
d4218d27e6 | ||
|
|
e2274714b1 | ||
|
|
4d636c244d | ||
|
|
bad53e4207 | ||
|
|
3f581a9860 | ||
|
|
398e00aa54 | ||
|
|
4fd741f40d | ||
|
|
4a2cd85b92 | ||
|
|
6c46afb087 | ||
|
|
7343e8b408 | ||
|
|
22e3fabefd | ||
|
|
88f8670ede | ||
|
|
9eb5de334f | ||
|
|
6954e126fc | ||
|
|
bce35b8dd9 | ||
|
|
16dd145586 | ||
|
|
cd2c9e39da | ||
|
|
305e7bc981 | ||
|
|
9721d06c6a | ||
|
|
4862e93024 | ||
|
|
db4560ca31 | ||
|
|
1575a560f0 | ||
|
|
e1d76ec1f3 | ||
|
|
aeaa5de5fe | ||
|
|
4c0a262a2e | ||
|
|
3685fc18d5 | ||
|
|
ede7ad3703 | ||
|
|
9196c085a7 | ||
|
|
3802ae9269 | ||
|
|
b0090dbd86 | ||
|
|
603a79b357 | ||
|
|
2655220c58 | ||
|
|
bf915fc0db | ||
|
|
2fc157ff7a | ||
|
|
0dc0006f34 | ||
|
|
4b688fffee | ||
|
|
1402a6b981 | ||
|
|
3308279c4e | ||
|
|
fb909cf710 | ||
|
|
c4e75f09dc | ||
|
|
fb8840ac38 | ||
|
|
9c9221d1b2 | ||
|
|
70ca018a57 | ||
|
|
4266091e4f | ||
|
|
8001d29b6e | ||
|
|
9d3f1fcbb9 | ||
|
|
ba7b3806be | ||
|
|
7fa88c6efc | ||
|
|
4da34b11f8 | ||
|
|
a18317adbc | ||
|
|
44d7fc599d | ||
|
|
dce6079379 | ||
|
|
98419c00ef | ||
|
|
ac004665b5 | ||
|
|
8c03a8c4b4 | ||
|
|
8a126c2865 | ||
|
|
380cae23a0 | ||
|
|
1272e407b2 | ||
|
|
5f33fc8e46 | ||
|
|
ec8125606d | ||
|
|
f4a38f7fb1 | ||
|
|
ab740afd0d | ||
|
|
7b5168adfb | ||
|
|
a0d480fbf3 | ||
|
|
0dfc1ace53 | ||
|
|
d3fd113a80 | ||
|
|
4f738c8a15 | ||
|
|
dd86e6130c | ||
|
|
6a27657d73 | ||
|
|
c23b66a1ad | ||
|
|
be39414f9c | ||
|
|
e74fb23189 | ||
|
|
582fdc059a | ||
|
|
1c108fc494 | ||
|
|
d6b5ed6d39 | ||
|
|
639b14e8ab | ||
|
|
483de1cc06 | ||
|
|
9e0896055d | ||
|
|
5bb61b8b38 | ||
|
|
75a58dee02 | ||
|
|
5b41122292 | ||
|
|
964c296f96 | ||
|
|
9ecb994671 | ||
|
|
9d41e0dcae | ||
|
|
09fc6e22e5 | ||
|
|
99c37c2c96 | ||
|
|
06c2c25544 | ||
|
|
ec1b185991 | ||
|
|
0967683ae9 | ||
|
|
ad8a43b4e1 | ||
|
|
7346982763 | ||
|
|
5d8d110010 | ||
|
|
0b488f1e37 | ||
|
|
b8bb74ffc6 | ||
|
|
5c9e48417a | ||
|
|
5c83f8e203 | ||
|
|
05713469c3 | ||
|
|
8822079b77 | ||
|
|
99e048c9df | ||
|
|
47e4b50112 | ||
|
|
241170dc05 | ||
|
|
1c69f4eadc | ||
|
|
8d93b15b86 | ||
|
|
fcc166622a | ||
|
|
a9f39168d2 | ||
|
|
f090b2e917 | ||
|
|
10caad4728 | ||
|
|
3b90c2a99a | ||
|
|
57ec4f5f92 | ||
|
|
01cb15b6f5 | ||
|
|
b9256511e8 | ||
|
|
3a38b23fa3 | ||
|
|
8bd6f1f6df | ||
|
|
4aaa5c2efc | ||
|
|
10f5a26bec | ||
|
|
c14fd6c43f | ||
|
|
a77e88db7d | ||
|
|
aad7236e5d | ||
|
|
5e5de6be9a | ||
|
|
90cf6f3fcb | ||
|
|
8e3d69219f | ||
|
|
b842a2eccc | ||
|
|
afcc7fb167 | ||
|
|
57a57c711a | ||
|
|
68f260d878 | ||
|
|
1eade9b358 | ||
|
|
7e8747f6ed | ||
|
|
0168a1b658 | ||
|
|
bf6cbb762c | ||
|
|
6aeac37fc5 | ||
|
|
c98725db55 | ||
|
|
d8043f7161 | ||
|
|
f586e1f83c | ||
|
|
d1132bb188 | ||
|
|
915fb6df79 | ||
|
|
89eb6d78a4 | ||
|
|
17096b5750 | ||
|
|
66efa5745f | ||
|
|
106ab78a47 | ||
|
|
8aa474d685 | ||
|
|
9e059bb330 | ||
|
|
2346335ea6 | ||
|
|
b339890dca | ||
|
|
6c4ca89d75 | ||
|
|
f0fe7b5ad6 | ||
|
|
22ed4e7fd4 | ||
|
|
162f1fb1b7 | ||
|
|
d07f616609 | ||
|
|
5bf4e5ffb5 | ||
|
|
8c3629a892 | ||
|
|
ea07cf68ba | ||
|
|
4ee41bab43 | ||
|
|
1273f31f19 | ||
|
|
0f2ecde0e6 | ||
|
|
6cd77d4847 | ||
|
|
fb14f23532 | ||
|
|
daba0cdb05 | ||
|
|
575d2f0a91 | ||
|
|
ec1b439329 | ||
|
|
6a943a6a58 | ||
|
|
998525999c | ||
|
|
ab88890523 | ||
|
|
374d681848 | ||
|
|
e75d5f47c4 | ||
|
|
fc90ebf43c | ||
|
|
62a7353479 | ||
|
|
54bd21eb4a | ||
|
|
2bd1a01d1a | ||
|
|
cd4833d3d0 | ||
|
|
101fa258e5 | ||
|
|
d031e04023 | ||
|
|
43ea4ce428 | ||
|
|
a336bb4e22 | ||
|
|
1fe6d784d8 | ||
|
|
55fe36149c | ||
|
|
9203170eb2 | ||
|
|
2db685c19c | ||
|
|
6ddfb06b52 | ||
|
|
40a6c0d672 | ||
|
|
1371024747 | ||
|
|
c27c6de297 | ||
|
|
0c629429de | ||
|
|
4dbd642c86 | ||
|
|
56c154f45b | ||
|
|
8d83dcf332 | ||
|
|
9a4b2b8315 | ||
|
|
e06866005d | ||
|
|
2c76a9c9ab | ||
|
|
b9166a60ff | ||
|
|
c7034fc51b | ||
|
|
715c423f1a | ||
|
|
ca74e29458 | ||
|
|
a41955863a | ||
|
|
2ecb117082 | ||
|
|
0c88afa386 | ||
|
|
74c0fb04f3 | ||
|
|
828078eb45 | ||
|
|
7b59466667 | ||
|
|
79ac02ba74 | ||
|
|
593bd35aaa | ||
|
|
7354f10336 | ||
|
|
e8998c69a7 | ||
|
|
55bcf60ea6 | ||
|
|
796b010139 | ||
|
|
0c8a09c8e1 | ||
|
|
c1be1e4342 | ||
|
|
0c8d463307 | ||
|
|
e0fccc6c60 | ||
|
|
1f7d9a213a | ||
|
|
7ce1f73ada | ||
|
|
e605315f01 | ||
|
|
fcef49184b | ||
|
|
844ce4731e | ||
|
|
683bf2a11f | ||
|
|
eb2283a029 | ||
|
|
e8377011ab | ||
|
|
33ea2120c3 | ||
|
|
cf63669303 | ||
|
|
feacfffe89 | ||
|
|
4bb738ddd9 | ||
|
|
90e72c2a42 | ||
|
|
ccd8b7a003 | ||
|
|
d32845781e | ||
|
|
af2ceaaa0b | ||
|
|
61926bae01 | ||
|
|
ee13256f74 | ||
|
|
3b3b2f1e6e | ||
|
|
c9cbf5351c | ||
|
|
146c6e1f72 | ||
|
|
d261fa2885 | ||
|
|
5b47c00910 | ||
|
|
9e1119dab8 | ||
|
|
47a3228108 | ||
|
|
e88a03323a | ||
|
|
b75630fcf4 | ||
|
|
80db24386e | ||
|
|
296c14317f | ||
|
|
973e4b5b76 | ||
|
|
7aadec8e3c | ||
|
|
45fc8cb72f | ||
|
|
4b2523fb40 | ||
|
|
70ba36fa37 | ||
|
|
a78b3dba7f | ||
|
|
a9cfca6a72 | ||
|
|
710d6c33ea | ||
|
|
a8d4224828 | ||
|
|
341bed4822 | ||
|
|
5982e94c94 | ||
|
|
6d6c9eb1f8 | ||
|
|
86d3d907a5 | ||
|
|
269285848f | ||
|
|
47e1e5eb4b | ||
|
|
d2c801029b | ||
|
|
beb479b8f1 | ||
|
|
611c4401f8 | ||
|
|
a8db528930 | ||
|
|
15613e5338 | ||
|
|
3237d0309c | ||
|
|
26f8d7ea1b | ||
|
|
419197ba08 | ||
|
|
a4b4db9bf6 | ||
|
|
c1276edab5 | ||
|
|
2322c9a45a | ||
|
|
89b928ff24 | ||
|
|
935bfd7a18 | ||
|
|
3dd36b8088 | ||
|
|
afb964670a | ||
|
|
20fc0e0e54 | ||
|
|
4d9f088526 | ||
|
|
82d1707861 | ||
|
|
70d20ce8de | ||
|
|
723bf1af7f | ||
|
|
ac7b186f13 | ||
|
|
cd1bc32158 | ||
|
|
1c33b5937e | ||
|
|
8bb6bc986d | ||
|
|
d2be567482 | ||
|
|
7e7d4d5275 | ||
|
|
bf9782eaf6 | ||
|
|
cbe692f0e2 | ||
|
|
7b6623558f | ||
|
|
a1351bbaee | ||
|
|
b4d707d9bb | ||
|
|
bee7298f81 | ||
|
|
dbd217b8f0 | ||
|
|
4d936b1524 | ||
|
|
7354090aad | ||
|
|
d37924900b | ||
|
|
c0baa977cf | ||
|
|
f4252f87e6 | ||
|
|
0b78d228d2 | ||
|
|
0371c216a7 | ||
|
|
c1f18ee48d | ||
|
|
fbd7044b2b | ||
|
|
2afe511d80 | ||
|
|
f4e63cd070 | ||
|
|
667115a5c7 | ||
|
|
1458450dba | ||
|
|
5a5ba749f2 | ||
|
|
a3e26de45e | ||
|
|
53ea90865d | ||
|
|
17f0a53068 | ||
|
|
b03bdb32ff | ||
|
|
15f59c6df9 | ||
|
|
da45a20491 | ||
|
|
5859bb9556 | ||
|
|
28f6c36ab4 | ||
|
|
4794f894a4 | ||
|
|
c7280ba61a | ||
|
|
fbd8b03f15 | ||
|
|
d17a47e3e0 | ||
|
|
d6862a2d97 | ||
|
|
f2cf5d8e36 | ||
|
|
27f0d098bd | ||
|
|
a51ff2c6cb | ||
|
|
56b952c456 | ||
|
|
61bad1e07e | ||
|
|
be97f764f5 | ||
|
|
a576d1f5d3 | ||
|
|
968d094524 | ||
|
|
e307a4d92c | ||
|
|
0eae39daa7 | ||
|
|
437e0b2300 | ||
|
|
4b3af728ea | ||
|
|
4a12c4c982 | ||
|
|
2e75efb64e | ||
|
|
25900162f6 | ||
|
|
16afcd6aff | ||
|
|
c2a5eef5e3 | ||
|
|
4859ca0cda | ||
|
|
feb6b203a4 | ||
|
|
51ee990902 | ||
|
|
5262aae5da | ||
|
|
54fb8b21f9 | ||
|
|
d6523ffe90 | ||
|
|
024560b161 | ||
|
|
96ac664b27 | ||
|
|
2ffcf7a4a5 | ||
|
|
5cbd4cfca9 | ||
|
|
718ce33714 | ||
|
|
f332c0d54e | ||
|
|
eca566ed22 | ||
|
|
5bbfdff9fe | ||
|
|
6b0ae332f8 | ||
|
|
2eb3602d61 | ||
|
|
6fb9dd09f5 | ||
|
|
19b6643e5c | ||
|
|
08b889ef09 | ||
|
|
d15d0127fe | ||
|
|
674888fdc9 | ||
|
|
fb140eda33 | ||
|
|
398ec4383e | ||
|
|
eff0debe14 | ||
|
|
1836c415e6 | ||
|
|
81bbbf2cae |
6
.dockerignore
Normal file
6
.dockerignore
Normal file
@@ -0,0 +1,6 @@
|
||||
.git
|
||||
vendor
|
||||
gocache-for-docker
|
||||
victoria-metrics-data
|
||||
vmstorage-data
|
||||
vmselect-cache
|
||||
30
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
30
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Version**
|
||||
The line returned when passing `--version` command line flag to binary. For example:
|
||||
```
|
||||
$ ./victoria-metrics-prod --version
|
||||
victoria-metrics-20190730-121249-heads-single-node-0-g671d9e55
|
||||
```
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here such as error logs, `/metrics` output, screenshots from [the official Grafana dashboard for VictoriaMetrics](https://grafana.com/dashboards/10229).
|
||||
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
||||
43
.github/workflows/main.yml
vendored
Normal file
43
.github/workflows/main.yml
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
name: main
|
||||
on:
|
||||
- push
|
||||
- pull_request
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: 1.13
|
||||
id: go
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v1
|
||||
- name: Dependencies
|
||||
env:
|
||||
GO111MODULE: off
|
||||
run: |
|
||||
go get -v golang.org/x/lint/golint
|
||||
go get -u github.com/kisielk/errcheck
|
||||
- name: Build
|
||||
env:
|
||||
GO111MODULE: on
|
||||
run: |
|
||||
export PATH=$PATH:$(go env GOPATH)/bin # temporary fix. See https://github.com/actions/setup-go/issues/14
|
||||
make check-all
|
||||
git diff --exit-code
|
||||
make test-full
|
||||
make test-pure
|
||||
make victoria-metrics
|
||||
make victoria-metrics-pure
|
||||
make victoria-metrics-arm
|
||||
make victoria-metrics-arm64
|
||||
GOOS=freebsd go build -mod=vendor ./app/victoria-metrics
|
||||
GOOS=darwin go build -mod=vendor ./app/victoria-metrics
|
||||
- name: Publish coverage
|
||||
uses: codecov/codecov-action@v1.0.0
|
||||
with:
|
||||
token: ${{secrets.CODECOV_TOKEN}}
|
||||
file: ./coverage.txt
|
||||
|
||||
15
.gitignore
vendored
Normal file
15
.gitignore
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
/tags
|
||||
/pkg
|
||||
*.pprof
|
||||
/bin
|
||||
.idea
|
||||
*.test
|
||||
*.swp
|
||||
/gocache-for-docker
|
||||
/victoria-metrics-data
|
||||
/vmstorage-data
|
||||
/vmselect-cache
|
||||
/package/temp-deb-*
|
||||
/package/temp-rpm-*
|
||||
/package/*.deb
|
||||
/package/*.rpm
|
||||
76
CODE_OF_CONDUCT.md
Normal file
76
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at info@victoriametrics.com. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
||||
16
CONTRIBUTING.md
Normal file
16
CONTRIBUTING.md
Normal file
@@ -0,0 +1,16 @@
|
||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||
|
||||
- Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
|
||||
- Spreading a word about VictoriaMetrics: conference talks, articles, comments, experience sharing with colleagues.
|
||||
- Updating documentation.
|
||||
|
||||
We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):
|
||||
|
||||
- Prefer simple code and architecture.
|
||||
- Avoid complex abstractions.
|
||||
- Avoid magic code and fancy algorithms.
|
||||
- Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
|
||||
- Minimize the number of moving parts in the distributed system.
|
||||
- Avoid automated decisions, which may hurt cluster availability, consistency or performance.
|
||||
|
||||
Adhering `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
|
||||
190
LICENSE
Normal file
190
LICENSE
Normal file
@@ -0,0 +1,190 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2019 VictoriaMetrics, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
95
Makefile
Normal file
95
Makefile
Normal file
@@ -0,0 +1,95 @@
|
||||
PKG_PREFIX := github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
|
||||
git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | openssl sha1 | cut -c 10-17)))
|
||||
|
||||
PKG_TAG ?= $(shell git tag -l --points-at HEAD)
|
||||
ifeq ($(PKG_TAG),)
|
||||
PKG_TAG := $(BUILDINFO_TAG)
|
||||
endif
|
||||
|
||||
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(shell date -u +'%Y%m%d-%H%M%S')-$(BUILDINFO_TAG)'
|
||||
|
||||
all: \
|
||||
victoria-metrics-prod
|
||||
|
||||
include app/*/Makefile
|
||||
include deployment/*/Makefile
|
||||
|
||||
clean:
|
||||
rm -rf bin/*
|
||||
|
||||
publish: publish-victoria-metrics
|
||||
|
||||
package: package-victoria-metrics
|
||||
|
||||
release: victoria-metrics-prod
|
||||
cd bin && tar czf victoria-metrics-$(PKG_TAG).tar.gz victoria-metrics-prod
|
||||
|
||||
fmt:
|
||||
GO111MODULE=on gofmt -l -w -s ./lib
|
||||
GO111MODULE=on gofmt -l -w -s ./app
|
||||
|
||||
vet:
|
||||
GO111MODULE=on go vet -mod=vendor ./lib/...
|
||||
GO111MODULE=on go vet -mod=vendor ./app/...
|
||||
|
||||
lint: install-golint
|
||||
golint lib/...
|
||||
golint app/...
|
||||
|
||||
install-golint:
|
||||
which golint || GO111MODULE=off go get -u github.com/golang/lint/golint
|
||||
|
||||
errcheck: install-errcheck
|
||||
errcheck -exclude=errcheck_excludes.txt ./lib/...
|
||||
errcheck -exclude=errcheck_excludes.txt ./app/vminsert/...
|
||||
errcheck -exclude=errcheck_excludes.txt ./app/vmselect/...
|
||||
errcheck -exclude=errcheck_excludes.txt ./app/vmstorage/...
|
||||
|
||||
install-errcheck:
|
||||
which errcheck || GO111MODULE=off go get -u github.com/kisielk/errcheck
|
||||
|
||||
check-all: fmt vet lint errcheck golangci-lint
|
||||
|
||||
test:
|
||||
GO111MODULE=on go test -tags=integration -mod=vendor ./lib/... ./app/...
|
||||
|
||||
test-pure:
|
||||
GO111MODULE=on CGO_ENABLED=0 go test -tags=integration -mod=vendor ./lib/... ./app/...
|
||||
|
||||
test-full:
|
||||
GO111MODULE=on go test -tags=integration -mod=vendor -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
|
||||
|
||||
benchmark:
|
||||
GO111MODULE=on go test -mod=vendor -bench=. ./lib/...
|
||||
GO111MODULE=on go test -mod=vendor -bench=. ./app/...
|
||||
|
||||
benchmark-pure:
|
||||
GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./lib/...
|
||||
GO111MODULE=on CGO_ENABLED=0 go test -mod=vendor -bench=. ./app/...
|
||||
|
||||
vendor-update:
|
||||
GO111MODULE=on go get -u ./lib/...
|
||||
GO111MODULE=on go get -u ./app/...
|
||||
GO111MODULE=on go mod tidy
|
||||
GO111MODULE=on go mod vendor
|
||||
|
||||
app-local:
|
||||
CGO_ENABLED=1 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-pure:
|
||||
CGO_ENABLED=0 GO111MODULE=on go build $(RACE) -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
quicktemplate-gen: install-qtc
|
||||
qtc
|
||||
|
||||
install-qtc:
|
||||
which qtc || GO111MODULE=off go get -u github.com/valyala/quicktemplate/qtc
|
||||
|
||||
|
||||
golangci-lint: install-golangci-lint
|
||||
golangci-lint run --exclude '(SA4003|SA1019):' -D errcheck
|
||||
|
||||
install-golangci-lint:
|
||||
which golangci-lint || GO111MODULE=off go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
824
README.md
824
README.md
@@ -1,41 +1,790 @@
|
||||
<img align="center" alt="Victoria Metrics" src="logo.png">
|
||||
|
||||
## VictoriaMetrics - the best long-term remote storage for Prometheus
|
||||
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
||||
[](http://slack.victoriametrics.com/)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
|
||||
[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
|
||||
|
||||
### VictoriaMetrics features
|
||||
<img alt="Victoria Metrics" src="logo.png">
|
||||
|
||||
- Native [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) support. Additionally, VictoriaMetrics extends PromQL with useful features. See [Extended PromQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL) for more details.
|
||||
- Simple configuration. Just copy-n-paste remote storage URL to Prometheus config and that's it! See [Quick Start](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/Quick-Start) for more info.
|
||||
- Reduced operational overhead. Offload operational burden - configuration, capacity planning, scalability, backups, retention, durability - from Prometheus local storage to VictoriaMetrics.
|
||||
- Insertion rate scales to [millions of metric values per second](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893).
|
||||
- Storage scales to [millions of metrics](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) with trillions of metric values.
|
||||
- Wide range of retention periods - from 1 month to 5 years. Users may create different projects (aka `storage namespaces`) with different retention periods.
|
||||
- [Fast query engine](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). It excels on heavy queries over thousands of metrics with millions of metric values.
|
||||
- The same remote storage URL may be used by multiple Prometheus instances collecting distinct metric sets, so all these metrics may be used in a single query (aka `global querying view`). This works ideally for multiple Prometheus instances located in different subnetworks / datacenters.
|
||||
- Accepts data in [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_reference/), so [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) and other influx-compatible agents may send data to VictoriaMetrics.
|
||||
- [Single-server VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) accepts data in [Graphite plaintext protocol](https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol) if `-graphiteListenAddr` is set and in [OpenTSDB format](http://opentsdb.net/docs/build/html/api_telnet/put.html) if `-opentsdbListenAddr` is set.
|
||||
- VictoriaMetrics supports backfilling, i.e. data points from the past may be inserted into the DB.
|
||||
- Time series data may be exported via `/api/v1/export?match=<metric_selector>`. Optional `start` and `end` timestamps may be added for exporting time series in the given time range.
|
||||
- Time series may be deleted via `/api/v1/admin/tsdb/delete_series?match[]=<metric_selector>`.
|
||||
- [Instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) are supported.
|
||||
## Single-node VictoriaMetrics
|
||||
|
||||
VictoriaMetrics is fast, cost-effective and scalable time-series database. It can be used as long-term remote storage for Prometheus.
|
||||
It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
[docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and
|
||||
in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
|
||||
Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
|
||||
|
||||
### Useful links
|
||||
## Prominent features
|
||||
|
||||
* [Free single-node VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/Single-server-VictoriaMetrics)
|
||||
* [Site](https://victoriametrics.com/)
|
||||
* [`WITH` templates playground](https://play.victoriametrics.com/promql/expand-with-exprs)
|
||||
* [Grafana playground](http://play-grafana.victoriametrics.com:3000/d/4ome8yJmz/node-exporter-on-victoriametrics-demo)
|
||||
* [Docs](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki)
|
||||
* [FAQ](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/FAQ)
|
||||
* [Issues](https://github.com/VictoriaMetrics/VictoriaMetrics/issues)
|
||||
* [Google group](https://groups.google.com/forum/#!forum/victoriametrics)
|
||||
* [Creating the best remote storage for Prometheus](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac) - an article with technical details about VictoriaMetrics.
|
||||
* Supports [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/), so it can be used as Prometheus drop-in replacement in Grafana.
|
||||
Additionally, VictoriaMetrics extends PromQL with opt-in [useful features](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL).
|
||||
* Supports global query view. Multiple Prometheus instances may write data into VictoriaMetrics. Later this data may be used in a single query.
|
||||
* High performance and good scalability for both [inserts](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
|
||||
and [selects](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
|
||||
[Outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
* [Uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) when working with millions of unique time series (aka high cardinality).
|
||||
* High data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
|
||||
may be crammed into limited storage comparing to TimescaleDB.
|
||||
* Optimized for storage with high-latency IO and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
|
||||
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, Uber M3, Cortex, InfluxDB or TimescaleDB.
|
||||
See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae)
|
||||
and [comparing Thanos to VictoriaMetrics cluster](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683).
|
||||
* Easy operation:
|
||||
* VictoriaMetrics consists of a single executable without external dependencies.
|
||||
* All the configuration is done via explicit command-line flags with reasonable defaults.
|
||||
* All the data is stored in a single directory pointed by `-storageDataPath` flag.
|
||||
* Easy backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* Storage is protected from corruption on unclean shutdown (i.e. hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* Supports metrics' ingestion and [backfilling](#backfilling) via the following protocols:
|
||||
* [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
|
||||
* [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
|
||||
* [Graphite plaintext protocol](https://graphite.readthedocs.io/en/latest/feeding-carbon.html) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon)
|
||||
if `-graphiteListenAddr` is set.
|
||||
* [OpenTSDB put message](http://opentsdb.net/docs/build/html/api_telnet/put.html) if `-opentsdbListenAddr` is set.
|
||||
* [HTTP OpenTSDB /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html) if `-opentsdbHTTPListenAddr` is set.
|
||||
* Ideally works with big amounts of time series data from Kubernetes, IoT sensors, connected cars, industrial telemetry and various Enterprise workloads.
|
||||
* Has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
|
||||
|
||||
### Victoria Metrics Logo
|
||||
## Operation
|
||||
|
||||
|
||||
### Table of contents
|
||||
|
||||
- [How to start VictoriaMetrics](#how-to-start-victoriametrics)
|
||||
- [Prometheus setup](#prometheus-setup)
|
||||
- [Grafana setup](#grafana-setup)
|
||||
- [How to upgrade VictoriaMetrics?](#how-to-upgrade-victoriametrics)
|
||||
- [How to apply new config to VictoriaMetrics?](#how-to-apply-new-config-to-victoriametrics)
|
||||
- [How to send data from InfluxDB-compatible agents such as Telegraf?](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
|
||||
- [How to send data from Graphite-compatible agents such as StatsD?](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
|
||||
- [Querying Graphite data](#querying-graphite-data)
|
||||
- [How to send data from OpenTSDB-compatible agents?](#how-to-send-data-from-opentsdb-compatible-agents)
|
||||
- [How to build from sources](#how-to-build-from-sources)
|
||||
- [Development build](#development-build)
|
||||
- [Production build](#production-build)
|
||||
- [ARM build](#arm-build)
|
||||
- [Pure Go build (CGO_ENABLED=0)](#pure-go-build-cgo_enabled0)
|
||||
- [Building docker images](#building-docker-images)
|
||||
- [Start with docker-compose](#start-with-docker-compose)
|
||||
- [Setting up service](#setting-up-service)
|
||||
- [Third-party contributions](#third-party-contributions)
|
||||
- [How to work with snapshots?](#how-to-work-with-snapshots)
|
||||
- [How to delete time series?](#how-to-delete-time-series)
|
||||
- [How to export time series?](#how-to-export-time-series)
|
||||
- [Federation](#federation)
|
||||
- [Capacity planning](#capacity-planning)
|
||||
- [High availability](#high-availability)
|
||||
- [Multiple retentions](#multiple-retentions)
|
||||
- [Downsampling](#downsampling)
|
||||
- [Multi-tenancy](#multi-tenancy)
|
||||
- [Scalability and cluster version](#scalability-and-cluster-version)
|
||||
- [Alerting](#alerting)
|
||||
- [Security](#security)
|
||||
- [Tuning](#tuning)
|
||||
- [Monitoring](#monitoring)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Backfilling](#backfilling)
|
||||
- [Profiling](#profiling)
|
||||
- [Roadmap](#roadmap)
|
||||
- [Contacts](#contacts)
|
||||
- [Community and contributions](#community-and-contributions)
|
||||
- [Reporting bugs](#reporting-bugs)
|
||||
- [Victoria Metrics Logo](#victoria-metrics-logo)
|
||||
- [Logo Usage Guidelines](#logo-usage-guidelines)
|
||||
- [Font used:](#font-used)
|
||||
- [Color Palette:](#color-palette)
|
||||
- [We kindly ask:](#we-kindly-ask)
|
||||
|
||||
|
||||
### How to start VictoriaMetrics
|
||||
|
||||
Just start VictoriaMetrics [executable](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
|
||||
or [docker image](https://hub.docker.com/r/victoriametrics/victoria-metrics/) with the desired command-line flags.
|
||||
|
||||
The following command-line flags are used the most:
|
||||
|
||||
* `-storageDataPath` - path to data directory. VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in current working directory.
|
||||
* `-retentionPeriod` - retention period in months for the data. Older data is automatically deleted. Default period is 1 month.
|
||||
* `-httpListenAddr` - TCP address to listen to for http requests. By default, it listens port `8428` on all the network interfaces.
|
||||
* `-graphiteListenAddr` - TCP and UDP address to listen to for Graphite data. By default, it is disabled.
|
||||
* `-opentsdbListenAddr` - TCP and UDP address to listen to for OpenTSDB data over telnet protocol. By default, it is disabled.
|
||||
* `-opentsdbHTTPListenAddr` - TCP address to listen to for HTTP OpenTSDB data over `/api/put`. By default, it is disabled.
|
||||
|
||||
Pass `-help` to see all the available flags with description and default values.
|
||||
|
||||
It is recommended to set up [monitoring](#monitoring) for VictoriaMetrics.
|
||||
|
||||
|
||||
### Prometheus setup
|
||||
|
||||
Add the following lines to Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`):
|
||||
|
||||
```yml
|
||||
remote_write:
|
||||
- url: http://<victoriametrics-addr>:8428/api/v1/write
|
||||
queue_config:
|
||||
max_samples_per_send: 10000
|
||||
max_shards: 30
|
||||
```
|
||||
|
||||
Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
|
||||
Then apply the new config via the following command:
|
||||
|
||||
```
|
||||
kill -HUP `pidof prometheus`
|
||||
```
|
||||
|
||||
Prometheus writes incoming data to local storage and replicates it to remote storage in parallel.
|
||||
This means the data remains available in local storage for `--storage.tsdb.retention.time` duration
|
||||
even if remote storage is unavailable.
|
||||
|
||||
If you plan to send data to VictoriaMetrics from multiple Prometheus instances, then add the following lines into `global` section
|
||||
of [Prometheus config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file):
|
||||
|
||||
```yml
|
||||
global:
|
||||
external_labels:
|
||||
datacenter: dc-123
|
||||
```
|
||||
|
||||
This instructs Prometheus to add `datacenter=dc-123` label to each time series sent to remote storage.
|
||||
The label name may be arbitrary - `datacenter` is just an example. The label value must be unique
|
||||
across Prometheus instances, so those time series may be filtered and grouped by this label.
|
||||
|
||||
|
||||
It is recommended upgrading Prometheus to [v2.12.0](https://github.com/prometheus/prometheus/releases) or newer,
|
||||
since the previous versions may have issues with `remote_write`.
|
||||
|
||||
|
||||
### Grafana setup
|
||||
|
||||
Create [Prometheus datasource](http://docs.grafana.org/features/datasources/prometheus/) in Grafana with the following Url:
|
||||
|
||||
```
|
||||
http://<victoriametrics-addr>:8428
|
||||
```
|
||||
|
||||
Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
|
||||
|
||||
Then build graphs with the created datasource using [Prometheus query language](https://prometheus.io/docs/prometheus/latest/querying/basics/).
|
||||
VictoriaMetrics supports native PromQL and [extends it with useful features](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL).
|
||||
|
||||
|
||||
### How to upgrade VictoriaMetrics?
|
||||
|
||||
It is safe to upgrade VictoriaMetrics to new versions unless [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
|
||||
say otherwise. It is recommended performing regular upgrades to the latest version,
|
||||
since it may contain important bug fixes, performance optimizations or new features.
|
||||
|
||||
Follow the following steps during the upgrade:
|
||||
|
||||
1) Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
|
||||
2) Wait until the process stops. This can take a few seconds.
|
||||
3) Start the upgraded VictoriaMetrics.
|
||||
|
||||
Prometheus doesn't drop data during VictoriaMetrics restart.
|
||||
See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details.
|
||||
|
||||
|
||||
### How to apply new config to VictoriaMetrics?
|
||||
|
||||
VictoriaMetrics must be restarted for applying new config:
|
||||
|
||||
1) Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
|
||||
2) Wait until the process stops. This can take a few seconds.
|
||||
3) Start VictoriaMetrics with the new config.
|
||||
|
||||
Prometheus doesn't drop data during VictoriaMetrics restart.
|
||||
See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details.
|
||||
|
||||
|
||||
### How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)?
|
||||
|
||||
Just use `http://<victoriametrics-addr>:8428` url instead of InfluxDB url in agents' configs.
|
||||
For instance, put the following lines into `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:
|
||||
|
||||
```
|
||||
[[outputs.influxdb]]
|
||||
urls = ["http://<victoriametrics-addr>:8428"]
|
||||
```
|
||||
|
||||
Do not forget substituting `<victoriametrics-addr>` with the real address where VictoriaMetrics runs.
|
||||
|
||||
VictoriaMetrics maps Influx data using the following rules:
|
||||
* [`db` query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
|
||||
unless `db` tag exists in the Influx line.
|
||||
* Field names are mapped to time series names prefixed with `{measurement}{separator}` value,
|
||||
where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag.
|
||||
See also `-influxSkipSingleField` command-line flag.
|
||||
* Field values are mapped to time series values.
|
||||
* Tags are mapped to Prometheus labels as-is.
|
||||
|
||||
For example, the following Influx line:
|
||||
|
||||
```
|
||||
foo,tag1=value1,tag2=value2 field1=12,field2=40
|
||||
```
|
||||
|
||||
is converted into the following Prometheus data points:
|
||||
|
||||
```
|
||||
foo_field1{tag1="value1", tag2="value2"} 12
|
||||
foo_field2{tag1="value1", tag2="value2"} 40
|
||||
```
|
||||
|
||||
Example for writing data with [Influx line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
|
||||
to local VictoriaMetrics using `curl`:
|
||||
|
||||
```
|
||||
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'
|
||||
```
|
||||
|
||||
An arbitrary number of lines delimited by '\n' may be sent in a single request.
|
||||
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:
|
||||
|
||||
```
|
||||
curl -G 'http://localhost:8428/api/v1/export' -d 'match={__name__!=""}'
|
||||
```
|
||||
|
||||
The `/api/v1/export` endpoint should return the following response:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560272508147]}
|
||||
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1560272508147]}
|
||||
```
|
||||
|
||||
Note that Influx line protocol expects [timestamps in *nanoseconds* by default](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/#timestamp),
|
||||
while VictoriaMetrics stores them with *milliseconds* precision.
|
||||
|
||||
|
||||
### How to send data from Graphite-compatible agents such as [StatsD](https://github.com/etsy/statsd)?
|
||||
|
||||
1) Enable Graphite receiver in VictoriaMetrics by setting `-graphiteListenAddr` command line flag. For instance,
|
||||
the following command will enable Graphite receiver in VictoriaMetrics on TCP and UDP port `2003`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod -graphiteListenAddr=:2003
|
||||
```
|
||||
|
||||
2) Use the configured address in Graphite-compatible agents. For instance, set `graphiteHost`
|
||||
to the VictoriaMetrics host in `StatsD` configs.
|
||||
|
||||
|
||||
Example for writing data with Graphite plaintext protocol to local VictoriaMetrics using `nc`:
|
||||
|
||||
```
|
||||
echo "foo.bar.baz;tag1=value1;tag2=value2 123 `date +%s`" | nc -N localhost 2003
|
||||
```
|
||||
|
||||
VictoriaMetrics sets the current time if the timestamp is omitted.
|
||||
An arbitrary number of lines delimited by `\n` may be sent in one go.
|
||||
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:
|
||||
|
||||
```
|
||||
curl -G 'http://localhost:8428/api/v1/export' -d 'match={__name__!=""}'
|
||||
```
|
||||
|
||||
The `/api/v1/export` endpoint should return the following response:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560277406000]}
|
||||
```
|
||||
|
||||
|
||||
### Querying Graphite data
|
||||
|
||||
Data sent to VictoriaMetrics via `Graphite plaintext protocol` may be read either via
|
||||
[Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/)
|
||||
or via [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi/blob/master/cmd/carbonapi/carbonapi.example.prometheus.yaml).
|
||||
|
||||
|
||||
|
||||
### How to send data from OpenTSDB-compatible agents?
|
||||
|
||||
VictoriaMetrics supports [telnet put protocol](http://opentsdb.net/docs/build/html/api_telnet/put.html)
|
||||
and [HTTP /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html) for ingesting OpenTSDB data.
|
||||
|
||||
#### Sending data via `telnet put` protocol
|
||||
|
||||
1) Enable OpenTSDB receiver in VictoriaMetrics by setting `-opentsdbListenAddr` command line flag. For instance,
|
||||
the following command enables OpenTSDB receiver in VictoriaMetrics on TCP and UDP port `4242`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod -opentsdbListenAddr=:4242
|
||||
```
|
||||
|
||||
2) Send data to the given address from OpenTSDB-compatible agents.
|
||||
|
||||
|
||||
Example for writing data with OpenTSDB protocol to local VictoriaMetrics using `nc`:
|
||||
|
||||
```
|
||||
echo "put foo.bar.baz `date +%s` 123 tag1=value1 tag2=value2" | nc -N localhost 4242
|
||||
```
|
||||
|
||||
An arbitrary number of lines delimited by `\n` may be sent in one go.
|
||||
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:
|
||||
|
||||
```
|
||||
curl -G 'http://localhost:8428/api/v1/export' -d 'match={__name__!=""}'
|
||||
```
|
||||
|
||||
The `/api/v1/export` endpoint should return the following response:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1560277292000]}
|
||||
```
|
||||
|
||||
|
||||
#### Sending OpenTSDB data via HTTP `/api/put` requests
|
||||
|
||||
1) Enable HTTP server for OpenTSDB `/api/put` requests by setting `-opentsdbHTTPListenAddr` command line flag. For instance,
|
||||
the following command enables OpenTSDB HTTP server on port `4242`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod -opentsdbHTTPListenAddr=:4242
|
||||
```
|
||||
|
||||
2) Send data to the given address from OpenTSDB-compatible agents.
|
||||
|
||||
Example for writing a single data point:
|
||||
|
||||
```
|
||||
curl -H 'Content-Type: application/json' -d '{"metric":"x.y.z","value":45.34,"tags":{"t1":"v1","t2":"v2"}}' http://localhost:4242/api/put
|
||||
```
|
||||
|
||||
Example for writing multiple data points in a single request:
|
||||
|
||||
```
|
||||
curl -H 'Content-Type: application/json' -d '[{"metric":"foo","value":45.34},{"metric":"bar","value":43}]' http://localhost:4242/api/put
|
||||
```
|
||||
|
||||
After that the data may be read via [/api/v1/export](#how-to-export-time-series) endpoint:
|
||||
|
||||
```
|
||||
curl -G 'http://localhost:8428/api/v1/export' -d 'match[]=x.y.z' -d 'match[]=foo' -d 'match[]=bar'
|
||||
```
|
||||
|
||||
The `/api/v1/export` endpoint should return the following response:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"foo"},"values":[45.34],"timestamps":[1566464846000]}
|
||||
{"metric":{"__name__":"bar"},"values":[43],"timestamps":[1566464846000]}
|
||||
{"metric":{"__name__":"x.y.z","t1":"v1","t2":"v2"},"values":[45.34],"timestamps":[1566464763000]}
|
||||
```
|
||||
|
||||
|
||||
### How to build from sources
|
||||
|
||||
We recommend using either [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or
|
||||
[docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/) instead of building VictoriaMetrics
|
||||
from sources. Building from sources is reasonable when developing additional features specific
|
||||
to your needs.
|
||||
|
||||
|
||||
#### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make victoria-metrics` from the root folder of the repository.
|
||||
It builds `victoria-metrics` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Production build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-prod` from the root folder of the repository.
|
||||
It builds `victoria-metrics-prod` binary and puts it into the `bin` folder.
|
||||
|
||||
#### ARM build
|
||||
|
||||
ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).
|
||||
|
||||
#### Development ARM build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make victoria-metrics-arm` or `make victoria-metrics-arm64` from the root folder of the repository.
|
||||
It builds `victoria-metrics-arm` or `victoria-metrics-arm64` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
#### Production ARM build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-arm-prod` or `make victoria-metrics-arm64-prod` from the root folder of the repository.
|
||||
It builds `victoria-metrics-arm-prod` or `victoria-metrics-arm64-prod` binary respectively and puts it into the `bin` folder.
|
||||
|
||||
#### Pure Go build (CGO_ENABLED=0)
|
||||
|
||||
`Pure Go` mode builds only Go code without [cgo](https://golang.org/cmd/cgo/) dependencies.
|
||||
This is an experimental mode, which may result in a lower compression ratio and slower decompression performance.
|
||||
Use it with caution!
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `make victoria-metrics-pure` from the root folder of the repository.
|
||||
It builds `victoria-metrics-pure` binary and puts it into the `bin` folder.
|
||||
|
||||
#### Building docker images
|
||||
|
||||
Run `make package-victoria-metrics`. It builds `victoriametrics/victoria-metrics:<PKG_TAG>` docker image locally.
|
||||
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
|
||||
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-victoria-metrics`.
|
||||
|
||||
|
||||
### Start with docker-compose
|
||||
|
||||
[Docker-compose](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/docker-compose.yml)
|
||||
helps to spin up VictoriaMetrics, Prometheus and Grafana with one command.
|
||||
More details may be found [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker#folder-contains-basic-images-and-tools-for-building-and-running-victoria-metrics-in-docker).
|
||||
|
||||
|
||||
### Setting up service
|
||||
|
||||
Read [these instructions](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/43) on how to set up VictoriaMetrics as a service in your OS.
|
||||
|
||||
|
||||
### Third-party contributions
|
||||
|
||||
* [Unofficial yum repository](https://copr.fedorainfracloud.org/coprs/antonpatsev/VictoriaMetrics/) ([source code](https://github.com/patsevanton/victoriametrics-rpm))
|
||||
|
||||
|
||||
### How to work with snapshots?
|
||||
|
||||
VictoriaMetrics can create [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
|
||||
for all the data stored under `-storageDataPath` directory.
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/create` in order to create an instant snapshot.
|
||||
The page will return the following JSON response:
|
||||
|
||||
```
|
||||
{"status":"ok","snapshot":"<snapshot-name>"}
|
||||
```
|
||||
|
||||
Snapshots are created under `<-storageDataPath>/snapshots` directory, where `<-storageDataPath>`
|
||||
is the command-line flag value. Snapshots can be archived to backup storage via `cp -L`, `rsync -L`, `scp -r`
|
||||
or any similar tool that follows symlinks during copying.
|
||||
|
||||
The `http://<victoriametrics-addr>:8428/snapshot/list` page contains the list of available snapshots.
|
||||
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>` in order
|
||||
to delete `<snapshot-name>` snapshot.
|
||||
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete_all` in order to delete all the snapshots.
|
||||
|
||||
Steps for restoring from a snapshot:
|
||||
1. Stop VictoriaMetrics with `kill -INT`.
|
||||
2. Remove the entire contents of the directory pointed by `-storageDataPath` command-line flag.
|
||||
3. Copy snapshot contents to the directory pointed by `-storageDataPath`.
|
||||
4. Start VictoriaMetrics.
|
||||
|
||||
|
||||
### How to delete time series?
|
||||
|
||||
Send a request to `http://<victoriametrics-addr>:8428/api/v1/admin/tsdb/delete_series?match[]=<timeseries_selector_for_delete>`,
|
||||
where `<timeseries_selector_for_delete>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
|
||||
for metrics to delete. After that all the time series matching the given selector are deleted. Storage space for
|
||||
the deleted time series isn't freed instantly - it is freed during subsequent merges of data files.
|
||||
|
||||
It is recommended to verify which metrics will be deleted with the call to `http://<victoria-metrics-addr>:8428/api/v1/series?match[]=<timeseries_selector_for_delete>`
|
||||
before actually deleting the metrics.
|
||||
|
||||
|
||||
### How to export time series?
|
||||
|
||||
Send a request to `http://<victoriametrics-addr>:8428/api/v1/export?match[]=<timeseries_selector_for_export>`,
|
||||
where `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
|
||||
for metrics to export. The response would contain all the data for the selected time series in [JSON streaming format](https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON).
|
||||
Each JSON line would contain data for a single time series. An example output:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"up","job":"node_exporter","instance":"localhost:9100"},"values":[0,0,0],"timestamps":[1549891472010,1549891487724,1549891503438]}
|
||||
{"metric":{"__name__":"up","job":"prometheus","instance":"localhost:9090"},"values":[1,1,1],"timestamps":[1549891461511,1549891476511,1549891491511]}
|
||||
```
|
||||
|
||||
Optional `start` and `end` args may be added to the request in order to limit the time frame for the exported data. These args may contain either
|
||||
unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values.
|
||||
|
||||
|
||||
### Federation
|
||||
|
||||
VictoriaMetrics exports [Prometheus-compatible federation data](https://prometheus.io/docs/prometheus/latest/federation/)
|
||||
at `http://<victoriametrics-addr>:8428/federate?match[]=<timeseries_selector_for_federation>`.
|
||||
|
||||
Optional `start` and `end` args may be added to the request in order to scrape the last point for each selected time series on the `[start ... end]` interval.
|
||||
`start` and `end` may contain either unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values. By default, the last point
|
||||
on the interval `[now - max_lookback ... now]` is scraped for each time series. The default value for `max_lookback` is `5m` (5 minutes), but can be overridden.
|
||||
For instance, `/federate?match[]=up&max_lookback=1h` would return last points on the `[now - 1h ... now]` interval. This may be useful for time series federation
|
||||
with scrape intervals exceeding `5m`.
|
||||
|
||||
|
||||
### Capacity planning
|
||||
|
||||
A rough estimation of the required resources for ingestion path:
|
||||
|
||||
* RAM size: less than 1KB per active time series. So, ~1GB of RAM is required for 1M active time series.
|
||||
Time series is considered active if new data points have been added to it recently or if it has been recently queried.
|
||||
The number of active time series may be obtained from `vm_cache_entries{type="storage/hour_metric_ids"}` metric
|
||||
exported on the `/metrics` page.
|
||||
VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited by `-memory.allowedPercent` flag.
|
||||
|
||||
* CPU cores: a CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing
|
||||
the insert stream of 1M data points per second. The ingestion rate may be lower for high cardinality data or for time series with high number of labels.
|
||||
See [this article](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) for details.
|
||||
If you see lower numbers per CPU core, then it is likely active time series info doesn't fit caches,
|
||||
so you need more RAM for lowering CPU usage.
|
||||
|
||||
* Storage space: less than a byte per data point on average. So, ~260GB is required for storing a month-long insert stream
|
||||
of 100K data points per second.
|
||||
The actual storage size heavily depends on data randomness (entropy). Higher randomness means higher storage size requirements.
|
||||
Read [this article](https://medium.com/faun/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932)
|
||||
for details.
|
||||
|
||||
* Network usage: outbound traffic is negligible. Ingress traffic is ~100 bytes per ingested data point via
|
||||
[Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
|
||||
The actual ingress bandwidth usage depends on the average number of labels per ingested metric and the average size
|
||||
of label values. The higher number of per-metric labels and longer label values mean the higher ingress bandwidth.
|
||||
|
||||
|
||||
The required resources for query path:
|
||||
|
||||
* RAM size: depends on the number of time series to scan in each query and the `step`
|
||||
argument passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries).
|
||||
The higher number of scanned time series and lower `step` argument results in the higher RAM usage.
|
||||
|
||||
* CPU cores: a CPU core per 30 millions of scanned data points per second.
|
||||
|
||||
* Network usage: depends on the frequency and the type of incoming requests. Typical Grafana dashboards usually
|
||||
require negligible network bandwidth.
|
||||
|
||||
|
||||
### High availability
|
||||
|
||||
1) Install multiple VictoriaMetrics instances in distinct datacenters (availability zones).
|
||||
2) Add addresses of these instances to `remote_write` section in Prometheus config:
|
||||
|
||||
```yml
|
||||
remote_write:
|
||||
- url: http://<victoriametrics-addr-1>:8428/api/v1/write
|
||||
queue_config:
|
||||
max_samples_per_send: 10000
|
||||
# ...
|
||||
- url: http://<victoriametrics-addr-N>:8428/api/v1/write
|
||||
queue_config:
|
||||
max_samples_per_send: 10000
|
||||
```
|
||||
|
||||
3) Apply the updated config:
|
||||
|
||||
```
|
||||
kill -HUP `pidof prometheus`
|
||||
```
|
||||
|
||||
4) Now Prometheus should write data into all the configured `remote_write` urls in parallel.
|
||||
5) Set up [Promxy](https://github.com/jacksontj/promxy) in front of all the VictoriaMetrics replicas.
|
||||
6) Set up Prometheus datasource in Grafana that points to Promxy.
|
||||
|
||||
|
||||
If you have Prometheus HA pairs with replicas `r1` and `r2` in each pair, then configure each `r1`
|
||||
to write data to `victoriametrics-addr-1`, while each `r2` should write data to `victoriametrics-addr-2`.
|
||||
|
||||
|
||||
### Multiple retentions
|
||||
|
||||
Just start multiple VictoriaMetrics instances with distinct values for the following flags:
|
||||
|
||||
* `-retentionPeriod`
|
||||
* `-storageDataPath`, so the data for each retention period is saved in a separate directory
|
||||
* `-httpListenAddr`, so clients may reach VictoriaMetrics instance with proper retention
|
||||
|
||||
|
||||
### Downsampling
|
||||
|
||||
There is no downsampling support at the moment, but:
|
||||
- VictoriaMetrics is optimized for querying big amounts of raw data. See benchmark results for heavy queries
|
||||
in [this article](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
- VictoriaMetrics has good compression for on-disk data. See [this article](https://medium.com/@valyala/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932)
|
||||
for details.
|
||||
|
||||
These properties reduce the need in downsampling. We plan to implement downsampling in the future.
|
||||
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36) for details.
|
||||
|
||||
|
||||
### Multi-tenancy
|
||||
|
||||
Single-node VictoriaMetrics doesn't support multi-tenancy. Use [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) instead.
|
||||
|
||||
|
||||
### Scalability and cluster version
|
||||
|
||||
Though single-node VictoriaMetrics cannot scale to multiple nodes, it is optimized for resource usage - storage size / bandwidth / IOPS, RAM, CPU.
|
||||
This means that a single-node VictoriaMetrics may scale vertically and substitute a moderately sized cluster built with competing solutions
|
||||
such as Thanos, Uber M3, InfluxDB or TimescaleDB. See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
|
||||
So try single-node VictoriaMetrics at first and then [switch to cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) if you still need
|
||||
horizontally scalable long-term remote storage for really large Prometheus deployments.
|
||||
[Contact us](mailto:info@victoriametrics.com) for paid support.
|
||||
|
||||
|
||||
### Alerting
|
||||
|
||||
VictoriaMetrics doesn't support rule evaluation and alerting yet, so these actions must be performed either
|
||||
on [Prometheus side](https://prometheus.io/docs/alerting/overview/) or on [Grafana side](https://grafana.com/docs/alerting/rules/).
|
||||
|
||||
|
||||
### Security
|
||||
|
||||
Do not forget protecting sensitive endpoints in VictoriaMetrics when exposing it to untrusted networks such as the internet.
|
||||
Consider setting the following command-line flags:
|
||||
|
||||
* `-tls`, `-tlsCertFile` and `-tlsKeyFile` for switching from HTTP to HTTPS.
|
||||
* `-httpAuth.username` and `-httpAuth.password` for protecting all the HTTP endpoints
|
||||
with [HTTP Basic Authentication](https://en.wikipedia.org/wiki/Basic_access_authentication).
|
||||
* `-deleteAuthKey` for protecting `/api/v1/admin/tsdb/delete_series` endpoint. See [how to delete time series](#how-to-delete-time-series).
|
||||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
|
||||
|
||||
|
||||
### Tuning
|
||||
|
||||
* There is no need for VictoriaMetrics tuning, since it uses reasonable defaults for command-line flags,
|
||||
which are automatically adjusted for the available CPU and RAM resources.
|
||||
* There is no need for Operating System tuning, since VictoriaMetrics is optimized for default OS settings.
|
||||
The only option is increasing the limit on [the number of open files in the OS](https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a),
|
||||
so Prometheus instances could establish more connections to VictoriaMetrics.
|
||||
* The recommended filesystem is `ext4`, the recommended persistent storage is [persistent HDD-based disk on GCP](https://cloud.google.com/compute/docs/disks/#pdspecs),
|
||||
since it is protected from hardware failures via internal replication and it can be [resized on the fly](https://cloud.google.com/compute/docs/disks/add-persistent-disk#resize_pd).
|
||||
If you plan storing more than 1TB of data on `ext4` partition or plan extending it to more than 16TB,
|
||||
then the following options are recommended to pass to `mkfs.ext4`:
|
||||
|
||||
```
|
||||
mkfs.ext4 ... -O 64bit,huge_file,extent -T huge
|
||||
```
|
||||
|
||||
|
||||
### Monitoring
|
||||
|
||||
VictoriaMetrics exports internal metrics in Prometheus format on the `/metrics` page.
|
||||
Add this page to Prometheus' scrape config in order to collect VictoriaMetrics metrics.
|
||||
There is [an official Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229).
|
||||
|
||||
The most interesting metrics are:
|
||||
|
||||
* `vm_cache_entries{type="storage/hour_metric_ids"}` - the number of time series with new data points during the last hour
|
||||
aka active time series.
|
||||
* `vm_rows{type="indexdb"}` - the number of rows in inverted index. Each label in each unique time series adds a single
|
||||
row into the inverted index. An approximate number of time series in the database may be calculated as
|
||||
`vm_rows{type="indexdb"} / (avg_labels_per_series + 1)`, where `avg_labels_per_series` is the average number of labels
|
||||
per each time series.
|
||||
* Sum of `vm_rows{type="storage/big"}` and `vm_rows{type="storage/small"}` - total number of `(timestamp, value)` data points
|
||||
in the database.
|
||||
* Sum of all the `vm_cache_size_bytes` metrics - the total size of all the caches in the database.
|
||||
* `vm_allowed_memory_bytes` - the maximum allowed size for caches in the database. It is calculated as `system_memory * <-memory.allowedPercent> / 100`,
|
||||
where `system_memory` is the amount of system memory and `-memory.allowedPercent` is the corresponding flag value.
|
||||
* `vm_rows_inserted_total` - the total number of inserted rows since VictoriaMetrics start.
|
||||
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
* It is recommended to use default command-line flag values (i.e. don't set them explicitly) until the need
|
||||
in tweaking these flag values arises.
|
||||
|
||||
* If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second,
|
||||
then it is likely you have too many active time series for the current amount of RAM.
|
||||
It is recommended increasing the amount of RAM on the node with VictoriaMetrics in order to improve
|
||||
ingestion performance.
|
||||
Another option is to increase `-memory.allowedPercent` command-line flag value. Be careful with this
|
||||
option, since too big value for `-memory.allowedPercent` may result in high I/O usage.
|
||||
|
||||
* VictoriaMetrics requires free disk space for [merging data files to bigger ones](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
It may slow down when there is not enough free space left. So make sure the `-storageDataPath` directory
|
||||
has at least 20% of free space compared to the disk size.
|
||||
|
||||
* If VictoriaMetrics doesn't work because of certain parts are corrupted due to disk errors,
|
||||
then just remove directories with broken parts. This will recover VictoriaMetrics at the cost
|
||||
of data loss stored in the broken parts. In the future, `vmrecover` tool will be created
|
||||
for automatic recovering from such errors.
|
||||
|
||||
|
||||
### Backfilling
|
||||
|
||||
Make sure that configured `-retentionPeriod` covers timestamps for the backfilled data.
|
||||
|
||||
It is recommended disabling query cache with `-search.disableCache` command-line flag when writing
|
||||
historical data with timestamps from the past, since the cache assumes that the data is written with
|
||||
the current timestamps. Query cache can be enabled after the backfilling is complete.
|
||||
|
||||
|
||||
### Profiling
|
||||
|
||||
VictoriaMetrics provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
|
||||
|
||||
- Memory profile. It can be collected with the following command:
|
||||
```
|
||||
curl -s http://<victoria-metrics-host>:8428/debug/pprof/heap > mem.pprof
|
||||
```
|
||||
|
||||
- CPU profile. It can be collected with the following command:
|
||||
```
|
||||
curl -s http://<victoria-metrics-host>:8428/debug/pprof/profile > cpu.pprof
|
||||
```
|
||||
|
||||
The command for collecting CPU profile waits for 30 seconds before returning.
|
||||
|
||||
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
|
||||
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [ ] Replication [#118](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/118)
|
||||
- [ ] Support of Object Storages (GCS, S3, Azure Storage) [#38](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/38)
|
||||
- [ ] Data downsampling [#36](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36)
|
||||
- [ ] Alert Manager Integration [#119](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/119)
|
||||
- [ ] CLI tool for data migration, re-balancing and adding/removing nodes [#103](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/103)
|
||||
|
||||
|
||||
The discussion happens [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/129). Feel free to comment on any item or add your own.
|
||||
|
||||
|
||||
## Contacts
|
||||
|
||||
Contact us with any questions regarding VictoriaMetrics at [info@victoriametrics.com](mailto:info@victoriametrics.com).
|
||||
|
||||
|
||||
## Community and contributions
|
||||
|
||||
Feel free to ask any questions regarding VictoriaMetrics:
|
||||
|
||||
- [slack](http://slack.victoriametrics.com/)
|
||||
- [telegram-en](https://t.me/VictoriaMetrics_en)
|
||||
- [telegram-ru](https://t.me/VictoriaMetrics_ru1)
|
||||
- [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)
|
||||
|
||||
|
||||
If you like VictoriaMetrics and want to contribute, then we need the following:
|
||||
|
||||
- Filing issues and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
|
||||
- Spreading the word about VictoriaMetrics: conference talks, articles, comments, experience sharing with colleagues.
|
||||
- Updating documentation.
|
||||
|
||||
We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):
|
||||
|
||||
- Prefer simple code and architecture.
|
||||
- Avoid complex abstractions.
|
||||
- Avoid magic code and fancy algorithms.
|
||||
- Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
|
||||
- Minimize the number of moving parts in the distributed system.
|
||||
- Avoid automated decisions, which may hurt cluster availability, consistency or performance.
|
||||
|
||||
Adhering to the `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
|
||||
|
||||
|
||||
## Reporting bugs
|
||||
|
||||
Report bugs and propose new features [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
|
||||
|
||||
|
||||
## Victoria Metrics Logo
|
||||
|
||||
[Zip](VM_logo.zip) contains three folders with different image orientation (main color and inverted version).
|
||||
|
||||
@@ -46,24 +795,21 @@ Files included in each folder:
|
||||
* 2 EPS Adobe Illustrator EPS10 files
|
||||
|
||||
|
||||
#### Logo Usage Guidelines
|
||||
### Logo Usage Guidelines
|
||||
|
||||
##### Font used:
|
||||
#### Font used:
|
||||
|
||||
* Lato Black
|
||||
* Lato Black
|
||||
* Lato Regular
|
||||
|
||||
##### Color Palette:
|
||||
#### Color Palette:
|
||||
|
||||
* HEX [#110f0f](https://www.color-hex.com/color/110f0f)
|
||||
* HEX [#110f0f](https://www.color-hex.com/color/110f0f)
|
||||
* HEX [#ffffff](https://www.color-hex.com/color/ffffff)
|
||||
|
||||
#### We kindly ask:
|
||||
### We kindly ask:
|
||||
|
||||
- Please don't use any other font instead of suggested.
|
||||
- There should be sufficient clear space around the logo.
|
||||
- Do not change spacing, alignment, or relative locations of the design elements.
|
||||
- Do not change the proportions of any of the design elements or the design itself. You may resize as needed but must retain all proportions.
|
||||
|
||||
|
||||
|
||||
|
||||
72
app/victoria-metrics/Makefile
Normal file
72
app/victoria-metrics/Makefile
Normal file
@@ -0,0 +1,72 @@
|
||||
# All these commands must run from repository root.

# Build the victoria-metrics binary on the local host (requires a Go toolchain).
victoria-metrics:
	APP_NAME=victoria-metrics $(MAKE) app-local

# Build the production binary inside the builder docker image.
victoria-metrics-prod:
	APP_NAME=victoria-metrics $(MAKE) app-via-docker

# Build the victoriametrics/victoria-metrics docker image.
package-victoria-metrics:
	APP_NAME=victoria-metrics \
		$(MAKE) package-via-docker

# Publish the built docker image.
publish-victoria-metrics:
	APP_NAME=victoria-metrics $(MAKE) publish-via-docker

# Run VictoriaMetrics in docker, persisting data into ./victoria-metrics-data.
run-victoria-metrics:
	mkdir -p victoria-metrics-data
	DOCKER_OPTS='-v $(shell pwd)/victoria-metrics-data:/victoria-metrics-data' \
		APP_NAME=victoria-metrics \
		ARGS='-graphiteListenAddr=:2003 -opentsdbListenAddr=:4242 -retentionPeriod=12 -search.maxUniqueTimeseries=1000000 -search.maxQueryDuration=10m' \
		$(MAKE) run-via-docker

# Cross-compile for 32-bit ARM with cgo disabled.
victoria-metrics-arm:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm ./app/victoria-metrics

victoria-metrics-arm-prod:
	APP_NAME=victoria-metrics APP_SUFFIX='-arm' DOCKER_OPTS='--env CGO_ENABLED=0 --env GOARCH=arm' $(MAKE) app-via-docker

# Cross-compile for 64-bit ARM with cgo disabled.
victoria-metrics-arm64:
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm64 ./app/victoria-metrics

victoria-metrics-arm64-prod:
	APP_NAME=victoria-metrics APP_SUFFIX='-arm64' DOCKER_OPTS='--env CGO_ENABLED=0 --env GOARCH=arm64' $(MAKE) app-via-docker

# Pure-Go build (CGO_ENABLED=0) on the local host.
victoria-metrics-pure:
	APP_NAME=victoria-metrics $(MAKE) app-local-pure

victoria-metrics-pure-prod:
	APP_NAME=victoria-metrics APP_SUFFIX='-pure' DOCKER_OPTS='--env CGO_ENABLED=0' $(MAKE) app-via-docker

### Packaging as DEB - amd64
victoria-metrics-package-deb: victoria-metrics-prod
	./package/package_deb.sh amd64

### Packaging as DEB - arm64
victoria-metrics-package-deb-arm64: victoria-metrics-arm64-prod
	./package/package_deb.sh arm64

### Packaging as DEB - all
victoria-metrics-package-deb-all: \
	victoria-metrics-package-deb \
	victoria-metrics-package-deb-arm64

### Packaging as RPM - amd64
victoria-metrics-package-rpm: victoria-metrics-prod
	./package/package_rpm.sh amd64

### Packaging as RPM - arm64
victoria-metrics-package-rpm-arm64: victoria-metrics-arm64-prod
	./package/package_rpm.sh arm64

### Packaging as RPM - all
victoria-metrics-package-rpm-all: \
	victoria-metrics-package-rpm \
	victoria-metrics-package-rpm-arm64

### Packaging as both DEB and RPM - all
victoria-metrics-package-deb-rpm-all: \
	victoria-metrics-package-deb \
	victoria-metrics-package-deb-arm64 \
	victoria-metrics-package-rpm \
	victoria-metrics-package-rpm-arm64
|
||||
5
app/victoria-metrics/deployment/Dockerfile
Normal file
5
app/victoria-metrics/deployment/Dockerfile
Normal file
@@ -0,0 +1,5 @@
|
||||
# Minimal runtime image: no shell or package manager, just the binary.
FROM scratch
# CA bundle taken from a prebuilt local certs image so TLS peers can be verified.
COPY --from=local/certs:1.0.2 /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
# Production binary built beforehand into bin/ (see the Makefile targets).
COPY bin/victoria-metrics-prod .
# Default HTTP port (the -httpListenAddr flag defaults to :8428).
EXPOSE 8428
ENTRYPOINT ["/victoria-metrics-prod"]
|
||||
63
app/victoria-metrics/main.go
Normal file
63
app/victoria-metrics/main.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
)
|
||||
|
||||
var httpListenAddr = flag.String("httpListenAddr", ":8428", "TCP address to listen for http connections")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
logger.Infof("starting VictoraMetrics at %q...", *httpListenAddr)
|
||||
startTime := time.Now()
|
||||
vmstorage.Init()
|
||||
vmselect.Init()
|
||||
vminsert.Init()
|
||||
|
||||
go httpserver.Serve(*httpListenAddr, requestHandler)
|
||||
logger.Infof("started VictoriaMetrics in %s", time.Since(startTime))
|
||||
|
||||
sig := procutil.WaitForSigterm()
|
||||
logger.Infof("received signal %s", sig)
|
||||
|
||||
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
|
||||
startTime = time.Now()
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
logger.Fatalf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
vminsert.Stop()
|
||||
logger.Infof("successfully shut down the webservice in %s", time.Since(startTime))
|
||||
|
||||
vmstorage.Stop()
|
||||
vmselect.Stop()
|
||||
|
||||
fs.MustStopDirRemover()
|
||||
|
||||
logger.Infof("the VictoriaMetrics has been stopped in %s", time.Since(startTime))
|
||||
}
|
||||
|
||||
func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
if vminsert.RequestHandler(w, r) {
|
||||
return true
|
||||
}
|
||||
if vmselect.RequestHandler(w, r) {
|
||||
return true
|
||||
}
|
||||
if vmstorage.RequestHandler(w, r) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
495
app/victoria-metrics/main_test.go
Normal file
495
app/victoria-metrics/main_test.go
Normal file
@@ -0,0 +1,495 @@
|
||||
// +build integration
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
// Listen addresses and fixture locations used by the integration tests.
const (
	testFixturesDir            = "testdata"
	testStorageSuffix          = "vm-test-storage"
	testHTTPListenAddr         = ":7654"
	testStatsDListenAddr       = ":2003"
	testOpenTSDBListenAddr     = ":4242"
	testOpenTSDBHTTPListenAddr = ":4243"
	testLogLevel               = "INFO"
)

// HTTP endpoints derived from the listen addresses above.
const (
	testReadHTTPPath          = "http://127.0.0.1" + testHTTPListenAddr
	testWriteHTTPPath         = "http://127.0.0.1" + testHTTPListenAddr + "/write"
	testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
	testPromWriteHTTPPath     = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
	testHealthHTTPPath        = "http://127.0.0.1" + testHTTPListenAddr + "/health"
)

// Maximum time to wait for the test storage to become ready.
const (
	testStorageInitTimeout = 10 * time.Second
)

var (
	// storagePath holds the temporary data directory used by the test run.
	storagePath string
	// insertionTime is captured once so that inserted samples and expected
	// results (see Metric.UnmarshalJSON) share the same reference time.
	insertionTime = time.Now().UTC()
)
|
||||
|
||||
// test describes one JSON fixture from testdata/: the raw input lines to
// ingest, the queries to run afterwards and the expected result for each
// supported query endpoint.
type test struct {
	Name             string     `json:"name"`
	Data             []string   `json:"data"`
	Query            []string   `json:"query"`
	ResultMetrics    []Metric   `json:"result_metrics"`
	ResultSeries     Series     `json:"result_series"`
	ResultQuery      Query      `json:"result_query"`
	ResultQueryRange QueryRange `json:"result_query_range"`
	// Issue optionally links the upstream issue this fixture guards against.
	Issue string `json:"issue"`
}
|
||||
|
||||
// Metric mirrors one line of the /api/v1/export response.
type Metric struct {
	Metric     map[string]string `json:"metric"`
	Values     []float64         `json:"values"`
	Timestamps []int64           `json:"timestamps"`
}

// UnmarshalJSON expands {TIME_*} templates against insertionTime before
// decoding, so fixtures can express timestamps relative to "now".
func (r *Metric) UnmarshalJSON(b []byte) error {
	// The "plain" alias drops this custom UnmarshalJSON to avoid recursion.
	type plain Metric
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
}
|
||||
|
||||
// Series mirrors the /api/v1/series response.
type Series struct {
	Status string              `json:"status"`
	Data   []map[string]string `json:"data"`
}

// Query mirrors the /api/v1/query response.
type Query struct {
	Status string    `json:"status"`
	Data   QueryData `json:"data"`
}

// QueryData is the payload of a /api/v1/query response.
type QueryData struct {
	ResultType string            `json:"resultType"`
	Result     []QueryDataResult `json:"result"`
}

// QueryDataResult is a single instant-vector sample: its label set plus a
// [timestamp, value] pair.
type QueryDataResult struct {
	Metric map[string]string `json:"metric"`
	Value  []interface{}     `json:"value"`
}

// UnmarshalJSON expands {TIME_*} templates against insertionTime before
// decoding (see Metric.UnmarshalJSON).
func (r *QueryDataResult) UnmarshalJSON(b []byte) error {
	type plain QueryDataResult
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
}
|
||||
|
||||
// QueryRange mirrors the /api/v1/query_range response.
type QueryRange struct {
	Status string         `json:"status"`
	Data   QueryRangeData `json:"data"`
}

// QueryRangeData is the payload of a /api/v1/query_range response.
type QueryRangeData struct {
	ResultType string                 `json:"resultType"`
	Result     []QueryRangeDataResult `json:"result"`
}

// QueryRangeDataResult is a single range-vector series: its label set plus
// a list of [timestamp, value] pairs.
type QueryRangeDataResult struct {
	Metric map[string]string `json:"metric"`
	Values [][]interface{}   `json:"values"`
}

// UnmarshalJSON expands {TIME_*} templates against insertionTime before
// decoding (see Metric.UnmarshalJSON).
func (r *QueryRangeDataResult) UnmarshalJSON(b []byte) error {
	type plain QueryRangeDataResult
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
}
|
||||
|
||||
// TestMain boots a full single-node VictoriaMetrics instance once for the
// whole test binary and tears it down after all tests have run.
func TestMain(m *testing.M) {
	setUp()
	code := m.Run()
	tearDown()
	os.Exit(code)
}
|
||||
|
||||
func setUp() {
|
||||
storagePath = filepath.Join(os.TempDir(), testStorageSuffix)
|
||||
processFlags()
|
||||
logger.Init()
|
||||
vmstorage.InitWithoutMetrics()
|
||||
vmselect.Init()
|
||||
vminsert.Init()
|
||||
go httpserver.Serve(*httpListenAddr, requestHandler)
|
||||
readyStorageCheckFunc := func() bool {
|
||||
resp, err := http.Get(testHealthHTTPPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
return resp.StatusCode == 200
|
||||
}
|
||||
if err := waitFor(testStorageInitTimeout, readyStorageCheckFunc); err != nil {
|
||||
log.Fatalf("http server can't start for %s seconds, err %s", testStorageInitTimeout, err)
|
||||
}
|
||||
}
|
||||
|
||||
func processFlags() {
|
||||
flag.Parse()
|
||||
for _, fv := range []struct {
|
||||
flag string
|
||||
value string
|
||||
}{
|
||||
{flag: "storageDataPath", value: storagePath},
|
||||
{flag: "httpListenAddr", value: testHTTPListenAddr},
|
||||
{flag: "graphiteListenAddr", value: testStatsDListenAddr},
|
||||
{flag: "opentsdbListenAddr", value: testOpenTSDBListenAddr},
|
||||
{flag: "loggerLevel", value: testLogLevel},
|
||||
{flag: "opentsdbHTTPListenAddr", value: testOpenTSDBHTTPListenAddr},
|
||||
} {
|
||||
// panics if flag doesn't exist
|
||||
if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
|
||||
log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitFor(timeout time.Duration, f func() bool) error {
|
||||
fraction := timeout / 10
|
||||
for i := fraction; i < timeout; i += fraction {
|
||||
if f() {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(fraction)
|
||||
}
|
||||
return fmt.Errorf("timeout")
|
||||
}
|
||||
|
||||
// tearDown stops everything started by setUp and removes the temporary
// storage directory. The HTTP server is stopped first so no new requests
// arrive while the components shut down.
func tearDown() {
	if err := httpserver.Stop(*httpListenAddr); err != nil {
		log.Printf("cannot stop the webservice: %s", err)
	}
	vminsert.Stop()
	vmstorage.Stop()
	vmselect.Stop()
	fs.MustRemoveAll(storagePath)
	fs.MustStopDirRemover()
}
|
||||
|
||||
// TestWriteRead ingests the fixtures via every supported protocol, restarts
// the storage and then verifies the query results, so the read path is
// exercised against data reloaded from disk.
func TestWriteRead(t *testing.T) {
	t.Run("write", testWrite)
	// Give the async ingestion paths a moment to flush before the restart.
	time.Sleep(1 * time.Second)
	vmstorage.Stop()
	// Re-open the storage that was stopped after the write phase.
	vmstorage.InitWithoutMetrics()
	t.Run("read", testRead)
}
|
||||
|
||||
// testWrite ingests every fixture from testdata/ using the protocol its
// directory is named after: Prometheus remote write, InfluxDB line
// protocol, Graphite plaintext, OpenTSDB telnet and OpenTSDB HTTP.
func testWrite(t *testing.T) {
	t.Run("prometheus", func(t *testing.T) {
		for _, test := range readIn("prometheus", t, insertionTime) {
			s := newSuite(t)
			r := testutil.WriteRequest{}
			s.noError(json.Unmarshal([]byte(strings.Join(test.Data, "\n")), &r.Timeseries))
			// Snappy-compress the protobuf payload as the remote-write protocol requires.
			data, err := testutil.Compress(r)
			s.greaterThan(len(r.Timeseries), 0)
			if err != nil {
				t.Errorf("error compressing %v %s", r, err)
				t.Fail()
			}
			httpWrite(t, testPromWriteHTTPPath, bytes.NewBuffer(data))
		}
	})

	t.Run("influxdb", func(t *testing.T) {
		for _, x := range readIn("influxdb", t, insertionTime) {
			test := x // capture the loop variable for the parallel subtest
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				httpWrite(t, testWriteHTTPPath, bytes.NewBufferString(strings.Join(test.Data, "\n")))
			})
		}
	})
	t.Run("graphite", func(t *testing.T) {
		for _, x := range readIn("graphite", t, insertionTime) {
			test := x // capture the loop variable for the parallel subtest
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				tcpWrite(t, "127.0.0.1"+testStatsDListenAddr, strings.Join(test.Data, "\n"))
			})
		}
	})
	t.Run("opentsdb", func(t *testing.T) {
		for _, x := range readIn("opentsdb", t, insertionTime) {
			test := x // capture the loop variable for the parallel subtest
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				tcpWrite(t, "127.0.0.1"+testOpenTSDBListenAddr, strings.Join(test.Data, "\n"))
			})
		}
	})
	t.Run("opentsdbhttp", func(t *testing.T) {
		for _, x := range readIn("opentsdbhttp", t, insertionTime) {
			test := x // capture the loop variable for the parallel subtest
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				logger.Infof("writing %s", test.Data)
				httpWrite(t, testOpenTSDBWriteHTTPPath, bytes.NewBufferString(strings.Join(test.Data, "\n")))
			})
		}
	})
}
|
||||
|
||||
func testRead(t *testing.T) {
|
||||
for _, engine := range []string{"prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
|
||||
t.Run(engine, func(t *testing.T) {
|
||||
for _, x := range readIn(engine, t, insertionTime) {
|
||||
test := x
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, q := range test.Query {
|
||||
q = testutil.PopulateTimeTplString(q, insertionTime)
|
||||
if test.Issue != "" {
|
||||
test.Issue = "Regression in " + test.Issue
|
||||
}
|
||||
switch true {
|
||||
case strings.HasPrefix(q, "/api/v1/export"):
|
||||
if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
|
||||
t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/series"):
|
||||
s := Series{}
|
||||
httpReadStruct(t, testReadHTTPPath, q, &s)
|
||||
if err := checkSeriesResult(s, test.ResultSeries); err != nil {
|
||||
t.Fatalf("Series. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/query_range"):
|
||||
queryResult := QueryRange{}
|
||||
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
|
||||
if err := checkQueryRangeResult(queryResult, test.ResultQueryRange); err != nil {
|
||||
t.Fatalf("Query Range. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
case strings.HasPrefix(q, "/api/v1/query"):
|
||||
queryResult := Query{}
|
||||
httpReadStruct(t, testReadHTTPPath, q, &queryResult)
|
||||
if err := checkQueryResult(queryResult, test.ResultQuery); err != nil {
|
||||
t.Fatalf("Query. %s fails with error %s.%s", q, err, test.Issue)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unsupported read query %s", q)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func readIn(readFor string, t *testing.T, insertTime time.Time) []test {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
var tt []test
|
||||
s.noError(filepath.Walk(filepath.Join(testFixturesDir, readFor), func(path string, info os.FileInfo, err error) error {
|
||||
if filepath.Ext(path) != ".json" {
|
||||
return nil
|
||||
}
|
||||
b, err := ioutil.ReadFile(path)
|
||||
s.noError(err)
|
||||
item := test{}
|
||||
s.noError(json.Unmarshal(b, &item))
|
||||
for i := range item.Data {
|
||||
item.Data[i] = testutil.PopulateTimeTplString(item.Data[i], insertTime)
|
||||
}
|
||||
tt = append(tt, item)
|
||||
return nil
|
||||
}))
|
||||
if len(tt) == 0 {
|
||||
t.Fatalf("no test found in %s", filepath.Join(testFixturesDir, readFor))
|
||||
}
|
||||
return tt
|
||||
}
|
||||
|
||||
func httpWrite(t *testing.T, address string, r io.Reader) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Post(address, "", r)
|
||||
s.noError(err)
|
||||
s.noError(resp.Body.Close())
|
||||
s.equalInt(resp.StatusCode, 204)
|
||||
}
|
||||
|
||||
func tcpWrite(t *testing.T, address string, data string) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
conn, err := net.Dial("tcp", address)
|
||||
s.noError(err)
|
||||
defer conn.Close()
|
||||
n, err := conn.Write([]byte(data))
|
||||
s.noError(err)
|
||||
s.equalInt(n, len(data))
|
||||
}
|
||||
|
||||
// httpReadMetrics GETs an /api/v1/export style endpoint and decodes its
// newline-delimited JSON stream into a slice of Metric values.
func httpReadMetrics(t *testing.T, address, query string) []Metric {
	t.Helper()
	s := newSuite(t)
	resp, err := http.Get(address + query)
	s.noError(err)
	defer resp.Body.Close()
	s.equalInt(resp.StatusCode, 200)
	var rows []Metric
	// dec.More() keeps decoding until the concatenated JSON stream is drained.
	for dec := json.NewDecoder(resp.Body); dec.More(); {
		var row Metric
		s.noError(dec.Decode(&row))
		rows = append(rows, row)
	}
	return rows
}
|
||||
func httpReadStruct(t *testing.T, address, query string, dst interface{}) {
|
||||
t.Helper()
|
||||
s := newSuite(t)
|
||||
resp, err := http.Get(address + query)
|
||||
s.noError(err)
|
||||
defer resp.Body.Close()
|
||||
s.equalInt(resp.StatusCode, 200)
|
||||
s.noError(json.NewDecoder(resp.Body).Decode(dst))
|
||||
}
|
||||
|
||||
func checkMetricsResult(got, want []Metric) error {
|
||||
for _, r := range append([]Metric(nil), got...) {
|
||||
want = removeIfFoundMetrics(r, want)
|
||||
}
|
||||
if len(want) > 0 {
|
||||
return fmt.Errorf("exptected metrics %+v not found in %+v", want, got)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeIfFoundMetrics(r Metric, contains []Metric) []Metric {
|
||||
for i, item := range contains {
|
||||
if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) &&
|
||||
reflect.DeepEqual(r.Timestamps, item.Timestamps) {
|
||||
contains[i] = contains[len(contains)-1]
|
||||
return contains[:len(contains)-1]
|
||||
}
|
||||
}
|
||||
return contains
|
||||
}
|
||||
|
||||
func checkSeriesResult(got, want Series) error {
|
||||
if got.Status != want.Status {
|
||||
return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
|
||||
}
|
||||
wantData := append([]map[string]string(nil), want.Data...)
|
||||
for _, r := range got.Data {
|
||||
wantData = removeIfFoundSeries(r, wantData)
|
||||
}
|
||||
if len(wantData) > 0 {
|
||||
return fmt.Errorf("expected seria(s) %+v not found in %+v", wantData, got.Data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeIfFoundSeries removes the first label set in contains that equals r
// via swap-with-last removal and returns the shortened slice. Note:
// contains is mutated in place.
func removeIfFoundSeries(r map[string]string, contains []map[string]string) []map[string]string {
	for i, candidate := range contains {
		if !reflect.DeepEqual(r, candidate) {
			continue
		}
		last := len(contains) - 1
		contains[i] = contains[last]
		return contains[:last]
	}
	return contains
}
|
||||
|
||||
// checkQueryResult verifies status, result type and that every expected
// instant-vector sample in want occurs in got.
func checkQueryResult(got, want Query) error {
	if got.Status != want.Status {
		return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
	}
	if got.Data.ResultType != want.Data.ResultType {
		return fmt.Errorf("result type mismatch %q - %q", want.Data.ResultType, got.Data.ResultType)
	}
	// Copy the expected results so the swap-removal below does not mutate want.
	wantData := append([]QueryDataResult(nil), want.Data.Result...)
	for _, r := range got.Data.Result {
		wantData = removeIfFoundQueryData(r, wantData)
	}
	if len(wantData) > 0 {
		return fmt.Errorf("expected query result %+v not found in %+v", wantData, got.Data.Result)
	}
	return nil
}
|
||||
|
||||
// removeIfFoundQueryData removes the first element of contains whose labels
// and [timestamp, value] pair equal r's, via swap-with-last removal, and
// returns the shortened slice. Note: contains is mutated in place.
// NOTE(review): r.Value[0]/[1] are indexed without a length check, so a
// malformed response with fewer than two Value entries would panic here —
// acceptable in a test helper, but worth confirming.
func removeIfFoundQueryData(r QueryDataResult, contains []QueryDataResult) []QueryDataResult {
	for i, item := range contains {
		if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Value[0], item.Value[0]) && reflect.DeepEqual(r.Value[1], item.Value[1]) {
			contains[i] = contains[len(contains)-1]
			return contains[:len(contains)-1]
		}
	}
	return contains
}
|
||||
|
||||
// checkQueryRangeResult verifies status, result type and that every
// expected range-vector series in want occurs in got.
func checkQueryRangeResult(got, want QueryRange) error {
	if got.Status != want.Status {
		return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
	}
	if got.Data.ResultType != want.Data.ResultType {
		return fmt.Errorf("result type mismatch %q - %q", want.Data.ResultType, got.Data.ResultType)
	}
	// Copy the expected results so the swap-removal below does not mutate want.
	wantData := append([]QueryRangeDataResult(nil), want.Data.Result...)
	for _, r := range got.Data.Result {
		wantData = removeIfFoundQueryRangeData(r, wantData)
	}
	if len(wantData) > 0 {
		return fmt.Errorf("expected query range result %+v not found in %+v", wantData, got.Data.Result)
	}
	return nil
}
|
||||
|
||||
func removeIfFoundQueryRangeData(r QueryRangeDataResult, contains []QueryRangeDataResult) []QueryRangeDataResult {
|
||||
for i, item := range contains {
|
||||
if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) {
|
||||
contains[i] = contains[len(contains)-1]
|
||||
return contains[:len(contains)-1]
|
||||
}
|
||||
}
|
||||
return contains
|
||||
}
|
||||
|
||||
// suite bundles a *testing.T with fail-fast assertion helpers.
type suite struct{ t *testing.T }

// newSuite returns assertion helpers bound to t.
func newSuite(t *testing.T) *suite { return &suite{t: t} }

// noError fails the test immediately if err is non-nil.
func (s *suite) noError(err error) {
	s.t.Helper()
	if err != nil {
		s.t.Errorf("unexpected error %v", err)
		s.t.FailNow()
	}
}

// equalInt fails the test immediately if a != b.
func (s *suite) equalInt(a, b int) {
	s.t.Helper()
	if a != b {
		s.t.Errorf("%d not equal %d", a, b)
		s.t.FailNow()
	}
}

// greaterThan fails the test immediately unless a > b.
func (s *suite) greaterThan(a, b int) {
	s.t.Helper()
	if a <= b {
		// Fixed "less or equal then" -> proper English in the failure message.
		s.t.Errorf("%d is less than or equal to %d", a, b)
		s.t.FailNow()
	}
}
|
||||
51
app/victoria-metrics/test/parser.go
Normal file
51
app/victoria-metrics/test/parser.go
Normal file
@@ -0,0 +1,51 @@
|
||||
// +build integration
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
parseTimeExpRegex = regexp.MustCompile(`"?{TIME[^}]*}"?`)
|
||||
extractRegex = regexp.MustCompile(`"?{([^}]*)}"?`)
|
||||
)
|
||||
|
||||
func PopulateTimeTplString(s string, t time.Time) string {
|
||||
return string(PopulateTimeTpl([]byte(s), t))
|
||||
}
|
||||
|
||||
func PopulateTimeTpl(b []byte, t time.Time) []byte {
|
||||
return parseTimeExpRegex.ReplaceAllFunc(b, func(repl []byte) []byte {
|
||||
repl = extractRegex.FindSubmatch(repl)[1]
|
||||
parts := strings.SplitN(string(repl), "-", 2)
|
||||
if len(parts) == 2 {
|
||||
duration, err := time.ParseDuration(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
log.Fatalf("error %s parsing duration %s in %s", err, parts[1], repl)
|
||||
}
|
||||
t = t.Add(-duration)
|
||||
}
|
||||
switch strings.TrimSpace(parts[0]) {
|
||||
case `TIME_S`:
|
||||
return []byte(fmt.Sprintf("%d", t.Unix()))
|
||||
case `TIME_MSZ`:
|
||||
return []byte(fmt.Sprintf("%d", t.Unix()*1e3))
|
||||
case `TIME_MS`:
|
||||
return []byte(fmt.Sprintf("%d", timeToMillis(t)))
|
||||
case `TIME_NS`:
|
||||
return []byte(fmt.Sprintf("%d", t.UnixNano()))
|
||||
default:
|
||||
log.Fatalf("unkown time pattern %s in %s", parts[0], repl)
|
||||
}
|
||||
return repl
|
||||
})
|
||||
}
|
||||
|
||||
func timeToMillis(t time.Time) int64 {
|
||||
return t.UnixNano() / 1e6
|
||||
}
|
||||
338
app/victoria-metrics/test/prom_types.go
Normal file
338
app/victoria-metrics/test/prom_types.go
Normal file
@@ -0,0 +1,338 @@
|
||||
// +build integration
|
||||
|
||||
// Source https://github.com/prometheus/prometheus/blob/master/prompb/remote.pb.go . Code is copy pasted and cleaned up
|
||||
package test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// WriteRequest is a trimmed-down copy of the Prometheus remote-write
// request message (prompb.WriteRequest).
type WriteRequest struct {
	Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
}

// Size returns the encoded protobuf size of m in bytes.
func (m *WriteRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Timeseries) > 0 {
		for _, e := range m.Timeseries {
			l = e.Size()
			// 1 tag byte + varint-encoded length + payload per element.
			n += 1 + l + sovRemote(uint64(l))
		}
	}
	return n
}
|
||||
// sovRemote returns the number of bytes needed to varint-encode x.
func sovRemote(x uint64) (n int) {
	// Each varint byte carries 7 payload bits; x|1 makes zero take one byte.
	bitCount := bits.Len64(x | 1)
	return (bitCount + 6) / 7
}
|
||||
|
||||
// Marshal encodes m into a freshly allocated protobuf buffer.
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes m backwards from the end of dAtA (the
// gogo/protobuf scheme: lengths are known once a message has been written)
// and returns the number of bytes written.
func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(m.Timeseries) > 0 {
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintRemote(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}

// encodeVarintRemote writes v as a varint ending just before offset and
// returns the starting offset of the encoded value.
func encodeVarintRemote(dAtA []byte, offset int, v uint64) int {
	offset -= sovRemote(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
|
||||
|
||||
// Sample is one (value, timestamp) pair of a remote-write time series.
type Sample struct {
	Value     float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
	Timestamp int64   `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}

// Reset clears m to its zero value.
func (m *Sample) Reset() { *m = Sample{} }

// TimeSeries represents samples and labels for a single time series.
type TimeSeries struct {
	Labels  []Label  `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
	Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
}

// Reset clears m to its zero value.
func (m *TimeSeries) Reset() { *m = TimeSeries{} }

// Label is one name/value pair of a series' label set.
type Label struct {
	Name  string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

// Reset clears m to its zero value.
func (m *Label) Reset() { *m = Label{} }

// Labels wraps a list of Label values.
type Labels struct {
	Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
}

// Reset clears m to its zero value.
func (m *Labels) Reset() { *m = Labels{} }
|
||||
|
||||
// Marshal encodes m into a freshly allocated protobuf buffer.
func (m *Sample) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes m backwards from the end of dAtA and
// returns the number of bytes written. Zero-valued fields are omitted,
// matching proto3 semantics.
func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if m.Timestamp != 0 {
		i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
		i--
		dAtA[i] = 0x10 // field 2, wire type 0 (varint)
	}
	if m.Value != 0 {
		i -= 8
		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		i--
		dAtA[i] = 0x9 // field 1, wire type 1 (64-bit fixed)
	}
	return len(dAtA) - i, nil
}

// Marshal encodes m into a freshly allocated protobuf buffer.
func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes m backwards from the end of dAtA and
// returns the number of bytes written. Fields are emitted in reverse
// order so each length prefix is known when it is written.
func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(m.Samples) > 0 {
		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // field 2, wire type 2 (length-delimited)
		}
	}
	if len(m.Labels) > 0 {
		for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}
|
||||
|
||||
// Marshal encodes m into a freshly allocated protobuf buffer.
func (m *Label) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Label) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes m backwards from the end of dAtA and
// returns the number of bytes written. Empty strings are omitted,
// matching proto3 semantics.
func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Value) > 0 {
		i -= len(m.Value)
		copy(dAtA[i:], m.Value)
		i = encodeVarintTypes(dAtA, i, uint64(len(m.Value)))
		i--
		dAtA[i] = 0x12 // field 2, wire type 2 (length-delimited)
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintTypes(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
	}
	return len(dAtA) - i, nil
}

// Marshal encodes m into a freshly allocated protobuf buffer.
func (m *Labels) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA, which must hold at least m.Size() bytes.
func (m *Labels) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer encodes m backwards from the end of dAtA and
// returns the number of bytes written.
func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if len(m.Labels) > 0 {
		for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}
|
||||
|
||||
// encodeVarintTypes writes v as a varint ending just before offset and
// returns the starting offset of the encoded value. It is the types.pb
// twin of encodeVarintRemote.
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
	offset -= sovTypes(v)
	base := offset
	for v >= 1<<7 {
		// Low 7 bits with the continuation bit set.
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
|
||||
|
||||
// Size returns the encoded protobuf size of m in bytes.
func (m *Sample) Size() (n int) {
	if m == nil {
		return 0
	}
	if m.Value != 0 {
		// 1 tag byte + 8-byte fixed64 value.
		n += 9
	}
	if m.Timestamp != 0 {
		n += 1 + sovTypes(uint64(m.Timestamp))
	}
	return n
}

// Size returns the encoded protobuf size of m in bytes.
func (m *TimeSeries) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Labels) > 0 {
		for _, e := range m.Labels {
			l = e.Size()
			n += 1 + l + sovTypes(uint64(l))
		}
	}
	if len(m.Samples) > 0 {
		for _, e := range m.Samples {
			l = e.Size()
			n += 1 + l + sovTypes(uint64(l))
		}
	}
	return n
}

// Size returns the encoded protobuf size of m in bytes.
func (m *Label) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovTypes(uint64(l))
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovTypes(uint64(l))
	}
	return n
}

// Size returns the encoded protobuf size of m in bytes.
func (m *Labels) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Labels) > 0 {
		for _, e := range m.Labels {
			l = e.Size()
			n += 1 + l + sovTypes(uint64(l))
		}
	}
	return n
}
|
||||
|
||||
// sovTypes returns the number of bytes needed to varint-encode x.
func sovTypes(x uint64) (n int) {
	// 7 payload bits per varint byte; x|1 keeps zero at one byte.
	significantBits := bits.Len64(x | 1)
	return (significantBits + 6) / 7
}
|
||||
13
app/victoria-metrics/test/prom_writter.go
Normal file
13
app/victoria-metrics/test/prom_writter.go
Normal file
@@ -0,0 +1,13 @@
|
||||
// +build integration
|
||||
|
||||
package test
|
||||
|
||||
import "github.com/golang/snappy"
|
||||
|
||||
// Compress serializes wr to its protobuf wire form and snappy-encodes the
// result, producing a payload suitable for the Prometheus remote-write
// endpoint.
func Compress(wr WriteRequest) ([]byte, error) {
	data, err := wr.Marshal()
	if err != nil {
		return nil, err
	}
	return snappy.Encode(nil, data), nil
}
|
||||
8
app/victoria-metrics/testdata/graphite/basic.json
vendored
Normal file
8
app/victoria-metrics/testdata/graphite/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["graphite.foo.bar.baz;tag1=value1;tag2=value2 123 {TIME_S}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"graphite.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
14
app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json
vendored
Normal file
14
app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "comparison-not-inf-not-nan",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150",
|
||||
"data": [
|
||||
"not_nan_not_inf;item=x 1 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=x 2 {TIME_S-2m}",
|
||||
"not_nan_not_inf;item=y 1 {TIME_S-1m}",
|
||||
"not_nan_not_inf;item=y 2 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query_range?query=1/(not_nan_not_inf-1)!=inf!=nan&start={TIME_S-2m}&end={TIME_S}&step=60"],
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"item":"x"},"values":[["{TIME_S-2m}","1"]]},{"metric":{"item":"y"},"values":[["{TIME_S-2m}","1"]]}]}}
|
||||
}
|
||||
14
app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json
vendored
Normal file
14
app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "not-nan-as-missing-data",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153",
|
||||
"data": [
|
||||
"not_nan_as_missing_data;item=x 1 {TIME_S-1m}",
|
||||
"not_nan_as_missing_data;item=x 2 {TIME_S-2m}",
|
||||
"not_nan_as_missing_data;item=y 3 {TIME_S-1m}",
|
||||
"not_nan_as_missing_data;item=y 4 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query_range?query=not_nan_as_missing_data>1&start={TIME_S-2m}&end={TIME_S}&step=60"],
|
||||
"result_query_range": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"matrix",
|
||||
"result":[{"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]},{"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"]]}]}}
|
||||
}
|
||||
14
app/victoria-metrics/testdata/graphite/subquery-aggregation.json
vendored
Normal file
14
app/victoria-metrics/testdata/graphite/subquery-aggregation.json
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "subquery-aggregation",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
|
||||
"data": [
|
||||
"forms_daily_count;item=x 1 {TIME_S-1m}",
|
||||
"forms_daily_count;item=x 2 {TIME_S-2m}",
|
||||
"forms_daily_count;item=y 3 {TIME_S-1m}",
|
||||
"forms_daily_count;item=y 4 {TIME_S-2m}"],
|
||||
"query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"vector","result":[{"metric":{"item":"x"},"value":["{TIME_S-1m}","1"]},{"metric":{"item":"y"},"value":["{TIME_S-1m}","3"]}]}
|
||||
}
|
||||
}
|
||||
9
app/victoria-metrics/testdata/influxdb/basic.json
vendored
Normal file
9
app/victoria-metrics/testdata/influxdb/basic.json
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME_NS}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MS}"]},
|
||||
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
8
app/victoria-metrics/testdata/opentsdb/basic.json
vendored
Normal file
8
app/victoria-metrics/testdata/opentsdb/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["put openstdb.foo.bar.baz {TIME_S} 123 tag1=value1 tag2=value2"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"openstdb.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
8
app/victoria-metrics/testdata/opentsdbhttp/basic.json
vendored
Normal file
8
app/victoria-metrics/testdata/opentsdbhttp/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["{\"metric\": \"opentsdbhttp.foo\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"opentsdbhttp.foo","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
9
app/victoria-metrics/testdata/opentsdbhttp/multi_line.json
vendored
Normal file
9
app/victoria-metrics/testdata/opentsdbhttp/multi_line.json
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "multiline",
|
||||
"data": ["[{\"metric\": \"opentsdbhttp.multiline1\", \"value\": 1001, \"timestamp\": \"{TIME_S}\", \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}, {\"metric\": \"opentsdbhttp.multiline2\", \"value\": 1002, \"timestamp\": {TIME_S}}]"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"opentsdbhttp.multiline1","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]},
|
||||
{"metric":{"__name__":"opentsdbhttp.multiline2"},"values":[1002], "timestamps": ["{TIME_MSZ}"]}
|
||||
]
|
||||
}
|
||||
8
app/victoria-metrics/testdata/prometheus/basic.json
vendored
Normal file
8
app/victoria-metrics/testdata/prometheus/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "basic_insertion",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.bar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"prometheus.bar","baz":"qux"},"values":[100000], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
10
app/victoria-metrics/testdata/prometheus/case-sensitive-regex.json
vendored
Normal file
10
app/victoria-metrics/testdata/prometheus/case-sensitive-regex.json
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"name": "case-sensitive-regex",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/161",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"sensitiveRegex\"}],\"samples\":[{\"value\":2,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"SensitiveRegex\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/export?match={label=~'(?i)sensitiveregex'}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"sensitiveRegex"},"values":[2], "timestamps": ["{TIME_MS}"]},
|
||||
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"SensitiveRegex"},"values":[1], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
9
app/victoria-metrics/testdata/prometheus/duplicate-label.json
vendored
Normal file
9
app/victoria-metrics/testdata/prometheus/duplicate-label.json
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"name": "duplicate_label",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/172",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.duplicate_label\"},{\"name\":\"duplicate\",\"value\":\"label\"},{\"name\":\"duplicate\",\"value\":\"label\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/export?match={__name__!=''}"],
|
||||
"result_metrics": [
|
||||
{"metric":{"__name__":"prometheus.duplicate_label","duplicate":"label"},"values":[1], "timestamps": ["{TIME_MS}"]}
|
||||
]
|
||||
}
|
||||
15
app/victoria-metrics/testdata/prometheus/match-series.json
vendored
Normal file
15
app/victoria-metrics/testdata/prometheus/match-series.json
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"name": "match_series",
|
||||
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/155",
|
||||
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"1\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"2\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"3\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"4\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
|
||||
"query": ["/api/v1/series?match[]={__name__='MatchSeries'}", "/api/v1/series?match[]={__name__=~'MatchSeries.*'}"],
|
||||
"result_series": {
|
||||
"status": "success",
|
||||
"data": [
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"1","TurbineType":"V112"},
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"2","TurbineType":"V112"},
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"3","TurbineType":"V112"},
|
||||
{"__name__":"MatchSeries","db":"TenMinute","Park":"4","TurbineType":"V112"}
|
||||
]
|
||||
}
|
||||
}
|
||||
1
app/vminsert/README.md
Normal file
1
app/vminsert/README.md
Normal file
@@ -0,0 +1 @@
|
||||
`vminsert` routes the ingested data to `vmstorage`.
|
||||
30
app/vminsert/common/gzip_reader.go
Normal file
30
app/vminsert/common/gzip_reader.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// GetGzipReader returns new gzip reader from the pool.
|
||||
//
|
||||
// Return back the gzip reader when it no longer needed with PutGzipReader.
|
||||
func GetGzipReader(r io.Reader) (*gzip.Reader, error) {
|
||||
v := gzipReaderPool.Get()
|
||||
if v == nil {
|
||||
return gzip.NewReader(r)
|
||||
}
|
||||
zr := v.(*gzip.Reader)
|
||||
if err := zr.Reset(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return zr, nil
|
||||
}
|
||||
|
||||
// PutGzipReader returns back gzip reader obtained via GetGzipReader.
|
||||
func PutGzipReader(zr *gzip.Reader) {
|
||||
_ = zr.Close()
|
||||
gzipReaderPool.Put(zr)
|
||||
}
|
||||
|
||||
var gzipReaderPool sync.Pool
|
||||
110
app/vminsert/common/insert_ctx.go
Normal file
110
app/vminsert/common/insert_ctx.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
// InsertCtx contains common bits for data points insertion.
type InsertCtx struct {
	// Labels holds the labels for the data point currently being built.
	Labels []prompb.Label

	// mrs accumulates rows until FlushBufs pushes them to storage.
	mrs []storage.MetricRow
	// metricNamesBuf is a shared append-only buffer backing the
	// MetricNameRaw slices referenced from mrs.
	metricNamesBuf []byte
}
|
||||
|
||||
// Reset resets ctx for future fill with rowsLen rows.
|
||||
func (ctx *InsertCtx) Reset(rowsLen int) {
|
||||
for _, label := range ctx.Labels {
|
||||
label.Name = nil
|
||||
label.Value = nil
|
||||
}
|
||||
ctx.Labels = ctx.Labels[:0]
|
||||
|
||||
for i := range ctx.mrs {
|
||||
mr := &ctx.mrs[i]
|
||||
mr.MetricNameRaw = nil
|
||||
}
|
||||
ctx.mrs = ctx.mrs[:0]
|
||||
if n := rowsLen - cap(ctx.mrs); n > 0 {
|
||||
ctx.mrs = append(ctx.mrs[:cap(ctx.mrs)], make([]storage.MetricRow, n)...)
|
||||
}
|
||||
ctx.mrs = ctx.mrs[:0]
|
||||
ctx.metricNamesBuf = ctx.metricNamesBuf[:0]
|
||||
}
|
||||
|
||||
// marshalMetricNameRaw appends prefix and the marshaled labels to
// ctx.metricNamesBuf and returns the newly appended bytes.
func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompb.Label) []byte {
	start := len(ctx.metricNamesBuf)
	ctx.metricNamesBuf = append(ctx.metricNamesBuf, prefix...)
	ctx.metricNamesBuf = storage.MarshalMetricNameRaw(ctx.metricNamesBuf, labels)
	metricNameRaw := ctx.metricNamesBuf[start:]
	// Full-slice expression caps the result so a later append to the
	// returned slice cannot clobber subsequent data in ctx.metricNamesBuf.
	return metricNameRaw[:len(metricNameRaw):len(metricNameRaw)]
}
|
||||
|
||||
// WriteDataPoint writes (timestamp, value) with the given prefix and labels into ctx buffer.
func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompb.Label, timestamp int64, value float64) {
	metricNameRaw := ctx.marshalMetricNameRaw(prefix, labels)
	ctx.addRow(metricNameRaw, timestamp, value)
}
|
||||
|
||||
// WriteDataPointExt writes (timestamp, value) with the given metricNameRaw and labels into ctx buffer.
//
// It returns metricNameRaw for the given labels if len(metricNameRaw) == 0.
func (ctx *InsertCtx) WriteDataPointExt(metricNameRaw []byte, labels []prompb.Label, timestamp int64, value float64) []byte {
	if len(metricNameRaw) == 0 {
		// Marshal the labels once; the caller may pass the returned name
		// back in for subsequent points with identical labels.
		metricNameRaw = ctx.marshalMetricNameRaw(nil, labels)
	}
	ctx.addRow(metricNameRaw, timestamp, value)
	return metricNameRaw
}
|
||||
|
||||
func (ctx *InsertCtx) addRow(metricNameRaw []byte, timestamp int64, value float64) {
|
||||
mrs := ctx.mrs
|
||||
if cap(mrs) > len(mrs) {
|
||||
mrs = mrs[:len(mrs)+1]
|
||||
} else {
|
||||
mrs = append(mrs, storage.MetricRow{})
|
||||
}
|
||||
mr := &mrs[len(mrs)-1]
|
||||
ctx.mrs = mrs
|
||||
mr.MetricNameRaw = metricNameRaw
|
||||
mr.Timestamp = timestamp
|
||||
mr.Value = value
|
||||
}
|
||||
|
||||
// AddLabel adds (name, value) label to ctx.Labels.
|
||||
//
|
||||
// name and value must exist until ctx.Labels is used.
|
||||
func (ctx *InsertCtx) AddLabel(name, value string) {
|
||||
labels := ctx.Labels
|
||||
if cap(labels) > len(labels) {
|
||||
labels = labels[:len(labels)+1]
|
||||
} else {
|
||||
labels = append(labels, prompb.Label{})
|
||||
}
|
||||
label := &labels[len(labels)-1]
|
||||
|
||||
// Do not copy name and value contents for performance reasons.
|
||||
// This reduces GC overhead on the number of objects and allocations.
|
||||
label.Name = bytesutil.ToUnsafeBytes(name)
|
||||
label.Value = bytesutil.ToUnsafeBytes(value)
|
||||
|
||||
ctx.Labels = labels
|
||||
}
|
||||
|
||||
// FlushBufs flushes buffered rows to the underlying storage.
func (ctx *InsertCtx) FlushBufs() error {
	if err := vmstorage.AddRows(ctx.mrs); err != nil {
		// Report 503 so well-behaved clients can retry the insert later.
		return &httpserver.ErrorWithStatusCode{
			Err:        fmt.Errorf("cannot store metrics: %s", err),
			StatusCode: http.StatusServiceUnavailable,
		}
	}
	return nil
}
|
||||
68
app/vminsert/common/lines_reader.go
Normal file
68
app/vminsert/common/lines_reader.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
)
|
||||
|
||||
// The maximum size of a single line returned by ReadLinesBlock.
const maxLineSize = 256 * 1024

// Default size in bytes of a single block returned by ReadLinesBlock.
const defaultBlockSize = 64 * 1024

// ReadLinesBlock reads a block of lines delimited by '\n' from tailBuf and r into dstBuf.
//
// Trailing chars after the last newline are put into tailBuf.
//
// Returns (dstBuf, tailBuf).
func ReadLinesBlock(r io.Reader, dstBuf, tailBuf []byte) ([]byte, []byte, error) {
	if cap(dstBuf) < defaultBlockSize {
		dstBuf = bytesutil.Resize(dstBuf, defaultBlockSize)
	}
	// Start the block with the leftover bytes from the previous call.
	dstBuf = append(dstBuf[:0], tailBuf...)
	tailBuf = tailBuf[:0]
again:
	n, err := r.Read(dstBuf[len(dstBuf):cap(dstBuf)])
	// Check for error only if zero bytes read from r, i.e. no forward progress made.
	// Otherwise process the read data.
	if n == 0 {
		if err == nil {
			return dstBuf, tailBuf, fmt.Errorf("no forward progress made")
		}
		if err == io.EOF && len(dstBuf) > 0 {
			// Missing newline in the end of stream. This is OK,
			// so suppress io.EOF for now. It will be returned during the next
			// call to ReadLinesBlock.
			// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/60 .
			return dstBuf, tailBuf, nil
		}
		return dstBuf, tailBuf, err
	}
	dstBuf = dstBuf[:len(dstBuf)+n]

	// Search for the last newline in dstBuf and put the rest into tailBuf.
	// Only the freshly read suffix needs scanning - earlier bytes were
	// already checked on previous iterations.
	nn := bytes.LastIndexByte(dstBuf[len(dstBuf)-n:], '\n')
	if nn < 0 {
		// Didn't find at least a single line.
		if len(dstBuf) > maxLineSize {
			return dstBuf, tailBuf, fmt.Errorf("too long line: more than %d bytes", maxLineSize)
		}
		if cap(dstBuf) < 2*len(dstBuf) {
			// Increase dstBuf capacity, so more data could be read into it.
			dstBufLen := len(dstBuf)
			dstBuf = bytesutil.Resize(dstBuf, 2*cap(dstBuf))
			dstBuf = dstBuf[:dstBufLen]
		}
		goto again
	}

	// Found at least a single line. Return it.
	nn += len(dstBuf) - n
	tailBuf = append(tailBuf[:0], dstBuf[nn+1:]...)
	dstBuf = dstBuf[:nn]
	return dstBuf, tailBuf, nil
}
|
||||
213
app/vminsert/common/lines_reader_test.go
Normal file
213
app/vminsert/common/lines_reader_test.go
Normal file
@@ -0,0 +1,213 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestReadLinesBlockFailure verifies that ReadLinesBlock reports an error for
// inputs that cannot yield a complete line, across several reader types.
func TestReadLinesBlockFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		r := bytes.NewBufferString(s)
		if _, _, err := ReadLinesBlock(r, nil, nil); err == nil {
			t.Fatalf("expecting non-nil error")
		}
		sbr := &singleByteReader{
			b: []byte(s),
		}
		if _, _, err := ReadLinesBlock(sbr, nil, nil); err == nil {
			t.Fatalf("expecting non-nil error")
		}
		fr := &failureReader{}
		if _, _, err := ReadLinesBlock(fr, nil, nil); err == nil {
			t.Fatalf("expecting non-nil error")
		}
	}

	// empty string
	f("")

	// too long string
	b := make([]byte, maxLineSize+1)
	f(string(b))
}
|
||||
|
||||
// failureReader is an io.Reader that never makes progress: every Read call
// returns zero bytes and an error.
type failureReader struct{}

// Read implements io.Reader by always failing.
func (fr *failureReader) Read(_ []byte) (int, error) {
	return 0, fmt.Errorf("some error")
}
|
||||
|
||||
// TestReadLinesBlockMultiLinesSingleByteReader drains ReadLinesBlock over a
// reader that yields one byte per call, so every line is returned in its own
// block, and compares the collected lines with the expectation.
func TestReadLinesBlockMultiLinesSingleByteReader(t *testing.T) {
	f := func(s string, linesExpected []string) {
		t.Helper()

		r := &singleByteReader{
			b: []byte(s),
		}
		var err error
		var dstBuf, tailBuf []byte
		var lines []string
		for {
			dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("unexpected error in ReadLinesBlock(%q): %s", s, err)
			}
			lines = append(lines, string(dstBuf))
		}
		if !reflect.DeepEqual(lines, linesExpected) {
			t.Fatalf("unexpected lines after reading %q: got %q; want %q", s, lines, linesExpected)
		}
	}

	f("", nil)
	f("foo", []string{"foo"})
	f("foo\n", []string{"foo"})
	f("foo\nbar", []string{"foo", "bar"})
	f("\nfoo\nbar", []string{"", "foo", "bar"})
	f("\nfoo\nbar\n", []string{"", "foo", "bar"})
	f("\nfoo\nbar\n\n", []string{"", "foo", "bar", ""})
}
|
||||
|
||||
// TestReadLinesBlockMultiLinesBytesBuffer drains ReadLinesBlock over a
// bytes.Buffer, which serves the whole input in a single Read, so multiple
// lines may be returned in one block (contrast with the single-byte variant).
func TestReadLinesBlockMultiLinesBytesBuffer(t *testing.T) {
	f := func(s string, linesExpected []string) {
		t.Helper()

		r := bytes.NewBufferString(s)
		var err error
		var dstBuf, tailBuf []byte
		var lines []string
		for {
			dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("unexpected error in ReadLinesBlock(%q): %s", s, err)
			}
			lines = append(lines, string(dstBuf))
		}
		if !reflect.DeepEqual(lines, linesExpected) {
			t.Fatalf("unexpected lines after reading %q: got %q; want %q", s, lines, linesExpected)
		}
	}

	f("", nil)
	f("foo", []string{"foo"})
	f("foo\n", []string{"foo"})
	f("foo\nbar", []string{"foo", "bar"})
	f("\nfoo\nbar", []string{"\nfoo", "bar"})
	f("\nfoo\nbar\n", []string{"\nfoo\nbar"})
	f("\nfoo\nbar\n\n", []string{"\nfoo\nbar\n"})
}
|
||||
|
||||
// TestReadLinesBlockSuccessSingleByteReader checks a single ReadLinesBlock
// call over a one-byte-per-Read reader, verifying both the returned block and
// the leftover tail. Each case is run twice: with nil buffers and with the
// buffers from the first call, to exercise buffer reuse.
func TestReadLinesBlockSuccessSingleByteReader(t *testing.T) {
	f := func(s, dstBufExpected, tailBufExpected string) {
		t.Helper()

		r := &singleByteReader{
			b: []byte(s),
		}
		dstBuf, tailBuf, err := ReadLinesBlock(r, nil, nil)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if string(dstBuf) != dstBufExpected {
			t.Fatalf("unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
		}
		if string(tailBuf) != tailBufExpected {
			t.Fatalf("unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
		}

		// Verify the same with non-empty dstBuf and tailBuf
		r = &singleByteReader{
			b: []byte(s),
		}
		dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf[:0])
		if err != nil {
			t.Fatalf("non-empty bufs: unexpected error: %s", err)
		}
		if string(dstBuf) != dstBufExpected {
			t.Fatalf("non-empty bufs: unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
		}
		if string(tailBuf) != tailBufExpected {
			t.Fatalf("non-empty bufs: unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
		}
	}

	f("\n", "", "")
	f("foo\n", "foo", "")
	f("\nfoo", "", "")
	f("foo\nbar", "foo", "")
	f("foo\nbar\nbaz", "foo", "")
	f("foo", "foo", "")

	// The maximum line size
	b := make([]byte, maxLineSize+10)
	b[maxLineSize] = '\n'
	f(string(b), string(b[:maxLineSize]), "")
}
|
||||
|
||||
// TestReadLinesBlockSuccessBytesBuffer checks a single ReadLinesBlock call
// over a bytes.Buffer (whole input delivered in one Read), verifying both the
// returned block and the tail kept for the next call. Each case is run twice
// to exercise buffer reuse.
func TestReadLinesBlockSuccessBytesBuffer(t *testing.T) {
	f := func(s, dstBufExpected, tailBufExpected string) {
		t.Helper()

		r := bytes.NewBufferString(s)
		dstBuf, tailBuf, err := ReadLinesBlock(r, nil, nil)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if string(dstBuf) != dstBufExpected {
			t.Fatalf("unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
		}
		if string(tailBuf) != tailBufExpected {
			t.Fatalf("unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
		}

		// Verify the same with non-empty dstBuf and tailBuf
		r = bytes.NewBufferString(s)
		dstBuf, tailBuf, err = ReadLinesBlock(r, dstBuf, tailBuf[:0])
		if err != nil {
			t.Fatalf("non-empty bufs: unexpected error: %s", err)
		}
		if string(dstBuf) != dstBufExpected {
			t.Fatalf("non-empty bufs: unexpected dstBuf; got %q; want %q; tailBuf=%q", dstBuf, dstBufExpected, tailBuf)
		}
		if string(tailBuf) != tailBufExpected {
			t.Fatalf("non-empty bufs: unexpected tailBuf; got %q; want %q; dstBuf=%q", tailBuf, tailBufExpected, dstBuf)
		}
	}

	f("\n", "", "")
	f("foo\n", "foo", "")
	f("\nfoo", "", "foo")
	f("foo\nbar", "foo", "bar")
	f("foo\nbar\nbaz", "foo\nbar", "baz")

	// The maximum line size
	b := make([]byte, maxLineSize+10)
	b[maxLineSize] = '\n'
	f(string(b), string(b[:maxLineSize]), string(b[maxLineSize+1:]))
}
|
||||
|
||||
type singleByteReader struct {
|
||||
b []byte
|
||||
}
|
||||
|
||||
func (sbr *singleByteReader) Read(p []byte) (int, error) {
|
||||
if len(sbr.b) == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := copy(p, sbr.b[:1])
|
||||
sbr.b = sbr.b[n:]
|
||||
if len(sbr.b) == 0 {
|
||||
return n, io.EOF
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
75
app/vminsert/concurrencylimiter/concurrencylimiter.go
Normal file
75
app/vminsert/concurrencylimiter/concurrencylimiter.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package concurrencylimiter
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// maxConcurrentInserts bounds the number of insert requests processed at once.
var maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts")

var (
	// ch is the channel for limiting concurrent calls to Do.
	ch chan struct{}

	// waitDuration is the amount of time to wait until at least a single
	// concurrent Do call out of cap(ch) inserts is complete.
	waitDuration = time.Second * 30
)

// Init initializes concurrencylimiter.
//
// Init must be called after flag.Parse call.
func Init() {
	// The channel capacity is the concurrency limit: each Do call holds
	// one slot for the duration of f.
	ch = make(chan struct{}, *maxConcurrentInserts)
}
|
||||
|
||||
// Do calls f with the limited concurrency.
func Do(f func() error) error {
	// Limit the number of concurrent f calls in order to prevent from excess
	// memory usage and CPU thrashing.
	select {
	case ch <- struct{}{}:
		// Fast path: a slot is immediately available.
		err := f()
		<-ch
		return err
	default:
	}

	// All the workers are busy.
	// Sleep for up to waitDuration.
	concurrencyLimitReached.Inc()
	t := timerpool.Get(waitDuration)
	select {
	case ch <- struct{}{}:
		timerpool.Put(t)
		err := f()
		<-ch
		return err
	case <-t.C:
		timerpool.Put(t)
		concurrencyLimitTimeout.Inc()
		// Report 503 so clients can retry once the load drops.
		return &httpserver.ErrorWithStatusCode{
			Err:        fmt.Errorf("the server is overloaded with %d concurrent inserts; either increase -maxConcurrentInserts or reduce the load", cap(ch)),
			StatusCode: http.StatusServiceUnavailable,
		}
	}
}
|
||||
|
||||
var (
	// concurrencyLimitReached counts Do calls which had to wait for a free slot.
	concurrencyLimitReached = metrics.NewCounter(`vm_concurrent_insert_limit_reached_total`)
	// concurrencyLimitTimeout counts Do calls rejected after waitDuration.
	concurrencyLimitTimeout = metrics.NewCounter(`vm_concurrent_insert_limit_timeout_total`)

	// Gauges exposing the configured limit and the in-flight insert count.
	_ = metrics.NewGauge(`vm_concurrent_insert_capacity`, func() float64 {
		return float64(cap(ch))
	})
	_ = metrics.NewGauge(`vm_concurrent_insert_current`, func() float64 {
		return float64(len(ch))
	})
)
|
||||
190
app/vminsert/graphite/parser.go
Normal file
190
app/vminsert/graphite/parser.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed graphite rows.
type Rows struct {
	Rows []Row

	// tagsPool is reused between Unmarshal calls to reduce allocations;
	// Row.Tags slices alias it.
	tagsPool []Tag
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
// Reset items, so they can be GC'ed
|
||||
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
for i := range rs.tagsPool {
|
||||
rs.tagsPool[i].reset()
|
||||
}
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals graphite plaintext protocol rows from s.
//
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
//
// s must be unchanged until rs is in use.
func (rs *Rows) Unmarshal(s string) {
	// Reuse the previous slices' capacity; the parsed rows reference
	// substrings of s directly.
	rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
}
|
||||
|
||||
// Row is a single graphite row.
type Row struct {
	// Metric is the metric name.
	Metric string
	// Tags holds the parsed ";key=value" tags; it aliases Rows.tagsPool.
	Tags []Tag
	// Value is the sample value.
	Value float64
	// Timestamp is the sample timestamp; 0 when the line carries none.
	Timestamp int64
}

// reset clears r so the referenced strings can be garbage collected.
func (r *Row) reset() {
	r.Metric = ""
	r.Tags = nil
	r.Value = 0
	r.Timestamp = 0
}
|
||||
|
||||
// unmarshal parses a single graphite plaintext line of the form
// "metric[;tag=value...] value [timestamp]" into r.
//
// Parsed tags are appended to tagsPool; r.Tags aliases that pool.
// Value and timestamp are parsed best-effort (invalid numbers become 0).
func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
	r.reset()
	n := strings.IndexByte(s, ' ')
	if n < 0 {
		return tagsPool, fmt.Errorf("cannot find whitespace between metric and value in %q", s)
	}
	metricAndTags := s[:n]
	tail := s[n+1:]

	n = strings.IndexByte(metricAndTags, ';')
	if n < 0 {
		// No tags
		r.Metric = metricAndTags
	} else {
		// Tags found
		r.Metric = metricAndTags[:n]
		tagsStart := len(tagsPool)
		var err error
		tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:])
		if err != nil {
			return tagsPool, fmt.Errorf("cannot umarshal tags: %s", err)
		}
		tags := tagsPool[tagsStart:]
		// Full-slice expression caps r.Tags so later appends to tagsPool
		// cannot overwrite this row's tags.
		r.Tags = tags[:len(tags):len(tags)]
	}
	if len(r.Metric) == 0 {
		return tagsPool, fmt.Errorf("metric cannot be empty")
	}

	n = strings.IndexByte(tail, ' ')
	if n < 0 {
		// There is no timestamp. Use default timestamp instead.
		r.Value = fastfloat.ParseBestEffort(tail)
		return tagsPool, nil
	}
	r.Value = fastfloat.ParseBestEffort(tail[:n])
	r.Timestamp = fastfloat.ParseInt64BestEffort(tail[n+1:])
	return tagsPool, nil
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
return unmarshalRow(dst, s, tagsPool)
|
||||
}
|
||||
dst, tagsPool = unmarshalRow(dst, s[:n], tagsPool)
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
// unmarshalRow parses the single line s into a Row appended to dst.
//
// Invalid lines are logged and counted instead of failing the whole block.
func unmarshalRow(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
	if len(s) > 0 && s[len(s)-1] == '\r' {
		// Trim the trailing '\r' from CRLF-terminated lines.
		s = s[:len(s)-1]
	}
	if len(s) == 0 {
		// Skip empty line
		return dst, tagsPool
	}

	// Reserve a slot for the row, reusing spare capacity when possible.
	if cap(dst) > len(dst) {
		dst = dst[:len(dst)+1]
	} else {
		dst = append(dst, Row{})
	}
	r := &dst[len(dst)-1]
	var err error
	tagsPool, err = r.unmarshal(s, tagsPool)
	if err != nil {
		// Drop the half-filled row and keep processing subsequent lines.
		dst = dst[:len(dst)-1]
		logger.Errorf("cannot unmarshal Graphite line %q: %s", s, err)
		invalidLines.Inc()
	}
	return dst, tagsPool
}

// invalidLines counts Graphite lines which failed to parse.
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="graphite"}`)
|
||||
|
||||
// unmarshalTags parses the ';'-separated list of "key=value" tags in s,
// appending them to dst. Tags with an empty key or empty value are dropped.
func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
	for {
		// Reserve a slot for the next tag, reusing capacity when possible;
		// the slot is un-reserved below if the tag turns out empty.
		if cap(dst) > len(dst) {
			dst = dst[:len(dst)+1]
		} else {
			dst = append(dst, Tag{})
		}
		tag := &dst[len(dst)-1]

		n := strings.IndexByte(s, ';')
		if n < 0 {
			// The last tag found
			if err := tag.unmarshal(s); err != nil {
				return dst[:len(dst)-1], err
			}
			if len(tag.Key) == 0 || len(tag.Value) == 0 {
				// Skip empty tag
				dst = dst[:len(dst)-1]
			}
			return dst, nil
		}
		if err := tag.unmarshal(s[:n]); err != nil {
			return dst[:len(dst)-1], err
		}
		s = s[n+1:]
		if len(tag.Key) == 0 || len(tag.Value) == 0 {
			// Skip empty tag
			dst = dst[:len(dst)-1]
		}
	}
}
|
||||
|
||||
// Tag is a graphite tag.
type Tag struct {
	// Key is the tag name.
	Key string
	// Value is the tag value.
	Value string
}

// reset clears t so the referenced strings can be garbage collected.
func (t *Tag) reset() {
	t.Key = ""
	t.Value = ""
}

// unmarshal parses a single "key=value" pair from s into t.
func (t *Tag) unmarshal(s string) error {
	t.reset()
	eq := strings.IndexByte(s, '=')
	if eq == -1 {
		return fmt.Errorf("missing tag value for %q", s)
	}
	t.Key, t.Value = s[:eq], s[eq+1:]
	return nil
}
|
||||
154
app/vminsert/graphite/parser_test.go
Normal file
154
app/vminsert/graphite/parser_test.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRowsUnmarshalFailure verifies that malformed graphite lines yield zero
// parsed rows, and that a second Unmarshal on the same Rows behaves the same.
func TestRowsUnmarshalFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var rows Rows
		rows.Unmarshal(s)
		if len(rows.Rows) != 0 {
			t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
		}

		// Try again
		rows.Unmarshal(s)
		if len(rows.Rows) != 0 {
			t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
		}
	}

	// Missing metric
	f(" 123 455")

	// Missing value
	f("aaa")

	// missing tag
	f("aa; 12 34")

	// missing tag value
	f("aa;bb 23 34")
}
|
||||
|
||||
// TestRowsUnmarshalSuccess verifies parsing of valid graphite payloads,
// including idempotent re-Unmarshal on the same Rows and Reset behavior.
func TestRowsUnmarshalSuccess(t *testing.T) {
	f := func(s string, rowsExpected *Rows) {
		t.Helper()
		var rows Rows
		rows.Unmarshal(s)
		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
		}

		// Try unmarshaling again
		rows.Unmarshal(s)
		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
		}

		rows.Reset()
		if len(rows.Rows) != 0 {
			t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
		}
	}

	// Empty line
	f("", &Rows{})
	f("\r", &Rows{})
	f("\n\n", &Rows{})
	f("\n\r\n", &Rows{})

	// Single line
	f("foobar -123.456 789", &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
		}},
	})
	f("foo.bar 123.456 789\n", &Rows{
		Rows: []Row{{
			Metric:    "foo.bar",
			Value:     123.456,
			Timestamp: 789,
		}},
	})

	// Missing timestamp
	f("aaa 1123", &Rows{
		Rows: []Row{{
			Metric: "aaa",
			Value:  1123,
		}},
	})

	// Tags
	f("foo;bar=baz 1 2", &Rows{
		Rows: []Row{{
			Metric: "foo",
			Tags: []Tag{{
				Key:   "bar",
				Value: "baz",
			}},
			Value:     1,
			Timestamp: 2,
		}},
	})
	// Empty tags - tags with empty key or value are silently dropped.
	f("foo;bar=baz;aa=;x=y;=z 1 2", &Rows{
		Rows: []Row{{
			Metric: "foo",
			Tags: []Tag{
				{
					Key:   "bar",
					Value: "baz",
				},
				{
					Key:   "x",
					Value: "y",
				},
			},
			Value:     1,
			Timestamp: 2,
		}},
	})

	// Multi lines
	f("foo 0.3 2\naaa 3\nbar.baz 0.34 43\n", &Rows{
		Rows: []Row{
			{
				Metric:    "foo",
				Value:     0.3,
				Timestamp: 2,
			},
			{
				Metric: "aaa",
				Value:  3,
			},
			{
				Metric:    "bar.baz",
				Value:     0.34,
				Timestamp: 43,
			},
		},
	})

	// Multi lines with invalid line - the bad line is dropped, the rest kept.
	f("foo 0.3 2\naaa\nbar.baz 0.34 43\n", &Rows{
		Rows: []Row{
			{
				Metric:    "foo",
				Value:     0.3,
				Timestamp: 2,
			},
			{
				Metric:    "bar.baz",
				Value:     0.34,
				Timestamp: 43,
			},
		},
	})
}
|
||||
25
app/vminsert/graphite/parser_timing_test.go
Normal file
25
app/vminsert/graphite/parser_timing_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// BenchmarkRowsUnmarshal measures parallel parsing throughput on a fixed
// four-line graphite payload, reusing a Rows value per goroutine.
func BenchmarkRowsUnmarshal(b *testing.B) {
	s := `cpu.usage_user 1.23 1234556768
cpu.usage_system 23.344 1234556768
cpu.usage_iowait 3.3443 1234556769
cpu.usage_irq 0.34432 1234556768
`
	b.SetBytes(int64(len(s)))
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		var rows Rows
		for pb.Next() {
			rows.Unmarshal(s)
			if len(rows.Rows) != 4 {
				panic(fmt.Errorf("unexpected number of rows unmarshaled: got %d; want 4", len(rows.Rows)))
			}
		}
	})
}
|
||||
161
app/vminsert/graphite/request_handler.go
Normal file
161
app/vminsert/graphite/request_handler.go
Normal file
@@ -0,0 +1,161 @@
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Ingestion metrics for the Graphite write path, exported via /metrics.
var (
	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="graphite"}`)
	rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="graphite"}`)
)
|
||||
|
||||
// insertHandler processes remote write for graphite plaintext protocol.
//
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
//
// The actual work is wrapped in the global concurrency limiter so the number
// of simultaneously processed inserts stays bounded.
func insertHandler(r io.Reader) error {
	return concurrencylimiter.Do(func() error {
		return insertHandlerInternal(r)
	})
}
|
||||
|
||||
// insertHandlerInternal reads blocks of Graphite lines from r and inserts
// them until the stream ends or an error occurs.
func insertHandlerInternal(r io.Reader) error {
	// pushCtx objects are pooled; the deferred put also resets the context.
	ctx := getPushCtx()
	defer putPushCtx(ctx)
	for ctx.Read(r) {
		if err := ctx.InsertRows(); err != nil {
			return err
		}
	}
	// Error reports nil for a clean io.EOF.
	return ctx.Error()
}
|
||||
|
||||
// InsertRows converts the rows parsed by the preceding Read call into labeled
// data points and flushes them through the shared insert context.
func (ctx *pushCtx) InsertRows() error {
	rows := ctx.Rows.Rows
	ic := &ctx.Common
	ic.Reset(len(rows))
	for i := range rows {
		r := &rows[i]
		ic.Labels = ic.Labels[:0]
		// The empty label key carries the metric name — presumably mapped to
		// __name__ inside common.InsertCtx; confirm against AddLabel.
		ic.AddLabel("", r.Metric)
		for j := range r.Tags {
			tag := &r.Tags[j]
			ic.AddLabel(tag.Key, tag.Value)
		}
		ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
	}
	rowsInserted.Add(len(rows))
	rowsPerInsert.Update(float64(len(rows)))
	return ic.FlushBufs()
}
|
||||
|
||||
// flushTimeout bounds how long a TCP read may block before whatever data has
// already been buffered is flushed downstream.
const flushTimeout = 3 * time.Second

// Read reads the next block of complete lines from r into ctx.reqBuf and
// parses them into ctx.Rows.
//
// It returns false when no more data can be read; the terminating error (nil
// for a plain io.EOF) is then available via ctx.Error. Rows with a missing
// timestamp get the current time, and all timestamps are converted from
// seconds to milliseconds.
func (ctx *pushCtx) Read(r io.Reader) bool {
	graphiteReadCalls.Inc()
	if ctx.err != nil {
		return false
	}
	// For network connections, set a deadline so slow senders still get their
	// partial data flushed at least once per flushTimeout.
	if c, ok := r.(net.Conn); ok {
		if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil {
			graphiteReadErrors.Inc()
			ctx.err = fmt.Errorf("cannot set read deadline: %s", err)
			return false
		}
	}
	ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf)
	if ctx.err != nil {
		if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() {
			// Flush the read data on timeout and try reading again.
			ctx.err = nil
		} else {
			if ctx.err != io.EOF {
				graphiteReadErrors.Inc()
				ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", ctx.err)
			}
			return false
		}
	}
	// ctx.Rows keeps references into reqBuf, so reqBuf must stay unchanged
	// while the parsed rows are in use.
	ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))

	// Fill missing timestamps with the current timestamp rounded to seconds.
	currentTimestamp := time.Now().Unix()
	rows := ctx.Rows.Rows
	for i := range rows {
		r := &rows[i]
		if r.Timestamp == 0 {
			r.Timestamp = currentTimestamp
		}
	}

	// Convert timestamps from seconds to milliseconds.
	for i := range rows {
		rows[i].Timestamp *= 1e3
	}

	return true
}
|
||||
|
||||
// pushCtx holds the per-stream state for reading and inserting Graphite
// plaintext data. Instances are pooled via getPushCtx/putPushCtx.
type pushCtx struct {
	// Rows holds the rows parsed from the most recent reqBuf contents.
	Rows Rows
	// Common buffers the converted data points before flushing.
	Common common.InsertCtx

	// reqBuf holds complete lines from the last read; tailBuf carries the
	// trailing partial line over to the next Read call.
	reqBuf  []byte
	tailBuf []byte

	// err is the terminating read error; io.EOF denotes a clean end of stream.
	err error
}
|
||||
|
||||
// Error returns the error that terminated reading.
// A clean io.EOF is not considered an error and yields nil.
func (ctx *pushCtx) Error() error {
	if ctx.err == io.EOF {
		return nil
	}
	return ctx.err
}
|
||||
|
||||
// reset clears ctx before it is returned to the pool, dropping references to
// parsed data while keeping buffer capacity for reuse.
func (ctx *pushCtx) reset() {
	ctx.Rows.Reset()
	ctx.Common.Reset(0)
	ctx.reqBuf = ctx.reqBuf[:0]
	ctx.tailBuf = ctx.tailBuf[:0]

	ctx.err = nil
}
|
||||
|
||||
// Read-path metrics for the Graphite listener.
var (
	graphiteReadCalls  = metrics.NewCounter(`vm_read_calls_total{name="graphite"}`)
	graphiteReadErrors = metrics.NewCounter(`vm_read_errors_total{name="graphite"}`)
)
|
||||
|
||||
// getPushCtx returns a pushCtx for reuse, preferring the channel-backed fast
// cache, then sync.Pool, and finally allocating a fresh one.
func getPushCtx() *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			return v.(*pushCtx)
		}
		return &pushCtx{}
	}
}
|
||||
|
||||
// putPushCtx resets ctx and returns it to the pool; when the channel cache is
// full the context overflows into sync.Pool.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
}
|
||||
|
||||
// pushCtxPool and pushCtxPoolCh together pool pushCtx objects: the channel
// holds up to GOMAXPROCS hot contexts; sync.Pool absorbs the overflow.
var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
138
app/vminsert/graphite/server.go
Normal file
138
app/vminsert/graphite/server.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Request and error counters for the Graphite write path, split by transport.
var (
	writeRequestsTCP = metrics.NewCounter(`vm_graphite_requests_total{name="write", net="tcp"}`)
	writeErrorsTCP   = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="tcp"}`)

	writeRequestsUDP = metrics.NewCounter(`vm_graphite_requests_total{name="write", net="udp"}`)
	writeErrorsUDP   = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="udp"}`)
)
|
||||
|
||||
// Serve starts graphite server on the given addr.
//
// It listens on addr over both TCP and UDP and blocks until both serving
// loops exit, which happens once Stop closes the listeners. Listener startup
// failures are fatal.
func Serve(addr string) {
	logger.Infof("starting TCP Graphite server at %q", addr)
	lnTCP, err := netutil.NewTCPListener("graphite", addr)
	if err != nil {
		logger.Fatalf("cannot start TCP Graphite server at %q: %s", addr, err)
	}
	// Stored at package level so Stop can close it.
	listenerTCP = lnTCP

	logger.Infof("starting UDP Graphite server at %q", addr)
	lnUDP, err := net.ListenPacket("udp4", addr)
	if err != nil {
		logger.Fatalf("cannot start UDP Graphite server at %q: %s", addr, err)
	}
	listenerUDP = lnUDP

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		serveTCP(listenerTCP)
		logger.Infof("stopped TCP Graphite server at %q", addr)
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		serveUDP(listenerUDP)
		logger.Infof("stopped UDP Graphite server at %q", addr)
	}()
	wg.Wait()
}
|
||||
|
||||
// serveTCP accepts TCP connections from ln and handles each one in its own
// goroutine via insertHandler.
//
// Temporary accept errors are retried after a one second pause; a closed
// listener (detected by error-string match) ends the loop; any other error is
// fatal.
func serveTCP(ln net.Listener) {
	for {
		c, err := ln.Accept()
		if err != nil {
			if ne, ok := err.(net.Error); ok {
				if ne.Temporary() {
					time.Sleep(time.Second)
					continue
				}
				if strings.Contains(err.Error(), "use of closed network connection") {
					// The listener was closed by Stop — normal shutdown.
					break
				}
				logger.Fatalf("unrecoverable error when accepting TCP Graphite connections: %s", err)
			}
			logger.Fatalf("unexpected error when accepting TCP Graphite connections: %s", err)
		}
		go func() {
			writeRequestsTCP.Inc()
			if err := insertHandler(c); err != nil {
				writeErrorsTCP.Inc()
				logger.Errorf("error in TCP Graphite conn %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
			}
			// Close errors are intentionally ignored: the data has already
			// been handled (or the failure logged) above.
			_ = c.Close()
		}()
	}
}
|
||||
|
||||
// serveUDP reads Graphite datagrams from ln with GOMAXPROCS reader goroutines
// and feeds each packet to insertHandler.
//
// Every reader reuses one 64KB buffer — large enough for any UDP payload —
// across packets. The function returns once all readers exit, i.e. after the
// listener is closed by Stop.
func serveUDP(ln net.PacketConn) {
	gomaxprocs := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	for i := 0; i < gomaxprocs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var bb bytesutil.ByteBuffer
			bb.B = bytesutil.Resize(bb.B, 64*1024)
			for {
				bb.Reset()
				// Expose the full capacity for the next ReadFrom call.
				bb.B = bb.B[:cap(bb.B)]
				n, addr, err := ln.ReadFrom(bb.B)
				if err != nil {
					writeErrorsUDP.Inc()
					if ne, ok := err.(net.Error); ok {
						if ne.Temporary() {
							time.Sleep(time.Second)
							continue
						}
						if strings.Contains(err.Error(), "use of closed network connection") {
							// The listener was closed by Stop — normal shutdown.
							break
						}
					}
					logger.Errorf("cannot read Graphite UDP data: %s", err)
					continue
				}
				// Trim the buffer to the received packet before parsing.
				bb.B = bb.B[:n]
				writeRequestsUDP.Inc()
				if err := insertHandler(bb.NewReader()); err != nil {
					writeErrorsUDP.Inc()
					logger.Errorf("error in UDP Graphite conn %q<->%q: %s", ln.LocalAddr(), addr, err)
					continue
				}
			}
		}()
	}
	wg.Wait()
}
|
||||
|
||||
// Package-level listeners so Stop can close what Serve opened.
var (
	listenerTCP net.Listener
	listenerUDP net.PacketConn
)
|
||||
|
||||
// Stop stops the server.
//
// Closing the listeners makes the serving loops in Serve observe
// "use of closed network connection" and exit, unblocking Serve.
// Stop must only be called after Serve has installed both listeners.
func Stop() {
	logger.Infof("stopping TCP Graphite server at %q...", listenerTCP.Addr())
	if err := listenerTCP.Close(); err != nil {
		logger.Errorf("cannot close TCP Graphite server: %s", err)
	}
	logger.Infof("stopping UDP Graphite server at %q...", listenerUDP.LocalAddr())
	if err := listenerUDP.Close(); err != nil {
		logger.Errorf("cannot close UDP Graphite server: %s", err)
	}
}
|
||||
399
app/vminsert/influx/parser.go
Normal file
399
app/vminsert/influx/parser.go
Normal file
@@ -0,0 +1,399 @@
|
||||
package influx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed influx rows.
type Rows struct {
	Rows []Row

	// tagsPool and fieldsPool back the Tags/Fields slices of all rows in
	// Rows, so their allocations can be reused across Unmarshal calls.
	tagsPool   []Tag
	fieldsPool []Field
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
// Reset rows, tags and fields in order to remove references to old data,
|
||||
// so GC could collect it.
|
||||
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
for i := range rs.tagsPool {
|
||||
rs.tagsPool[i].reset()
|
||||
}
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
|
||||
for i := range rs.fieldsPool {
|
||||
rs.fieldsPool[i].reset()
|
||||
}
|
||||
rs.fieldsPool = rs.fieldsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals influx line protocol rows from s.
//
// See https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/
//
// s must be unchanged until rs is in use.
func (rs *Rows) Unmarshal(s string) {
	// Truncating (not nil-ing) the previous slices keeps their capacity for reuse.
	rs.Rows, rs.tagsPool, rs.fieldsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0], rs.fieldsPool[:0])
}
|
||||
|
||||
// Row is a single influx row.
type Row struct {
	Measurement string
	Tags        []Tag
	Fields      []Field
	// Timestamp is the raw timestamp from the line; 0 when the line carried
	// none. Its unit depends on the sender's precision — callers normalize.
	Timestamp int64
}
|
||||
|
||||
func (r *Row) reset() {
|
||||
r.Measurement = ""
|
||||
r.Tags = nil
|
||||
r.Fields = nil
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
func (r *Row) unmarshal(s string, tagsPool []Tag, fieldsPool []Field, noEscapeChars bool) ([]Tag, []Field, error) {
|
||||
r.reset()
|
||||
n := nextUnescapedChar(s, ' ', noEscapeChars)
|
||||
if n < 0 {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("cannot find Whitespace I in %q", s)
|
||||
}
|
||||
measurementTags := s[:n]
|
||||
s = s[n+1:]
|
||||
|
||||
// Parse measurement and tags
|
||||
var err error
|
||||
n = nextUnescapedChar(measurementTags, ',', noEscapeChars)
|
||||
if n >= 0 {
|
||||
tagsStart := len(tagsPool)
|
||||
tagsPool, err = unmarshalTags(tagsPool, measurementTags[n+1:], noEscapeChars)
|
||||
if err != nil {
|
||||
return tagsPool, fieldsPool, err
|
||||
}
|
||||
tags := tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
measurementTags = measurementTags[:n]
|
||||
}
|
||||
r.Measurement = unescapeTagValue(measurementTags, noEscapeChars)
|
||||
if len(r.Measurement) == 0 {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("measurement cannot be empty. measurementTags=%q", s)
|
||||
}
|
||||
|
||||
// Parse fields
|
||||
fieldsStart := len(fieldsPool)
|
||||
hasQuotedFields := nextUnescapedChar(s, '"', noEscapeChars) >= 0
|
||||
n = nextUnquotedChar(s, ' ', noEscapeChars, hasQuotedFields)
|
||||
if n < 0 {
|
||||
// No timestamp.
|
||||
fieldsPool, err = unmarshalInfluxFields(fieldsPool, s, noEscapeChars, hasQuotedFields)
|
||||
if err != nil {
|
||||
return tagsPool, fieldsPool, err
|
||||
}
|
||||
fields := fieldsPool[fieldsStart:]
|
||||
r.Fields = fields[:len(fields):len(fields)]
|
||||
return tagsPool, fieldsPool, nil
|
||||
}
|
||||
fieldsPool, err = unmarshalInfluxFields(fieldsPool, s[:n], noEscapeChars, hasQuotedFields)
|
||||
if err != nil {
|
||||
return tagsPool, fieldsPool, err
|
||||
}
|
||||
r.Fields = fieldsPool[fieldsStart:]
|
||||
s = s[n+1:]
|
||||
|
||||
// Parse timestamp
|
||||
timestamp := fastfloat.ParseInt64BestEffort(s)
|
||||
if timestamp == 0 && s != "0" {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("cannot parse timestamp %q", s)
|
||||
}
|
||||
r.Timestamp = timestamp
|
||||
return tagsPool, fieldsPool, nil
|
||||
}
|
||||
|
||||
// Tag represents influx tag.
type Tag struct {
	Key   string
	Value string
}
|
||||
|
||||
func (tag *Tag) reset() {
|
||||
tag.Key = ""
|
||||
tag.Value = ""
|
||||
}
|
||||
|
||||
// unmarshal parses a single `key=value` pair from s into tag.
// Both key and value are unescaped; a missing '=' separator is an error.
func (tag *Tag) unmarshal(s string, noEscapeChars bool) error {
	tag.reset()
	n := nextUnescapedChar(s, '=', noEscapeChars)
	if n < 0 {
		return fmt.Errorf("missing tag value for %q", s)
	}
	tag.Key = unescapeTagValue(s[:n], noEscapeChars)
	tag.Value = unescapeTagValue(s[n+1:], noEscapeChars)
	return nil
}
|
||||
|
||||
// Field represents influx field.
// All field values — including integers, booleans and quoted numeric
// strings — are normalized to float64 by parseFieldValue.
type Field struct {
	Key   string
	Value float64
}
|
||||
|
||||
func (f *Field) reset() {
|
||||
f.Key = ""
|
||||
f.Value = 0
|
||||
}
|
||||
|
||||
// unmarshal parses a single `key=value` field pair from s into f.
//
// The key must be non-empty; the value is converted to float64 via
// parseFieldValue.
func (f *Field) unmarshal(s string, noEscapeChars, hasQuotedFields bool) error {
	f.reset()
	n := nextUnescapedChar(s, '=', noEscapeChars)
	if n < 0 {
		return fmt.Errorf("missing field value for %q", s)
	}
	f.Key = unescapeTagValue(s[:n], noEscapeChars)
	if len(f.Key) == 0 {
		return fmt.Errorf("field key cannot be empty")
	}
	v, err := parseFieldValue(s[n+1:], hasQuotedFields)
	if err != nil {
		return fmt.Errorf("cannot parse field value for %q: %s", f.Key, err)
	}
	f.Value = v
	return nil
}
|
||||
|
||||
// unmarshalRows appends rows parsed from s to dst, splitting s on newlines.
//
// tagsPool and fieldsPool are scratch pools shared by all rows; the grown
// pools are returned alongside dst. Invalid lines are skipped inside
// unmarshalRow, so no error is returned.
func unmarshalRows(dst []Row, s string, tagsPool []Tag, fieldsPool []Field) ([]Row, []Tag, []Field) {
	// When s contains no backslash at all, per-char unescaping can be skipped
	// throughout parsing.
	noEscapeChars := strings.IndexByte(s, '\\') < 0
	for len(s) > 0 {
		n := strings.IndexByte(s, '\n')
		if n < 0 {
			// The last line.
			return unmarshalRow(dst, s, tagsPool, fieldsPool, noEscapeChars)
		}
		dst, tagsPool, fieldsPool = unmarshalRow(dst, s[:n], tagsPool, fieldsPool, noEscapeChars)
		s = s[n+1:]
	}
	return dst, tagsPool, fieldsPool
}
|
||||
|
||||
// unmarshalRow parses a single line into a Row appended to dst.
//
// A trailing '\r' is stripped; empty lines and comment lines (leading '#')
// are skipped. A line that fails to parse is logged, counted in invalidLines
// and dropped so the rest of the batch still goes through.
func unmarshalRow(dst []Row, s string, tagsPool []Tag, fieldsPool []Field, noEscapeChars bool) ([]Row, []Tag, []Field) {
	if len(s) > 0 && s[len(s)-1] == '\r' {
		s = s[:len(s)-1]
	}
	if len(s) == 0 {
		// Skip empty line
		return dst, tagsPool, fieldsPool
	}
	if s[0] == '#' {
		// Skip comment
		return dst, tagsPool, fieldsPool
	}

	// Reuse a previously allocated Row when capacity allows.
	if cap(dst) > len(dst) {
		dst = dst[:len(dst)+1]
	} else {
		dst = append(dst, Row{})
	}
	r := &dst[len(dst)-1]
	var err error
	tagsPool, fieldsPool, err = r.unmarshal(s, tagsPool, fieldsPool, noEscapeChars)
	if err != nil {
		// Drop the half-filled row.
		dst = dst[:len(dst)-1]
		logger.Errorf("cannot unmarshal Influx line %q: %s; skipping it", s, err)
		invalidLines.Inc()
	}
	return dst, tagsPool, fieldsPool
}
|
||||
|
||||
// invalidLines counts influx lines that failed to parse and were skipped.
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="influx"}`)
|
||||
|
||||
// unmarshalTags appends tags parsed from the comma-separated list s to dst.
//
// Tags with an empty key or empty value are silently dropped. On error the
// partially grown dst (without the failing entry) is returned with the error.
func unmarshalTags(dst []Tag, s string, noEscapeChars bool) ([]Tag, error) {
	for {
		// Reuse a previously allocated Tag when capacity allows.
		if cap(dst) > len(dst) {
			dst = dst[:len(dst)+1]
		} else {
			dst = append(dst, Tag{})
		}
		tag := &dst[len(dst)-1]
		n := nextUnescapedChar(s, ',', noEscapeChars)
		if n < 0 {
			// The last tag in the list.
			if err := tag.unmarshal(s, noEscapeChars); err != nil {
				return dst[:len(dst)-1], err
			}
			if len(tag.Key) == 0 || len(tag.Value) == 0 {
				// Skip empty tag
				dst = dst[:len(dst)-1]
			}
			return dst, nil
		}
		if err := tag.unmarshal(s[:n], noEscapeChars); err != nil {
			return dst[:len(dst)-1], err
		}
		s = s[n+1:]
		if len(tag.Key) == 0 || len(tag.Value) == 0 {
			// Skip empty tag
			dst = dst[:len(dst)-1]
		}
	}
}
|
||||
|
||||
// unmarshalInfluxFields appends fields parsed from the comma-separated list s
// to dst.
//
// Unlike tags, fields are split on unquoted commas only, since quoted string
// field values may themselves contain commas.
func unmarshalInfluxFields(dst []Field, s string, noEscapeChars, hasQuotedFields bool) ([]Field, error) {
	for {
		// Reuse a previously allocated Field when capacity allows.
		if cap(dst) > len(dst) {
			dst = dst[:len(dst)+1]
		} else {
			dst = append(dst, Field{})
		}
		f := &dst[len(dst)-1]
		n := nextUnquotedChar(s, ',', noEscapeChars, hasQuotedFields)
		if n < 0 {
			// The last field in the list.
			if err := f.unmarshal(s, noEscapeChars, hasQuotedFields); err != nil {
				return dst, err
			}
			return dst, nil
		}
		if err := f.unmarshal(s[:n], noEscapeChars, hasQuotedFields); err != nil {
			return dst, err
		}
		s = s[n+1:]
	}
}
|
||||
|
||||
// unescapeTagValue removes influx line-protocol escaping from s.
//
// Backslashes before the special characters space, comma, '=' and '\' are
// dropped; a backslash before any other character (or a trailing backslash)
// is kept verbatim. When noEscapeChars is true, or s contains no backslash,
// s is returned unchanged without allocating.
func unescapeTagValue(s string, noEscapeChars bool) string {
	if noEscapeChars {
		// Fast path - no escape chars.
		return s
	}
	if strings.IndexByte(s, '\\') < 0 {
		return s
	}

	// Slow path: rebuild the string without recognized escape backslashes.
	b := make([]byte, 0, len(s))
	for {
		i := strings.IndexByte(s, '\\')
		if i < 0 {
			b = append(b, s...)
			return string(b)
		}
		b = append(b, s[:i]...)
		s = s[i+1:]
		if len(s) == 0 {
			// A trailing backslash is kept as-is.
			b = append(b, '\\')
			return string(b)
		}
		c := s[0]
		switch c {
		case ' ', ',', '=', '\\':
			// Recognized escape: emit the character without the backslash.
		default:
			// Unknown escape: keep the backslash verbatim.
			b = append(b, '\\')
		}
		b = append(b, c)
		s = s[1:]
	}
}
|
||||
|
||||
// parseFieldValue converts an influx field value into a float64.
//
// Supported forms: quoted strings (parsed as numbers best-effort, since some
// agents send numeric values quoted), integers with an 'i' suffix, unsigned
// integers with a 'u' suffix, boolean literals (mapped to 1/0) and plain
// floats. Best-effort parsing means malformed numeric input yields 0 rather
// than an error.
func parseFieldValue(s string, hasQuotedFields bool) (float64, error) {
	if len(s) == 0 {
		return 0, fmt.Errorf("field value cannot be empty")
	}
	if hasQuotedFields && s[0] == '"' {
		if len(s) < 2 || s[len(s)-1] != '"' {
			return 0, fmt.Errorf("missing closing quote for quoted field value %s", s)
		}
		// Try converting quoted string to number, since sometimes Influx agents
		// send numbers as strings.
		s = s[1 : len(s)-1]
		return fastfloat.ParseBestEffort(s), nil
	}
	ch := s[len(s)-1]
	if ch == 'i' {
		// Integer value
		ss := s[:len(s)-1]
		n := fastfloat.ParseInt64BestEffort(ss)
		return float64(n), nil
	}
	if ch == 'u' {
		// Unsigned integer value
		ss := s[:len(s)-1]
		n := fastfloat.ParseUint64BestEffort(ss)
		return float64(n), nil
	}
	if s == "t" || s == "T" || s == "true" || s == "True" || s == "TRUE" {
		return 1, nil
	}
	if s == "f" || s == "F" || s == "false" || s == "False" || s == "FALSE" {
		return 0, nil
	}
	return fastfloat.ParseBestEffort(s), nil
}
|
||||
|
||||
func nextUnescapedChar(s string, ch byte, noEscapeChars bool) int {
|
||||
if noEscapeChars {
|
||||
// Fast path: just search for ch in s, since s has no escape chars.
|
||||
return strings.IndexByte(s, ch)
|
||||
}
|
||||
|
||||
sOrig := s
|
||||
again:
|
||||
n := strings.IndexByte(s, ch)
|
||||
if n < 0 {
|
||||
return -1
|
||||
}
|
||||
if n == 0 {
|
||||
return len(sOrig) - len(s) + n
|
||||
}
|
||||
if s[n-1] != '\\' {
|
||||
return len(sOrig) - len(s) + n
|
||||
}
|
||||
nOrig := n
|
||||
slashes := 0
|
||||
for n > 0 && s[n-1] == '\\' {
|
||||
slashes++
|
||||
n--
|
||||
}
|
||||
if slashes&1 == 0 {
|
||||
return len(sOrig) - len(s) + nOrig
|
||||
}
|
||||
s = s[nOrig+1:]
|
||||
goto again
|
||||
}
|
||||
|
||||
// nextUnquotedChar returns the index in s of the first unescaped occurrence
// of ch that lies outside any double-quoted section, or -1 when none exists
// (including when a quote is left unclosed past the last candidate).
func nextUnquotedChar(s string, ch byte, noEscapeChars, hasQuotedFields bool) int {
	if !hasQuotedFields {
		// No quotes anywhere in the line — plain unescaped search suffices.
		return nextUnescapedChar(s, ch, noEscapeChars)
	}
	sOrig := s
	for {
		n := nextUnescapedChar(s, ch, noEscapeChars)
		if n < 0 {
			return -1
		}
		if !isInQuote(s[:n], noEscapeChars) {
			// Map the index back into the original string.
			return n + len(sOrig) - len(s)
		}
		// The match was inside a quoted section: skip past the closing quote
		// and continue searching.
		s = s[n+1:]
		n = nextUnescapedChar(s, '"', noEscapeChars)
		if n < 0 {
			return -1
		}
		s = s[n+1:]
	}
}
|
||||
|
||||
// isInQuote reports whether the position just past s falls inside an open
// double-quoted section, by toggling on each unescaped '"' found in s.
func isInQuote(s string, noEscapeChars bool) bool {
	isQuote := false
	for {
		n := nextUnescapedChar(s, '"', noEscapeChars)
		if n < 0 {
			return isQuote
		}
		isQuote = !isQuote
		s = s[n+1:]
	}
}
|
||||
430
app/vminsert/influx/parser_test.go
Normal file
430
app/vminsert/influx/parser_test.go
Normal file
@@ -0,0 +1,430 @@
|
||||
package influx
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNextUnquotedChar exercises nextUnquotedChar with hasQuotedFields=true
// under both escape-handling modes.
func TestNextUnquotedChar(t *testing.T) {
	f := func(s string, ch byte, noUnescape bool, nExpected int) {
		t.Helper()
		n := nextUnquotedChar(s, ch, noUnescape, true)
		if n != nExpected {
			t.Fatalf("unexpected n for nextUnqotedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
		}
	}

	f(``, ' ', false, -1)
	f(``, ' ', true, -1)
	f(`""`, ' ', false, -1)
	f(`""`, ' ', true, -1)
	f(`"foo bar\" " baz`, ' ', false, 12)
	f(`"foo bar\" " baz`, ' ', true, 10)
}
|
||||
|
||||
// TestNextUnescapedChar exercises nextUnescapedChar in both escape-handling
// modes, including runs of backslashes of even and odd length.
func TestNextUnescapedChar(t *testing.T) {
	f := func(s string, ch byte, noUnescape bool, nExpected int) {
		t.Helper()
		n := nextUnescapedChar(s, ch, noUnescape)
		if n != nExpected {
			t.Fatalf("unexpected n for nextUnescapedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
		}
	}

	f("", ' ', true, -1)
	f("", ' ', false, -1)
	f(" ", ' ', true, 0)
	f(" ", ' ', false, 0)
	f("x y", ' ', true, 1)
	f("x y", ' ', false, 1)
	f(`x\ y`, ' ', true, 2)
	f(`x\ y`, ' ', false, 3)
	f(`\\,`, ',', true, 2)
	f(`\\,`, ',', false, 2)
	f(`\\\=`, '=', true, 3)
	f(`\\\=`, '=', false, -1)
	f(`\\\=aa`, '=', true, 3)
	f(`\\\=aa`, '=', false, -1)
	f(`\\\=a=a`, '=', true, 3)
	f(`\\\=a=a`, '=', false, 5)
	f(`a\`, ' ', true, -1)
	f(`a\`, ' ', false, -1)
}
|
||||
|
||||
// TestUnescapeTagValue checks the slow (escape-aware) path of
// unescapeTagValue, including unknown escapes and trailing backslashes.
func TestUnescapeTagValue(t *testing.T) {
	f := func(s, sExpected string) {
		t.Helper()
		ss := unescapeTagValue(s, false)
		if ss != sExpected {
			t.Fatalf("unexpected value for %q; got %q; want %q", s, ss, sExpected)
		}
	}

	f("", "")
	f("x", "x")
	f("foobar", "foobar")
	f("привет", "привет")
	f(`\a\b\cd`, `\a\b\cd`)
	f(`\`, `\`)
	f(`foo\`, `foo\`)
	f(`\,foo\\\=\ bar`, `,foo\= bar`)
}
|
||||
|
||||
// TestRowsUnmarshalFailure feeds malformed influx lines to Rows.Unmarshal and
// verifies they are dropped (invalid lines are skipped, not returned as
// errors), both on the first parse and when the Rows value is reused.
func TestRowsUnmarshalFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var rows Rows
		rows.Unmarshal(s)
		if len(rows.Rows) != 0 {
			t.Fatalf("expecting zero rows; got %d rows", len(rows.Rows))
		}

		// Try again
		rows.Unmarshal(s)
		if len(rows.Rows) != 0 {
			t.Fatalf("expecting zero rows; got %d rows", len(rows.Rows))
		}
	}

	// Missing measurement
	f(",foo=bar baz=123")

	// No fields
	f("foo")
	f("foo,bar=baz 1234")

	// Missing tag value
	f("foo,bar")
	f("foo,bar baz")
	f("foo,bar=123, 123")

	// Missing field value
	f("foo bar")
	f("foo bar=")
	f("foo bar=,baz=23 123")
	f("foo bar=1, 123")
	f(`foo bar=" 123`)
	f(`foo bar="123`)
	f(`foo bar=",123`)
	f(`foo bar=a"", 123`)

	// Missing field name
	f("foo =123")
	f("foo =123\nbar")

	// Invalid timestamp
	f("foo bar=123 baz")
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
rows.Unmarshal(s)
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
rows.Reset()
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
f("\n\r\n", &Rows{})
|
||||
|
||||
// Comment
|
||||
f("\n# foobar\n", &Rows{})
|
||||
f("#foobar baz", &Rows{})
|
||||
f("#foobar baz\n#sss", &Rows{})
|
||||
|
||||
// Minimal line without tags and timestamp
|
||||
f("foo bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f("# comment\nfoo bar=123\r\n#comment2 sdsf dsf", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f("foo bar=123\n", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line without tags and with a timestamp.
|
||||
f("foo bar=123.45 -345", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123.45,
|
||||
}},
|
||||
Timestamp: -345,
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with a single tag
|
||||
f("foo,tag1=xyz bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with multiple tags
|
||||
f("foo,tag1=xyz,tag2=43as bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
},
|
||||
},
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with empty tag values
|
||||
f("foo,tag1=xyz,tagN=,tag2=43as,=xxx bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
},
|
||||
},
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with multiple tags, multiple fields and timestamp
|
||||
f(`system,host=ip-172-16-10-144 uptime_format="3 days, 21:01",quoted_float="-1.23",quoted_int="123" 1557761040000000000`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "system",
|
||||
Tags: []Tag{{
|
||||
Key: "host",
|
||||
Value: "ip-172-16-10-144",
|
||||
}},
|
||||
Fields: []Field{
|
||||
{
|
||||
Key: "uptime_format",
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Key: "quoted_float",
|
||||
Value: -1.23,
|
||||
},
|
||||
{
|
||||
Key: "quoted_int",
|
||||
Value: 123,
|
||||
},
|
||||
},
|
||||
Timestamp: 1557761040000000000,
|
||||
}},
|
||||
})
|
||||
f(`foo,tag1=xyz,tag2=43as bar=-123e4,x=True,y=-45i,z=f,aa="f,= \"a",bb=23u 48934`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
},
|
||||
},
|
||||
Fields: []Field{
|
||||
{
|
||||
Key: "bar",
|
||||
Value: -123e4,
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: 1,
|
||||
},
|
||||
{
|
||||
Key: "y",
|
||||
Value: -45,
|
||||
},
|
||||
{
|
||||
Key: "z",
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Key: "aa",
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Key: "bb",
|
||||
Value: 23,
|
||||
},
|
||||
},
|
||||
Timestamp: 48934,
|
||||
}},
|
||||
})
|
||||
|
||||
// Escape chars
|
||||
f(`fo\,bar\=baz,x\=\b=\\a\,\=\q\ \\\a\=\,=4.34`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: `fo,bar=baz`,
|
||||
Tags: []Tag{{
|
||||
Key: `x=\b`,
|
||||
Value: `\a,=\q `,
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: `\\a=,`,
|
||||
Value: 4.34,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Multiple lines
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
"bar x=-1i\n\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "field",
|
||||
Value: 1.23,
|
||||
}},
|
||||
Timestamp: 48934,
|
||||
},
|
||||
{
|
||||
Measurement: "bar",
|
||||
Fields: []Field{{
|
||||
Key: "x",
|
||||
Value: -1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Multiple lines with invalid line in the middle.
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
"invalid line\n"+
|
||||
"bar x=-1i\n\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "field",
|
||||
Value: 1.23,
|
||||
}},
|
||||
Timestamp: 48934,
|
||||
},
|
||||
{
|
||||
Measurement: "bar",
|
||||
Fields: []Field{{
|
||||
Key: "x",
|
||||
Value: -1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// No newline after the second line.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/82
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
"bar x=-1i", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "field",
|
||||
Value: 1.23,
|
||||
}},
|
||||
Timestamp: 48934,
|
||||
},
|
||||
{
|
||||
Measurement: "bar",
|
||||
Fields: []Field{{
|
||||
Key: "x",
|
||||
Value: -1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f("x,y=z,g=p:\\ \\ 5432\\,\\ gp\\ mon\\ [lol]\\ con10\\ cmd5\\ SELECT f=1", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "x",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "y",
|
||||
Value: "z",
|
||||
},
|
||||
{
|
||||
Key: "g",
|
||||
Value: "p: 5432, gp mon [lol] con10 cmd5 SELECT",
|
||||
},
|
||||
},
|
||||
Fields: []Field{{
|
||||
Key: "f",
|
||||
Value: 1,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
}
|
||||
25
app/vminsert/influx/parser_timing_test.go
Normal file
25
app/vminsert/influx/parser_timing_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package influx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// BenchmarkRowsUnmarshal measures influx line protocol parsing throughput on
// a fixed payload of four lines with multiple fields each.
func BenchmarkRowsUnmarshal(b *testing.B) {
	s := `cpu usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 1234556768
cpu usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 123455676344
aaa usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 123455676344
bbb usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 123455676344
`
	// Report MB/s relative to the payload size and track allocations per op.
	b.SetBytes(int64(len(s)))
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		// One Rows value per goroutine, so the benchmark measures steady-state
		// parsing with reused pools rather than per-iteration allocation.
		var rows Rows
		for pb.Next() {
			rows.Unmarshal(s)
			if len(rows.Rows) != 4 {
				panic(fmt.Errorf("unexpected number of rows parsed; got %d; want 4", len(rows.Rows)))
			}
		}
	})
}
|
||||
226
app/vminsert/influx/request_handler.go
Normal file
226
app/vminsert/influx/request_handler.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package influx
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Command-line flags controlling how Influx line-protocol fields are mapped
// to VictoriaMetrics metric names.
var (
	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for `{measurement}{separator}{field_name}` metric name when inserted via Influx line protocol")
	skipSingleField           = flag.Bool("influxSkipSingleField", false, "Uses `{measurement}` instead of `{measurement}{separator}{field_name}` for metic name if Influx line contains only a single field")
)

// Ingestion counters for the Influx write path.
var (
	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="influx"}`)
	rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="influx"}`)
)
|
||||
|
||||
// InsertHandler processes remote write for influx line protocol.
|
||||
//
|
||||
// See https://github.com/influxdata/influxdb/blob/4cbdc197b8117fee648d62e2e5be75c6575352f0/tsdb/README.md
|
||||
func InsertHandler(req *http.Request) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(req)
|
||||
})
|
||||
}
|
||||
|
||||
// insertHandlerInternal decodes an Influx line-protocol write request
// (optionally gzipped) and inserts the parsed rows into the storage.
func insertHandlerInternal(req *http.Request) error {
	influxReadCalls.Inc()

	r := req.Body
	if req.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(r)
		if err != nil {
			return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err)
		}
		defer common.PutGzipReader(zr)
		r = zr
	}

	q := req.URL.Query()
	// A positive multiplier divides raw timestamps down to milliseconds;
	// a negative one means "multiply by abs(value)" instead (see ctx.Read).
	// Default is nanosecond precision.
	tsMultiplier := int64(1e6)
	switch q.Get("precision") {
	case "ns":
		tsMultiplier = 1e6
	case "u":
		tsMultiplier = 1e3
	case "ms":
		tsMultiplier = 1
	case "s":
		tsMultiplier = -1e3
	case "m":
		tsMultiplier = -1e3 * 60
	case "h":
		tsMultiplier = -1e3 * 3600
	}

	// Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint
	db := q.Get("db")

	ctx := getPushCtx()
	defer putPushCtx(ctx)
	for ctx.Read(r, tsMultiplier) {
		if err := ctx.InsertRows(db); err != nil {
			return err
		}
	}
	return ctx.Error()
}
|
||||
|
||||
// InsertRows converts the parsed Influx rows in ctx.Rows into time series
// and writes them via ctx.Common. Each Influx field becomes a separate
// metric named `{measurement}{separator}{field_key}`, or just
// `{measurement}` when -influxSkipSingleField is set and the row has a
// single field.
func (ctx *pushCtx) InsertRows(db string) error {
	rows := ctx.Rows.Rows
	// Pre-size the insert context by the total number of tags across rows.
	rowsLen := 0
	for i := range rows {
		rowsLen += len(rows[i].Tags)
	}
	ic := &ctx.Common
	ic.Reset(rowsLen)
	rowsTotal := 0
	for i := range rows {
		r := &rows[i]
		ic.Labels = ic.Labels[:0]
		hasDBLabel := false
		for j := range r.Tags {
			tag := &r.Tags[j]
			if tag.Key == "db" {
				hasDBLabel = true
			}
			ic.AddLabel(tag.Key, tag.Value)
		}
		// The `db` query arg is added as a label unless the row carries one.
		if len(db) > 0 && !hasDBLabel {
			ic.AddLabel("db", db)
		}
		ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
		ctx.metricGroupBuf = append(ctx.metricGroupBuf[:0], r.Measurement...)
		skipFieldKey := len(r.Fields) == 1 && *skipSingleField
		if !skipFieldKey {
			ctx.metricGroupBuf = append(ctx.metricGroupBuf, *measurementFieldSeparator...)
		}
		metricGroupPrefixLen := len(ctx.metricGroupBuf)
		for j := range r.Fields {
			f := &r.Fields[j]
			if !skipFieldKey {
				// Re-use the measurement+separator prefix for every field.
				ctx.metricGroupBuf = append(ctx.metricGroupBuf[:metricGroupPrefixLen], f.Key...)
			}
			metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
			ic.Labels = ic.Labels[:0]
			// NOTE(review): an empty label key appears to denote the metric
			// name — confirm against common.InsertCtx.AddLabel.
			ic.AddLabel("", metricGroup)
			ic.WriteDataPoint(ctx.metricNameBuf, ic.Labels[:1], r.Timestamp, f.Value)
		}
		rowsTotal += len(r.Fields)
	}
	rowsInserted.Add(rowsTotal)
	rowsPerInsert.Update(float64(rowsTotal))
	return ic.FlushBufs()
}
|
||||
|
||||
// Read reads the next block of Influx lines from r into ctx.reqBuf and
// parses them into ctx.Rows, normalizing row timestamps to milliseconds.
//
// tsMultiplier > 0 means raw timestamps are in units finer than a
// millisecond and are divided by it; tsMultiplier < 0 means they are in
// coarser units and are multiplied by its absolute value.
//
// It returns false on EOF or error; the error is available via ctx.Error().
func (ctx *pushCtx) Read(r io.Reader, tsMultiplier int64) bool {
	if ctx.err != nil {
		return false
	}
	ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf)
	if ctx.err != nil {
		if ctx.err != io.EOF {
			influxReadErrors.Inc()
			ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", ctx.err)
		}
		return false
	}
	ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))

	// Adjust timestamps according to tsMultiplier
	currentTs := time.Now().UnixNano() / 1e6
	if tsMultiplier >= 1 {
		for i := range ctx.Rows.Rows {
			row := &ctx.Rows.Rows[i]
			if row.Timestamp == 0 {
				// Missing timestamps are filled with the current time.
				row.Timestamp = currentTs
			} else {
				row.Timestamp /= tsMultiplier
			}
		}
	} else if tsMultiplier < 0 {
		tsMultiplier = -tsMultiplier
		// Align the current time to the target granularity, so rows with
		// missing timestamps get the same rounding as explicit ones.
		currentTs -= currentTs % tsMultiplier
		for i := range ctx.Rows.Rows {
			row := &ctx.Rows.Rows[i]
			if row.Timestamp == 0 {
				row.Timestamp = currentTs
			} else {
				row.Timestamp *= tsMultiplier
			}
		}
	}
	return true
}
|
||||
|
||||
// Read counters for the Influx ingestion path.
var (
	influxReadCalls  = metrics.NewCounter(`vm_read_calls_total{name="influx"}`)
	influxReadErrors = metrics.NewCounter(`vm_read_errors_total{name="influx"}`)
)

// pushCtx holds reusable per-request state for Influx inserts.
// Instances are pooled via getPushCtx/putPushCtx.
type pushCtx struct {
	Rows   Rows
	Common common.InsertCtx

	reqBuf         []byte // current block of unparsed lines (filled by common.ReadLinesBlock)
	tailBuf        []byte // leftover bytes carried between ReadLinesBlock calls
	metricNameBuf  []byte // marshaled common labels for the current row
	metricGroupBuf []byte // scratch for building {measurement}{separator}{field} names

	err error // sticky read error; io.EOF means clean end of stream
}
|
||||
|
||||
func (ctx *pushCtx) Error() error {
|
||||
if ctx.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return ctx.err
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
|
||||
ctx.reqBuf = ctx.reqBuf[:0]
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
ctx.metricNameBuf = ctx.metricNameBuf[:0]
|
||||
ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
99
app/vminsert/main.go
Normal file
99
app/vminsert/main.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package vminsert
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Command-line flags for the optional ingestion listeners and insert limits.
var (
	graphiteListenAddr     = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty")
	opentsdbListenAddr     = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpentTSDB put messages. Usually :4242 must be set. Doesn't work if empty")
	opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty")
	maxInsertRequestSize   = flag.Int("maxInsertRequestSize", 32*1024*1024, "The maximum size of a single insert request in bytes")
	maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superflouos labels are dropped")
)
|
||||
|
||||
// Init initializes vminsert.
|
||||
func Init() {
|
||||
storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)
|
||||
|
||||
concurrencylimiter.Init()
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
go graphite.Serve(*graphiteListenAddr)
|
||||
}
|
||||
if len(*opentsdbListenAddr) > 0 {
|
||||
go opentsdb.Serve(*opentsdbListenAddr)
|
||||
}
|
||||
if len(*opentsdbHTTPListenAddr) > 0 {
|
||||
go opentsdbhttp.Serve(*opentsdbHTTPListenAddr, int64(*maxInsertRequestSize))
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops vminsert.
|
||||
func Stop() {
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
graphite.Stop()
|
||||
}
|
||||
if len(*opentsdbListenAddr) > 0 {
|
||||
opentsdb.Stop()
|
||||
}
|
||||
if len(*opentsdbHTTPListenAddr) > 0 {
|
||||
opentsdbhttp.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// RequestHandler is a handler for insert requests: Prometheus remote
// storage write API (/api/v1/write) and Influx line-protocol writes
// (/write, /api/v2/write, /query).
//
// It returns false when the path is not recognized, so the caller can try
// other handlers.
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	// Collapse duplicate slashes so e.g. `//write` matches `/write`.
	path := strings.Replace(r.URL.Path, "//", "/", -1)
	switch path {
	case "/api/v1/write":
		prometheusWriteRequests.Inc()
		if err := prometheus.InsertHandler(r, int64(*maxInsertRequestSize)); err != nil {
			prometheusWriteErrors.Inc()
			httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
			return true
		}
		w.WriteHeader(http.StatusNoContent)
		return true
	case "/write", "/api/v2/write":
		influxWriteRequests.Inc()
		if err := influx.InsertHandler(r); err != nil {
			influxWriteErrors.Inc()
			httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
			return true
		}
		w.WriteHeader(http.StatusNoContent)
		return true
	case "/query":
		// Emulate fake response for influx query.
		// This is required for TSBS benchmark.
		influxQueryRequests.Inc()
		fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`)
		return true
	default:
		// This is not our link
		return false
	}
}
|
||||
|
||||
// Per-endpoint request/error counters exposed via the metrics package.
var (
	prometheusWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/write", protocol="prometheus"}`)
	prometheusWriteErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/write", protocol="prometheus"}`)

	influxWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/write", protocol="influx"}`)
	influxWriteErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/write", protocol="influx"}`)

	influxQueryRequests = metrics.NewCounter(`vm_http_requests_total{path="/query", protocol="influx"}`)
)
|
||||
187
app/vminsert/opentsdb/parser.go
Normal file
187
app/vminsert/opentsdb/parser.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed OpenTSDB rows.
type Rows struct {
	Rows []Row

	// tagsPool is shared scratch storage backing the Tags slices of Rows.
	tagsPool []Tag
}

// Reset resets rs.
func (rs *Rows) Reset() {
	// Release references to objects, so they can be GC'ed.

	for i := range rs.Rows {
		rs.Rows[i].reset()
	}
	rs.Rows = rs.Rows[:0]

	for i := range rs.tagsPool {
		rs.tagsPool[i].reset()
	}
	rs.tagsPool = rs.tagsPool[:0]
}

// Unmarshal unmarshals OpenTSDB put rows from s.
//
// See http://opentsdb.net/docs/build/html/api_telnet/put.html
//
// s must be unchanged until rs is in use.
func (rs *Rows) Unmarshal(s string) {
	rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
}
|
||||
|
||||
// Row is a single OpenTSDB row.
type Row struct {
	Metric    string
	Tags      []Tag
	Value     float64
	Timestamp int64 // seconds; converted to milliseconds by the request handler
}

// reset clears r, releasing string references for GC.
func (r *Row) reset() {
	r.Metric = ""
	r.Tags = nil
	r.Value = 0
	r.Timestamp = 0
}
|
||||
|
||||
// unmarshal parses a single `put <metric> <timestamp> <value> <tag>...`
// line into r, appending parsed tags to tagsPool.
//
// NOTE(review): numbers are parsed with fastfloat.ParseBestEffort, which
// appears to yield 0 for malformed input instead of failing — confirm
// against the fastfloat docs.
func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
	r.reset()
	if !strings.HasPrefix(s, "put ") {
		return tagsPool, fmt.Errorf("missing `put ` prefix in %q", s)
	}
	s = s[len("put "):]
	n := strings.IndexByte(s, ' ')
	if n < 0 {
		return tagsPool, fmt.Errorf("cannot find whitespace between metric and timestamp in %q", s)
	}
	r.Metric = s[:n]
	if len(r.Metric) == 0 {
		return tagsPool, fmt.Errorf("metric cannot be empty")
	}
	tail := s[n+1:]
	n = strings.IndexByte(tail, ' ')
	if n < 0 {
		return tagsPool, fmt.Errorf("cannot find whitespace between timestamp and value in %q", s)
	}
	// Fractional timestamps (as produced by Akumuli) are truncated to int64.
	r.Timestamp = int64(fastfloat.ParseBestEffort(tail[:n]))
	tail = tail[n+1:]
	n = strings.IndexByte(tail, ' ')
	if n < 0 {
		return tagsPool, fmt.Errorf("cannot find whitespace between value and the first tag in %q", s)
	}
	r.Value = fastfloat.ParseBestEffort(tail[:n])
	var err error
	tagsStart := len(tagsPool)
	tagsPool, err = unmarshalTags(tagsPool, tail[n+1:])
	if err != nil {
		return tagsPool, fmt.Errorf("cannot unmarshal tags in %q: %s", s, err)
	}
	tags := tagsPool[tagsStart:]
	// Full-slice expression prevents later pool appends from clobbering r.Tags.
	r.Tags = tags[:len(tags):len(tags)]
	return tagsPool, nil
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
return unmarshalRow(dst, s, tagsPool)
|
||||
}
|
||||
dst, tagsPool = unmarshalRow(dst, s[:n], tagsPool)
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
// unmarshalRow parses a single line from s and appends it to dst.
//
// Invalid lines are logged and counted via invalidLines instead of
// aborting the whole batch.
func unmarshalRow(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag) {
	// Strip an optional trailing '\r' (CRLF line endings).
	if len(s) > 0 && s[len(s)-1] == '\r' {
		s = s[:len(s)-1]
	}
	if len(s) == 0 {
		// Skip empty line
		return dst, tagsPool
	}

	// Re-use a previously allocated Row when capacity allows.
	if cap(dst) > len(dst) {
		dst = dst[:len(dst)+1]
	} else {
		dst = append(dst, Row{})
	}
	r := &dst[len(dst)-1]
	var err error
	tagsPool, err = r.unmarshal(s, tagsPool)
	if err != nil {
		// Drop the partially filled row.
		dst = dst[:len(dst)-1]
		logger.Errorf("cannot unmarshal OpenTSDB line %q: %s", s, err)
		invalidLines.Inc()
	}
	return dst, tagsPool
}

// invalidLines counts OpenTSDB lines rejected by unmarshalRow.
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="opentsdb"}`)
|
||||
|
||||
// unmarshalTags appends tags parsed from s to dst.
//
// Tags are space-separated `key=value` pairs; pairs with an empty key or
// an empty value are silently dropped.
func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
	for {
		// Reserve a slot for the next tag, re-using capacity when possible.
		if cap(dst) > len(dst) {
			dst = dst[:len(dst)+1]
		} else {
			dst = append(dst, Tag{})
		}
		tag := &dst[len(dst)-1]

		pair := s
		next := -1
		if n := strings.IndexByte(s, ' '); n >= 0 {
			pair = s[:n]
			next = n + 1
		}
		if err := tag.unmarshal(pair); err != nil {
			return dst[:len(dst)-1], err
		}
		if len(tag.Key) == 0 || len(tag.Value) == 0 {
			// Skip empty tag
			dst = dst[:len(dst)-1]
		}
		if next < 0 {
			// The last pair has been consumed.
			return dst, nil
		}
		s = s[next:]
	}
}

// Tag is an OpenTSDB tag.
type Tag struct {
	Key   string
	Value string
}

// reset clears t, releasing string references for GC.
func (t *Tag) reset() {
	t.Key = ""
	t.Value = ""
}

// unmarshal parses a `key=value` pair from s into t.
func (t *Tag) unmarshal(s string) error {
	t.reset()
	n := strings.IndexByte(s, '=')
	if n < 0 {
		return fmt.Errorf("missing tag value for %q", s)
	}
	t.Key, t.Value = s[:n], s[n+1:]
	return nil
}
|
||||
222
app/vminsert/opentsdb/parser_test.go
Normal file
222
app/vminsert/opentsdb/parser_test.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRowsUnmarshalFailure verifies that malformed OpenTSDB put lines are
// rejected and that repeated Unmarshal calls do not leave stale rows.
func TestRowsUnmarshalFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var rows Rows
		rows.Unmarshal(s)
		if len(rows.Rows) != 0 {
			t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
		}

		// Try again
		rows.Unmarshal(s)
		if len(rows.Rows) != 0 {
			t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
		}
	}

	// Missing put prefix
	f("xx")

	// Missing metric
	f("put 111 34")

	// Missing timestamp
	f("put aaa")

	// Missing value
	f("put aaa 1123")

	// Invalid timestamp
	f("put aaa timestamp")

	// Missing first tag
	f("put aaa 123 43")

	// Invalid value
	f("put aaa 123 invalid-value")

	// Invalid multiline
	f("put aaa\nbbb 123 34")

	// Invalid tag
	f("put aaa 123 4.5 foo")
}
|
||||
|
||||
// TestRowsUnmarshalSuccess checks the OpenTSDB put parser against valid
// inputs: empty lines, single rows, empty tags, fractional timestamps,
// multiple tags and multi-line batches (including batches containing an
// invalid line, which must be skipped).
func TestRowsUnmarshalSuccess(t *testing.T) {
	f := func(s string, rowsExpected *Rows) {
		t.Helper()
		var rows Rows
		rows.Unmarshal(s)
		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
		}

		// Try unmarshaling again
		rows.Unmarshal(s)
		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
		}

		rows.Reset()
		if len(rows.Rows) != 0 {
			t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
		}
	}

	// Empty line
	f("", &Rows{})
	f("\r", &Rows{})
	f("\n\n", &Rows{})
	f("\n\r\n", &Rows{})

	// Single line
	f("put foobar 789 -123.456 a=b", &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})
	// Empty tag
	f("put foobar 789 -123.456 a= b=c =d", &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
			Tags: []Tag{
				{
					Key:   "b",
					Value: "c",
				},
			},
		}},
	})
	// Fractional timestamp that is supported by Akumuli.
	f("put foobar 789.4 -123.456 a=b", &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})
	f("put foo.bar 789 123.456 a=b\n", &Rows{
		Rows: []Row{{
			Metric:    "foo.bar",
			Value:     123.456,
			Timestamp: 789,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})

	// Tags
	f("put foo 2 1 bar=baz", &Rows{
		Rows: []Row{{
			Metric: "foo",
			Tags: []Tag{{
				Key:   "bar",
				Value: "baz",
			}},
			Value:     1,
			Timestamp: 2,
		}},
	})
	f("put foo 2 1 bar=baz x=y", &Rows{
		Rows: []Row{{
			Metric: "foo",
			Tags: []Tag{
				{
					Key:   "bar",
					Value: "baz",
				},
				{
					Key:   "x",
					Value: "y",
				},
			},
			Value:     1,
			Timestamp: 2,
		}},
	})
	// Only the first '=' separates key from value.
	f("put foo 2 1 bar=baz=aaa x=y", &Rows{
		Rows: []Row{{
			Metric: "foo",
			Tags: []Tag{
				{
					Key:   "bar",
					Value: "baz=aaa",
				},
				{
					Key:   "x",
					Value: "y",
				},
			},
			Value:     1,
			Timestamp: 2,
		}},
	})

	// Multi lines
	f("put foo 2 0.3 a=b\nput bar.baz 43 0.34 a=b\n", &Rows{
		Rows: []Row{
			{
				Metric:    "foo",
				Value:     0.3,
				Timestamp: 2,
				Tags: []Tag{{
					Key:   "a",
					Value: "b",
				}},
			},
			{
				Metric:    "bar.baz",
				Value:     0.34,
				Timestamp: 43,
				Tags: []Tag{{
					Key:   "a",
					Value: "b",
				}},
			},
		},
	})
	// Multi lines with invalid line
	f("put foo 2 0.3 a=b\naaa bbb\nput bar.baz 43 0.34 a=b\n", &Rows{
		Rows: []Row{
			{
				Metric:    "foo",
				Value:     0.3,
				Timestamp: 2,
				Tags: []Tag{{
					Key:   "a",
					Value: "b",
				}},
			},
			{
				Metric:    "bar.baz",
				Value:     0.34,
				Timestamp: 43,
				Tags: []Tag{{
					Key:   "a",
					Value: "b",
				}},
			},
		},
	})
}
|
||||
25
app/vminsert/opentsdb/parser_timing_test.go
Normal file
25
app/vminsert/opentsdb/parser_timing_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `put cpu.usage_user 1234556768 1.23 a=b
|
||||
put cpu.usage_system 1234556768 23.344 a=b
|
||||
put cpu.usage_iowait 1234556769 3.3443 a=b
|
||||
put cpu.usage_irq 1234556768 0.34432 a=b
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
rows.Unmarshal(s)
|
||||
if len(rows.Rows) != 4 {
|
||||
panic(fmt.Errorf("unexpected number of parsed rows; got %d; want 4", len(rows.Rows)))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
160
app/vminsert/opentsdb/request_handler.go
Normal file
160
app/vminsert/opentsdb/request_handler.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Ingestion counters for the OpenTSDB put protocol path.
var (
	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb"}`)
	rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="opentsdb"}`)
)

// insertHandler processes remote write for OpenTSDB put protocol.
//
// See http://opentsdb.net/docs/build/html/api_telnet/put.html
func insertHandler(r io.Reader) error {
	return concurrencylimiter.Do(func() error {
		return insertHandlerInternal(r)
	})
}
|
||||
|
||||
func insertHandlerInternal(r io.Reader) error {
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
for ctx.Read(r) {
|
||||
if err := ctx.InsertRows(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ctx.Error()
|
||||
}
|
||||
|
||||
// InsertRows converts the parsed OpenTSDB rows in ctx.Rows into time
// series and writes them via ctx.Common.
func (ctx *pushCtx) InsertRows() error {
	rows := ctx.Rows.Rows
	ic := &ctx.Common
	ic.Reset(len(rows))
	for i := range rows {
		r := &rows[i]
		ic.Labels = ic.Labels[:0]
		// NOTE(review): an empty label key appears to denote the metric
		// name — confirm against common.InsertCtx.AddLabel.
		ic.AddLabel("", r.Metric)
		for j := range r.Tags {
			tag := &r.Tags[j]
			ic.AddLabel(tag.Key, tag.Value)
		}
		ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
	}
	rowsInserted.Add(len(rows))
	rowsPerInsert.Update(float64(len(rows)))
	return ic.FlushBufs()
}
|
||||
|
||||
const flushTimeout = 3 * time.Second
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader) bool {
|
||||
opentsdbReadCalls.Inc()
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
if c, ok := r.(net.Conn); ok {
|
||||
if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil {
|
||||
opentsdbReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot set read deadline: %s", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf)
|
||||
if ctx.err != nil {
|
||||
if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() {
|
||||
// Flush the read data on timeout and try reading again.
|
||||
ctx.err = nil
|
||||
} else {
|
||||
if ctx.err != io.EOF {
|
||||
opentsdbReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", ctx.err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf))
|
||||
|
||||
// Fill in missing timestamps
|
||||
currentTimestamp := time.Now().Unix()
|
||||
rows := ctx.Rows.Rows
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp == 0 {
|
||||
r.Timestamp = currentTimestamp
|
||||
}
|
||||
}
|
||||
|
||||
// Convert timestamps from seconds to milliseconds
|
||||
for i := range rows {
|
||||
rows[i].Timestamp *= 1e3
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// pushCtx holds reusable per-connection state for OpenTSDB inserts.
// Instances are pooled via getPushCtx/putPushCtx.
type pushCtx struct {
	Rows   Rows
	Common common.InsertCtx

	reqBuf  []byte // current block of unparsed lines (filled by common.ReadLinesBlock)
	tailBuf []byte // leftover bytes carried between ReadLinesBlock calls

	err error // sticky read error; io.EOF means clean end of stream
}

// Error returns the error encountered by Read, treating io.EOF as a clean
// end of stream.
func (ctx *pushCtx) Error() error {
	if ctx.err == io.EOF {
		return nil
	}
	return ctx.err
}
|
||||
|
||||
// reset clears ctx so it can be safely returned to the pool.
func (ctx *pushCtx) reset() {
	ctx.Rows.Reset()
	ctx.Common.Reset(0)
	ctx.reqBuf = ctx.reqBuf[:0]
	ctx.tailBuf = ctx.tailBuf[:0]

	ctx.err = nil
}

// Read counters for the OpenTSDB put protocol listener.
var (
	opentsdbReadCalls  = metrics.NewCounter(`vm_read_calls_total{name="opentsdb"}`)
	opentsdbReadErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb"}`)
)
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
138
app/vminsert/opentsdb/server.go
Normal file
138
app/vminsert/opentsdb/server.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package opentsdb
|
||||
|
||||
import (
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Per-transport request/error counters for the OpenTSDB write listeners.
var (
	writeRequestsTCP = metrics.NewCounter(`vm_opentsdb_requests_total{name="write", net="tcp"}`)
	writeErrorsTCP   = metrics.NewCounter(`vm_opentsdb_request_errors_total{name="write", net="tcp"}`)

	writeRequestsUDP = metrics.NewCounter(`vm_opentsdb_requests_total{name="write", net="udp"}`)
	writeErrorsUDP   = metrics.NewCounter(`vm_opentsdb_request_errors_total{name="write", net="udp"}`)
)
|
||||
|
||||
// Serve starts OpenTSDB collector on the given addr.
//
// It listens on both TCP and UDP and blocks until both serving goroutines
// exit (after Stop closes the listeners).
func Serve(addr string) {
	logger.Infof("starting TCP OpenTSDB collector at %q", addr)
	lnTCP, err := netutil.NewTCPListener("opentsdb", addr)
	if err != nil {
		logger.Fatalf("cannot start TCP OpenTSDB collector at %q: %s", addr, err)
	}
	listenerTCP = lnTCP

	logger.Infof("starting UDP OpenTSDB collector at %q", addr)
	lnUDP, err := net.ListenPacket("udp4", addr)
	if err != nil {
		logger.Fatalf("cannot start UDP OpenTSDB collector at %q: %s", addr, err)
	}
	listenerUDP = lnUDP

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		serveTCP(listenerTCP)
		logger.Infof("stopped TCP OpenTSDB collector at %q", addr)
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		serveUDP(listenerUDP)
		logger.Infof("stopped UDP OpenTSDB collector at %q", addr)
	}()
	wg.Wait()
}
|
||||
|
||||
// serveTCP accepts TCP connections and handles each one in its own
// goroutine until the listener is closed.
func serveTCP(ln net.Listener) {
	for {
		c, err := ln.Accept()
		if err != nil {
			if ne, ok := err.(net.Error); ok {
				if ne.Temporary() {
					// Back off on transient errors instead of spinning.
					time.Sleep(time.Second)
					continue
				}
				if strings.Contains(err.Error(), "use of closed network connection") {
					// The listener was closed by Stop.
					break
				}
				logger.Fatalf("unrecoverable error when accepting TCP OpenTSDB connections: %s", err)
			}
			logger.Fatalf("unexpected error when accepting TCP OpenTSDB connections: %s", err)
		}
		go func() {
			writeRequestsTCP.Inc()
			if err := insertHandler(c); err != nil {
				writeErrorsTCP.Inc()
				logger.Errorf("error in TCP OpenTSDB conn %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
			}
			// Best-effort close; the connection is finished either way.
			_ = c.Close()
		}()
	}
}
|
||||
|
||||
// serveUDP reads OpenTSDB packets from ln using one reader goroutine per
// CPU until the listener is closed.
func serveUDP(ln net.PacketConn) {
	gomaxprocs := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	for i := 0; i < gomaxprocs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each reader owns a 64KB buffer re-used across packets.
			var bb bytesutil.ByteBuffer
			bb.B = bytesutil.Resize(bb.B, 64*1024)
			for {
				bb.Reset()
				bb.B = bb.B[:cap(bb.B)]
				n, addr, err := ln.ReadFrom(bb.B)
				if err != nil {
					writeErrorsUDP.Inc()
					if ne, ok := err.(net.Error); ok {
						if ne.Temporary() {
							// Back off on transient errors instead of spinning.
							time.Sleep(time.Second)
							continue
						}
						if strings.Contains(err.Error(), "use of closed network connection") {
							// The listener was closed by Stop.
							break
						}
					}
					logger.Errorf("cannot read OpenTSDB UDP data: %s", err)
					continue
				}
				bb.B = bb.B[:n]
				writeRequestsUDP.Inc()
				if err := insertHandler(bb.NewReader()); err != nil {
					writeErrorsUDP.Inc()
					logger.Errorf("error in UDP OpenTSDB conn %q<->%q: %s", ln.LocalAddr(), addr, err)
					continue
				}
			}
		}()
	}
	wg.Wait()
}
|
||||
|
||||
// Listeners created by Serve; closed by Stop.
var (
	listenerTCP net.Listener
	listenerUDP net.PacketConn
)

// Stop stops the server.
//
// Closing the listeners makes the serveTCP/serveUDP loops exit on the
// "use of closed network connection" error.
func Stop() {
	logger.Infof("stopping TCP OpenTSDB server at %q...", listenerTCP.Addr())
	if err := listenerTCP.Close(); err != nil {
		logger.Errorf("cannot close TCP OpenTSDB server: %s", err)
	}
	logger.Infof("stopping UDP OpenTSDB server at %q...", listenerUDP.LocalAddr())
	if err := listenerUDP.Close(); err != nil {
		logger.Errorf("cannot close UDP OpenTSDB server: %s", err)
	}
}
|
||||
198
app/vminsert/opentsdbhttp/parser.go
Normal file
198
app/vminsert/opentsdbhttp/parser.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed OpenTSDB rows.
type Rows struct {
	Rows []Row

	// tagsPool is shared scratch storage backing the Tags slices of Rows.
	tagsPool []Tag
}

// Reset resets rs.
func (rs *Rows) Reset() {
	// Release references to objects, so they can be GC'ed.
	for i := range rs.Rows {
		rs.Rows[i].reset()
	}
	rs.Rows = rs.Rows[:0]

	for i := range rs.tagsPool {
		rs.tagsPool[i].reset()
	}
	rs.tagsPool = rs.tagsPool[:0]
}

// Unmarshal unmarshals OpenTSDB rows from av.
//
// See http://opentsdb.net/docs/build/html/api_http/put.html
//
// s must be unchanged until rs is in use.
func (rs *Rows) Unmarshal(av *fastjson.Value) {
	rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], av, rs.tagsPool[:0])
}
|
||||
|
||||
// Row is a single OpenTSDB row.
type Row struct {
	Metric    string
	Tags      []Tag
	Value     float64
	Timestamp int64 // 0 means "populate with the current time" downstream
}

// reset clears r, releasing string references for GC.
func (r *Row) reset() {
	r.Metric = ""
	r.Tags = nil
	r.Value = 0
	r.Timestamp = 0
}
|
||||
|
||||
// unmarshal fills r from the OpenTSDB JSON object o.
//
// Parsed tags are appended to tagsPool and the (possibly grown) pool is
// returned; r.Tags aliases the appended region. String fields reference the
// memory backing o, so o must outlive r.
func (r *Row) unmarshal(o *fastjson.Value, tagsPool []Tag) ([]Tag, error) {
	r.reset()
	m := o.GetStringBytes("metric")
	if len(m) == 0 {
		return tagsPool, fmt.Errorf("missing `metric` in %s", o)
	}
	// Zero-copy conversion: r.Metric aliases the parser's buffer.
	r.Metric = bytesutil.ToUnsafeString(m)

	rawTs := o.Get("timestamp")
	if rawTs != nil {
		// Accept both JSON numbers and string-encoded numbers;
		// fractional timestamps are truncated towards zero.
		ts, err := getFloat64(rawTs)
		if err != nil {
			return tagsPool, fmt.Errorf("invalid `timestamp` in %s: %s", o, err)
		}
		r.Timestamp = int64(ts)
	} else {
		// Allow missing timestamp. It is automatically populated
		// with the current time in this case.
		r.Timestamp = 0
	}

	rawV := o.Get("value")
	if rawV == nil {
		return tagsPool, fmt.Errorf("missing `value` in %s", o)
	}
	v, err := getFloat64(rawV)
	if err != nil {
		return tagsPool, fmt.Errorf("invalid `value` in %s: %s", o, err)
	}
	r.Value = v

	vt := o.Get("tags")
	if vt == nil {
		// Allow empty tags.
		return tagsPool, nil
	}
	rawTags, err := vt.Object()
	if err != nil {
		return tagsPool, fmt.Errorf("invalid `tags` in %s: %s", o, err)
	}

	tagsStart := len(tagsPool)
	tagsPool, err = unmarshalTags(tagsPool, rawTags)
	if err != nil {
		return tagsPool, fmt.Errorf("cannot parse tags %s: %s", rawTags, err)
	}
	tags := tagsPool[tagsStart:]
	// Three-index slice caps r.Tags at its own length, so an append through
	// r.Tags cannot clobber pool entries belonging to later rows.
	r.Tags = tags[:len(tags):len(tags)]
	return tagsPool, nil
}
|
||||
|
||||
func getFloat64(v *fastjson.Value) (float64, error) {
|
||||
switch v.Type() {
|
||||
case fastjson.TypeNumber:
|
||||
return v.Float64()
|
||||
case fastjson.TypeString:
|
||||
vStr, _ := v.StringBytes()
|
||||
vFloat := fastfloat.ParseBestEffort(bytesutil.ToUnsafeString(vStr))
|
||||
if vFloat == 0 && string(vStr) != "0" && string(vStr) != "0.0" {
|
||||
return 0, fmt.Errorf("invalid float64 value: %q", vStr)
|
||||
}
|
||||
return vFloat, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("value doesn't contain float64; it contains %s", v.Type())
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, av *fastjson.Value, tagsPool []Tag) ([]Row, []Tag) {
|
||||
switch av.Type() {
|
||||
case fastjson.TypeObject:
|
||||
return unmarshalRow(dst, av, tagsPool)
|
||||
case fastjson.TypeArray:
|
||||
a, _ := av.Array()
|
||||
for _, o := range a {
|
||||
dst, tagsPool = unmarshalRow(dst, o, tagsPool)
|
||||
}
|
||||
return dst, tagsPool
|
||||
default:
|
||||
logger.Errorf("OpenTSDB JSON must be either object or array; got %s; body=%s", av.Type(), av)
|
||||
invalidLines.Inc()
|
||||
return dst, tagsPool
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalRow(dst []Row, o *fastjson.Value, tagsPool []Tag) ([]Row, []Tag) {
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(o, tagsPool)
|
||||
if err != nil {
|
||||
dst = dst[:len(dst)-1]
|
||||
logger.Errorf("cannot unmarshal OpenTSDB object %s: %s", o, err)
|
||||
invalidLines.Inc()
|
||||
}
|
||||
return dst, tagsPool
|
||||
}
|
||||
|
||||
// invalidLines counts OpenTSDB HTTP entries that could not be parsed.
var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="opentsdb-http"}`)
|
||||
|
||||
// unmarshalTags appends tags parsed from the JSON object o to dst.
//
// Tags with an empty key or an empty value are silently skipped. A non-string
// tag value makes the whole call fail. Key/value strings alias the memory
// backing o.
//
// NOTE(review): Visit keeps iterating after err is set, so later valid tags
// are still appended even though the error is returned — confirm this is
// intentional (the caller discards the row on error anyway).
func unmarshalTags(dst []Tag, o *fastjson.Object) ([]Tag, error) {
	var err error
	// dst is captured and reassigned by the closure; the final value is
	// returned after Visit completes.
	o.Visit(func(k []byte, v *fastjson.Value) {
		if v.Type() != fastjson.TypeString {
			err = fmt.Errorf("tag value must be string; got %s; value=%s", v.Type(), v)
			return
		}
		if len(k) == 0 {
			// Skip empty tags
			return
		}
		vStr, _ := v.StringBytes()
		if len(vStr) == 0 {
			// Skip empty tags
			return
		}
		// Reuse spare capacity in dst when possible.
		if cap(dst) > len(dst) {
			dst = dst[:len(dst)+1]
		} else {
			dst = append(dst, Tag{})
		}
		tag := &dst[len(dst)-1]
		tag.Key = bytesutil.ToUnsafeString(k)
		tag.Value = bytesutil.ToUnsafeString(vStr)
	})
	return dst, err
}
|
||||
|
||||
// Tag is an OpenTSDB tag.
type Tag struct {
	Key   string
	Value string
}

// reset clears t so the referenced strings can be GC'ed.
func (t *Tag) reset() {
	t.Key = ""
	t.Value = ""
}
|
||||
246
app/vminsert/opentsdbhttp/parser_test.go
Normal file
246
app/vminsert/opentsdbhttp/parser_test.go
Normal file
@@ -0,0 +1,246 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRowsUnmarshalFailure verifies that malformed OpenTSDB HTTP payloads
// yield zero parsed rows: invalid entries are dropped, not returned.
func TestRowsUnmarshalFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var rows Rows
		p := parserPool.Get()
		defer parserPool.Put(p)
		v, err := p.Parse(s)
		if err != nil {
			// Expected JSON parser error
			return
		}
		// Verify OpenTSDB body parsing error
		rows.Unmarshal(v)
		if len(rows.Rows) != 0 {
			t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
		}
		// Try again — the failure must be reproducible on reuse.
		rows.Unmarshal(v)
		if len(rows.Rows) != 0 {
			t.Fatalf("unexpected number of rows parsed; got %d; want 0", len(rows.Rows))
		}
	}

	// invalid json
	f("{g")

	// Invalid json type
	f(`1`)
	f(`"foo"`)
	f(`[1,2]`)
	f(`null`)

	// Incomplete object
	f(`{}`)
	f(`{"metric": "aaa"}`)
	f(`{"metric": "aaa", "timestamp": 1122}`)
	f(`{"metric": "aaa", "timestamp": "tststs"}`)
	f(`{"timestamp": 1122, "value": 33}`)
	f(`{"value": 33}`)
	f(`{"value": 33, "tags": {"fooo":"bar"}}`)

	// Invalid value
	f(`{"metric": "aaa", "timestamp": 1122, "value": "0.0.0"}`)

	// Invalid metric type
	f(`{"metric": "", "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
	f(`{"metric": ["aaa"], "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
	f(`{"metric": {"aaa":1}, "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)
	f(`{"metric": 1, "timestamp": 1122, "value": 0.45, "tags": {"foo": "bar"}}`)

	// Invalid timestamp type
	f(`{"metric": "aaa", "timestamp": "foobar", "value": 0.45, "tags": {"foo": "bar"}}`)
	f(`{"metric": "aaa", "timestamp": [1,2], "value": 0.45, "tags": {"foo": "bar"}}`)
	f(`{"metric": "aaa", "timestamp": {"a":1}, "value": 0.45, "tags": {"foo": "bar"}}`)

	// Invalid value type
	f(`{"metric": "aaa", "timestamp": 1122, "value": [0,1], "tags": {"foo":"bar"}}`)
	f(`{"metric": "aaa", "timestamp": 1122, "value": {"a":1}, "tags": {"foo":"bar"}}`)
	f(`{"metric": "aaa", "timestamp": 1122, "value": "foobar", "tags": {"foo":"bar"}}`)

	// Invalid tags type
	f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": 1}`)
	f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": [1,2]}`)
	f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": "foo"}`)

	// Invalid tag value type
	f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": {"foo": ["bar"]}}`)
	f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": {"foo": {"bar":"baz"}}}`)
	f(`{"metric": "aaa", "timestamp": 1122, "value": 0.45, "tags": {"foo": 1}}`)

	// Invalid multiline
	f(`[{"metric": "aaa", "timestamp": 1122, "value": "trt", "tags":{"foo":"bar"}}, {"metric": "aaa", "timestamp": [1122], "value": 111}]`)
}
|
||||
|
||||
// TestRowsUnmarshalSuccess verifies parsing of well-formed OpenTSDB HTTP
// payloads, including repeated unmarshaling into the same Rows and Reset.
func TestRowsUnmarshalSuccess(t *testing.T) {
	f := func(s string, rowsExpected *Rows) {
		t.Helper()
		var rows Rows

		p := parserPool.Get()
		defer parserPool.Put(p)
		v, err := p.Parse(s)
		if err != nil {
			t.Fatalf("cannot parse json %s: %s", s, err)
		}
		rows.Unmarshal(v)
		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
		}

		// Try unmarshaling again
		rows.Unmarshal(v)
		if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
			t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
		}

		rows.Reset()
		if len(rows.Rows) != 0 {
			t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
		}
	}

	// Normal line
	f(`{"metric": "foobar", "timestamp": 789, "value": -123.456, "tags": {"a":"b"}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})
	// Timestamp as string
	f(`{"metric": "foobar", "timestamp": "1789", "value": -123.456, "tags": {"a":"b"}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 1789,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})
	// Timestamp as float64 (it is truncated to integer)
	f(`{"metric": "foobar", "timestamp": 17.89, "value": -123.456, "tags": {"a":"b"}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 17,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})
	// Empty tags
	f(`{"metric": "foobar", "timestamp": 789, "value": -123.456, "tags": {}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
			Tags:      nil,
		}},
	})
	// Missing tags
	f(`{"metric": "foobar", "timestamp": 789, "value": -123.456}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 789,
			Tags:      nil,
		}},
	})
	// Empty tag value — empty keys and empty values are dropped.
	f(`{"metric": "foobar", "timestamp": 123, "value": -123.456, "tags": {"a":"", "b":"c", "": "d"}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -123.456,
			Timestamp: 123,
			Tags: []Tag{
				{
					Key:   "b",
					Value: "c",
				},
			},
		}},
	})
	// Value as string
	f(`{"metric": "foobar", "timestamp": 789, "value": "-12.456", "tags": {"a":"b"}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -12.456,
			Timestamp: 789,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})
	// Missing timestamp — parsed as 0; the caller fills it in later.
	f(`{"metric": "foobar", "value": "-12.456", "tags": {"a":"b"}}`, &Rows{
		Rows: []Row{{
			Metric:    "foobar",
			Value:     -12.456,
			Timestamp: 0,
			Tags: []Tag{{
				Key:   "a",
				Value: "b",
			}},
		}},
	})

	// Multiple tags
	f(`{"metric": "foo", "value": 1, "timestamp": 2, "tags": {"bar":"baz", "x": "y"}}`, &Rows{
		Rows: []Row{{
			Metric: "foo",
			Tags: []Tag{
				{
					Key:   "bar",
					Value: "baz",
				},
				{
					Key:   "x",
					Value: "y",
				},
			},
			Value:     1,
			Timestamp: 2,
		}},
	})

	// Multi lines
	f(`[{"metric": "foo", "value": "0.3", "timestamp": 2, "tags": {"a":"b"}},
{"metric": "bar.baz", "value": 0.34, "timestamp": 43, "tags": {"a":"b"}}]`, &Rows{
		Rows: []Row{
			{
				Metric:    "foo",
				Value:     0.3,
				Timestamp: 2,
				Tags: []Tag{{
					Key:   "a",
					Value: "b",
				}},
			},
			{
				Metric:    "bar.baz",
				Value:     0.34,
				Timestamp: 43,
				Tags: []Tag{{
					Key:   "a",
					Value: "b",
				}},
			},
		},
	})
}
|
||||
33
app/vminsert/opentsdbhttp/parser_timing_test.go
Normal file
33
app/vminsert/opentsdbhttp/parser_timing_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
|
||||
// BenchmarkRowsUnmarshal measures parsing throughput for a typical
// 4-row OpenTSDB HTTP payload.
func BenchmarkRowsUnmarshal(b *testing.B) {
	s := `[{"metric": "cpu.usage_user", "timestamp": 1234556768, "value": 1.23, "tags": {"a":"b", "x": "y"}},
{"metric": "cpu.usage_system", "timestamp": 1234556768, "value": 23.344, "tags": {"a":"b"}},
{"metric": "cpu.usage_iowait", "timestamp": 1234556769, "value":3.3443, "tags": {"a":"b"}},
{"metric": "cpu.usage_irq", "timestamp": 1234556768, "value": 0.34432, "tags": {"a":"b"}}
]
`
	b.SetBytes(int64(len(s)))
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine parser and rows avoid cross-goroutine contention.
		var rows Rows
		var p fastjson.Parser
		for pb.Next() {
			v, err := p.Parse(s)
			if err != nil {
				panic(fmt.Errorf("cannot parse %q: %s", s, err))
			}
			rows.Unmarshal(v)
			if len(rows.Rows) != 4 {
				panic(fmt.Errorf("unexpected number of rows unmarshaled; got %d; want 4", len(rows.Rows)))
			}
		}
	})
}
|
||||
150
app/vminsert/opentsdbhttp/request_handler.go
Normal file
150
app/vminsert/opentsdbhttp/request_handler.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
)
|
||||
|
||||
// Metrics exposed by the OpenTSDB HTTP ingestion path.
var (
	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb-http"}`)
	rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="opentsdb-http"}`)

	opentsdbReadCalls       = metrics.NewCounter(`vm_read_calls_total{name="opentsdb-http"}`)
	opentsdbReadErrors      = metrics.NewCounter(`vm_read_errors_total{name="opentsdb-http"}`)
	opentsdbUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="opentsdb-http"}`)
)
|
||||
|
||||
// insertHandler processes HTTP OpenTSDB put requests.
// See http://opentsdb.net/docs/build/html/api_http/put.html
//
// The work runs inside concurrencylimiter.Do, which presumably bounds the
// number of concurrent inserts — see the concurrencylimiter package.
func insertHandler(req *http.Request, maxSize int64) error {
	return concurrencylimiter.Do(func() error {
		return insertHandlerInternal(req, maxSize)
	})
}
|
||||
|
||||
func insertHandlerInternal(req *http.Request, maxSize int64) error {
|
||||
opentsdbReadCalls.Inc()
|
||||
|
||||
r := req.Body
|
||||
if req.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := common.GetGzipReader(r)
|
||||
if err != nil {
|
||||
opentsdbReadErrors.Inc()
|
||||
return fmt.Errorf("cannot read gzipped http protocol data: %s", err)
|
||||
}
|
||||
defer common.PutGzipReader(zr)
|
||||
r = zr
|
||||
}
|
||||
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
|
||||
// Read the request in ctx.reqBuf
|
||||
lr := io.LimitReader(r, maxSize+1)
|
||||
reqLen, err := ctx.reqBuf.ReadFrom(lr)
|
||||
if err != nil {
|
||||
opentsdbReadErrors.Inc()
|
||||
return fmt.Errorf("cannot read HTTP OpenTSDB request: %s", err)
|
||||
}
|
||||
if reqLen > maxSize {
|
||||
opentsdbReadErrors.Inc()
|
||||
return fmt.Errorf("too big HTTP OpenTSDB request; mustn't exceed %d bytes", maxSize)
|
||||
}
|
||||
|
||||
// Unmarshal the request to ctx.Rows
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(ctx.reqBuf.B)
|
||||
if err != nil {
|
||||
opentsdbUnmarshalErrors.Inc()
|
||||
return fmt.Errorf("cannot parse HTTP OpenTSDB json: %s", err)
|
||||
}
|
||||
ctx.Rows.Unmarshal(v)
|
||||
|
||||
// Fill in missing timestamps
|
||||
currentTimestamp := time.Now().Unix()
|
||||
rows := ctx.Rows.Rows
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp == 0 {
|
||||
r.Timestamp = currentTimestamp
|
||||
}
|
||||
}
|
||||
|
||||
// Convert timestamps in seconds to milliseconds if needed.
|
||||
// See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp&secondMask == 0 {
|
||||
r.Timestamp *= 1e3
|
||||
}
|
||||
}
|
||||
|
||||
// Insert ctx.Rows to db.
|
||||
ic := &ctx.Common
|
||||
ic.Reset(len(rows))
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("", r.Metric)
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
ic.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(rows))
|
||||
rowsPerInsert.Update(float64(len(rows)))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
// secondMask selects the bits above the low 32. A Unix timestamp in
// milliseconds sets some of them, while one in seconds does not, which is
// how second- and millisecond-precision timestamps are told apart.
// See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK
const secondMask int64 = 0x7FFFFFFF00000000

// parserPool reuses fastjson parsers across requests.
var parserPool fastjson.ParserPool

// pushCtx is per-request scratch state, pooled via getPushCtx/putPushCtx.
type pushCtx struct {
	Rows   Rows
	Common common.InsertCtx

	// reqBuf holds the raw (possibly gunzipped) request body.
	reqBuf bytesutil.ByteBuffer
}
|
||||
|
||||
// reset clears ctx for reuse, keeping allocated capacity where the
// underlying Reset implementations allow it.
func (ctx *pushCtx) reset() {
	ctx.Rows.Reset()
	ctx.Common.Reset(0)
	ctx.reqBuf.Reset()
}
|
||||
|
||||
// getPushCtx returns a pushCtx from the pool, creating a fresh one when the
// pool is empty. A small channel fronts sync.Pool, presumably so that up to
// GOMAXPROCS contexts are kept alive across GC cycles (sync.Pool may be
// emptied by GC).
func getPushCtx() *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			return v.(*pushCtx)
		}
		return &pushCtx{}
	}
}

// putPushCtx resets ctx and returns it to the pool.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
70
app/vminsert/opentsdbhttp/server.go
Normal file
70
app/vminsert/opentsdbhttp/server.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package opentsdbhttp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
	writeRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/put", protocol="opentsdb-http"}`)
	writeErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/put", protocol="opentsdb-http"}`)
)

var (
	// httpServer is the running OpenTSDB HTTP server; set by Serve.
	httpServer *http.Server
	// httpAddr remembers the listen address for logging in Stop.
	httpAddr string
	// maxRequestSize bounds the size of a single /api/put body.
	maxRequestSize int64
)
|
||||
|
||||
// Serve starts HTTP OpenTSDB server on the given addr.
//
// The server runs in a background goroutine; use Stop for a graceful
// shutdown. Requests larger than maxReqSize bytes are rejected.
func Serve(addr string, maxReqSize int64) {
	logger.Infof("starting HTTP OpenTSDB server at %q", addr)
	httpAddr = addr
	maxRequestSize = maxReqSize
	httpServer = &http.Server{
		Addr:         addr,
		Handler:      http.HandlerFunc(requestHandler),
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	go func() {
		err := httpServer.ListenAndServe()
		if err == http.ErrServerClosed {
			// Normal path: Stop called Shutdown.
			return
		}
		if err != nil {
			logger.Fatalf("error serving HTTP OpenTSDB: %s", err)
		}
	}()
}
|
||||
|
||||
// requestHandler handles HTTP OpenTSDB insert request.
|
||||
func requestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/api/put":
|
||||
writeRequests.Inc()
|
||||
if err := insertHandler(r, maxRequestSize); err != nil {
|
||||
writeErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
default:
|
||||
httpserver.Errorf(w, "unexpected path requested on HTTP OpenTSDB server: %q", r.URL.Path)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops HTTP OpenTSDB server.
//
// In-flight requests are given up to 5 seconds to complete before the
// shutdown error is reported via logger.Fatalf.
func Stop() {
	logger.Infof("stopping HTTP OpenTSDB server at %q...", httpAddr)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := httpServer.Shutdown(ctx); err != nil {
		logger.Fatalf("cannot close HTTP OpenTSDB server: %s", err)
	}
}
|
||||
112
app/vminsert/prometheus/request_handler.go
Normal file
112
app/vminsert/prometheus/request_handler.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Metrics exposed by the Prometheus remote-write ingestion path.
var (
	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
	rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="prometheus"}`)
)
|
||||
|
||||
// InsertHandler processes remote write for prometheus.
//
// The work runs inside concurrencylimiter.Do, which presumably bounds the
// number of concurrent inserts — see the concurrencylimiter package.
func InsertHandler(r *http.Request, maxSize int64) error {
	return concurrencylimiter.Do(func() error {
		return insertHandlerInternal(r, maxSize)
	})
}
|
||||
|
||||
func insertHandlerInternal(r *http.Request, maxSize int64) error {
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
if err := ctx.Read(r, maxSize); err != nil {
|
||||
return err
|
||||
}
|
||||
timeseries := ctx.req.Timeseries
|
||||
rowsLen := 0
|
||||
for i := range timeseries {
|
||||
rowsLen += len(timeseries[i].Samples)
|
||||
}
|
||||
ic := &ctx.Common
|
||||
ic.Reset(rowsLen)
|
||||
rowsTotal := 0
|
||||
for i := range timeseries {
|
||||
ts := ×eries[i]
|
||||
var metricNameRaw []byte
|
||||
for i := range ts.Samples {
|
||||
r := &ts.Samples[i]
|
||||
metricNameRaw = ic.WriteDataPointExt(metricNameRaw, ts.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsTotal += len(ts.Samples)
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
// pushCtx is per-request scratch state for Prometheus remote-write,
// pooled via getPushCtx/putPushCtx.
type pushCtx struct {
	Common common.InsertCtx

	// req holds the decoded write request; reqBuf is its raw
	// snappy-decompressed payload, reused across requests.
	req    prompb.WriteRequest
	reqBuf []byte
}

// reset clears ctx for reuse, keeping the reqBuf capacity.
func (ctx *pushCtx) reset() {
	ctx.Common.Reset(0)
	ctx.req.Reset()
	ctx.reqBuf = ctx.reqBuf[:0]
}
|
||||
|
||||
// Read reads a snappy-compressed prompb.WriteRequest of at most maxSize
// bytes from r and unmarshals it into ctx.req.
func (ctx *pushCtx) Read(r *http.Request, maxSize int64) error {
	prometheusReadCalls.Inc()

	var err error
	ctx.reqBuf, err = prompb.ReadSnappy(ctx.reqBuf[:0], r.Body, maxSize)
	if err != nil {
		prometheusReadErrors.Inc()
		return fmt.Errorf("cannot read prompb.WriteRequest: %s", err)
	}
	if err = ctx.req.Unmarshal(ctx.reqBuf); err != nil {
		prometheusUnmarshalErrors.Inc()
		return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %s", len(ctx.reqBuf), err)
	}
	return nil
}

var (
	prometheusReadCalls       = metrics.NewCounter(`vm_read_calls_total{name="prometheus"}`)
	prometheusReadErrors      = metrics.NewCounter(`vm_read_errors_total{name="prometheus"}`)
	prometheusUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="prometheus"}`)
)
|
||||
|
||||
// getPushCtx returns a pushCtx from the pool, creating a fresh one when the
// pool is empty. A small channel fronts sync.Pool, presumably so that up to
// GOMAXPROCS contexts are kept alive across GC cycles (sync.Pool may be
// emptied by GC).
func getPushCtx() *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			return v.(*pushCtx)
		}
		return &pushCtx{}
	}
}

// putPushCtx resets ctx and returns it to the pool.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
}

var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
||||
2
app/vmselect/README.md
Normal file
2
app/vmselect/README.md
Normal file
@@ -0,0 +1,2 @@
|
||||
`vmselect` executes incoming queries, fetching the required data
from `vmstorage`.
|
||||
231
app/vmselect/main.go
Normal file
231
app/vmselect/main.go
Normal file
@@ -0,0 +1,231 @@
|
||||
package vmselect
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
deleteAuthKey = flag.String("deleteAuthKey", "", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series")
|
||||
maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", runtime.GOMAXPROCS(-1)*2, "The maximum number of concurrent search requests. It shouldn't exceed 2*vCPUs for better performance. See also -search.maxQueueDuration")
|
||||
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached")
|
||||
)
|
||||
|
||||
// Init initializes vmselect
//
// It must run after vmstorage is initialized (it reads *vmstorage.DataPath)
// and before RequestHandler serves traffic (it creates concurrencyCh).
func Init() {
	tmpDirPath := *vmstorage.DataPath + "/tmp"
	// Leftover temporary files from a previous run are useless — drop them.
	fs.RemoveDirContents(tmpDirPath)
	netstorage.InitTmpBlocksDir(tmpDirPath)
	promql.InitRollupResultCache(*vmstorage.DataPath + "/cache/rollupResult")

	concurrencyCh = make(chan struct{}, *maxConcurrentRequests)
}
|
||||
|
||||
// Stop stops vmselect
//
// It shuts down the rollup result cache; see promql.StopRollupResultCache.
func Stop() {
	promql.StopRollupResultCache()
}
|
||||
|
||||
// concurrencyCh limits the number of concurrently executing queries;
// it is sized from -search.maxConcurrentRequests in Init.
var concurrencyCh chan struct{}

var (
	concurrencyLimitReached = metrics.NewCounter(`vm_concurrent_select_limit_reached_total`)
	concurrencyLimitTimeout = metrics.NewCounter(`vm_concurrent_select_limit_timeout_total`)

	// Gauges exposing the capacity and current occupancy of concurrencyCh.
	_ = metrics.NewGauge(`vm_concurrent_select_capacity`, func() float64 {
		return float64(cap(concurrencyCh))
	})
	_ = metrics.NewGauge(`vm_concurrent_select_current`, func() float64 {
		return float64(len(concurrencyCh))
	})
)
|
||||
|
||||
// RequestHandler handles remote read API requests for Prometheus
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
// Limit the number of concurrent queries.
|
||||
select {
|
||||
case concurrencyCh <- struct{}{}:
|
||||
defer func() { <-concurrencyCh }()
|
||||
default:
|
||||
// Sleep for a while until giving up. This should resolve short bursts in requests.
|
||||
concurrencyLimitReached.Inc()
|
||||
t := timerpool.Get(*maxQueueDuration)
|
||||
select {
|
||||
case concurrencyCh <- struct{}{}:
|
||||
timerpool.Put(t)
|
||||
defer func() { <-concurrencyCh }()
|
||||
case <-t.C:
|
||||
timerpool.Put(t)
|
||||
concurrencyLimitTimeout.Inc()
|
||||
err := &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("cannot handle more than %d concurrent requests", cap(concurrencyCh)),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
httpserver.Errorf(w, "%s", err)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
path := strings.Replace(r.URL.Path, "//", "/", -1)
|
||||
if strings.HasPrefix(path, "/api/v1/label/") {
|
||||
s := r.URL.Path[len("/api/v1/label/"):]
|
||||
if strings.HasSuffix(s, "/values") {
|
||||
labelValuesRequests.Inc()
|
||||
labelName := s[:len(s)-len("/values")]
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.LabelValuesHandler(labelName, w, r); err != nil {
|
||||
labelValuesErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
switch path {
|
||||
case "/api/v1/query":
|
||||
queryRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.QueryHandler(w, r); err != nil {
|
||||
queryErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/query_range":
|
||||
queryRangeRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.QueryRangeHandler(w, r); err != nil {
|
||||
queryRangeErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/series":
|
||||
seriesRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.SeriesHandler(w, r); err != nil {
|
||||
seriesErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/series/count":
|
||||
seriesCountRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.SeriesCountHandler(w, r); err != nil {
|
||||
seriesCountErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/labels":
|
||||
labelsRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.LabelsHandler(w, r); err != nil {
|
||||
labelsErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/labels/count":
|
||||
labelsCountRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.LabelsCountHandler(w, r); err != nil {
|
||||
labelsCountErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/export":
|
||||
exportRequests.Inc()
|
||||
if err := prometheus.ExportHandler(w, r); err != nil {
|
||||
exportErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/federate":
|
||||
federateRequests.Inc()
|
||||
if err := prometheus.FederateHandler(w, r); err != nil {
|
||||
federateErrors.Inc()
|
||||
httpserver.Errorf(w, "error int %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/admin/tsdb/delete_series":
|
||||
deleteRequests.Inc()
|
||||
authKey := r.FormValue("authKey")
|
||||
if authKey != *deleteAuthKey {
|
||||
httpserver.Errorf(w, "invalid authKey %q. It must match the value from -deleteAuthKey command line flag", authKey)
|
||||
return true
|
||||
}
|
||||
if err := prometheus.DeleteHandler(r); err != nil {
|
||||
deleteErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
|
||||
logger.Errorf("error in %q: %s", r.URL.Path, err)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
statusCode := http.StatusUnprocessableEntity
|
||||
if esc, ok := err.(*httpserver.ErrorWithStatusCode); ok {
|
||||
statusCode = esc.StatusCode
|
||||
}
|
||||
w.WriteHeader(statusCode)
|
||||
prometheus.WriteErrorResponse(w, statusCode, err)
|
||||
}
|
||||
|
||||
// Per-endpoint request and error counters exposed via /metrics.
//
// Each pair counts the total number of requests to the endpoint encoded in
// the `path` label and the number of requests that ended with an error.
var (
	labelValuesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/label/{}/values"}`)
	labelValuesErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/label/{}/values"}`)

	queryRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query"}`)
	queryErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/query"}`)

	queryRangeRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query_range"}`)
	queryRangeErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/query_range"}`)

	seriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/series"}`)
	seriesErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/series"}`)

	seriesCountRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/series/count"}`)
	seriesCountErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/series/count"}`)

	labelsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/labels"}`)
	labelsErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/labels"}`)

	labelsCountRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/labels/count"}`)
	labelsCountErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/labels/count"}`)

	deleteRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/admin/tsdb/delete_series"}`)
	deleteErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/admin/tsdb/delete_series"}`)

	exportRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/export"}`)
	exportErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/export"}`)

	federateRequests = metrics.NewCounter(`vm_http_requests_total{path="/federate"}`)
	federateErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/federate"}`)
)
|
||||
9
app/vmselect/netstorage/fadvise_darwin.go
Normal file
9
app/vmselect/netstorage/fadvise_darwin.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// mustFadviseSequentialRead hints the OS that f is going to be read sequentially.
//
// Darwin has no fadvise-style API usable here, so this is a no-op on this platform.
func mustFadviseSequentialRead(f *os.File) {
	// Do nothing :)
}
|
||||
15
app/vmselect/netstorage/fadvise_freebsd.go
Normal file
15
app/vmselect/netstorage/fadvise_freebsd.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func mustFadviseSequentialRead(f *os.File) {
|
||||
fd := int(f.Fd())
|
||||
if err := unix.Fadvise(int(fd), 0, 0, unix.FADV_SEQUENTIAL|unix.FADV_WILLNEED); err != nil {
|
||||
logger.Panicf("FATAL: error returned from unix.Fadvise(SEQUENTIAL|WILLNEED): %s", err)
|
||||
}
|
||||
}
|
||||
15
app/vmselect/netstorage/fadvise_linux.go
Normal file
15
app/vmselect/netstorage/fadvise_linux.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func mustFadviseSequentialRead(f *os.File) {
|
||||
fd := int(f.Fd())
|
||||
if err := unix.Fadvise(int(fd), 0, 0, unix.FADV_SEQUENTIAL|unix.FADV_WILLNEED); err != nil {
|
||||
logger.Panicf("FATAL: error returned from unix.Fadvise(SEQUENTIAL|WILLNEED): %s", err)
|
||||
}
|
||||
}
|
||||
584
app/vmselect/netstorage/netstorage.go
Normal file
584
app/vmselect/netstorage/netstorage.go
Normal file
@@ -0,0 +1,584 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"flag"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Command-line flags limiting the amount of data a single search may touch,
// protecting the process from runaway queries.
var (
	maxTagKeysPerSearch   = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned per search")
	maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned per search")
	maxMetricsPerSearch   = flag.Int("search.maxUniqueTimeseries", 300e3, "The maximum number of unique time series each search can scan")
)
|
||||
|
||||
// Result is a single timeseries result.
//
// ProcessSearchQuery returns Result slice.
type Result struct {
	// The name of the metric.
	MetricName storage.MetricName

	// Values are sorted by Timestamps.
	Values     []float64
	Timestamps []int64

	// Marshaled MetricName. Used only for results sorting
	// in app/vmselect/promql
	MetricNameMarshaled []byte
}

// reset clears r so it can be reused (see rsPool) without leaking data
// from a previous search. Slice capacities are retained to avoid
// re-allocations on reuse.
func (r *Result) reset() {
	r.MetricName.Reset()
	r.Values = r.Values[:0]
	r.Timestamps = r.Timestamps[:0]
	r.MetricNameMarshaled = r.MetricNameMarshaled[:0]
}
|
||||
|
||||
// Results holds results returned from ProcessSearchQuery.
type Results struct {
	tr        storage.TimeRange // time range the search covered
	fetchData bool              // whether block data (not only metadata) was fetched
	deadline  Deadline          // query deadline checked during unpacking

	// tbf holds marshaled data blocks for all the found series.
	tbf *tmpBlocksFile

	// packedTimeseries contains one entry per unique metric name.
	packedTimeseries []packedTimeseries
}

// Len returns the number of results in rss.
func (rss *Results) Len() int {
	return len(rss.packedTimeseries)
}

// Cancel cancels rss work.
//
// It releases the temporary blocks file backing the results.
func (rss *Results) Cancel() {
	putTmpBlocksFile(rss.tbf)
	rss.tbf = nil
}
|
||||
|
||||
// RunParallel runs in parallel f for all the results from rss.
//
// f shouldn't hold references to rs after returning.
// workerID is the id of the worker goroutine that calls f.
//
// rss becomes unusable after the call to RunParallel.
func (rss *Results) RunParallel(f func(rs *Result, workerID uint)) error {
	// Release the temporary blocks file in any case - rss is single-use.
	defer func() {
		putTmpBlocksFile(rss.tbf)
		rss.tbf = nil
	}()

	// Use roughly one worker per 32 series, capped at GOMAXPROCS.
	workersCount := 1 + len(rss.packedTimeseries)/32
	if workersCount > gomaxprocs {
		workersCount = gomaxprocs
	}
	if workersCount == 0 {
		logger.Panicf("BUG: workersCount cannot be zero")
	}
	workCh := make(chan *packedTimeseries, workersCount)
	doneCh := make(chan error)

	// Start workers.
	for i := 0; i < workersCount; i++ {
		go func(workerID uint) {
			// Each worker reuses a single Result for all its series.
			rs := getResult()
			defer putResult(rs)
			// Parallelism budget passed down to pts.Unpack, so the nested
			// unpacking does not oversubscribe the CPU.
			maxWorkersCount := gomaxprocs / workersCount

			var err error
			for pts := range workCh {
				// Stop processing as soon as the query deadline is exceeded.
				if time.Until(rss.deadline.Deadline) < 0 {
					err = fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.Timeout)
					break
				}
				if err = pts.Unpack(rss.tbf, rs, rss.tr, rss.fetchData, maxWorkersCount); err != nil {
					break
				}
				if len(rs.Timestamps) == 0 && rss.fetchData {
					// Skip empty blocks.
					continue
				}
				f(rs, workerID)
			}
			// Drain the remaining work so the feeder goroutine doesn't block
			// on workCh after this worker failed.
			for range workCh {
			}
			doneCh <- err
		}(uint(i))
	}

	// Feed workers with work.
	for i := range rss.packedTimeseries {
		workCh <- &rss.packedTimeseries[i]
	}
	rss.packedTimeseries = rss.packedTimeseries[:0]
	close(workCh)

	// Wait until workers finish.
	var errors []error
	for i := 0; i < workersCount; i++ {
		if err := <-doneCh; err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		// Return just the first error, since other errors
		// is likely duplicate the first error.
		return errors[0]
	}
	return nil
}
|
||||
|
||||
// gomaxprocs caps the number of worker goroutines spawned per query.
var gomaxprocs = runtime.GOMAXPROCS(-1)

// packedTimeseries is a single series still stored in marshaled form:
// a metric name plus the addresses of its data blocks in a tmpBlocksFile.
type packedTimeseries struct {
	metricName string
	addrs      []tmpBlockAddr
}
|
||||
|
||||
// Unpack unpacks pts to dst.
//
// Blocks are read from tbf concurrently (bounded by maxWorkersCount),
// filtered by tr and merged into dst in timestamp order.
func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, fetchData bool, maxWorkersCount int) error {
	dst.reset()

	if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
		return fmt.Errorf("cannot unmarshal metricName %q: %s", pts.metricName, err)
	}

	// Use roughly one worker per 32 block addresses, capped by the budget
	// handed down from RunParallel.
	workersCount := 1 + len(pts.addrs)/32
	if workersCount > maxWorkersCount {
		workersCount = maxWorkersCount
	}
	if workersCount == 0 {
		logger.Panicf("BUG: workersCount cannot be zero")
	}

	sbs := make([]*sortBlock, 0, len(pts.addrs))
	var sbsLock sync.Mutex

	workCh := make(chan tmpBlockAddr, workersCount)
	doneCh := make(chan error)

	// Start workers
	for i := 0; i < workersCount; i++ {
		go func() {
			var err error
			for addr := range workCh {
				sb := getSortBlock()
				if err = sb.unpackFrom(tbf, addr, tr, fetchData); err != nil {
					break
				}

				sbsLock.Lock()
				sbs = append(sbs, sb)
				sbsLock.Unlock()
			}

			// Drain the remaining work so the feeder doesn't block on workCh.
			for range workCh {
			}
			doneCh <- err
		}()
	}

	// Feed workers with work
	for _, addr := range pts.addrs {
		workCh <- addr
	}
	pts.addrs = pts.addrs[:0]
	close(workCh)

	// Wait until workers finish
	var errors []error
	for i := 0; i < workersCount; i++ {
		if err := <-doneCh; err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		// Return the first error only, since other errors are likely the same.
		return errors[0]
	}

	// Merge blocks
	mergeSortBlocks(dst, sbs)
	return nil
}
|
||||
|
||||
// getSortBlock returns a sortBlock from the pool, or a fresh one
// when the pool is empty.
func getSortBlock() *sortBlock {
	v := sbPool.Get()
	if v == nil {
		return &sortBlock{}
	}
	return v.(*sortBlock)
}

// putSortBlock resets sb and returns it to the pool for reuse.
func putSortBlock(sb *sortBlock) {
	sb.reset()
	sbPool.Put(sb)
}

// sbPool reduces allocations of sortBlock objects on hot search paths.
var sbPool sync.Pool

// metricRowsSkipped counts rows dropped by the time-range filter in unpackFrom.
var metricRowsSkipped = metrics.NewCounter(`vm_metric_rows_skipped_total{name="vmselect"}`)
|
||||
|
||||
// mergeSortBlocks merges the given sort blocks into dst in timestamp order.
//
// This is a k-way merge: every block is internally sorted by timestamp, and
// a min-heap keyed on each block's next timestamp picks the block to copy
// from. Consumed blocks are returned to the pool.
func mergeSortBlocks(dst *Result, sbh sortBlocksHeap) {
	// Skip empty sort blocks, since they cannot be passed to heap.Init.
	src := sbh
	sbh = sbh[:0]
	for _, sb := range src {
		if len(sb.Timestamps) == 0 {
			putSortBlock(sb)
			continue
		}
		sbh = append(sbh, sb)
	}
	if len(sbh) == 0 {
		return
	}
	heap.Init(&sbh)
	for {
		top := sbh[0]
		heap.Pop(&sbh)
		if len(sbh) == 0 {
			// Only one block left - copy its remaining samples verbatim.
			dst.Timestamps = append(dst.Timestamps, top.Timestamps[top.NextIdx:]...)
			dst.Values = append(dst.Values, top.Values[top.NextIdx:]...)
			putSortBlock(top)
			return
		}
		// Copy from top every sample with timestamp <= the next block's
		// smallest timestamp in a single batch.
		sbNext := sbh[0]
		tsNext := sbNext.Timestamps[sbNext.NextIdx]
		idxNext := len(top.Timestamps)
		if top.Timestamps[idxNext-1] > tsNext {
			idxNext = top.NextIdx
			for top.Timestamps[idxNext] <= tsNext {
				idxNext++
			}
		}
		dst.Timestamps = append(dst.Timestamps, top.Timestamps[top.NextIdx:idxNext]...)
		dst.Values = append(dst.Values, top.Values[top.NextIdx:idxNext]...)
		if idxNext < len(top.Timestamps) {
			// top still has samples - push it back with the new position.
			top.NextIdx = idxNext
			heap.Push(&sbh, top)
		} else {
			// Return top to the pool.
			putSortBlock(top)
		}
	}
}
|
||||
|
||||
// sortBlock holds the decoded samples of a single data block,
// ready for merging by mergeSortBlocks.
type sortBlock struct {
	// b is used as a temporary storage for unpacked rows before they
	// go to Timestamps and Values.
	b storage.Block

	Timestamps []int64
	Values     []float64
	// NextIdx is the index of the next unconsumed sample during merging.
	NextIdx int
}

// reset clears sb for reuse via sbPool; slice capacities are retained.
func (sb *sortBlock) reset() {
	sb.b.Reset()
	sb.Timestamps = sb.Timestamps[:0]
	sb.Values = sb.Values[:0]
	sb.NextIdx = 0
}
|
||||
|
||||
// unpackFrom reads the block at addr from tbf into sb, keeping only
// the samples that fall into the time range tr.
//
// Block data is unmarshaled only when fetchData is set.
func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storage.TimeRange, fetchData bool) error {
	tbf.MustReadBlockAt(&sb.b, addr)
	if fetchData {
		if err := sb.b.UnmarshalData(); err != nil {
			return fmt.Errorf("cannot unmarshal block: %s", err)
		}
	}
	timestamps := sb.b.Timestamps()

	// Skip timestamps smaller than tr.MinTimestamp.
	i := 0
	for i < len(timestamps) && timestamps[i] < tr.MinTimestamp {
		i++
	}

	// Skip timestamps bigger than tr.MaxTimestamp.
	j := len(timestamps)
	for j > i && timestamps[j-1] > tr.MaxTimestamp {
		j--
	}
	// Account for the rows dropped by the time-range filter.
	skippedRows := sb.b.RowsCount() - (j - i)
	metricRowsSkipped.Add(skippedRows)

	// Copy the remaining values.
	if i == j {
		return nil
	}
	values := sb.b.Values()
	sb.Timestamps = append(sb.Timestamps, timestamps[i:j]...)
	// Stored values are scaled decimal integers; convert them to floats.
	sb.Values = decimal.AppendDecimalToFloat(sb.Values, values[i:j], sb.b.Scale())
	return nil
}
|
||||
|
||||
// sortBlocksHeap is a min-heap of sortBlocks ordered by the timestamp of
// each block's next unconsumed sample. It implements heap.Interface for
// use in mergeSortBlocks.
type sortBlocksHeap []*sortBlock

// Len implements sort.Interface.
func (sbh sortBlocksHeap) Len() int {
	return len(sbh)
}

// Less orders blocks by their next unconsumed timestamp.
func (sbh sortBlocksHeap) Less(i, j int) bool {
	a := sbh[i]
	b := sbh[j]
	return a.Timestamps[a.NextIdx] < b.Timestamps[b.NextIdx]
}

// Swap implements sort.Interface.
func (sbh sortBlocksHeap) Swap(i, j int) {
	sbh[i], sbh[j] = sbh[j], sbh[i]
}

// Push implements heap.Interface.
func (sbh *sortBlocksHeap) Push(x interface{}) {
	*sbh = append(*sbh, x.(*sortBlock))
}

// Pop implements heap.Interface.
func (sbh *sortBlocksHeap) Pop() interface{} {
	a := *sbh
	v := a[len(a)-1]
	*sbh = a[:len(a)-1]
	return v
}
|
||||
|
||||
// DeleteSeries deletes time series matching the given tagFilterss.
//
// It returns the number of deleted metrics as reported by the storage.
func DeleteSeries(sq *storage.SearchQuery) (int, error) {
	tfss, err := setupTfss(sq.TagFilterss)
	if err != nil {
		return 0, err
	}
	return vmstorage.DeleteMetrics(tfss)
}
|
||||
|
||||
// GetLabels returns labels until the given deadline.
|
||||
func GetLabels(deadline Deadline) ([]string, error) {
|
||||
labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during labels search: %s", err)
|
||||
}
|
||||
|
||||
// Substitute "" with "__name__"
|
||||
for i := range labels {
|
||||
if labels[i] == "" {
|
||||
labels[i] = "__name__"
|
||||
}
|
||||
}
|
||||
|
||||
// Sort labels like Prometheus does
|
||||
sort.Strings(labels)
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
// GetLabelValues returns label values for the given labelName
// until the given deadline.
func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
	// The metric name is stored under the empty tag key.
	if labelName == "__name__" {
		labelName = ""
	}

	// Search for tag values
	labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch)
	if err != nil {
		return nil, fmt.Errorf("error during label values search for labelName=%q: %s", labelName, err)
	}

	// Sort labelValues like Prometheus does
	sort.Strings(labelValues)

	return labelValues, nil
}
|
||||
|
||||
// GetLabelEntries returns all the label entries until the given deadline.
|
||||
func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
|
||||
labelEntries, err := vmstorage.SearchTagEntries(*maxTagKeysPerSearch, *maxTagValuesPerSearch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during label entries request: %s", err)
|
||||
}
|
||||
|
||||
// Substitute "" with "__name__"
|
||||
for i := range labelEntries {
|
||||
e := &labelEntries[i]
|
||||
if e.Key == "" {
|
||||
e.Key = "__name__"
|
||||
}
|
||||
}
|
||||
|
||||
// Sort labelEntries by the number of label values in each entry.
|
||||
sort.Slice(labelEntries, func(i, j int) bool {
|
||||
a, b := labelEntries[i].Values, labelEntries[j].Values
|
||||
if len(a) < len(b) {
|
||||
return true
|
||||
}
|
||||
if len(a) > len(b) {
|
||||
return false
|
||||
}
|
||||
return labelEntries[i].Key < labelEntries[j].Key
|
||||
})
|
||||
|
||||
return labelEntries, nil
|
||||
}
|
||||
|
||||
// GetSeriesCount returns the number of unique series.
func GetSeriesCount(deadline Deadline) (uint64, error) {
	n, err := vmstorage.GetSeriesCount()
	if err != nil {
		return 0, fmt.Errorf("error during series count request: %s", err)
	}
	return n, nil
}
|
||||
|
||||
// getStorageSearch returns a storage.Search object from the pool,
// or a fresh one when the pool is empty.
func getStorageSearch() *storage.Search {
	v := ssPool.Get()
	if v == nil {
		return &storage.Search{}
	}
	return v.(*storage.Search)
}

// putStorageSearch closes sr and returns it to the pool.
//
// It also accounts the number of metricIDs without a corresponding
// metric name encountered during the search.
func putStorageSearch(sr *storage.Search) {
	// The counter may be updated concurrently during the search,
	// so read it atomically.
	n := atomic.LoadUint64(&sr.MissingMetricNamesForMetricID)
	missingMetricNamesForMetricID.Add(int(n))
	sr.MustClose()
	ssPool.Put(sr)
}

// ssPool reduces allocations of storage.Search objects.
var ssPool sync.Pool

var missingMetricNamesForMetricID = metrics.NewCounter(`vm_missing_metric_names_for_metric_id_total`)
|
||||
|
||||
// ProcessSearchQuery performs sq on storage nodes until the given deadline.
//
// All the matching data blocks are spilled into a temporary blocks file,
// grouped by metric name, and returned as Results for later parallel
// unpacking via Results.RunParallel.
func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline Deadline) (*Results, error) {
	// Setup search.
	tfss, err := setupTfss(sq.TagFilterss)
	if err != nil {
		return nil, err
	}
	tr := storage.TimeRange{
		MinTimestamp: sq.MinTimestamp,
		MaxTimestamp: sq.MaxTimestamp,
	}

	// Prevent the storage from shutting down while the search runs.
	vmstorage.WG.Add(1)
	defer vmstorage.WG.Done()

	sr := getStorageSearch()
	defer putStorageSearch(sr)
	sr.Init(vmstorage.Storage, tfss, tr, fetchData, *maxMetricsPerSearch)

	tbf := getTmpBlocksFile()
	// m maps a metric name to the addresses of its blocks inside tbf;
	// only addresses are kept in memory, not the block data itself.
	m := make(map[string][]tmpBlockAddr)
	blocksRead := 0
	bb := tmpBufPool.Get()
	defer tmpBufPool.Put(bb)
	for sr.NextMetricBlock() {
		blocksRead++
		bb.B = storage.MarshalBlock(bb.B[:0], sr.MetricBlock.Block)
		addr, err := tbf.WriteBlockData(bb.B)
		if err != nil {
			putTmpBlocksFile(tbf)
			return nil, fmt.Errorf("cannot write data block #%d to temporary blocks file: %s", blocksRead, err)
		}
		// Check the deadline on every block so runaway searches stop quickly.
		if time.Until(deadline.Deadline) < 0 {
			putTmpBlocksFile(tbf)
			return nil, fmt.Errorf("timeout exceeded while fetching data block #%d from storage: %s", blocksRead, deadline.Timeout)
		}
		metricName := sr.MetricBlock.MetricName
		m[string(metricName)] = append(m[string(metricName)], addr)
	}
	if err := sr.Error(); err != nil {
		putTmpBlocksFile(tbf)
		return nil, fmt.Errorf("search error after reading %d data blocks: %s", blocksRead, err)
	}
	if err := tbf.Finalize(); err != nil {
		putTmpBlocksFile(tbf)
		return nil, fmt.Errorf("cannot finalize temporary blocks file with %d blocks: %s", blocksRead, err)
	}

	var rss Results
	rss.packedTimeseries = make([]packedTimeseries, len(m))
	rss.tr = tr
	rss.fetchData = fetchData
	rss.deadline = deadline
	rss.tbf = tbf
	i := 0
	for metricName, addrs := range m {
		pts := &rss.packedTimeseries[i]
		i++
		pts.metricName = metricName
		pts.addrs = addrs
	}

	// Sort rss.packedTimeseries by the first addr offset in order
	// to reduce the number of disk seeks during unpacking in RunParallel.
	// In this case tmpBlocksFile must be read almost sequentially.
	sort.Slice(rss.packedTimeseries, func(i, j int) bool {
		pts := rss.packedTimeseries
		return pts[i].addrs[0].offset < pts[j].addrs[0].offset
	})

	return &rss, nil
}
|
||||
|
||||
func getResult() *Result {
|
||||
v := rsPool.Get()
|
||||
if v == nil {
|
||||
return &Result{}
|
||||
}
|
||||
return v.(*Result)
|
||||
}
|
||||
|
||||
func putResult(rs *Result) {
|
||||
if len(rs.Values) > 8192 {
|
||||
// Do not pool big results, since they may occupy too much memory.
|
||||
return
|
||||
}
|
||||
rs.reset()
|
||||
rsPool.Put(rs)
|
||||
}
|
||||
|
||||
var rsPool sync.Pool
|
||||
|
||||
// setupTfss converts raw tag filter groups from a SearchQuery into
// compiled storage.TagFilters ready for searching.
//
// It returns an error if any individual filter fails to parse
// (e.g. an invalid regexp).
func setupTfss(tagFilterss [][]storage.TagFilter) ([]*storage.TagFilters, error) {
	tfss := make([]*storage.TagFilters, 0, len(tagFilterss))
	for _, tagFilters := range tagFilterss {
		tfs := storage.NewTagFilters()
		for i := range tagFilters {
			tf := &tagFilters[i]
			if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
				return nil, fmt.Errorf("cannot parse tag filter %s: %s", tf, err)
			}
		}
		tfss = append(tfss, tfs)
	}
	return tfss, nil
}
|
||||
|
||||
// Deadline contains deadline with the corresponding timeout for pretty error messages.
type Deadline struct {
	// Deadline is the absolute point in time when the operation must finish.
	Deadline time.Time

	// Timeout is the original duration; it is kept only for building
	// human-readable error messages.
	Timeout time.Duration
}

// NewDeadline returns deadline for the given timeout.
func NewDeadline(timeout time.Duration) Deadline {
	d := Deadline{
		Timeout: timeout,
	}
	d.Deadline = time.Now().Add(timeout)
	return d
}
|
||||
179
app/vmselect/netstorage/tmp_blocks_file.go
Normal file
179
app/vmselect/netstorage/tmp_blocks_file.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package netstorage
|
||||
|
||||
import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
)
|
||||
|
||||
// InitTmpBlocksDir initializes directory to store temporary search results.
|
||||
//
|
||||
// It stores data in system-defined temporary directory if tmpDirPath is empty.
|
||||
func InitTmpBlocksDir(tmpDirPath string) {
|
||||
if len(tmpDirPath) == 0 {
|
||||
tmpDirPath = os.TempDir()
|
||||
}
|
||||
tmpBlocksDir = tmpDirPath + "/searchResults"
|
||||
fs.MustRemoveAll(tmpBlocksDir)
|
||||
if err := fs.MkdirAllIfNotExist(tmpBlocksDir); err != nil {
|
||||
logger.Panicf("FATAL: cannot create %q: %s", tmpBlocksDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
var tmpBlocksDir string
|
||||
|
||||
// maxInmemoryTmpBlocksFile returns the in-memory buffer size for a
// tmpBlocksFile: 1/1024th of the allowed memory, but at least 64 KiB.
// Search results exceeding this size spill to disk.
func maxInmemoryTmpBlocksFile() int {
	mem := memory.Allowed()
	maxLen := mem / 1024
	if maxLen < 64*1024 {
		return 64 * 1024
	}
	return maxLen
}

// Expose the effective in-memory buffer size at /metrics.
var _ = metrics.NewGauge(`vm_tmp_blocks_max_inmemory_file_size_bytes`, func() float64 {
	return float64(maxInmemoryTmpBlocksFile())
})
|
||||
|
||||
// tmpBlocksFile stores marshaled data blocks for a single search.
//
// Small results stay in buf; once buf overflows, data spills to the
// backing file f. offset counts all bytes ever written, so block
// addresses remain valid across the spill.
type tmpBlocksFile struct {
	// buf is the in-memory buffer; its capacity is set by maxInmemoryTmpBlocksFile.
	buf []byte

	// f is the backing file; nil until the first spill.
	f *os.File

	// offset is the total number of bytes written so far.
	offset uint64
}
|
||||
|
||||
func getTmpBlocksFile() *tmpBlocksFile {
|
||||
v := tmpBlocksFilePool.Get()
|
||||
if v == nil {
|
||||
return &tmpBlocksFile{
|
||||
buf: make([]byte, 0, maxInmemoryTmpBlocksFile()),
|
||||
}
|
||||
}
|
||||
return v.(*tmpBlocksFile)
|
||||
}
|
||||
|
||||
func putTmpBlocksFile(tbf *tmpBlocksFile) {
|
||||
tbf.MustClose()
|
||||
tbf.buf = tbf.buf[:0]
|
||||
tbf.f = nil
|
||||
tbf.offset = 0
|
||||
tmpBlocksFilePool.Put(tbf)
|
||||
}
|
||||
|
||||
var tmpBlocksFilePool sync.Pool
|
||||
|
||||
// tmpBlockAddr is the location of a single marshaled block
// inside a tmpBlocksFile.
type tmpBlockAddr struct {
	// offset is the position of the block within the file (or in-memory buffer).
	offset uint64
	// size is the length of the marshaled block in bytes.
	size int
}

// String implements fmt.Stringer; handy for error and panic messages.
func (a tmpBlockAddr) String() string {
	return fmt.Sprintf("offset %d, size %d", a.offset, a.size)
}
|
||||
|
||||
// tmpBlocksFilesCreated counts spills of search results to disk.
var tmpBlocksFilesCreated = metrics.NewCounter(`vm_tmp_blocks_files_created_total`)

// WriteBlockData writes b to tbf.
//
// It returns errors since the operation may fail on space shortage
// and this must be handled.
func (tbf *tmpBlocksFile) WriteBlockData(b []byte) (tmpBlockAddr, error) {
	// The returned address is computed from the running offset,
	// which counts all bytes ever written (buffered or on disk).
	var addr tmpBlockAddr
	addr.offset = tbf.offset
	addr.size = len(b)
	tbf.offset += uint64(addr.size)
	if len(tbf.buf)+len(b) <= cap(tbf.buf) {
		// Fast path - the data fits tbf.buf
		tbf.buf = append(tbf.buf, b...)
		return addr, nil
	}

	// Slow path: flush the data from tbf.buf to file.
	if tbf.f == nil {
		f, err := ioutil.TempFile(tmpBlocksDir, "")
		if err != nil {
			return addr, err
		}
		tbf.f = f
		tmpBlocksFilesCreated.Inc()
	}
	// Flush the buffered data and start buffering b from scratch.
	// buf is reset before checking the write error so the in-memory
	// state stays consistent either way.
	_, err := tbf.f.Write(tbf.buf)
	tbf.buf = append(tbf.buf[:0], b...)
	if err != nil {
		return addr, fmt.Errorf("cannot write block to %q: %s", tbf.f.Name(), err)
	}
	return addr, nil
}
|
||||
|
||||
// Finalize flushes any data still buffered in memory to the backing file
// and prepares tbf for reading via MustReadBlockAt.
//
// It is a no-op for purely in-memory results (no backing file).
func (tbf *tmpBlocksFile) Finalize() error {
	if tbf.f == nil {
		return nil
	}
	if _, err := tbf.f.Write(tbf.buf); err != nil {
		return fmt.Errorf("cannot flush the remaining %d bytes to tmpBlocksFile: %s", len(tbf.buf), err)
	}
	tbf.buf = tbf.buf[:0]
	if _, err := tbf.f.Seek(0, 0); err != nil {
		logger.Panicf("FATAL: cannot seek to the start of file: %s", err)
	}
	// Hint the OS that the file is read almost sequentially.
	// This should reduce the number of disk seeks, which is important
	// for HDDs.
	mustFadviseSequentialRead(tbf.f)
	return nil
}
|
||||
|
||||
// MustReadBlockAt reads the block at addr into dst.
//
// The block is read from the in-memory buffer when tbf never spilled to
// disk, and from the backing file otherwise. Any failure is considered
// a fatal invariant violation and panics.
//
// Must be called only after Finalize.
func (tbf *tmpBlocksFile) MustReadBlockAt(dst *storage.Block, addr tmpBlockAddr) {
	var buf []byte
	if tbf.f == nil {
		// In-memory fast path: slice directly into the buffer.
		buf = tbf.buf[addr.offset : addr.offset+uint64(addr.size)]
	} else {
		bb := tmpBufPool.Get()
		defer tmpBufPool.Put(bb)
		bb.B = bytesutil.Resize(bb.B, addr.size)
		n, err := tbf.f.ReadAt(bb.B, int64(addr.offset))
		if err != nil {
			logger.Panicf("FATAL: cannot read from %q at %s: %s", tbf.f.Name(), addr, err)
		}
		if n != len(bb.B) {
			logger.Panicf("FATAL: too short number of bytes read at %s; got %d; want %d", addr, n, len(bb.B))
		}
		buf = bb.B
	}
	tail, err := storage.UnmarshalBlock(dst, buf)
	if err != nil {
		logger.Panicf("FATAL: cannot unmarshal data at %s: %s", addr, err)
	}
	if len(tail) > 0 {
		logger.Panicf("FATAL: unexpected non-empty tail left after unmarshaling data at %s; len(tail)=%d", addr, len(tail))
	}
}
|
||||
|
||||
// tmpBufPool holds scratch buffers for marshaling and reading blocks.
var tmpBufPool bytesutil.ByteBufferPool

// MustClose closes and removes the backing file, if any.
//
// Any failure is considered fatal and panics.
func (tbf *tmpBlocksFile) MustClose() {
	if tbf.f == nil {
		return
	}
	fname := tbf.f.Name()

	// Remove the file at first, then close it.
	// This way the OS shouldn't try to flush file contents to storage
	// on close.
	if err := os.Remove(fname); err != nil {
		logger.Panicf("FATAL: cannot remove %q: %s", fname, err)
	}
	if err := tbf.f.Close(); err != nil {
		logger.Panicf("FATAL: cannot close %q: %s", fname, err)
	}
	tbf.f = nil
}
|
||||
153
app/vmselect/netstorage/tmp_blocks_file_test.go
Normal file
153
app/vmselect/netstorage/tmp_blocks_file_test.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package netstorage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
// TestMain prepares a dedicated temporary blocks directory for the package
// tests and removes it after the run.
func TestMain(m *testing.M) {
	rand.Seed(time.Now().UnixNano())
	tmpDir := "TestTmpBlocks"
	InitTmpBlocksDir(tmpDir)
	statusCode := m.Run()
	if err := os.RemoveAll(tmpDir); err != nil {
		logger.Panicf("cannot remove %q: %s", tmpDir, err)
	}
	os.Exit(statusCode)
}
|
||||
|
||||
// TestTmpBlocksFileSerial runs a single write-then-verify round trip.
func TestTmpBlocksFileSerial(t *testing.T) {
	if err := testTmpBlocksFile(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
}

// TestTmpBlocksFileConcurrent runs several round trips in parallel in order
// to exercise the tmpBlocksFile pool and file handling under concurrency.
func TestTmpBlocksFileConcurrent(t *testing.T) {
	concurrency := 3
	ch := make(chan error, concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			ch <- testTmpBlocksFile()
		}()
	}
	for i := 0; i < concurrency; i++ {
		select {
		case err := <-ch:
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
		case <-time.After(30 * time.Second):
			t.Fatalf("timeout")
		}
	}
}
|
||||
|
||||
// testTmpBlocksFile writes randomly generated blocks into a tmpBlocksFile
// at several target sizes (below and above the in-memory limit, so both
// the in-memory and spilled-to-disk paths are covered), then reads them
// back concurrently and verifies they match the originals.
func testTmpBlocksFile() error {
	// createBlock builds a random, marshal-ready block with monotonically
	// increasing timestamps.
	createBlock := func() *storage.Block {
		rowsCount := rand.Intn(8000) + 1
		var timestamps, values []int64
		ts := int64(rand.Intn(1023434))
		for i := 0; i < rowsCount; i++ {
			ts += int64(rand.Intn(1000) + 1)
			timestamps = append(timestamps, ts)
			values = append(values, int64(i*i+rand.Intn(20)))
		}
		tsid := &storage.TSID{
			MetricID: 234211,
		}
		scale := int16(rand.Intn(123))
		precisionBits := uint8(rand.Intn(63) + 1)
		var b storage.Block
		b.Init(tsid, timestamps, values, scale, precisionBits)
		_, _, _ = b.MarshalData(0, 0)
		return &b
	}
	for _, size := range []int{1024, 16 * 1024, maxInmemoryTmpBlocksFile() / 2, 2 * maxInmemoryTmpBlocksFile()} {
		err := func() error {
			tbf := getTmpBlocksFile()
			defer putTmpBlocksFile(tbf)

			// Write blocks until their summary size exceeds `size`.
			var addrs []tmpBlockAddr
			var blocks []*storage.Block
			bb := tmpBufPool.Get()
			defer tmpBufPool.Put(bb)
			for tbf.offset < uint64(size) {
				b := createBlock()
				bb.B = storage.MarshalBlock(bb.B[:0], b)
				addr, err := tbf.WriteBlockData(bb.B)
				if err != nil {
					return fmt.Errorf("cannot write block at offset %d: %s", tbf.offset, err)
				}
				// The returned address must point exactly at the bytes just written.
				if addr.offset+uint64(addr.size) != tbf.offset {
					return fmt.Errorf("unexpected addr=%+v for offset %v", &addr, tbf.offset)
				}
				addrs = append(addrs, addr)
				blocks = append(blocks, b)
			}
			if err := tbf.Finalize(); err != nil {
				return fmt.Errorf("cannot finalize tbf: %s", err)
			}

			// Read blocks in parallel and verify them
			concurrency := 2
			workCh := make(chan int)
			doneCh := make(chan error)
			for i := 0; i < concurrency; i++ {
				go func() {
					doneCh <- func() error {
						var b1 storage.Block
						for idx := range workCh {
							addr := addrs[idx]
							b := blocks[idx]
							if err := b.UnmarshalData(); err != nil {
								return fmt.Errorf("cannot unmarshal data from the original block: %s", err)
							}
							b1.Reset()
							tbf.MustReadBlockAt(&b1, addr)
							if err := b1.UnmarshalData(); err != nil {
								return fmt.Errorf("cannot unmarshal data from tbf: %s", err)
							}
							if b1.RowsCount() != b.RowsCount() {
								return fmt.Errorf("unexpected number of rows in tbf block; got %d; want %d", b1.RowsCount(), b.RowsCount())
							}
							if !reflect.DeepEqual(b1.Timestamps(), b.Timestamps()) {
								return fmt.Errorf("unexpected timestamps; got\n%v\nwant\n%v", b1.Timestamps(), b.Timestamps())
							}
							if !reflect.DeepEqual(b1.Values(), b.Values()) {
								return fmt.Errorf("unexpected values; got\n%v\nwant\n%v", b1.Values(), b.Values())
							}
						}
						return nil
					}()
				}()
			}
			for i := range addrs {
				workCh <- i
			}
			close(workCh)
			for i := 0; i < concurrency; i++ {
				select {
				case err := <-doneCh:
					if err != nil {
						return err
					}
				case <-time.After(time.Second):
					return fmt.Errorf("timeout")
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
11
app/vmselect/prometheus/error_response.qtpl
Normal file
11
app/vmselect/prometheus/error_response.qtpl
Normal file
@@ -0,0 +1,11 @@
|
||||
{% stripspace %}
|
||||
ErrorResponse generates error response for /api/v1/query.
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#format-overview
|
||||
{% func ErrorResponse(statusCode int, err error) %}
|
||||
{
|
||||
"status":"error",
|
||||
"errorType":"{%d statusCode %}",
|
||||
"error": {%q= err.Error() %}
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
61
app/vmselect/prometheus/error_response.qtpl.go
Normal file
61
app/vmselect/prometheus/error_response.qtpl.go
Normal file
@@ -0,0 +1,61 @@
|
||||
// Code generated by qtc from "error_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// ErrorResponse generates error response for /api/v1/query.See https://prometheus.io/docs/prometheus/latest/querying/api/#format-overview
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
func StreamErrorResponse(qw422016 *qt422016.Writer, statusCode int, err error) {
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
qw422016.N().S(`{"status":"error","errorType":"`)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:7
|
||||
qw422016.N().D(statusCode)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:7
|
||||
qw422016.N().S(`","error":`)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:8
|
||||
qw422016.N().Q(err.Error())
|
||||
//line app/vmselect/prometheus/error_response.qtpl:8
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
func WriteErrorResponse(qq422016 qtio422016.Writer, statusCode int, err error) {
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
StreamErrorResponse(qw422016, statusCode, err)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
func ErrorResponse(statusCode int, err error) string {
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
WriteErrorResponse(qb422016, statusCode, err)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
}
|
||||
96
app/vmselect/prometheus/export.qtpl
Normal file
96
app/vmselect/prometheus/export.qtpl
Normal file
@@ -0,0 +1,96 @@
|
||||
{% import (
|
||||
"github.com/valyala/quicktemplate"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
{% func ExportPrometheusLine(rs *netstorage.Result) %}
|
||||
{% if len(rs.Timestamps) == 0 %}{% return %}{% endif %}
|
||||
{% code bb := quicktemplate.AcquireByteBuffer() %}
|
||||
{% code writeprometheusMetricName(bb, &rs.MetricName) %}
|
||||
{% for i, ts := range rs.Timestamps %}
|
||||
{%z= bb.B %}{% space %}
|
||||
{%f= rs.Values[i] %}{% space %}
|
||||
{%d= int(ts) %}{% newline %}
|
||||
{% endfor %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportJSONLine(rs *netstorage.Result) %}
|
||||
{% if len(rs.Timestamps) == 0 %}{% return %}{% endif %}
|
||||
{
|
||||
"metric":{%= metricNameObject(&rs.MetricName) %},
|
||||
"values":[
|
||||
{% if len(rs.Values) > 0 %}
|
||||
{% code values := rs.Values %}
|
||||
{%f= values[0] %}
|
||||
{% code values = values[1:] %}
|
||||
{% for _, v := range values %}
|
||||
,{%f= v %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
],
|
||||
"timestamps":[
|
||||
{% if len(rs.Timestamps) > 0 %}
|
||||
{% code timestamps := rs.Timestamps %}
|
||||
{%d= int(timestamps[0]) %}
|
||||
{% code timestamps = timestamps[1:] %}
|
||||
{% for _, ts := range timestamps %}
|
||||
,{%d= int(ts) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}{% newline %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportPromAPILine(rs *netstorage.Result) %}
|
||||
{
|
||||
"metric": {%= metricNameObject(&rs.MetricName) %},
|
||||
"values": {%= valuesWithTimestamps(rs.Values, rs.Timestamps) %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":{
|
||||
"resultType":"matrix",
|
||||
"result":[
|
||||
{% code bb, ok := <-resultsCh %}
|
||||
{% if ok %}
|
||||
{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% for bb := range resultsCh %}
|
||||
,{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
||||
{% for bb := range resultsCh %}
|
||||
{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfor %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func prometheusMetricName(mn *storage.MetricName) %}
|
||||
{%z= mn.MetricGroup %}
|
||||
{% if len(mn.Tags) > 0 %}
|
||||
{
|
||||
{% code tags := mn.Tags %}
|
||||
{%z= tags[0].Key %}={%qz= tags[0].Value %}
|
||||
{% code tags = tags[1:] %}
|
||||
{% for i := range tags %}
|
||||
{% code tag := &tags[i] %}
|
||||
,{%z= tag.Key %}={%qz= tag.Value %}
|
||||
{% endfor %}
|
||||
}
|
||||
{% endif %}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
385
app/vmselect/prometheus/export.qtpl.go
Normal file
385
app/vmselect/prometheus/export.qtpl.go
Normal file
@@ -0,0 +1,385 @@
|
||||
// Code generated by qtc from "export.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:9
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:9
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:9
|
||||
func StreamExportPrometheusLine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:10
|
||||
if len(rs.Timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:10
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:11
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:12
|
||||
writeprometheusMetricName(bb, &rs.MetricName)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:13
|
||||
for i, ts := range rs.Timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:14
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:14
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:15
|
||||
qw422016.N().F(rs.Values[i])
|
||||
//line app/vmselect/prometheus/export.qtpl:15
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:16
|
||||
qw422016.N().D(int(ts))
|
||||
//line app/vmselect/prometheus/export.qtpl:16
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/export.qtpl:17
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:18
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
func WriteExportPrometheusLine(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
StreamExportPrometheusLine(qw422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
func ExportPrometheusLine(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
WriteExportPrometheusLine(qb422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:21
|
||||
func StreamExportJSONLine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
if len(rs.Timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:24
|
||||
streammetricNameObject(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/export.qtpl:24
|
||||
qw422016.N().S(`,"values":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:26
|
||||
if len(rs.Values) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:27
|
||||
values := rs.Values
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:28
|
||||
qw422016.N().F(values[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:29
|
||||
values = values[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:30
|
||||
for _, v := range values {
|
||||
//line app/vmselect/prometheus/export.qtpl:30
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:31
|
||||
qw422016.N().F(v)
|
||||
//line app/vmselect/prometheus/export.qtpl:32
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:33
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:33
|
||||
qw422016.N().S(`],"timestamps":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:36
|
||||
if len(rs.Timestamps) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:37
|
||||
timestamps := rs.Timestamps
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:38
|
||||
qw422016.N().D(int(timestamps[0]))
|
||||
//line app/vmselect/prometheus/export.qtpl:39
|
||||
timestamps = timestamps[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:40
|
||||
for _, ts := range timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:40
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:41
|
||||
qw422016.N().D(int(ts))
|
||||
//line app/vmselect/prometheus/export.qtpl:42
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:43
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:43
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:45
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
func WriteExportJSONLine(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
StreamExportJSONLine(qw422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
func ExportJSONLine(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
WriteExportJSONLine(qb422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:48
|
||||
func StreamExportPromAPILine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:48
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:50
|
||||
streammetricNameObject(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/export.qtpl:50
|
||||
qw422016.N().S(`,"values":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:51
|
||||
streamvaluesWithTimestamps(qw422016, rs.Values, rs.Timestamps)
|
||||
//line app/vmselect/prometheus/export.qtpl:51
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
func WriteExportPromAPILine(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
StreamExportPromAPILine(qw422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
func ExportPromAPILine(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
WriteExportPromAPILine(qb422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:55
|
||||
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:55
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:61
|
||||
bb, ok := <-resultsCh
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:62
|
||||
if ok {
|
||||
//line app/vmselect/prometheus/export.qtpl:63
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:64
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:65
|
||||
for bb := range resultsCh {
|
||||
//line app/vmselect/prometheus/export.qtpl:65
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:66
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:67
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:68
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:69
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:69
|
||||
qw422016.N().S(`]}}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
StreamExportPromAPIResponse(qw422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
WriteExportPromAPIResponse(qb422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:75
|
||||
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:76
|
||||
for bb := range resultsCh {
|
||||
//line app/vmselect/prometheus/export.qtpl:77
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:78
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:79
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
StreamExportStdResponse(qw422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
WriteExportStdResponse(qb422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/export.qtpl:83
|
||||
qw422016.N().Z(mn.MetricGroup)
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
if len(mn.Tags) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vmselect/prometheus/export.qtpl:86
|
||||
tags := mn.Tags
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
qw422016.N().Z(tags[0].Key)
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
qw422016.N().QZ(tags[0].Value)
|
||||
//line app/vmselect/prometheus/export.qtpl:88
|
||||
tags = tags[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:89
|
||||
for i := range tags {
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
tag := &tags[i]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().Z(tag.Key)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().QZ(tag.Value)
|
||||
//line app/vmselect/prometheus/export.qtpl:92
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:92
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
streamprometheusMetricName(qw422016, mn)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
func prometheusMetricName(mn *storage.MetricName) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
writeprometheusMetricName(qb422016, mn)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
}
|
||||
16
app/vmselect/prometheus/federate.qtpl
Normal file
16
app/vmselect/prometheus/federate.qtpl
Normal file
@@ -0,0 +1,16 @@
|
||||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
// Federate writes rs in /federate format.
|
||||
// See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
{% func Federate(rs *netstorage.Result) %}
|
||||
{% if len(rs.Timestamps) == 0 || len(rs.Values) == 0 %}{% return %}{% endif %}
|
||||
{%= prometheusMetricName(&rs.MetricName) %}{% space %}
|
||||
{%f= rs.Values[len(rs.Values)-1] %}{% space %}
|
||||
{%d= int(rs.Timestamps[len(rs.Timestamps)-1]) %}{% newline %}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
75
app/vmselect/prometheus/federate.qtpl.go
Normal file
75
app/vmselect/prometheus/federate.qtpl.go
Normal file
@@ -0,0 +1,75 @@
|
||||
// Code generated by qtc from "federate.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// Federate writes rs in /federate format.// See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
func StreamFederate(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
if len(rs.Timestamps) == 0 || len(rs.Values) == 0 {
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
return
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
streamprometheusMetricName(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
qw422016.N().F(rs.Values[len(rs.Values)-1])
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().D(int(rs.Timestamps[len(rs.Timestamps)-1]))
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
func WriteFederate(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
StreamFederate(qw422016, rs)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
func Federate(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
WriteFederate(qb422016, rs)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
}
|
||||
15
app/vmselect/prometheus/label_values_response.qtpl
Normal file
15
app/vmselect/prometheus/label_values_response.qtpl
Normal file
@@ -0,0 +1,15 @@
|
||||
{% stripspace %}
|
||||
LabelValuesResponse generates response for /api/v1/label/<labelName>/values .
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||
{% func LabelValuesResponse(labelValues []string) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":[
|
||||
{% for i, labelValue := range labelValues %}
|
||||
{%q= labelValue %}
|
||||
{% if i+1 < len(labelValues) %},{% endif %}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
67
app/vmselect/prometheus/label_values_response.qtpl.go
Normal file
67
app/vmselect/prometheus/label_values_response.qtpl.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Code generated by qtc from "label_values_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// LabelValuesResponse generates response for /api/v1/label/<labelName>/values .See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
func StreamLabelValuesResponse(qw422016 *qt422016.Writer, labelValues []string) {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:8
|
||||
for i, labelValue := range labelValues {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:9
|
||||
qw422016.N().Q(labelValue)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:10
|
||||
if i+1 < len(labelValues) {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:10
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:11
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
func WriteLabelValuesResponse(qq422016 qtio422016.Writer, labelValues []string) {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
StreamLabelValuesResponse(qw422016, labelValues)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
func LabelValuesResponse(labelValues []string) string {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
WriteLabelValuesResponse(qb422016, labelValues)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
}
|
||||
17
app/vmselect/prometheus/labels_count_response.qtpl
Normal file
17
app/vmselect/prometheus/labels_count_response.qtpl
Normal file
@@ -0,0 +1,17 @@
|
||||
{% import "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" %}
|
||||
|
||||
{% stripspace %}
|
||||
LabelsCountResponse generates response for /api/v1/labels/count .
|
||||
{% func LabelsCountResponse(labelEntries []storage.TagEntry) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":{
|
||||
{% for i, e := range labelEntries %}
|
||||
{%q= e.Key %}:{%d= len(e.Values) %}
|
||||
{% if i+1 < len(labelEntries) %},{% endif %}
|
||||
{% endfor %}
|
||||
}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
74
app/vmselect/prometheus/labels_count_response.qtpl.go
Normal file
74
app/vmselect/prometheus/labels_count_response.qtpl.go
Normal file
@@ -0,0 +1,74 @@
|
||||
// Code generated by qtc from "labels_count_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:1
|
||||
import "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
|
||||
// LabelsCountResponse generates response for /api/v1/labels/count .
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
func StreamLabelsCountResponse(qw422016 *qt422016.Writer, labelEntries []storage.TagEntry) {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:5
|
||||
qw422016.N().S(`{"status":"success","data":{`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:9
|
||||
for i, e := range labelEntries {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:10
|
||||
qw422016.N().Q(e.Key)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:10
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:10
|
||||
qw422016.N().D(len(e.Values))
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:11
|
||||
if i+1 < len(labelEntries) {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:11
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:12
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:12
|
||||
qw422016.N().S(`}}`)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
func WriteLabelsCountResponse(qq422016 qtio422016.Writer, labelEntries []storage.TagEntry) {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
StreamLabelsCountResponse(qw422016, labelEntries)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
func LabelsCountResponse(labelEntries []storage.TagEntry) string {
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
WriteLabelsCountResponse(qb422016, labelEntries)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/labels_count_response.qtpl:15
|
||||
}
|
||||
15
app/vmselect/prometheus/labels_response.qtpl
Normal file
15
app/vmselect/prometheus/labels_response.qtpl
Normal file
@@ -0,0 +1,15 @@
|
||||
{% stripspace %}
|
||||
LabelsResponse generates response for /api/v1/labels .
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
|
||||
{% func LabelsResponse(labels []string) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":[
|
||||
{% for i, label := range labels %}
|
||||
{%q= label %}
|
||||
{% if i+1 < len(labels) %},{% endif %}
|
||||
{% endfor %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
67
app/vmselect/prometheus/labels_response.qtpl.go
Normal file
67
app/vmselect/prometheus/labels_response.qtpl.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Code generated by qtc from "labels_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// LabelsResponse generates response for /api/v1/labels .See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
func StreamLabelsResponse(qw422016 *qt422016.Writer, labels []string) {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:8
|
||||
for i, label := range labels {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:9
|
||||
qw422016.N().Q(label)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:10
|
||||
if i+1 < len(labels) {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:10
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:11
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
func WriteLabelsResponse(qq422016 qtio422016.Writer, labels []string) {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
StreamLabelsResponse(qw422016, labels)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
func LabelsResponse(labels []string) string {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
WriteLabelsResponse(qb422016, labels)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
}
|
||||
766
app/vmselect/prometheus/prometheus.go
Normal file
766
app/vmselect/prometheus/prometheus.go
Normal file
@@ -0,0 +1,766 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
var (
|
||||
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum time for search query execution")
|
||||
maxQueryLen = flag.Int("search.maxQueryLen", 16*1024, "The maximum search query length in bytes")
|
||||
)
|
||||
|
||||
// Default step used if not set.
|
||||
const defaultStep = 5 * 60 * 1000
|
||||
|
||||
// Latency for data processing pipeline, i.e. the time between data is ingested
|
||||
// into the system and the time it becomes visible to search.
|
||||
const latencyOffset = 60 * 1000
|
||||
|
||||
// FederateHandler implements /federate . See https://prometheus.io/docs/prometheus/latest/federation/
//
// It streams the latest sample of every series matching the `match[]`
// selectors in Prometheus text exposition format.
func FederateHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	ct := currentTime()
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse request form values: %s", err)
	}
	// At least one series selector is mandatory, as in Prometheus federation.
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		return fmt.Errorf("missing `match[]` arg")
	}
	// max_lookback bounds how far back the default `start` goes from now.
	maxLookback, err := getDuration(r, "max_lookback", defaultStep)
	if err != nil {
		return err
	}
	start, err := getTime(r, "start", ct-maxLookback)
	if err != nil {
		return err
	}
	end, err := getTime(r, "end", ct)
	if err != nil {
		return err
	}
	deadline := getDeadline(r)
	if start >= end {
		// Guarantee a non-empty search time range.
		start = end - defaultStep
	}
	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return err
	}
	sq := &storage.SearchQuery{
		MinTimestamp: start,
		MaxTimestamp: end,
		TagFilterss:  tagFilterss,
	}
	rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
	if err != nil {
		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
	}

	// Render each series concurrently; rendered buffers are handed back to
	// this goroutine via resultsCh, which is closed once all workers finish.
	resultsCh := make(chan *quicktemplate.ByteBuffer)
	doneCh := make(chan error)
	go func() {
		err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
			bb := quicktemplate.AcquireByteBuffer()
			WriteFederate(bb, rs)
			resultsCh <- bb
		})
		close(resultsCh)
		doneCh <- err
	}()

	w.Header().Set("Content-Type", "text/plain")
	for bb := range resultsCh {
		w.Write(bb.B)
		quicktemplate.ReleaseByteBuffer(bb)
	}

	// The RunParallel error is read only after resultsCh is fully drained,
	// so the producer goroutine can never block forever.
	err = <-doneCh
	if err != nil {
		return fmt.Errorf("error during data fetching: %s", err)
	}
	federateDuration.UpdateDuration(startTime)
	return nil
}

// federateDuration tracks the duration of /federate requests.
var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/federate"}`)
|
||||
|
||||
// ExportHandler exports data in raw format from /api/v1/export.
//
// Supported `format` values: "prometheus", "promapi"; anything else falls
// back to JSON lines (see exportHandler).
func ExportHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	ct := currentTime()
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse request form values: %s", err)
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		// Maintain backwards compatibility
		match := r.FormValue("match")
		if len(match) == 0 {
			return fmt.Errorf("missing `match[]` arg")
		}
		matches = []string{match}
	}
	// `start` defaults to 0 here, i.e. export from the beginning of time.
	start, err := getTime(r, "start", 0)
	if err != nil {
		return err
	}
	end, err := getTime(r, "end", ct)
	if err != nil {
		return err
	}
	format := r.FormValue("format")
	deadline := getDeadline(r)
	if start >= end {
		// Guarantee a non-empty search time range.
		start = end - defaultStep
	}
	if err := exportHandler(w, matches, start, end, format, deadline); err != nil {
		return err
	}
	exportDuration.UpdateDuration(startTime)
	return nil
}

// exportDuration tracks the duration of /api/v1/export requests.
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
|
||||
|
||||
// exportHandler streams all the series matching `matches` on [start..end]
// to w. The `format` argument selects the per-line and whole-response
// writers: "prometheus" (text exposition), "promapi" (Prometheus API JSON)
// or JSON lines by default.
func exportHandler(w http.ResponseWriter, matches []string, start, end int64, format string, deadline netstorage.Deadline) error {
	writeResponseFunc := WriteExportStdResponse
	writeLineFunc := WriteExportJSONLine
	contentType := "application/json"
	if format == "prometheus" {
		contentType = "text/plain"
		writeLineFunc = WriteExportPrometheusLine
	} else if format == "promapi" {
		writeResponseFunc = WriteExportPromAPIResponse
		writeLineFunc = WriteExportPromAPILine
	}

	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return err
	}
	sq := &storage.SearchQuery{
		MinTimestamp: start,
		MaxTimestamp: end,
		TagFilterss:  tagFilterss,
	}
	rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
	if err != nil {
		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
	}

	// The channel is buffered by GOMAXPROCS so render workers aren't blocked
	// on a slow HTTP client for every single buffer.
	resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))
	doneCh := make(chan error)
	go func() {
		err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
			bb := quicktemplate.AcquireByteBuffer()
			writeLineFunc(bb, rs)
			resultsCh <- bb
		})
		close(resultsCh)
		doneCh <- err
	}()

	w.Header().Set("Content-Type", contentType)
	writeResponseFunc(w, resultsCh)

	// Consume all the data from resultsCh in the event writeResponseFunc
	// fails to consume all the data.
	for bb := range resultsCh {
		quicktemplate.ReleaseByteBuffer(bb)
	}
	err = <-doneCh
	if err != nil {
		return fmt.Errorf("error during data fetching: %s", err)
	}
	return nil
}
|
||||
|
||||
// DeleteHandler processes /api/v1/admin/tsdb/delete_series prometheus API request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
//
// Unlike Prometheus, time-range-limited deletion isn't supported — the
// handler rejects requests carrying `start` or `end`.
func DeleteHandler(r *http.Request) error {
	startTime := time.Now()
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse request form values: %s", err)
	}
	if r.FormValue("start") != "" || r.FormValue("end") != "" {
		return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		return fmt.Errorf("missing `match[]` arg")
	}
	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return err
	}
	sq := &storage.SearchQuery{
		TagFilterss: tagFilterss,
	}
	deletedCount, err := netstorage.DeleteSeries(sq)
	if err != nil {
		return fmt.Errorf("cannot delete time series matching %q: %s", matches, err)
	}
	if deletedCount > 0 {
		// Cached rollup results may reference the deleted series;
		// drop the cache so they don't resurface in query results.
		promql.ResetRollupResultCache()
	}
	deleteDuration.UpdateDuration(startTime)
	return nil
}

// deleteDuration tracks the duration of delete_series requests.
var deleteDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/admin/tsdb/delete_series"}`)
|
||||
|
||||
// LabelValuesHandler processes /api/v1/label/<labelName>/values request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
func LabelValuesHandler(labelName string, w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	deadline := getDeadline(r)

	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse form values: %s", err)
	}
	var labelValues []string
	if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
		// Fast path: no filters, ask the index for all values of the label.
		var err error
		labelValues, err = netstorage.GetLabelValues(labelName, deadline)
		if err != nil {
			return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
		}
	} else {
		// Extended functionality that allows filtering by label filters and time range
		// i.e. /api/v1/label/foo/values?match[]=foobar{baz="abc"}&start=...&end=...
		// is equivalent to `label_values(foobar{baz="abc"}, foo)` call on the selected
		// time range in Grafana templating.
		matches := r.Form["match[]"]
		if len(matches) == 0 {
			// Only start/end were given; match any series having the label.
			matches = []string{fmt.Sprintf("{%s!=''}", labelName)}
		}
		ct := currentTime()
		end, err := getTime(r, "end", ct)
		if err != nil {
			return err
		}
		start, err := getTime(r, "start", end-defaultStep)
		if err != nil {
			return err
		}
		labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
		if err != nil {
			return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %s", labelName, matches, start, end, err)
		}
	}

	w.Header().Set("Content-Type", "application/json")
	WriteLabelValuesResponse(w, labelValues)
	labelValuesDuration.UpdateDuration(startTime)
	return nil
}
|
||||
|
||||
// labelValuesWithMatches returns the sorted, deduplicated values of
// labelName across all series matching `matches` on [start..end].
func labelValuesWithMatches(labelName string, matches []string, start, end int64, deadline netstorage.Deadline) ([]string, error) {
	if len(matches) == 0 {
		// The caller (LabelValuesHandler) always supplies a selector.
		logger.Panicf("BUG: matches must be non-empty")
	}
	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return nil, err
	}
	if start >= end {
		// Guarantee a non-empty search time range.
		start = end - defaultStep
	}
	sq := &storage.SearchQuery{
		MinTimestamp: start,
		MaxTimestamp: end,
		TagFilterss:  tagFilterss,
	}
	rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
	if err != nil {
		return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
	}

	// Collect unique label values; the map is shared across parallel
	// workers, hence the mutex.
	m := make(map[string]struct{})
	var mLock sync.Mutex
	err = rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
		labelValue := rs.MetricName.GetTagValue(labelName)
		if len(labelValue) == 0 {
			return
		}
		mLock.Lock()
		m[string(labelValue)] = struct{}{}
		mLock.Unlock()
	})
	if err != nil {
		return nil, fmt.Errorf("error when data fetching: %s", err)
	}

	labelValues := make([]string, 0, len(m))
	for labelValue := range m {
		labelValues = append(labelValues, labelValue)
	}
	sort.Strings(labelValues)
	return labelValues, nil
}

// labelValuesDuration tracks the duration of label values requests.
var labelValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/label/{}/values"}`)
|
||||
|
||||
// LabelsCountHandler processes /api/v1/labels/count request.
//
// It responds with a JSON object mapping each label name to the number of
// its unique values.
func LabelsCountHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	deadline := getDeadline(r)
	labelEntries, err := netstorage.GetLabelEntries(deadline)
	if err != nil {
		return fmt.Errorf(`cannot obtain label entries: %s`, err)
	}

	w.Header().Set("Content-Type", "application/json")
	WriteLabelsCountResponse(w, labelEntries)
	labelsCountDuration.UpdateDuration(startTime)
	return nil
}

// labelsCountDuration tracks the duration of /api/v1/labels/count requests.
var labelsCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/labels/count"}`)
|
||||
|
||||
// LabelsHandler processes /api/v1/labels request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
func LabelsHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	deadline := getDeadline(r)
	labels, err := netstorage.GetLabels(deadline)
	if err != nil {
		return fmt.Errorf("cannot obtain labels: %s", err)
	}

	w.Header().Set("Content-Type", "application/json")
	WriteLabelsResponse(w, labels)
	labelsDuration.UpdateDuration(startTime)
	return nil
}

// labelsDuration tracks the duration of /api/v1/labels requests.
var labelsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/labels"}`)
|
||||
|
||||
// SeriesCountHandler processes /api/v1/series/count request.
//
// It responds with the total number of time series in the storage.
func SeriesCountHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	deadline := getDeadline(r)
	n, err := netstorage.GetSeriesCount(deadline)
	if err != nil {
		return fmt.Errorf("cannot obtain series count: %s", err)
	}
	w.Header().Set("Content-Type", "application/json")
	WriteSeriesCountResponse(w, n)
	seriesCountDuration.UpdateDuration(startTime)
	return nil
}

// seriesCountDuration tracks the duration of /api/v1/series/count requests.
var seriesCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/series/count"}`)
|
||||
|
||||
// SeriesHandler processes /api/v1/series request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
func SeriesHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	ct := currentTime()

	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse form values: %s", err)
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		return fmt.Errorf("missing `match[]` arg")
	}
	end, err := getTime(r, "end", ct)
	if err != nil {
		return err
	}
	// Do not set start to minTimeMsecs by default as Prometheus does,
	// since this leads to fetching and scanning all the data from the storage,
	// which can take a lot of time for big storages.
	// It is better setting start as end-defaultStep by default.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/91
	start, err := getTime(r, "start", end-defaultStep)
	if err != nil {
		return err
	}
	deadline := getDeadline(r)

	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return err
	}
	if start >= end {
		// Guarantee a non-empty search time range.
		start = end - defaultStep
	}
	sq := &storage.SearchQuery{
		MinTimestamp: start,
		MaxTimestamp: end,
		TagFilterss:  tagFilterss,
	}
	rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
	if err != nil {
		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
	}

	// Render each matching metric name concurrently; buffers flow back to
	// this goroutine via resultsCh.
	resultsCh := make(chan *quicktemplate.ByteBuffer)
	doneCh := make(chan error)
	go func() {
		err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
			bb := quicktemplate.AcquireByteBuffer()
			writemetricNameObject(bb, &rs.MetricName)
			resultsCh <- bb
		})
		close(resultsCh)
		doneCh <- err
	}()

	w.Header().Set("Content-Type", "application/json")
	WriteSeriesResponse(w, resultsCh)

	// Consume all the data from resultsCh in the event WriteSeriesResponse
	// fails to consume all the data.
	for bb := range resultsCh {
		quicktemplate.ReleaseByteBuffer(bb)
	}
	err = <-doneCh
	if err != nil {
		return fmt.Errorf("error during data fetching: %s", err)
	}
	seriesDuration.UpdateDuration(startTime)
	return nil
}

// seriesDuration tracks the duration of /api/v1/series requests.
var seriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/series"}`)
|
||||
|
||||
// QueryHandler processes /api/v1/query request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
func QueryHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	ct := currentTime()

	query := r.FormValue("query")
	if len(query) == 0 {
		return fmt.Errorf("missing `query` arg")
	}
	start, err := getTime(r, "time", ct)
	if err != nil {
		return err
	}
	step, err := getDuration(r, "step", latencyOffset)
	if err != nil {
		return err
	}
	deadline := getDeadline(r)

	if len(query) > *maxQueryLen {
		return fmt.Errorf(`too long query; got %d bytes; mustn't exceed %d bytes`, len(query), *maxQueryLen)
	}
	if ct-start < latencyOffset {
		// Evaluate slightly in the past so not-yet-visible samples
		// near `now` don't produce gaps.
		start -= latencyOffset
	}
	// Fast path: a plain series selector with a rollup window such as
	// `foo[5m] offset 1h` is answered by the raw-data export code path
	// instead of the PromQL engine.
	if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
		var window int64
		if len(windowStr) > 0 {
			var err error
			window, err = promql.DurationValue(windowStr, step)
			if err != nil {
				return err
			}
		}
		var offset int64
		if len(offsetStr) > 0 {
			var err error
			offset, err = promql.DurationValue(offsetStr, step)
			if err != nil {
				return err
			}
		}
		// Shift by `offset`, then export the [end-window .. end] range.
		start -= offset
		end := start
		start = end - window
		if err := exportHandler(w, []string{childQuery}, start, end, "promapi", deadline); err != nil {
			return err
		}
		queryDuration.UpdateDuration(startTime)
		return nil
	}

	// Instant query: a range evaluation collapsed to a single point
	// (Start == End).
	ec := promql.EvalConfig{
		Start:    start,
		End:      start,
		Step:     step,
		Deadline: deadline,
	}
	result, err := promql.Exec(&ec, query, true)
	if err != nil {
		return fmt.Errorf("cannot execute %q: %s", query, err)
	}

	w.Header().Set("Content-Type", "application/json")
	WriteQueryResponse(w, result)
	queryDuration.UpdateDuration(startTime)
	return nil
}

// queryDuration tracks the duration of /api/v1/query requests.
var queryDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/query"}`)
|
||||
|
||||
// QueryRangeHandler processes /api/v1/query_range request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
func QueryRangeHandler(w http.ResponseWriter, r *http.Request) error {
	startTime := time.Now()
	ct := currentTime()

	query := r.FormValue("query")
	if len(query) == 0 {
		return fmt.Errorf("missing `query` arg")
	}
	start, err := getTime(r, "start", ct-defaultStep)
	if err != nil {
		return err
	}
	end, err := getTime(r, "end", ct)
	if err != nil {
		return err
	}
	step, err := getDuration(r, "step", defaultStep)
	if err != nil {
		return err
	}
	deadline := getDeadline(r)
	mayCache := !getBool(r, "nocache")

	// Validate input args.
	if len(query) > *maxQueryLen {
		return fmt.Errorf(`too long query; got %d bytes; mustn't exceed %d bytes`, len(query), *maxQueryLen)
	}
	if start > end {
		start = end
	}
	if err := promql.ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
		return err
	}
	if mayCache {
		// Align the range to step boundaries so results are cacheable.
		start, end = promql.AdjustStartEnd(start, end, step)
	}

	ec := promql.EvalConfig{
		Start:    start,
		End:      end,
		Step:     step,
		Deadline: deadline,
		MayCache: mayCache,
	}
	result, err := promql.Exec(&ec, query, false)
	if err != nil {
		return fmt.Errorf("cannot execute %q: %s", query, err)
	}
	if ct-end < latencyOffset {
		// The tail of the range overlaps with the ingestion latency window,
		// where the freshest points may be incomplete.
		result = adjustLastPoints(result)
	}

	// Remove NaN values as Prometheus does.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153
	removeNaNValuesInplace(result)

	w.Header().Set("Content-Type", "application/json")
	WriteQueryRangeResponse(w, result)
	queryRangeDuration.UpdateDuration(startTime)
	return nil
}
|
||||
|
||||
func removeNaNValuesInplace(tss []netstorage.Result) {
|
||||
for i := range tss {
|
||||
ts := &tss[i]
|
||||
hasNaNs := false
|
||||
for _, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
hasNaNs = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasNaNs {
|
||||
// Fast path: nothing to remove.
|
||||
continue
|
||||
}
|
||||
|
||||
// Slow path: remove NaNs.
|
||||
srcTimestamps := ts.Timestamps
|
||||
dstValues := ts.Values[:0]
|
||||
dstTimestamps := ts.Timestamps[:0]
|
||||
for j, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues = append(dstValues, v)
|
||||
dstTimestamps = append(dstTimestamps, srcTimestamps[j])
|
||||
}
|
||||
ts.Values = dstValues
|
||||
ts.Timestamps = dstTimestamps
|
||||
}
|
||||
}
|
||||
|
||||
var queryRangeDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/query_range"}`)
|
||||
|
||||
// adjustLastPoints substitutes the last point values with the previous
// point values, since the last points may contain garbage.
//
// Returns nil when tss is empty or every series is all-NaN.
func adjustLastPoints(tss []netstorage.Result) []netstorage.Result {
	if len(tss) == 0 {
		return nil
	}

	// Search for the last non-NaN value across all the timeseries.
	lastNonNaNIdx := -1
	for i := range tss {
		values := tss[i].Values
		j := len(values) - 1
		for j >= 0 && math.IsNaN(values[j]) {
			j--
		}
		if j > lastNonNaNIdx {
			lastNonNaNIdx = j
		}
	}
	if lastNonNaNIdx == -1 {
		// All timeseries contain only NaNs.
		return nil
	}

	// Substitute the last two values starting from lastNonNaNIdx
	// with the previous values for each timeseries.
	for i := range tss {
		values := tss[i].Values
		for j := 0; j < 2; j++ {
			idx := lastNonNaNIdx + j
			// Skip when there is nothing to copy from: idx has no
			// predecessor, is past the end, or the predecessor is NaN.
			if idx <= 0 || idx >= len(values) || math.IsNaN(values[idx-1]) {
				continue
			}
			values[idx] = values[idx-1]
		}
	}
	return tss
}
|
||||
|
||||
func getTime(r *http.Request, argKey string, defaultValue int64) (int64, error) {
|
||||
argValue := r.FormValue(argKey)
|
||||
if len(argValue) == 0 {
|
||||
return defaultValue, nil
|
||||
}
|
||||
secs, err := strconv.ParseFloat(argValue, 64)
|
||||
if err != nil {
|
||||
// Try parsing string format
|
||||
t, err := time.Parse(time.RFC3339, argValue)
|
||||
if err != nil {
|
||||
// Handle Prometheus'-provided minTime and maxTime.
|
||||
// See https://github.com/prometheus/client_golang/issues/614
|
||||
switch argValue {
|
||||
case prometheusMinTimeFormatted:
|
||||
return minTimeMsecs, nil
|
||||
case prometheusMaxTimeFormatted:
|
||||
return maxTimeMsecs, nil
|
||||
}
|
||||
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
|
||||
}
|
||||
secs = float64(t.UnixNano()) / 1e9
|
||||
}
|
||||
msecs := int64(secs * 1e3)
|
||||
if msecs < minTimeMsecs {
|
||||
msecs = 0
|
||||
}
|
||||
if msecs > maxTimeMsecs {
|
||||
msecs = maxTimeMsecs
|
||||
}
|
||||
return msecs, nil
|
||||
}
|
||||
|
||||
var (
|
||||
// These constants were obtained from https://github.com/prometheus/prometheus/blob/91d7175eaac18b00e370965f3a8186cc40bf9f55/web/api/v1/api.go#L442
|
||||
// See https://github.com/prometheus/client_golang/issues/614 for details.
|
||||
prometheusMinTimeFormatted = time.Unix(math.MinInt64/1000+62135596801, 0).UTC().Format(time.RFC3339Nano)
|
||||
prometheusMaxTimeFormatted = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC().Format(time.RFC3339Nano)
|
||||
)
|
||||
|
||||
const (
|
||||
// These values prevent from overflow when storing msec-precision time in int64.
|
||||
minTimeMsecs = 0 // use 0 instead of `int64(-1<<63) / 1e6` because the storage engine doesn't actually support negative time
|
||||
maxTimeMsecs = int64(1<<63-1) / 1e6
|
||||
)
|
||||
|
||||
func getDuration(r *http.Request, argKey string, defaultValue int64) (int64, error) {
|
||||
argValue := r.FormValue(argKey)
|
||||
if len(argValue) == 0 {
|
||||
return defaultValue, nil
|
||||
}
|
||||
secs, err := strconv.ParseFloat(argValue, 64)
|
||||
if err != nil {
|
||||
// Try parsing string format
|
||||
d, err := time.ParseDuration(argValue)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
|
||||
}
|
||||
secs = d.Seconds()
|
||||
}
|
||||
msecs := int64(secs * 1e3)
|
||||
if msecs <= 0 || msecs > maxDurationMsecs {
|
||||
return 0, fmt.Errorf("%q=%dms is out of allowed range [%d ... %d]", argKey, msecs, 0, int64(maxDurationMsecs))
|
||||
}
|
||||
return msecs, nil
|
||||
}
|
||||
|
||||
const maxDurationMsecs = 100 * 365 * 24 * 3600 * 1000
|
||||
|
||||
func getDeadline(r *http.Request) netstorage.Deadline {
|
||||
d, err := getDuration(r, "timeout", 0)
|
||||
if err != nil {
|
||||
d = 0
|
||||
}
|
||||
dMax := int64(maxQueryDuration.Seconds() * 1e3)
|
||||
if d <= 0 || d > dMax {
|
||||
d = dMax
|
||||
}
|
||||
timeout := time.Duration(d) * time.Millisecond
|
||||
return netstorage.NewDeadline(timeout)
|
||||
}
|
||||
|
||||
func getBool(r *http.Request, argKey string) bool {
|
||||
argValue := r.FormValue(argKey)
|
||||
switch strings.ToLower(argValue) {
|
||||
case "", "0", "f", "false", "no":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func currentTime() int64 {
|
||||
return int64(time.Now().UTC().Unix()) * 1e3
|
||||
}
|
||||
|
||||
func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
|
||||
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
|
||||
for _, match := range matches {
|
||||
tagFilters, err := promql.ParseMetricSelector(match)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse %q: %s", match, err)
|
||||
}
|
||||
tagFilterss = append(tagFilterss, tagFilters)
|
||||
}
|
||||
return tagFilterss, nil
|
||||
}
|
||||
115
app/vmselect/prometheus/prometheus_test.go
Normal file
115
app/vmselect/prometheus/prometheus_test.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
func TestRemoveNaNValuesInplace(t *testing.T) {
|
||||
f := func(tss []netstorage.Result, tssExpected []netstorage.Result) {
|
||||
t.Helper()
|
||||
removeNaNValuesInplace(tss)
|
||||
if !reflect.DeepEqual(tss, tssExpected) {
|
||||
t.Fatalf("unexpected result; got %v; want %v", tss, tssExpected)
|
||||
}
|
||||
}
|
||||
|
||||
nan := math.NaN()
|
||||
|
||||
f(nil, nil)
|
||||
f([]netstorage.Result{
|
||||
{
|
||||
Timestamps: []int64{100, 200, 300},
|
||||
Values: []float64{1, 2, 3},
|
||||
},
|
||||
{
|
||||
Timestamps: []int64{100, 200, 300, 400},
|
||||
Values: []float64{nan, nan, 3, nan},
|
||||
},
|
||||
}, []netstorage.Result{
|
||||
{
|
||||
Timestamps: []int64{100, 200, 300},
|
||||
Values: []float64{1, 2, 3},
|
||||
},
|
||||
{
|
||||
Timestamps: []int64{300},
|
||||
Values: []float64{3},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetTimeSuccess(t *testing.T) {
|
||||
f := func(s string, timestampExpected int64) {
|
||||
t.Helper()
|
||||
urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
|
||||
r, err := http.NewRequest("GET", urlStr, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in NewRequest: %s", err)
|
||||
}
|
||||
|
||||
// Verify defaultValue
|
||||
ts, err := getTime(r, "foo", 123)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when obtaining default time from getTime(%q): %s", s, err)
|
||||
}
|
||||
if ts != 123 {
|
||||
t.Fatalf("unexpected default value for getTime(%q); got %d; want %d", s, ts, 123)
|
||||
}
|
||||
|
||||
// Verify timestampExpected
|
||||
ts, err = getTime(r, "s", 123)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in getTime(%q): %s", s, err)
|
||||
}
|
||||
if ts != timestampExpected {
|
||||
t.Fatalf("unexpected timestamp for getTime(%q); got %d; want %d", s, ts, timestampExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("2019-07-07T20:01:02Z", 1562529662000)
|
||||
f("2019-07-07T20:47:40+03:00", 1562521660000)
|
||||
f("-292273086-05-16T16:47:06Z", minTimeMsecs)
|
||||
f("292277025-08-18T07:12:54.999999999Z", maxTimeMsecs)
|
||||
f("1562529662.324", 1562529662324)
|
||||
f("-9223372036.854", minTimeMsecs)
|
||||
f("-9223372036.855", minTimeMsecs)
|
||||
f("9223372036.855", maxTimeMsecs)
|
||||
}
|
||||
|
||||
func TestGetTimeError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
|
||||
r, err := http.NewRequest("GET", urlStr, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in NewRequest: %s", err)
|
||||
}
|
||||
|
||||
// Verify defaultValue
|
||||
ts, err := getTime(r, "foo", 123)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when obtaining default time from getTime(%q): %s", s, err)
|
||||
}
|
||||
if ts != 123 {
|
||||
t.Fatalf("unexpected default value for getTime(%q); got %d; want %d", s, ts, 123)
|
||||
}
|
||||
|
||||
// Verify timestampExpected
|
||||
_, err = getTime(r, "s", 123)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error in getTime(%q)", s)
|
||||
}
|
||||
}
|
||||
|
||||
f("foo")
|
||||
f("2019-07-07T20:01:02Zisdf")
|
||||
f("2019-07-07T20:47:40+03:00123")
|
||||
f("-292273086-05-16T16:47:07Z")
|
||||
f("292277025-08-18T07:12:54.999999998Z")
|
||||
}
|
||||
33
app/vmselect/prometheus/query_range_response.qtpl
Normal file
33
app/vmselect/prometheus/query_range_response.qtpl
Normal file
@@ -0,0 +1,33 @@
|
||||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
QueryRangeResponse generates response for /api/v1/query_range.
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
|
||||
{% func QueryRangeResponse(rs []netstorage.Result) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":{
|
||||
"resultType":"matrix",
|
||||
"result":[
|
||||
{% if len(rs) > 0 %}
|
||||
{%= queryRangeLine(&rs[0]) %}
|
||||
{% code rs = rs[1:] %}
|
||||
{% for i := range rs %}
|
||||
,{%= queryRangeLine(&rs[i]) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func queryRangeLine(r *netstorage.Result) %}
|
||||
{
|
||||
"metric": {%= metricNameObject(&r.MetricName) %},
|
||||
"values": {%= valuesWithTimestamps(r.Values, r.Timestamps) %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
118
app/vmselect/prometheus/query_range_response.qtpl.go
Normal file
118
app/vmselect/prometheus/query_range_response.qtpl.go
Normal file
@@ -0,0 +1,118 @@
|
||||
// Code generated by qtc from "query_range_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// QueryRangeResponse generates response for /api/v1/query_range.See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
func StreamQueryRangeResponse(qw422016 *qt422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:14
|
||||
if len(rs) > 0 {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:15
|
||||
streamqueryRangeLine(qw422016, &rs[0])
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:16
|
||||
rs = rs[1:]
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:17
|
||||
for i := range rs {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:17
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:18
|
||||
streamqueryRangeLine(qw422016, &rs[i])
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:19
|
||||
}
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:20
|
||||
}
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:20
|
||||
qw422016.N().S(`]}}`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
func WriteQueryRangeResponse(qq422016 qtio422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
StreamQueryRangeResponse(qw422016, rs)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
func QueryRangeResponse(rs []netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
WriteQueryRangeResponse(qb422016, rs)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:26
|
||||
func streamqueryRangeLine(qw422016 *qt422016.Writer, r *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:26
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:28
|
||||
streammetricNameObject(qw422016, &r.MetricName)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:28
|
||||
qw422016.N().S(`,"values":`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:29
|
||||
streamvaluesWithTimestamps(qw422016, r.Values, r.Timestamps)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:29
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
func writequeryRangeLine(qq422016 qtio422016.Writer, r *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
streamqueryRangeLine(qw422016, r)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
func queryRangeLine(r *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
writequeryRangeLine(qb422016, r)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
}
|
||||
32
app/vmselect/prometheus/query_response.qtpl
Normal file
32
app/vmselect/prometheus/query_response.qtpl
Normal file
@@ -0,0 +1,32 @@
|
||||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
QueryResponse generates response for /api/v1/query.
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
|
||||
{% func QueryResponse(rs []netstorage.Result) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{% if len(rs) > 0 %}
|
||||
{
|
||||
"metric": {%= metricNameObject(&rs[0].MetricName) %},
|
||||
"value": {%= metricRow(rs[0].Timestamps[0], rs[0].Values[0]) %}
|
||||
}
|
||||
{% code rs = rs[1:] %}
|
||||
{% for i := range rs %}
|
||||
{% code r := &rs[i] %}
|
||||
,{
|
||||
"metric": {%= metricNameObject(&r.MetricName) %},
|
||||
"value": {%= metricRow(r.Timestamps[0], r.Values[0]) %}
|
||||
}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
94
app/vmselect/prometheus/query_response.qtpl.go
Normal file
94
app/vmselect/prometheus/query_response.qtpl.go
Normal file
@@ -0,0 +1,94 @@
|
||||
// Code generated by qtc from "query_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// QueryResponse generates response for /api/v1/query.See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
func StreamQueryResponse(qw422016 *qt422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"vector","result":[`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:14
|
||||
if len(rs) > 0 {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:14
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:16
|
||||
streammetricNameObject(qw422016, &rs[0].MetricName)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:16
|
||||
qw422016.N().S(`,"value":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:17
|
||||
streammetricRow(qw422016, rs[0].Timestamps[0], rs[0].Values[0])
|
||||
//line app/vmselect/prometheus/query_response.qtpl:17
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:19
|
||||
rs = rs[1:]
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:20
|
||||
for i := range rs {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:21
|
||||
r := &rs[i]
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:21
|
||||
qw422016.N().S(`,{"metric":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:23
|
||||
streammetricNameObject(qw422016, &r.MetricName)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:23
|
||||
qw422016.N().S(`,"value":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:24
|
||||
streammetricRow(qw422016, r.Timestamps[0], r.Values[0])
|
||||
//line app/vmselect/prometheus/query_response.qtpl:24
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:26
|
||||
}
|
||||
//line app/vmselect/prometheus/query_response.qtpl:27
|
||||
}
|
||||
//line app/vmselect/prometheus/query_response.qtpl:27
|
||||
qw422016.N().S(`]}}`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
func WriteQueryResponse(qq422016 qtio422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
StreamQueryResponse(qw422016, rs)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
func QueryResponse(rs []netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
WriteQueryResponse(qb422016, rs)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
}
|
||||
9
app/vmselect/prometheus/series_count_response.qtpl
Normal file
9
app/vmselect/prometheus/series_count_response.qtpl
Normal file
@@ -0,0 +1,9 @@
|
||||
{% stripspace %}
|
||||
SeriesCountResponse generates response for /api/v1/series/count .
|
||||
{% func SeriesCountResponse(n uint64) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":[{%d int(n) %}]
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
57
app/vmselect/prometheus/series_count_response.qtpl.go
Normal file
57
app/vmselect/prometheus/series_count_response.qtpl.go
Normal file
@@ -0,0 +1,57 @@
|
||||
// Code generated by qtc from "series_count_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// SeriesCountResponse generates response for /api/v1/series/count .
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
func StreamSeriesCountResponse(qw422016 *qt422016.Writer, n uint64) {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:6
|
||||
qw422016.N().D(int(n))
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:6
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
func WriteSeriesCountResponse(qq422016 qtio422016.Writer, n uint64) {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
StreamSeriesCountResponse(qw422016, n)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
func SeriesCountResponse(n uint64) string {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
WriteSeriesCountResponse(qb422016, n)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
}
|
||||
24
app/vmselect/prometheus/series_response.qtpl
Normal file
24
app/vmselect/prometheus/series_response.qtpl
Normal file
@@ -0,0 +1,24 @@
|
||||
{% import (
|
||||
"github.com/valyala/quicktemplate"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
SeriesResponse generates response for /api/v1/series.
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
|
||||
{% func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":[
|
||||
{% code bb, ok := <-resultsCh %}
|
||||
{% if ok %}
|
||||
{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% for bb := range resultsCh %}
|
||||
,{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
||||
83
app/vmselect/prometheus/series_response.qtpl.go
Normal file
83
app/vmselect/prometheus/series_response.qtpl.go
Normal file
@@ -0,0 +1,83 @@
|
||||
// Code generated by qtc from "series_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:1
|
||||
import (
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
// SeriesResponse generates response for /api/v1/series.See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
func StreamSeriesResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:12
|
||||
bb, ok := <-resultsCh
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:13
|
||||
if ok {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:14
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:15
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:16
|
||||
for bb := range resultsCh {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:16
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:17
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:18
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:19
|
||||
}
|
||||
//line app/vmselect/prometheus/series_response.qtpl:20
|
||||
}
|
||||
//line app/vmselect/prometheus/series_response.qtpl:20
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
func WriteSeriesResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
StreamSeriesResponse(qw422016, resultsCh)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
WriteSeriesResponse(qb422016, resultsCh)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
}
|
||||
47
app/vmselect/prometheus/util.qtpl
Normal file
47
app/vmselect/prometheus/util.qtpl
Normal file
@@ -0,0 +1,47 @@
|
||||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
{% func metricNameObject(mn *storage.MetricName) %}
|
||||
{
|
||||
{% if len(mn.MetricGroup) > 0 %}
|
||||
"__name__":{%qz= mn.MetricGroup %}{% if len(mn.Tags) > 0 %},{% endif %}
|
||||
{% endif %}
|
||||
{% for j := range mn.Tags %}
|
||||
{% code tag := &mn.Tags[j] %}
|
||||
{%qz= tag.Key %}:{%qz= tag.Value %}{% if j+1 < len(mn.Tags) %},{% endif %}
|
||||
{% endfor %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func metricRow(timestamp int64, value float64) %}
|
||||
[{%f= float64(timestamp)/1e3 %},"{%f= value %}"]
|
||||
{% endfunc %}
|
||||
|
||||
{% func valuesWithTimestamps(values []float64, timestamps []int64) %}
|
||||
[
|
||||
{% if len(values) == 0 %}
|
||||
{% return %}
|
||||
{% endif %}
|
||||
{% code /* inline metricRow call here for the sake of performance optimization */ %}
|
||||
[{%f= float64(timestamps[0])/1e3 %},"{%f= values[0] %}"]
|
||||
{% code
|
||||
timestamps = timestamps[1:]
|
||||
values = values[1:]
|
||||
%}
|
||||
{% if len(values) > 0 %}
|
||||
{%code
|
||||
// Remove bounds check inside the loop below
|
||||
_ = timestamps[len(values)-1]
|
||||
%}
|
||||
{% for i, v := range values %}
|
||||
{% code /* inline metricRow call here for the sake of performance optimization */ %}
|
||||
,[{%f= float64(timestamps[i])/1e3 %},"{%f= v %}"]
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
215
app/vmselect/prometheus/util.qtpl.go
Normal file
215
app/vmselect/prometheus/util.qtpl.go
Normal file
@@ -0,0 +1,215 @@
|
||||
// Code generated by qtc from "util.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
func streammetricNameObject(qw422016 *qt422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vmselect/prometheus/util.qtpl:9
|
||||
if len(mn.MetricGroup) > 0 {
|
||||
//line app/vmselect/prometheus/util.qtpl:9
|
||||
qw422016.N().S(`"__name__":`)
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
qw422016.N().QZ(mn.MetricGroup)
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
if len(mn.Tags) > 0 {
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:12
|
||||
for j := range mn.Tags {
|
||||
//line app/vmselect/prometheus/util.qtpl:13
|
||||
tag := &mn.Tags[j]
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().QZ(tag.Key)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().QZ(tag.Value)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
if j+1 < len(mn.Tags) {
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:15
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:15
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
func writemetricNameObject(qq422016 qtio422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
streammetricNameObject(qw422016, mn)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
func metricNameObject(mn *storage.MetricName) string {
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
writemetricNameObject(qb422016, mn)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:19
|
||||
func streammetricRow(qw422016 *qt422016.Writer, timestamp int64, value float64) {
|
||||
//line app/vmselect/prometheus/util.qtpl:19
|
||||
qw422016.N().S(`[`)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().F(float64(timestamp) / 1e3)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().S(`,"`)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().F(value)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().S(`"]`)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
func writemetricRow(qq422016 qtio422016.Writer, timestamp int64, value float64) {
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
streammetricRow(qw422016, timestamp, value)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
func metricRow(timestamp int64, value float64) string {
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
writemetricRow(qb422016, timestamp, value)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:23
// streamvaluesWithTimestamps streams a JSON array of `[ts,"value"]` pairs to qw422016.
// Timestamps are converted from milliseconds to seconds; values are quoted per the
// Prometheus query API format. Code generated by qtc from util.qtpl; do not edit manually.
//
// NOTE(review): when len(values) == 0 the function returns after writing the opening
// `[` without the closing `]` — presumably callers never pass empty values; confirm
// against the template and its callers.
func streamvaluesWithTimestamps(qw422016 *qt422016.Writer, values []float64, timestamps []int64) {
//line app/vmselect/prometheus/util.qtpl:23
	qw422016.N().S(`[`)
//line app/vmselect/prometheus/util.qtpl:25
	if len(values) == 0 {
//line app/vmselect/prometheus/util.qtpl:26
		return
//line app/vmselect/prometheus/util.qtpl:27
	}
//line app/vmselect/prometheus/util.qtpl:28
	/* inline metricRow call here for the sake of performance optimization */

	// First row is written without a leading comma.
//line app/vmselect/prometheus/util.qtpl:28
	qw422016.N().S(`[`)
//line app/vmselect/prometheus/util.qtpl:29
	qw422016.N().F(float64(timestamps[0]) / 1e3)
//line app/vmselect/prometheus/util.qtpl:29
	qw422016.N().S(`,"`)
//line app/vmselect/prometheus/util.qtpl:29
	qw422016.N().F(values[0])
//line app/vmselect/prometheus/util.qtpl:29
	qw422016.N().S(`"]`)
//line app/vmselect/prometheus/util.qtpl:31
	timestamps = timestamps[1:]
	values = values[1:]

//line app/vmselect/prometheus/util.qtpl:34
	if len(values) > 0 {
//line app/vmselect/prometheus/util.qtpl:36
		// Remove bounds check inside the loop below
		_ = timestamps[len(values)-1]

//line app/vmselect/prometheus/util.qtpl:39
		for i, v := range values {
//line app/vmselect/prometheus/util.qtpl:40
			/* inline metricRow call here for the sake of performance optimization */

			// Remaining rows are prefixed with a comma.
//line app/vmselect/prometheus/util.qtpl:40
			qw422016.N().S(`,[`)
//line app/vmselect/prometheus/util.qtpl:41
			qw422016.N().F(float64(timestamps[i]) / 1e3)
//line app/vmselect/prometheus/util.qtpl:41
			qw422016.N().S(`,"`)
//line app/vmselect/prometheus/util.qtpl:41
			qw422016.N().F(v)
//line app/vmselect/prometheus/util.qtpl:41
			qw422016.N().S(`"]`)
//line app/vmselect/prometheus/util.qtpl:42
		}
//line app/vmselect/prometheus/util.qtpl:43
	}
//line app/vmselect/prometheus/util.qtpl:43
	qw422016.N().S(`]`)
//line app/vmselect/prometheus/util.qtpl:45
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:45
// writevaluesWithTimestamps writes the rendered rows to qq422016 via a pooled writer.
// Code generated by qtc from util.qtpl; do not edit manually.
func writevaluesWithTimestamps(qq422016 qtio422016.Writer, values []float64, timestamps []int64) {
//line app/vmselect/prometheus/util.qtpl:45
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/util.qtpl:45
	streamvaluesWithTimestamps(qw422016, values, timestamps)
//line app/vmselect/prometheus/util.qtpl:45
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/util.qtpl:45
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:45
// valuesWithTimestamps renders the rows into a string using a pooled byte buffer.
// Code generated by qtc from util.qtpl; do not edit manually.
func valuesWithTimestamps(values []float64, timestamps []int64) string {
//line app/vmselect/prometheus/util.qtpl:45
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/util.qtpl:45
	writevaluesWithTimestamps(qb422016, values, timestamps)
//line app/vmselect/prometheus/util.qtpl:45
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/util.qtpl:45
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/util.qtpl:45
	return qs422016
//line app/vmselect/prometheus/util.qtpl:45
}
|
||||
548
app/vmselect/promql/aggr.go
Normal file
548
app/vmselect/promql/aggr.go
Normal file
@@ -0,0 +1,548 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
// aggrFuncs maps lowercase aggregate function names to their implementations.
// Lookups go through getAggrFunc, which lowercases the name first.
var aggrFuncs = map[string]aggrFunc{
	// See https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators
	"sum":          newAggrFunc(aggrFuncSum),
	"min":          newAggrFunc(aggrFuncMin),
	"max":          newAggrFunc(aggrFuncMax),
	"avg":          newAggrFunc(aggrFuncAvg),
	"stddev":       newAggrFunc(aggrFuncStddev),
	"stdvar":       newAggrFunc(aggrFuncStdvar),
	"count":        newAggrFunc(aggrFuncCount),
	"count_values": aggrFuncCountValues,
	"bottomk":      newAggrFuncTopK(true),
	"topk":         newAggrFuncTopK(false),
	"quantile":     aggrFuncQuantile,

	// Extended PromQL funcs
	"median":   aggrFuncMedian,
	"limitk":   aggrFuncLimitK,
	"distinct": newAggrFunc(aggrFuncDistinct),
	"sum2":     newAggrFunc(aggrFuncSum2),
	"geomean":  newAggrFunc(aggrFuncGeomean),
}
|
||||
|
||||
// aggrFunc evaluates one aggregate expression and returns the resulting series.
type aggrFunc func(afa *aggrFuncArg) ([]*timeseries, error)

// aggrFuncArg carries everything an aggrFunc needs for a single evaluation.
type aggrFuncArg struct {
	// args holds the evaluated function arguments, one series list per argument.
	args [][]*timeseries
	// ae is the aggregate expression being evaluated (name, modifier, ...).
	ae *aggrFuncExpr
	// ec is the evaluation config for the current query.
	ec *EvalConfig
}
|
||||
|
||||
func getAggrFunc(s string) aggrFunc {
|
||||
s = strings.ToLower(s)
|
||||
return aggrFuncs[s]
|
||||
}
|
||||
|
||||
func isAggrFunc(s string) bool {
|
||||
return getAggrFunc(s) != nil
|
||||
}
|
||||
|
||||
// isAggrFuncModifier reports whether s is an aggregate grouping modifier
// keyword (`by` or `without`), case-insensitively.
func isAggrFuncModifier(s string) bool {
	lower := strings.ToLower(s)
	return lower == "by" || lower == "without"
}
|
||||
|
||||
func newAggrFunc(afe func(tss []*timeseries) []*timeseries) aggrFunc {
|
||||
return func(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aggrFuncExt(afe, args[0], &afa.ae.Modifier, false)
|
||||
}
|
||||
}
|
||||
|
||||
func removeGroupTags(metricName *storage.MetricName, modifier *modifierExpr) {
|
||||
groupOp := strings.ToLower(modifier.Op)
|
||||
switch groupOp {
|
||||
case "", "by":
|
||||
metricName.RemoveTagsOn(modifier.Args)
|
||||
case "without":
|
||||
metricName.RemoveTagsIgnoring(modifier.Args)
|
||||
default:
|
||||
logger.Panicf("BUG: unknown group modifier: %q", groupOp)
|
||||
}
|
||||
}
|
||||
|
||||
func aggrFuncExt(afe func(tss []*timeseries) []*timeseries, argOrig []*timeseries, modifier *modifierExpr, keepOriginal bool) ([]*timeseries, error) {
|
||||
arg := copyTimeseriesMetricNames(argOrig)
|
||||
|
||||
// Perform grouping.
|
||||
m := make(map[string][]*timeseries)
|
||||
bb := bbPool.Get()
|
||||
for i, ts := range arg {
|
||||
removeGroupTags(&ts.MetricName, modifier)
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
if keepOriginal {
|
||||
ts = argOrig[i]
|
||||
}
|
||||
m[string(bb.B)] = append(m[string(bb.B)], ts)
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
|
||||
srcTssCount := 0
|
||||
dstTssCount := 0
|
||||
rvs := make([]*timeseries, 0, len(m))
|
||||
for _, tss := range m {
|
||||
rv := afe(tss)
|
||||
rvs = append(rvs, rv...)
|
||||
srcTssCount += len(tss)
|
||||
dstTssCount += len(rv)
|
||||
if dstTssCount > 2000 && dstTssCount > 16*srcTssCount {
|
||||
// This looks like count_values explosion.
|
||||
return nil, fmt.Errorf(`too many timeseries after aggragation; got %d; want less than %d`, dstTssCount, 16*srcTssCount)
|
||||
}
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func aggrFuncSum(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to sum.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
sum := float64(0)
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(ts.Values[i]) {
|
||||
continue
|
||||
}
|
||||
sum += ts.Values[i]
|
||||
count++
|
||||
}
|
||||
if count == 0 {
|
||||
sum = nan
|
||||
}
|
||||
dst.Values[i] = sum
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncSum2(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
sum2 := float64(0)
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
sum2 += v * v
|
||||
count++
|
||||
}
|
||||
if count == 0 {
|
||||
sum2 = nan
|
||||
}
|
||||
dst.Values[i] = sum2
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncGeomean(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to geomean.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
p := 1.0
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
p *= v
|
||||
count++
|
||||
}
|
||||
if count == 0 {
|
||||
p = nan
|
||||
}
|
||||
dst.Values[i] = math.Pow(p, 1/float64(count))
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncMin(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to min.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
min := dst.Values[i]
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(min) || ts.Values[i] < min {
|
||||
min = ts.Values[i]
|
||||
}
|
||||
}
|
||||
dst.Values[i] = min
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncMax(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to max.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
max := dst.Values[i]
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(max) || ts.Values[i] > max {
|
||||
max = ts.Values[i]
|
||||
}
|
||||
}
|
||||
dst.Values[i] = max
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncAvg(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to avg.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
// Do not use `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation,
|
||||
// since it is slower and has no obvious benefits in increased precision.
|
||||
var sum float64
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
sum += v
|
||||
}
|
||||
avg := nan
|
||||
if count > 0 {
|
||||
avg = sum / float64(count)
|
||||
}
|
||||
dst.Values[i] = avg
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncStddev(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - stddev over a single time series is zero
|
||||
values := tss[0].Values
|
||||
for i, v := range values {
|
||||
if !math.IsNaN(v) {
|
||||
values[i] = 0
|
||||
}
|
||||
}
|
||||
return tss
|
||||
}
|
||||
rvs := aggrFuncStdvar(tss)
|
||||
dst := rvs[0]
|
||||
for i, v := range dst.Values {
|
||||
dst.Values[i] = math.Sqrt(v)
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
|
||||
func aggrFuncStdvar(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - stdvar over a single time series is zero
|
||||
values := tss[0].Values
|
||||
for i, v := range values {
|
||||
if !math.IsNaN(v) {
|
||||
values[i] = 0
|
||||
}
|
||||
}
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
// See `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation
|
||||
var avg float64
|
||||
var count float64
|
||||
var q float64
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
avgNew := avg + (v-avg)/count
|
||||
q += (v - avg) * (v - avgNew)
|
||||
avg = avgNew
|
||||
}
|
||||
if count == 0 {
|
||||
q = nan
|
||||
}
|
||||
dst.Values[i] = q / count
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncCount(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(ts.Values[i]) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
}
|
||||
v := float64(count)
|
||||
if count == 0 {
|
||||
v = nan
|
||||
}
|
||||
dst.Values[i] = v
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncDistinct(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
m := make(map[float64]struct{}, len(tss))
|
||||
for i := range dst.Values {
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
m[v] = struct{}{}
|
||||
}
|
||||
n := float64(len(m))
|
||||
if n == 0 {
|
||||
n = nan
|
||||
}
|
||||
dst.Values[i] = n
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
// aggrFuncCountValues implements count_values(label, m): for every distinct
// non-NaN value seen within a group it emits one series labeled label="<value>"
// whose points count how many group members equal that value at the point.
func aggrFuncCountValues(afa *aggrFuncArg) ([]*timeseries, error) {
	args := afa.args
	if err := expectTransformArgsNum(args, 2); err != nil {
		return nil, err
	}
	dstLabel, err := getString(args[0], 0)
	if err != nil {
		return nil, err
	}

	// Remove dstLabel from grouping like Prometheus does.
	// Note: afa.ae.Modifier is mutated in place.
	modifier := &afa.ae.Modifier
	switch strings.ToLower(modifier.Op) {
	case "without":
		modifier.Args = append(modifier.Args, dstLabel)
	case "by":
		// Filter dstLabel out of the `by (...)` list in place.
		dstArgs := modifier.Args[:0]
		for _, arg := range modifier.Args {
			if arg == dstLabel {
				continue
			}
			dstArgs = append(dstArgs, arg)
		}
		modifier.Args = dstArgs
	default:
		// Do nothing
	}

	afe := func(tss []*timeseries) []*timeseries {
		// Collect the set of distinct non-NaN values over the whole group.
		m := make(map[float64]bool)
		for _, ts := range tss {
			for _, v := range ts.Values {
				if math.IsNaN(v) {
					continue
				}
				m[v] = true
			}
		}
		// Sort the values for deterministic output ordering.
		values := make([]float64, 0, len(m))
		for v := range m {
			values = append(values, v)
		}
		sort.Float64s(values)

		// Emit one output series per distinct value.
		var rvs []*timeseries
		for _, v := range values {
			var dst timeseries
			dst.CopyFromShallowTimestamps(tss[0])
			dst.MetricName.RemoveTag(dstLabel)
			dst.MetricName.AddTag(dstLabel, strconv.FormatFloat(v, 'g', -1, 64))
			for i := range dst.Values {
				count := 0
				for _, ts := range tss {
					if ts.Values[i] == v {
						count++
					}
				}
				// Points where the value never occurs become NaN.
				n := float64(count)
				if n == 0 {
					n = nan
				}
				dst.Values[i] = n
			}
			rvs = append(rvs, &dst)
		}
		return rvs
	}
	return aggrFuncExt(afe, args[1], &afa.ae.Modifier, false)
}
|
||||
|
||||
// newAggrFuncTopK returns the implementation of topk(k, m) (isReverse=false)
// or bottomk(k, m) (isReverse=true): for every point, all but the k best
// values in each group are replaced with NaN, and series which end up fully
// NaN are dropped.
func newAggrFuncTopK(isReverse bool) aggrFunc {
	return func(afa *aggrFuncArg) ([]*timeseries, error) {
		args := afa.args
		if err := expectTransformArgsNum(args, 2); err != nil {
			return nil, err
		}
		// k may vary per point, so it is evaluated as a scalar series.
		ks, err := getScalar(args[0], 0)
		if err != nil {
			return nil, err
		}
		afe := func(tss []*timeseries) []*timeseries {
			rvs := tss
			for n := range rvs[0].Values {
				// Sort the group by the value at point n so the k series to
				// keep end up at the tail of rvs. lessWithNaNs puts NaNs
				// first; isReverse flips the order for bottomk.
				sort.Slice(rvs, func(i, j int) bool {
					a := rvs[i].Values[n]
					b := rvs[j].Values[n]
					cmp := lessWithNaNs(a, b)
					if isReverse {
						cmp = !cmp
					}
					return cmp
				})
				if math.IsNaN(ks[n]) {
					ks[n] = 0
				}
				// Clamp k to [0, len(rvs)].
				k := int(ks[n])
				if k < 0 {
					k = 0
				}
				if k > len(rvs) {
					k = len(rvs)
				}
				// NaN-out the values which did not make the cut at this point.
				for _, ts := range rvs[:len(rvs)-k] {
					ts.Values[n] = nan
				}
			}
			// Drop series which became all-NaN.
			return removeNaNs(rvs)
		}
		// keepOriginal=true: output series keep their original label sets.
		return aggrFuncExt(afe, args[1], &afa.ae.Modifier, true)
	}
}
|
||||
|
||||
// aggrFuncLimitK implements limitk(k, m): at most k series are kept in each
// group; the values of the remaining series are set to NaN.
func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
	args := afa.args
	if err := expectTransformArgsNum(args, 2); err != nil {
		return nil, err
	}
	// k may vary per point, so it is evaluated as a scalar series.
	ks, err := getScalar(args[0], 0)
	if err != nil {
		return nil, err
	}
	// maxK is the biggest k over all points; series beyond it can never
	// contribute anywhere and are cut off up front.
	maxK := 0
	for _, kf := range ks {
		k := int(kf)
		if k > maxK {
			maxK = k
		}
	}
	afe := func(tss []*timeseries) []*timeseries {
		if len(tss) > maxK {
			tss = tss[:maxK]
		}
		// Per point, NaN-out series past that point's own (clamped) k.
		for i, kf := range ks {
			k := int(kf)
			if k < 0 {
				k = 0
			}
			for j := k; j < len(tss); j++ {
				tss[j].Values[i] = nan
			}
		}
		return tss
	}
	// keepOriginal=true: output series keep their original label sets.
	return aggrFuncExt(afe, args[1], &afa.ae.Modifier, true)
}
|
||||
|
||||
func aggrFuncQuantile(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phis, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
afe := newAggrQuantileFunc(phis)
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, false)
|
||||
}
|
||||
|
||||
func aggrFuncMedian(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phis := evalNumber(afa.ec, 0.5)[0].Values
|
||||
afe := newAggrQuantileFunc(phis)
|
||||
return aggrFuncExt(afe, args[0], &afa.ae.Modifier, false)
|
||||
}
|
||||
|
||||
// newAggrQuantileFunc returns an aggregate closure that reduces each group of
// time series to the per-point phi-quantile, with phi taken from phis.
// For every point the group is sorted (NaNs first), phi is clamped to [0, 1]
// (NaN phi maps to 1) and the value at the nearest rank is selected.
func newAggrQuantileFunc(phis []float64) func(tss []*timeseries) []*timeseries {
	return func(tss []*timeseries) []*timeseries {
		// Results accumulate into the series that is initially first in the group.
		dst := tss[0]
		for n := range dst.Values {
			// Sort the group by the value at point n; lessWithNaNs puts NaNs first.
			sort.Slice(tss, func(i, j int) bool {
				a := tss[i].Values[n]
				b := tss[j].Values[n]
				return lessWithNaNs(a, b)
			})
			// Clamp phi to [0, 1]; NaN phi selects the maximum (phi = 1).
			phi := phis[n]
			if math.IsNaN(phi) {
				phi = 1
			}
			if phi < 0 {
				phi = 0
			}
			if phi > 1 {
				phi = 1
			}
			// Nearest-rank selection.
			idx := int(math.Round(float64(len(tss)-1) * phi))
			dst.Values[n] = tss[idx].Values[n]
		}
		// The sorts above may have moved dst away from index 0; put it back so
		// the accumulated result is what gets returned.
		tss[0] = dst
		return tss[:1]
	}
}
|
||||
|
||||
func lessWithNaNs(a, b float64) bool {
|
||||
if math.IsNaN(a) {
|
||||
return !math.IsNaN(b)
|
||||
}
|
||||
return a < b
|
||||
}
|
||||
450
app/vmselect/promql/aggr_incremental.go
Normal file
450
app/vmselect/promql/aggr_incremental.go
Normal file
@@ -0,0 +1,450 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// incrementalAggrFuncCallbacksMap contains callbacks for optimized incremental
// calculations for aggregate functions over rollups over metricExpr.
//
// These calculations save RAM for aggregates over big number of time series,
// since each series is folded into the aggregate as soon as it is produced
// instead of all series being accumulated first.
var incrementalAggrFuncCallbacksMap = map[string]*incrementalAggrFuncCallbacks{
	"sum": {
		updateAggrFunc:   updateAggrSum,
		mergeAggrFunc:    mergeAggrSum,
		finalizeAggrFunc: finalizeAggrCommon,
	},
	"min": {
		updateAggrFunc:   updateAggrMin,
		mergeAggrFunc:    mergeAggrMin,
		finalizeAggrFunc: finalizeAggrCommon,
	},
	"max": {
		updateAggrFunc:   updateAggrMax,
		mergeAggrFunc:    mergeAggrMax,
		finalizeAggrFunc: finalizeAggrCommon,
	},
	"avg": {
		updateAggrFunc:   updateAggrAvg,
		mergeAggrFunc:    mergeAggrAvg,
		finalizeAggrFunc: finalizeAggrAvg,
	},
	"count": {
		updateAggrFunc:   updateAggrCount,
		mergeAggrFunc:    mergeAggrCount,
		finalizeAggrFunc: finalizeAggrCount,
	},
	"sum2": {
		updateAggrFunc:   updateAggrSum2,
		mergeAggrFunc:    mergeAggrSum2,
		finalizeAggrFunc: finalizeAggrCommon,
	},
	"geomean": {
		updateAggrFunc:   updateAggrGeomean,
		mergeAggrFunc:    mergeAggrGeomean,
		finalizeAggrFunc: finalizeAggrGeomean,
	},
}
|
||||
|
||||
// incrementalAggrFuncContext holds the in-flight state of one incremental
// aggregation spread across multiple worker goroutines.
type incrementalAggrFuncContext struct {
	// ae is the aggregate expression being evaluated.
	ae *aggrFuncExpr

	// mLock protects m.
	mLock sync.Mutex
	// m maps workerID -> (grouping key -> per-group aggregation state).
	// Per-worker maps keep updates mostly lock-free; they are merged together
	// in finalizeTimeseries.
	m map[uint]map[string]*incrementalAggrContext

	// callbacks implement the actual aggregate (sum, min, ...).
	callbacks *incrementalAggrFuncCallbacks
}
|
||||
|
||||
func newIncrementalAggrFuncContext(ae *aggrFuncExpr, callbacks *incrementalAggrFuncCallbacks) *incrementalAggrFuncContext {
|
||||
return &incrementalAggrFuncContext{
|
||||
ae: ae,
|
||||
m: make(map[uint]map[string]*incrementalAggrContext),
|
||||
callbacks: callbacks,
|
||||
}
|
||||
}
|
||||
|
||||
// updateTimeseries folds ts into the partial aggregation state owned by the
// given worker. It is safe to call concurrently from multiple workers as long
// as each worker passes its own distinct workerID.
//
// NOTE(review): ts.MetricName is modified in place (grouping tags removed) and
// ts.Timestamps may be retained by the aggregation state — presumably the
// caller must not reuse ts afterwards; confirm against the callers.
func (iafc *incrementalAggrFuncContext) updateTimeseries(ts *timeseries, workerID uint) {
	// Only the lookup/creation of the per-worker map needs the lock; the map
	// itself is then touched only by this worker.
	iafc.mLock.Lock()
	m := iafc.m[workerID]
	if m == nil {
		m = make(map[string]*incrementalAggrContext, 1)
		iafc.m[workerID] = m
	}
	iafc.mLock.Unlock()

	removeGroupTags(&ts.MetricName, &iafc.ae.Modifier)
	bb := bbPool.Get()
	bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
	iac := m[string(bb.B)]
	if iac == nil {
		// First series seen for this group on this worker: set up the
		// aggregate output series and the auxiliary per-point counters.
		tsAggr := &timeseries{
			Values:     make([]float64, len(ts.Values)),
			Timestamps: ts.Timestamps,
			denyReuse:  true,
		}
		tsAggr.MetricName.CopyFrom(&ts.MetricName)
		iac = &incrementalAggrContext{
			ts:     tsAggr,
			values: make([]float64, len(ts.Values)),
		}
		m[string(bb.B)] = iac
	}
	bbPool.Put(bb)
	iafc.callbacks.updateAggrFunc(iac, ts.Values)
}
|
||||
|
||||
// finalizeTimeseries merges the per-worker partial aggregation states and
// returns the final aggregated series, one per group.
func (iafc *incrementalAggrFuncContext) finalizeTimeseries() []*timeseries {
	// There is no need in iafc.mLock.Lock here, since finalizeTimeseries must be called
	// without concurrent goroutines touching iafc.
	mGlobal := make(map[string]*incrementalAggrContext)
	mergeAggrFunc := iafc.callbacks.mergeAggrFunc
	for _, m := range iafc.m {
		for k, iac := range m {
			iacGlobal := mGlobal[k]
			if iacGlobal == nil {
				// The first worker state seen for a group becomes the merge target.
				mGlobal[k] = iac
				continue
			}
			mergeAggrFunc(iacGlobal, iac)
		}
	}
	// Apply the final per-aggregate post-processing (e.g. sum/count -> avg).
	tss := make([]*timeseries, 0, len(mGlobal))
	finalizeAggrFunc := iafc.callbacks.finalizeAggrFunc
	for _, iac := range mGlobal {
		finalizeAggrFunc(iac)
		tss = append(tss, iac.ts)
	}
	return tss
}
|
||||
|
||||
// incrementalAggrFuncCallbacks bundles the three phases of an incremental
// aggregate: per-series update, cross-worker merge and final post-processing.
type incrementalAggrFuncCallbacks struct {
	// updateAggrFunc folds a single series' values into iac.
	updateAggrFunc func(iac *incrementalAggrContext, values []float64)
	// mergeAggrFunc merges the partial state src into dst.
	mergeAggrFunc func(dst, src *incrementalAggrContext)
	// finalizeAggrFunc converts the accumulated state into final values.
	finalizeAggrFunc func(iac *incrementalAggrContext)
}
|
||||
|
||||
func getIncrementalAggrFuncCallbacks(name string) *incrementalAggrFuncCallbacks {
|
||||
name = strings.ToLower(name)
|
||||
return incrementalAggrFuncCallbacksMap[name]
|
||||
}
|
||||
|
||||
// incrementalAggrContext is the per-group state of an incremental aggregation.
type incrementalAggrContext struct {
	// ts accumulates the aggregate values for the group.
	ts *timeseries
	// values holds auxiliary per-point data: a "seen" flag for most
	// aggregates, or a count of contributing inputs for avg/geomean.
	values []float64
}
|
||||
|
||||
func finalizeAggrCommon(iac *incrementalAggrContext) {
|
||||
counts := iac.values
|
||||
dstValues := iac.ts.Values
|
||||
_ = dstValues[len(counts)-1]
|
||||
for i, v := range counts {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrSum(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrSum(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrMin(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v < dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrMin(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v < dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrMax(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v > dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrMax(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
if v > dstValues[i] {
|
||||
dstValues[i] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrAvg(iac *incrementalAggrContext, values []float64) {
|
||||
// Do not use `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation,
|
||||
// since it is slower and has no obvious benefits in increased precision.
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
dstCounts[i]++
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrAvg(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = srcCounts[i]
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
dstCounts[i] += srcCounts[i]
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeAggrAvg(iac *incrementalAggrContext) {
|
||||
dstValues := iac.ts.Values
|
||||
counts := iac.values
|
||||
_ = dstValues[len(counts)-1]
|
||||
for i, v := range counts {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
continue
|
||||
}
|
||||
dstValues[i] /= v
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrCount(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
_ = dstValues[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues[i]++
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrCount(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeAggrCount(iac *incrementalAggrContext) {
|
||||
dstValues := iac.ts.Values
|
||||
for i, v := range dstValues {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrSum2(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v * v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v * v
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrSum2(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
func updateAggrGeomean(iac *incrementalAggrContext, values []float64) {
|
||||
dstValues := iac.ts.Values
|
||||
dstCounts := iac.values
|
||||
_ = dstValues[len(values)-1]
|
||||
_ = dstCounts[len(values)-1]
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = 1
|
||||
continue
|
||||
}
|
||||
dstValues[i] *= v
|
||||
dstCounts[i]++
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAggrGeomean(dst, src *incrementalAggrContext) {
|
||||
srcValues := src.ts.Values
|
||||
dstValues := dst.ts.Values
|
||||
srcCounts := src.values
|
||||
dstCounts := dst.values
|
||||
_ = srcCounts[len(srcValues)-1]
|
||||
_ = dstCounts[len(srcValues)-1]
|
||||
_ = dstValues[len(srcValues)-1]
|
||||
for i, v := range srcValues {
|
||||
if srcCounts[i] == 0 {
|
||||
continue
|
||||
}
|
||||
if dstCounts[i] == 0 {
|
||||
dstValues[i] = v
|
||||
dstCounts[i] = srcCounts[i]
|
||||
continue
|
||||
}
|
||||
dstValues[i] *= v
|
||||
dstCounts[i] += srcCounts[i]
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeAggrGeomean(iac *incrementalAggrContext) {
|
||||
dstValues := iac.ts.Values
|
||||
counts := iac.values
|
||||
_ = dstValues[len(counts)-1]
|
||||
for i, v := range counts {
|
||||
if v == 0 {
|
||||
dstValues[i] = nan
|
||||
continue
|
||||
}
|
||||
dstValues[i] = math.Pow(dstValues[i], 1/v)
|
||||
}
|
||||
}
|
||||
188
app/vmselect/promql/aggr_incremental_test.go
Normal file
188
app/vmselect/promql/aggr_incremental_test.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIncrementalAggr verifies every registered incremental aggregate against
// a fixed matrix of input series containing NaN gaps, running each aggregate
// repeatedly over concurrent workers to flush out ordering/race side effects.
func TestIncrementalAggr(t *testing.T) {
	defaultTimestamps := []int64{100e3, 200e3, 300e3, 400e3}
	// One row per input series; column 1 is all-NaN to exercise empty points.
	values := [][]float64{
		{1, nan, 2, nan},
		{3, nan, nan, 4},
		{nan, nan, 5, 6},
		{7, nan, 8, 9},
		{4, nan, nan, nan},
		{2, nan, 3, 2},
		{0, nan, 1, 1},
	}
	tssSrc := make([]*timeseries, len(values))
	for i, vs := range values {
		ts := &timeseries{
			Timestamps: defaultTimestamps,
			Values:     vs,
		}
		tssSrc[i] = ts
	}

	// copyTimeseries deep-copies the source series, since aggregation mutates
	// its inputs.
	copyTimeseries := func(tssSrc []*timeseries) []*timeseries {
		tssDst := make([]*timeseries, len(tssSrc))
		for i, tsSrc := range tssSrc {
			var tsDst timeseries
			tsDst.CopyFromShallowTimestamps(tsSrc)
			tssDst[i] = &tsDst
		}
		return tssDst
	}

	// f runs the named incremental aggregate over the shared inputs and checks
	// the per-point results against valuesExpected.
	f := func(name string, valuesExpected []float64) {
		t.Helper()
		callbacks := getIncrementalAggrFuncCallbacks(name)
		ae := &aggrFuncExpr{
			Name: name,
		}
		tssExpected := []*timeseries{{
			Timestamps: defaultTimestamps,
			Values:     valuesExpected,
		}}
		// run the test multiple times to make sure there are no side effects on concurrency
		for i := 0; i < 10; i++ {
			iafc := newIncrementalAggrFuncContext(ae, callbacks)
			tssSrcCopy := copyTimeseries(tssSrc)
			if err := testIncrementalParallelAggr(iafc, tssSrcCopy, tssExpected); err != nil {
				t.Fatalf("unexpected error on iteration %d: %s", i, err)
			}
		}
	}

	t.Run("sum", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{17, nan, 19, 22}
		f("sum", valuesExpected)
	})
	t.Run("min", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{0, nan, 1, 1}
		f("min", valuesExpected)
	})
	t.Run("max", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{7, nan, 8, 9}
		f("max", valuesExpected)
	})
	t.Run("avg", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{2.8333333333333335, nan, 3.8, 4.4}
		f("avg", valuesExpected)
	})
	t.Run("count", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{6, nan, 5, 5}
		f("count", valuesExpected)
	})
	t.Run("sum2", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{79, nan, 103, 138}
		f("sum2", valuesExpected)
	})
	t.Run("geomean", func(t *testing.T) {
		t.Parallel()
		valuesExpected := []float64{0, nan, 2.9925557394776896, 3.365865436338599}
		f("geomean", valuesExpected)
	})
}
|
||||
|
||||
func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssExpected []*timeseries) error {
|
||||
const workersCount = 3
|
||||
tsCh := make(chan *timeseries)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(workersCount)
|
||||
for i := 0; i < workersCount; i++ {
|
||||
go func(workerID uint) {
|
||||
defer wg.Done()
|
||||
for ts := range tsCh {
|
||||
runtime.Gosched() // allow other goroutines performing the work
|
||||
iafc.updateTimeseries(ts, workerID)
|
||||
}
|
||||
}(uint(i))
|
||||
}
|
||||
for _, ts := range tssSrc {
|
||||
tsCh <- ts
|
||||
}
|
||||
close(tsCh)
|
||||
wg.Wait()
|
||||
tssActual := iafc.finalizeTimeseries()
|
||||
if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
|
||||
return fmt.Errorf("%s; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func expectTimeseriesEqual(actual, expected []*timeseries) error {
|
||||
if len(actual) != len(expected) {
|
||||
return fmt.Errorf("unexpected number of time series; got %d; want %d", len(actual), len(expected))
|
||||
}
|
||||
mActual := timeseriesToMap(actual)
|
||||
mExpected := timeseriesToMap(expected)
|
||||
if len(mActual) != len(mExpected) {
|
||||
return fmt.Errorf("unexpected number of time series after converting to map; got %d; want %d", len(mActual), len(mExpected))
|
||||
}
|
||||
for k, tsExpected := range mExpected {
|
||||
tsActual := mActual[k]
|
||||
if tsActual == nil {
|
||||
return fmt.Errorf("missing time series for key=%q", k)
|
||||
}
|
||||
if err := expectTsEqual(tsActual, tsExpected); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeseriesToMap(tss []*timeseries) map[string]*timeseries {
|
||||
m := make(map[string]*timeseries, len(tss))
|
||||
for _, ts := range tss {
|
||||
k := ts.MetricName.Marshal(nil)
|
||||
m[string(k)] = ts
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// expectTsEqual returns an error if actual differs from expected in metric
// name, timestamps or values. Values are compared approximately via
// compareValues; timestamps must match exactly.
func expectTsEqual(actual, expected *timeseries) error {
	mnActual := actual.MetricName.Marshal(nil)
	mnExpected := expected.MetricName.Marshal(nil)
	if string(mnActual) != string(mnExpected) {
		return fmt.Errorf("unexpected metric name; got %q; want %q", mnActual, mnExpected)
	}
	if !reflect.DeepEqual(actual.Timestamps, expected.Timestamps) {
		return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
	}
	if err := compareValues(actual.Values, expected.Values); err != nil {
		return fmt.Errorf("%s; actual %v; expected %v", err, actual.Values, expected.Values)
	}
	return nil
}
|
||||
|
||||
func compareValues(vs1, vs2 []float64) error {
|
||||
if len(vs1) != len(vs2) {
|
||||
return fmt.Errorf("unexpected number of values; got %d; want %d", len(vs1), len(vs2))
|
||||
}
|
||||
for i, v1 := range vs1 {
|
||||
v2 := vs2[i]
|
||||
if math.IsNaN(v1) {
|
||||
if !math.IsNaN(v2) {
|
||||
return fmt.Errorf("unexpected value; got %v; want %v", v1, v2)
|
||||
}
|
||||
continue
|
||||
}
|
||||
eps := math.Abs(v1 - v2)
|
||||
if eps > 1e-14 {
|
||||
return fmt.Errorf("unexpected value; got %v; want %v", v1, v2)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
30
app/vmselect/promql/aggr_test.go
Normal file
30
app/vmselect/promql/aggr_test.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsAggrFuncModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isAggrFuncModifier(s) {
|
||||
t.Fatalf("expecting valid funcModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("by")
|
||||
f("BY")
|
||||
f("without")
|
||||
f("Without")
|
||||
}
|
||||
|
||||
func TestIsAggrFuncModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isAggrFuncModifier(s) {
|
||||
t.Fatalf("unexpected valid funcModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("byfix")
|
||||
f("on")
|
||||
f("ignoring")
|
||||
}
|
||||
3
app/vmselect/promql/arch_amd64.go
Normal file
3
app/vmselect/promql/arch_amd64.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package promql
|
||||
|
||||
// maxByteSliceLen limits byte slice lengths on this build architecture
// (amd64 per the surrounding arch_amd64.go file).
const maxByteSliceLen = 1 << 40
|
||||
3
app/vmselect/promql/arch_arm.go
Normal file
3
app/vmselect/promql/arch_arm.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package promql
|
||||
|
||||
// maxByteSliceLen limits byte slice lengths on this build architecture
// (32-bit arm per the surrounding arch_arm.go file): max int32.
const maxByteSliceLen = 1<<31 - 1
|
||||
3
app/vmselect/promql/arch_arm64.go
Normal file
3
app/vmselect/promql/arch_arm64.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package promql
|
||||
|
||||
// maxByteSliceLen limits byte slice lengths on this build architecture
// (arm64 per the surrounding arch_arm64.go file).
const maxByteSliceLen = 1 << 40
|
||||
610
app/vmselect/promql/binary_op.go
Normal file
610
app/vmselect/promql/binary_op.go
Normal file
@@ -0,0 +1,610 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
// binaryOpFuncs maps the lowercase binary operator name to its evaluator.
// Lookups must go through getBinaryOpFunc, which lowercases the operator first.
var binaryOpFuncs = map[string]binaryOpFunc{
	// arith ops
	"+": newBinaryOpArithFunc(binaryOpPlus),
	"-": newBinaryOpArithFunc(binaryOpMinus),
	"*": newBinaryOpArithFunc(binaryOpMul),
	"/": newBinaryOpArithFunc(binaryOpDiv),
	"%": newBinaryOpArithFunc(binaryOpMod),
	"^": newBinaryOpArithFunc(binaryOpPow),

	// cmp ops
	"==": newBinaryOpCmpFunc(binaryOpEq),
	"!=": newBinaryOpCmpFunc(binaryOpNeq),
	">":  newBinaryOpCmpFunc(binaryOpGt),
	"<":  newBinaryOpCmpFunc(binaryOpLt),
	">=": newBinaryOpCmpFunc(binaryOpGte),
	"<=": newBinaryOpCmpFunc(binaryOpLte),

	// logical set ops
	"and":    binaryOpAnd,
	"or":     binaryOpOr,
	"unless": binaryOpUnless,

	// New ops (MetricsQL extensions over PromQL)
	"if":      newBinaryOpArithFunc(binaryOpIf),
	"ifnot":   newBinaryOpArithFunc(binaryOpIfnot),
	"default": newBinaryOpArithFunc(binaryOpDefault),
}
|
||||
|
||||
// binaryOpPriorities maps a lowercase operator to its precedence:
// higher values bind tighter. Query via binaryOpPriority.
var binaryOpPriorities = map[string]int{
	// `default` binds loosest of all (MetricsQL extension).
	"default": -1,

	"if":    0,
	"ifnot": 0,

	// See https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operator-precedence
	"or": 1,

	"and":    2,
	"unless": 2,

	"==": 3,
	"!=": 3,
	"<":  3,
	">":  3,
	"<=": 3,
	">=": 3,

	"+": 4,
	"-": 4,

	"*": 5,
	"/": 5,
	"%": 5,

	"^": 6,
}
|
||||
|
||||
func getBinaryOpFunc(op string) binaryOpFunc {
|
||||
op = strings.ToLower(op)
|
||||
return binaryOpFuncs[op]
|
||||
}
|
||||
|
||||
func isBinaryOp(op string) bool {
|
||||
return getBinaryOpFunc(op) != nil
|
||||
}
|
||||
|
||||
func binaryOpPriority(op string) int {
|
||||
op = strings.ToLower(op)
|
||||
return binaryOpPriorities[op]
|
||||
}
|
||||
|
||||
func scanBinaryOpPrefix(s string) int {
|
||||
n := 0
|
||||
for op := range binaryOpFuncs {
|
||||
if len(s) < len(op) {
|
||||
continue
|
||||
}
|
||||
ss := strings.ToLower(s[:len(op)])
|
||||
if ss == op && len(op) > n {
|
||||
n = len(op)
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// isRightAssociativeBinaryOp reports whether op associates to the right.
// Only `^` does.
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operator-precedence
func isRightAssociativeBinaryOp(op string) bool {
	switch op {
	case "^":
		return true
	}
	return false
}
|
||||
|
||||
// isBinaryOpGroupModifier reports whether s is a vector-matching modifier
// (`on` / `ignoring`), case-insensitively.
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching
func isBinaryOpGroupModifier(s string) bool {
	lower := strings.ToLower(s)
	return lower == "on" || lower == "ignoring"
}
|
||||
|
||||
// isBinaryOpJoinModifier reports whether s is a many-to-one/one-to-many join
// modifier (`group_left` / `group_right`), case-insensitively.
func isBinaryOpJoinModifier(s string) bool {
	lower := strings.ToLower(s)
	return lower == "group_left" || lower == "group_right"
}
|
||||
|
||||
// isBinaryOpBoolModifier reports whether s is the `bool` modifier,
// case-insensitively.
func isBinaryOpBoolModifier(s string) bool {
	switch strings.ToLower(s) {
	case "bool":
		return true
	}
	return false
}
|
||||
|
||||
// isBinaryOpCmp reports whether op is a comparison operator.
// Comparison operators are symbols, so no case folding is needed.
func isBinaryOpCmp(op string) bool {
	return op == "==" || op == "!=" || op == ">" || op == "<" || op == ">=" || op == "<="
}
|
||||
|
||||
// isBinaryOpLogicalSet reports whether op is a logical set operator
// (`and` / `or` / `unless`), case-insensitively.
func isBinaryOpLogicalSet(op string) bool {
	lower := strings.ToLower(op)
	return lower == "and" || lower == "or" || lower == "unless"
}
|
||||
|
||||
// binaryOpConstants evaluates `left op right` where both operands are scalar
// constants. isBool applies the `bool` modifier semantics to comparison ops:
// the result becomes 1/0 instead of left/NaN. Unknown ops are programmer
// errors and panic.
func binaryOpConstants(op string, left, right float64, isBool bool) float64 {
	if isBinaryOpCmp(op) {
		// evalCmp maps a raw comparison to the op result: with `bool` it is
		// 1 on match and 0 otherwise; without it the left value is kept on
		// match and the point is dropped (NaN) otherwise.
		evalCmp := func(cf func(left, right float64) bool) float64 {
			if isBool {
				if cf(left, right) {
					return 1
				}
				return 0
			}
			if cf(left, right) {
				return left
			}
			return nan
		}
		switch op {
		case "==":
			left = evalCmp(binaryOpEq)
		case "!=":
			left = evalCmp(binaryOpNeq)
		case ">":
			left = evalCmp(binaryOpGt)
		case "<":
			left = evalCmp(binaryOpLt)
		case ">=":
			left = evalCmp(binaryOpGte)
		case "<=":
			left = evalCmp(binaryOpLte)
		default:
			logger.Panicf("BUG: unexpected comparison binaryOp: %q", op)
		}
	} else {
		switch op {
		case "+":
			left = binaryOpPlus(left, right)
		case "-":
			left = binaryOpMinus(left, right)
		case "*":
			left = binaryOpMul(left, right)
		case "/":
			left = binaryOpDiv(left, right)
		case "%":
			left = binaryOpMod(left, right)
		case "^":
			left = binaryOpPow(left, right)
		case "and":
			// Nothing to do
		case "or":
			// Nothing to do
		case "unless":
			// A constant is always present on the right side, so `unless` drops the point.
			left = nan
		case "default":
			left = binaryOpDefault(left, right)
		case "if":
			left = binaryOpIf(left, right)
		case "ifnot":
			left = binaryOpIfnot(left, right)
		default:
			logger.Panicf("BUG: unexpected non-comparison binaryOp: %q", op)
		}
	}
	return left
}
|
||||
|
||||
// binaryOpFuncArg bundles the arguments for a single binaryOpFunc invocation.
type binaryOpFuncArg struct {
	// be is the binary expression being evaluated (operator and modifiers).
	be *binaryOpExpr
	// left and right hold the already-evaluated operand time series.
	left  []*timeseries
	right []*timeseries
}

// binaryOpFunc evaluates a binary operator over the packaged operands.
type binaryOpFunc func(bfa *binaryOpFuncArg) ([]*timeseries, error)
|
||||
|
||||
func newBinaryOpCmpFunc(cf func(left, right float64) bool) binaryOpFunc {
|
||||
cfe := func(left, right float64, isBool bool) float64 {
|
||||
if !isBool {
|
||||
if cf(left, right) {
|
||||
return left
|
||||
}
|
||||
return nan
|
||||
}
|
||||
if cf(left, right) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return newBinaryOpFunc(cfe)
|
||||
}
|
||||
|
||||
func newBinaryOpArithFunc(af func(left, right float64) float64) binaryOpFunc {
|
||||
afe := func(left, right float64, isBool bool) float64 {
|
||||
return af(left, right)
|
||||
}
|
||||
return newBinaryOpFunc(afe)
|
||||
}
|
||||
|
||||
// newBinaryOpFunc builds a binaryOpFunc that matches left/right series via
// adjustBinaryOpTags and then applies bf element-wise, writing results into
// the dst series chosen by the matcher. Series that end up all-NaN are
// dropped from the result.
func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOpFunc {
	return func(bfa *binaryOpFuncArg) ([]*timeseries, error) {
		isBool := bfa.be.Bool
		left, right, dst, err := adjustBinaryOpTags(bfa.be, bfa.left, bfa.right)
		if err != nil {
			return nil, err
		}
		// adjustBinaryOpTags guarantees positional pairing of the three slices.
		if len(left) != len(right) || len(left) != len(dst) {
			logger.Panicf("BUG: len(left) must match len(right) and len(dst); got %d vs %d vs %d", len(left), len(right), len(dst))
		}
		for i, tsLeft := range left {
			leftValues := tsLeft.Values
			rightValues := right[i].Values
			dstValues := dst[i].Values
			if len(leftValues) != len(rightValues) || len(leftValues) != len(dstValues) {
				logger.Panicf("BUG: len(leftVaues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
					len(leftValues), len(rightValues), len(dstValues))
			}
			for j, a := range leftValues {
				b := rightValues[j]
				dstValues[j] = bf(a, b, isBool)
			}
		}
		// Optimization: remove time series containing only NaNs.
		// This is quite common after applying filters like `q > 0`.
		dst = removeNaNs(dst)
		return dst, nil
	}
}
|
||||
|
||||
// adjustBinaryOpTags pairs up left and right series according to the vector
// matching rules of be (on/ignoring, group_left/group_right). It returns three
// positionally-aligned slices (left, right, dst), where dst holds the series
// whose labels/values will carry the operator result.
func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
	if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 {
		if isScalar(left) {
			// Fast path: `scalar op vector` - pair the single scalar series
			// with every right-side series.
			rvsLeft := make([]*timeseries, len(right))
			tsLeft := left[0]
			for i, tsRight := range right {
				resetMetricGroupIfRequired(be, tsRight)
				rvsLeft[i] = tsLeft
			}
			return rvsLeft, right, right, nil
		}
		if isScalar(right) {
			// Fast path: `vector op scalar` - mirror of the case above;
			// results land in the left-side series.
			rvsRight := make([]*timeseries, len(left))
			tsRight := right[0]
			for i, tsLeft := range left {
				resetMetricGroupIfRequired(be, tsLeft)
				rvsRight[i] = tsRight
			}
			return left, rvsRight, left, nil
		}
	}

	// Slow path: `vector op vector` or `a op {on|ignoring} {group_left|group_right} b`
	var rvsLeft, rvsRight []*timeseries
	mLeft, mRight := createTimeseriesMapByTagSet(be, left, right)
	joinOp := strings.ToLower(be.JoinModifier.Op)
	groupOp := strings.ToLower(be.GroupModifier.Op)
	if len(groupOp) == 0 {
		// Absent group modifier behaves like `ignoring` with no args.
		groupOp = "ignoring"
	}
	groupTags := be.GroupModifier.Args
	for k, tssLeft := range mLeft {
		tssRight := mRight[k]
		if len(tssRight) == 0 {
			// No match on the right side for this group key.
			continue
		}
		switch joinOp {
		case "group_left":
			var err error
			rvsLeft, rvsRight, err = groupJoin("right", be, rvsLeft, rvsRight, tssLeft, tssRight)
			if err != nil {
				return nil, nil, nil, err
			}
		case "group_right":
			// Same join with the sides swapped.
			var err error
			rvsRight, rvsLeft, err = groupJoin("left", be, rvsRight, rvsLeft, tssRight, tssLeft)
			if err != nil {
				return nil, nil, nil, err
			}
		case "":
			// One-to-one matching: each side must collapse to a single series.
			if err := ensureSingleTimeseries("left", be, tssLeft); err != nil {
				return nil, nil, nil, err
			}
			if err := ensureSingleTimeseries("right", be, tssRight); err != nil {
				return nil, nil, nil, err
			}
			tsLeft := tssLeft[0]
			resetMetricGroupIfRequired(be, tsLeft)
			switch groupOp {
			case "on":
				tsLeft.MetricName.RemoveTagsOn(groupTags)
			case "ignoring":
				tsLeft.MetricName.RemoveTagsIgnoring(groupTags)
			default:
				logger.Panicf("BUG: unexpected binary op modifier %q", groupOp)
			}
			rvsLeft = append(rvsLeft, tsLeft)
			rvsRight = append(rvsRight, tssRight[0])
		default:
			logger.Panicf("BUG: unexpected join modifier %q", joinOp)
		}
	}
	// The result carries the labels of the "many" side: left by default,
	// right under group_right.
	dst := rvsLeft
	if joinOp == "group_right" {
		dst = rvsRight
	}
	return rvsLeft, rvsRight, dst, nil
}
|
||||
|
||||
// ensureSingleTimeseries collapses tss into a single series by repeatedly
// merging the last series into the first when their non-NaN points don't
// overlap. It returns an error on a genuine duplicate (overlapping points),
// since one-to-one vector matching requires a unique series per group key.
func ensureSingleTimeseries(side string, be *binaryOpExpr, tss []*timeseries) error {
	if len(tss) == 0 {
		logger.Panicf("BUG: tss must contain at least one value")
	}
	for len(tss) > 1 {
		if !mergeNonOverlappingTimeseries(tss[0], tss[len(tss)-1]) {
			return fmt.Errorf(`duplicate time series on the %s side of %s %s: %s and %s`, side, be.Op, be.GroupModifier.AppendString(nil),
				stringMetricTags(&tss[0].MetricName), stringMetricTags(&tss[len(tss)-1].MetricName))
		}
		// Merged into tss[0]; drop the tail element and continue.
		tss = tss[:len(tss)-1]
	}
	return nil
}
|
||||
|
||||
// groupJoin implements the many-to-one part of group_left/group_right: each
// series in tssLeft (the "many" side) is paired with every matching series in
// tssRight, copying the join-modifier tags onto (a copy of) the left series.
// It appends the resulting pairs to rvsLeft/rvsRight and returns them.
// Pairings that would produce duplicate label sets are either merged (when
// their points don't overlap) or reported as an error.
func groupJoin(singleTimeseriesSide string, be *binaryOpExpr, rvsLeft, rvsRight, tssLeft, tssRight []*timeseries) ([]*timeseries, []*timeseries, error) {
	joinTags := be.JoinModifier.Args
	// m deduplicates right-side series by the label set produced after the join.
	var m map[string]*timeseries
	for _, tsLeft := range tssLeft {
		resetMetricGroupIfRequired(be, tsLeft)
		if len(tssRight) == 1 {
			// Easy case - right part contains only a single matching time series.
			tsLeft.MetricName.AddMissingTags(joinTags, &tssRight[0].MetricName)
			rvsLeft = append(rvsLeft, tsLeft)
			rvsRight = append(rvsRight, tssRight[0])
			continue
		}

		// Hard case - right part contains multiple matching time series.
		// Verify it doesn't result in duplicate MetricName values after adding missing tags.
		if m == nil {
			m = make(map[string]*timeseries, len(tssRight))
		} else {
			// Reuse the map across iterations, clearing previous entries.
			for k := range m {
				delete(m, k)
			}
		}
		bb := bbPool.Get()
		for _, tsRight := range tssRight {
			// Work on a copy so each pairing gets its own left-side labels.
			var tsCopy timeseries
			tsCopy.CopyFromShallowTimestamps(tsLeft)
			tsCopy.MetricName.AddMissingTags(joinTags, &tsRight.MetricName)
			bb.B = marshalMetricTagsSorted(bb.B[:0], &tsCopy.MetricName)
			if tsExisting := m[string(bb.B)]; tsExisting != nil {
				// Try merging tsExisting with tsRight if they don't overlap.
				if mergeNonOverlappingTimeseries(tsExisting, tsRight) {
					continue
				}
				return nil, nil, fmt.Errorf("duplicate time series on the %s side of `%s %s %s`: %s and %s",
					singleTimeseriesSide, be.Op, be.GroupModifier.AppendString(nil), be.JoinModifier.AppendString(nil),
					stringMetricTags(&tsExisting.MetricName), stringMetricTags(&tsRight.MetricName))
			}
			m[string(bb.B)] = tsRight
			rvsLeft = append(rvsLeft, &tsCopy)
			rvsRight = append(rvsRight, tsRight)
		}
		bbPool.Put(bb)
	}
	return rvsLeft, rvsRight, nil
}
|
||||
|
||||
// mergeNonOverlappingTimeseries copies non-NaN points of src into dst and
// returns true, provided no position holds a non-NaN value in both series.
// If any position overlaps, dst is left untouched and false is returned.
func mergeNonOverlappingTimeseries(dst, src *timeseries) bool {
	// Verify whether the time series can be merged.
	srcValues := src.Values
	dstValues := dst.Values
	// Bounds-check-elimination hint; also panics early if dst is shorter
	// than src (or src is empty).
	_ = dstValues[len(srcValues)-1]
	for i, v := range srcValues {
		if math.IsNaN(v) {
			continue
		}
		if !math.IsNaN(dstValues[i]) {
			// Both series have a value at i - cannot merge.
			return false
		}
	}

	// Time series can be merged. Merge them.
	for i, v := range srcValues {
		if math.IsNaN(v) {
			continue
		}
		dstValues[i] = v
	}
	return true
}
|
||||
|
||||
// resetMetricGroupIfRequired drops the metric name from ts when the operator
// produces a derived value, mirroring Prometheus semantics. The name is kept
// for non-bool comparisons (which pass through original samples) and for the
// MetricsQL `default`/`if`/`ifnot` operators.
func resetMetricGroupIfRequired(be *binaryOpExpr, ts *timeseries) {
	if isBinaryOpCmp(be.Op) && !be.Bool {
		// Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does.
		return
	}
	switch be.Op {
	case "default", "if", "ifnot":
		// Do not reset MetricGroup for these ops.
		return
	}
	ts.MetricName.ResetMetricGroup()
}
|
||||
|
||||
// binaryOpPlus implements the `+` operator.
func binaryOpPlus(left, right float64) float64 {
	return left + right
}

// binaryOpMinus implements the `-` operator.
func binaryOpMinus(left, right float64) float64 {
	return left - right
}

// binaryOpMul implements the `*` operator.
func binaryOpMul(left, right float64) float64 {
	return left * right
}

// binaryOpDiv implements the `/` operator. Division by zero yields
// +/-Inf or NaN per IEEE 754 instead of panicking.
func binaryOpDiv(left, right float64) float64 {
	return left / right
}

// binaryOpMod implements the `%` operator (floating-point remainder).
func binaryOpMod(left, right float64) float64 {
	return math.Mod(left, right)
}

// binaryOpPow implements the `^` operator.
func binaryOpPow(left, right float64) float64 {
	return math.Pow(left, right)
}
|
||||
|
||||
func binaryOpDefault(left, right float64) float64 {
|
||||
if math.IsNaN(left) {
|
||||
return right
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
func binaryOpIf(left, right float64) float64 {
|
||||
if math.IsNaN(right) {
|
||||
return nan
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
func binaryOpIfnot(left, right float64) float64 {
|
||||
if math.IsNaN(right) {
|
||||
return left
|
||||
}
|
||||
return nan
|
||||
}
|
||||
|
||||
func binaryOpEq(left, right float64) bool {
|
||||
// Special handling for nan == nan.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150 .
|
||||
if math.IsNaN(left) {
|
||||
return math.IsNaN(right)
|
||||
}
|
||||
|
||||
return left == right
|
||||
}
|
||||
|
||||
func binaryOpNeq(left, right float64) bool {
|
||||
// Special handling for comparison with nan.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150 .
|
||||
if math.IsNaN(left) {
|
||||
return !math.IsNaN(right)
|
||||
}
|
||||
if math.IsNaN(right) {
|
||||
return true
|
||||
}
|
||||
|
||||
return left != right
|
||||
}
|
||||
|
||||
// binaryOpGt implements `>`. Comparisons involving NaN yield false,
// per Go's IEEE 754 semantics.
func binaryOpGt(left, right float64) bool {
	return left > right
}

// binaryOpLt implements `<`.
func binaryOpLt(left, right float64) bool {
	return left < right
}

// binaryOpGte implements `>=`.
func binaryOpGte(left, right float64) bool {
	return left >= right
}

// binaryOpLte implements `<=`.
func binaryOpLte(left, right float64) bool {
	return left <= right
}
|
||||
|
||||
// binaryOpAnd implements the `and` set operator: the result keeps the
// left-side series whose group key (per on/ignoring) also exists on the
// right side.
func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
	mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
	var rvs []*timeseries
	for k := range mRight {
		if tss := mLeft[k]; tss != nil {
			rvs = append(rvs, tss...)
		}
	}
	return rvs, nil
}
|
||||
|
||||
// binaryOpOr implements the `or` set operator: all left-side series, plus
// the right-side series whose group key is absent on the left.
func binaryOpOr(bfa *binaryOpFuncArg) ([]*timeseries, error) {
	mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
	var rvs []*timeseries
	for _, tss := range mLeft {
		rvs = append(rvs, tss...)
	}
	for k, tss := range mRight {
		if mLeft[k] == nil {
			rvs = append(rvs, tss...)
		}
	}
	return rvs, nil
}
|
||||
|
||||
// binaryOpUnless implements the `unless` set operator: left-side series
// whose group key has no match on the right side.
func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) {
	mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
	var rvs []*timeseries
	for k, tss := range mLeft {
		if mRight[k] == nil {
			rvs = append(rvs, tss...)
		}
	}
	return rvs, nil
}
|
||||
|
||||
// createTimeseriesMapByTagSet groups both operand sides by the label set
// selected by be's on/ignoring modifier. The group key is the sorted,
// marshaled tag set with the metric group stripped; series sharing a key
// land in the same bucket.
func createTimeseriesMapByTagSet(be *binaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
	groupTags := be.GroupModifier.Args
	groupOp := strings.ToLower(be.GroupModifier.Op)
	if len(groupOp) == 0 {
		// Absent group modifier behaves like `ignoring` with no args.
		groupOp = "ignoring"
	}
	getTagsMap := func(arg []*timeseries) map[string][]*timeseries {
		bb := bbPool.Get()
		m := make(map[string][]*timeseries, len(arg))
		// Reuse a single scratch MetricName for key construction.
		mn := storage.GetMetricName()
		for _, ts := range arg {
			mn.CopyFrom(&ts.MetricName)
			// The metric group never takes part in matching.
			mn.ResetMetricGroup()
			switch groupOp {
			case "on":
				mn.RemoveTagsOn(groupTags)
			case "ignoring":
				mn.RemoveTagsIgnoring(groupTags)
			default:
				logger.Panicf("BUG: unexpected binary op modifier %q", groupOp)
			}
			bb.B = marshalMetricTagsSorted(bb.B[:0], mn)
			m[string(bb.B)] = append(m[string(bb.B)], ts)
		}
		storage.PutMetricName(mn)
		bbPool.Put(bb)
		return m
	}
	mLeft := getTagsMap(left)
	mRight := getTagsMap(right)
	return mLeft, mRight
}
|
||||
|
||||
func isScalar(arg []*timeseries) bool {
|
||||
if len(arg) != 1 {
|
||||
return false
|
||||
}
|
||||
mn := &arg[0].MetricName
|
||||
if len(mn.MetricGroup) > 0 {
|
||||
return false
|
||||
}
|
||||
return len(mn.Tags) == 0
|
||||
}
|
||||
125
app/vmselect/promql/binary_op_test.go
Normal file
125
app/vmselect/promql/binary_op_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsBinaryOpSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOp(s) {
|
||||
t.Fatalf("expecting valid binaryOp: %q", s)
|
||||
}
|
||||
}
|
||||
f("and")
|
||||
f("AND")
|
||||
f("unless")
|
||||
f("unleSS")
|
||||
f("==")
|
||||
f("!=")
|
||||
f(">=")
|
||||
f("<=")
|
||||
f("or")
|
||||
f("Or")
|
||||
f("+")
|
||||
f("-")
|
||||
f("*")
|
||||
f("/")
|
||||
f("%")
|
||||
f("^")
|
||||
f(">")
|
||||
f("<")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOp(s) {
|
||||
t.Fatalf("unexpected valid binaryOp: %q", s)
|
||||
}
|
||||
}
|
||||
f("foobar")
|
||||
f("=~")
|
||||
f("!~")
|
||||
f("=")
|
||||
f("<==")
|
||||
f("234")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpGroupModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOpGroupModifier(s) {
|
||||
t.Fatalf("expecting valid binaryOpGroupModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("on")
|
||||
f("ON")
|
||||
f("oN")
|
||||
f("ignoring")
|
||||
f("IGnoring")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpGroupModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOpGroupModifier(s) {
|
||||
t.Fatalf("unexpected valid binaryOpGroupModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("off")
|
||||
f("by")
|
||||
f("without")
|
||||
f("123")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpJoinModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOpJoinModifier(s) {
|
||||
t.Fatalf("expecting valid binaryOpJoinModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("group_left")
|
||||
f("group_right")
|
||||
f("group_LEft")
|
||||
f("GRoup_RighT")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpJoinModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOpJoinModifier(s) {
|
||||
t.Fatalf("unexpected valid binaryOpJoinModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("on")
|
||||
f("by")
|
||||
f("without")
|
||||
f("123")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpBoolModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOpBoolModifier(s) {
|
||||
t.Fatalf("expecting valid binaryOpBoolModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("bool")
|
||||
f("bOOL")
|
||||
f("BOOL")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpBoolModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOpBoolModifier(s) {
|
||||
t.Fatalf("unexpected valid binaryOpBoolModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("on")
|
||||
f("by")
|
||||
f("without")
|
||||
f("123")
|
||||
}
|
||||
783
app/vmselect/promql/eval.go
Normal file
783
app/vmselect/promql/eval.go
Normal file
@@ -0,0 +1,783 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// Command-line flags bounding the size of a single query response.
var (
	maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 30e3, "The maximum points per a single timeseries returned from the search")
)
|
||||
|
||||
// minTimeseriesPointsForTimeRounding is the minimum number of points per
// timeseries for enabling time rounding in AdjustStartEnd.
// This improves cache hit ratio for frequently requested queries over
// big time ranges.
const minTimeseriesPointsForTimeRounding = 50
|
||||
|
||||
// ValidateMaxPointsPerTimeseries checks the maximum number of points that
|
||||
// may be returned per each time series.
|
||||
//
|
||||
// The number mustn't exceed -search.maxPointsPerTimeseries.
|
||||
func ValidateMaxPointsPerTimeseries(start, end, step int64) error {
|
||||
points := (end-start)/step + 1
|
||||
if uint64(points) > uint64(*maxPointsPerTimeseries) {
|
||||
return fmt.Errorf(`too many points for the given step=%d, start=%d and end=%d: %d; cannot exceed -search.maxPointsPerTimeseries=%d`,
|
||||
step, start, end, uint64(points), *maxPointsPerTimeseries)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AdjustStartEnd adjusts start and end values, so response caching may be enabled.
|
||||
//
|
||||
// See EvalConfig.mayCache for details.
|
||||
func AdjustStartEnd(start, end, step int64) (int64, int64) {
|
||||
points := (end-start)/step + 1
|
||||
if points < minTimeseriesPointsForTimeRounding {
|
||||
// Too small number of points for rounding.
|
||||
return start, end
|
||||
}
|
||||
|
||||
// Round start and end to values divisible by step in order
|
||||
// to enable response caching (see EvalConfig.mayCache).
|
||||
|
||||
// Round start to the nearest smaller value divisible by step.
|
||||
start -= start % step
|
||||
// Round end to the nearest bigger value divisible by step.
|
||||
adjust := end % step
|
||||
if adjust > 0 {
|
||||
end += step - adjust
|
||||
}
|
||||
return start, end
|
||||
}
|
||||
|
||||
// EvalConfig is the configuration required for query evaluation via Exec
type EvalConfig struct {
	// Start, End and Step define the evaluation time range in the same
	// units used by getTimestamps (Start <= End, Step > 0 - see validate).
	Start int64
	End   int64
	Step  int64

	// Deadline bounds the total evaluation time.
	Deadline netstorage.Deadline

	// MayCache permits response caching; the range must additionally be
	// Step-aligned (see mayCache and AdjustStartEnd).
	MayCache bool

	// timestamps is lazily built by getSharedTimestamps and shared across
	// time series; timestampsOnce guards the one-time initialization.
	timestamps     []int64
	timestampsOnce sync.Once
}
|
||||
|
||||
// newEvalConfig returns new EvalConfig copy from src.
|
||||
func newEvalConfig(src *EvalConfig) *EvalConfig {
|
||||
var ec EvalConfig
|
||||
ec.Start = src.Start
|
||||
ec.End = src.End
|
||||
ec.Step = src.Step
|
||||
ec.Deadline = src.Deadline
|
||||
ec.MayCache = src.MayCache
|
||||
|
||||
// do not copy src.timestamps - they must be generated again.
|
||||
return &ec
|
||||
}
|
||||
|
||||
// validate panics when the config holds an impossible time range or step.
// These are programmer errors, hence panic rather than a returned error.
func (ec *EvalConfig) validate() {
	if ec.Start > ec.End {
		logger.Panicf("BUG: start cannot exceed end; got %d vs %d", ec.Start, ec.End)
	}
	if ec.Step <= 0 {
		logger.Panicf("BUG: step must be greater than 0; got %d", ec.Step)
	}
}
|
||||
|
||||
func (ec *EvalConfig) mayCache() bool {
|
||||
if !ec.MayCache {
|
||||
return false
|
||||
}
|
||||
if ec.Start%ec.Step != 0 {
|
||||
return false
|
||||
}
|
||||
if ec.End%ec.Step != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getSharedTimestamps returns the lazily-initialized timestamps slice for
// [ec.Start ... ec.End] with ec.Step. The slice is shared between time
// series, so callers must not modify it.
func (ec *EvalConfig) getSharedTimestamps() []int64 {
	ec.timestampsOnce.Do(ec.timestampsInit)
	return ec.timestamps
}

// timestampsInit populates ec.timestamps; executed exactly once via timestampsOnce.
func (ec *EvalConfig) timestampsInit() {
	ec.timestamps = getTimestamps(ec.Start, ec.End, ec.Step)
}
|
||||
|
||||
// getTimestamps returns start, start+step, ... up to and including end
// (end is assumed aligned by the caller's validation). Invalid inputs are
// programmer errors and panic.
func getTimestamps(start, end, step int64) []int64 {
	// Sanity checks.
	if step <= 0 {
		logger.Panicf("BUG: Step must be bigger than 0; got %d", step)
	}
	if start > end {
		logger.Panicf("BUG: Start cannot exceed End; got %d vs %d", start, end)
	}
	if err := ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
		logger.Panicf("BUG: %s; this must be validated before the call to getTimestamps", err)
	}

	// Prepare timestamps.
	points := 1 + (end-start)/step
	timestamps := make([]int64, points)
	for i := range timestamps {
		timestamps[i] = start
		start += step
	}
	return timestamps
}
|
||||
|
||||
// evalExpr evaluates the parsed expression e under ec, dispatching on the
// concrete expression type: metric/rollup expressions go through
// evalRollupFunc, function calls through rollup or transform functions,
// aggregates through (optionally incremental) aggregation, binary ops
// through their operator handlers, and literals expand to constant series.
func evalExpr(ec *EvalConfig, e expr) ([]*timeseries, error) {
	if me, ok := e.(*metricExpr); ok {
		// A bare metric selector is evaluated as default_rollup(m).
		re := &rollupExpr{
			Expr: me,
		}
		rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re, nil)
		if err != nil {
			return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
		}
		return rv, nil
	}
	if re, ok := e.(*rollupExpr); ok {
		rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re, nil)
		if err != nil {
			return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
		}
		return rv, nil
	}
	if fe, ok := e.(*funcExpr); ok {
		nrf := getRollupFunc(fe.Name)
		if nrf == nil {
			// Not a rollup function - try transform functions.
			args, err := evalExprs(ec, fe.Args)
			if err != nil {
				return nil, err
			}
			tf := getTransformFunc(fe.Name)
			if tf == nil {
				return nil, fmt.Errorf(`unknown func %q`, fe.Name)
			}
			tfa := &transformFuncArg{
				ec:   ec,
				fe:   fe,
				args: args,
			}
			rv, err := tf(tfa)
			if err != nil {
				return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
			}
			return rv, nil
		}
		args, re, err := evalRollupFuncArgs(ec, fe)
		if err != nil {
			return nil, err
		}
		rf, err := nrf(args)
		if err != nil {
			return nil, err
		}
		rv, err := evalRollupFunc(ec, fe.Name, rf, re, nil)
		if err != nil {
			return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
		}
		return rv, nil
	}
	if ae, ok := e.(*aggrFuncExpr); ok {
		if callbacks := getIncrementalAggrFuncCallbacks(ae.Name); callbacks != nil {
			fe, nrf := tryGetArgRollupFuncWithMetricExpr(ae)
			if fe != nil {
				// There is an optimized path for calculating aggrFuncExpr over rollupFunc over metricExpr.
				// The optimized path saves RAM for aggregates over big number of time series.
				args, re, err := evalRollupFuncArgs(ec, fe)
				if err != nil {
					return nil, err
				}
				rf, err := nrf(args)
				if err != nil {
					return nil, err
				}
				iafc := newIncrementalAggrFuncContext(ae, callbacks)
				return evalRollupFunc(ec, fe.Name, rf, re, iafc)
			}
		}
		// Generic aggregate path: evaluate args fully, then aggregate.
		args, err := evalExprs(ec, ae.Args)
		if err != nil {
			return nil, err
		}
		af := getAggrFunc(ae.Name)
		if af == nil {
			return nil, fmt.Errorf(`unknown func %q`, ae.Name)
		}
		afa := &aggrFuncArg{
			ae:   ae,
			args: args,
			ec:   ec,
		}
		rv, err := af(afa)
		if err != nil {
			return nil, fmt.Errorf(`cannot evaluate %q: %s`, ae.AppendString(nil), err)
		}
		return rv, nil
	}
	if be, ok := e.(*binaryOpExpr); ok {
		// Both operands are evaluated eagerly, left first.
		left, err := evalExpr(ec, be.Left)
		if err != nil {
			return nil, err
		}
		right, err := evalExpr(ec, be.Right)
		if err != nil {
			return nil, err
		}
		bf := getBinaryOpFunc(be.Op)
		if bf == nil {
			return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
		}
		bfa := &binaryOpFuncArg{
			be:    be,
			left:  left,
			right: right,
		}
		rv, err := bf(bfa)
		if err != nil {
			return nil, fmt.Errorf(`cannot evaluate %q: %s`, be.AppendString(nil), err)
		}
		return rv, nil
	}
	if ne, ok := e.(*numberExpr); ok {
		rv := evalNumber(ec, ne.N)
		return rv, nil
	}
	if se, ok := e.(*stringExpr); ok {
		rv := evalString(ec, se.S)
		return rv, nil
	}
	return nil, fmt.Errorf("unexpected expression %q", e.AppendString(nil))
}
|
||||
|
||||
// tryGetArgRollupFuncWithMetricExpr checks whether ae's single arg can be
// evaluated through the optimized incremental-aggregation path.
// On success it returns a funcExpr wrapping the rollup plus its newRollupFunc;
// otherwise it returns (nil, nil) and the caller falls back to the generic path.
func tryGetArgRollupFuncWithMetricExpr(ae *aggrFuncExpr) (*funcExpr, newRollupFunc) {
	if len(ae.Args) != 1 {
		return nil, nil
	}
	e := ae.Args[0]
	// Make sure e contains one of the following:
	// - metricExpr
	// - metricExpr[d]
	// - rollupFunc(metricExpr)
	// - rollupFunc(metricExpr[d])

	if me, ok := e.(*metricExpr); ok {
		// e = metricExpr
		if me.IsEmpty() {
			return nil, nil
		}
		fe := &funcExpr{
			Name: "default_rollup",
			Args: []expr{me},
		}
		nrf := getRollupFunc(fe.Name)
		return fe, nrf
	}
	if re, ok := e.(*rollupExpr); ok {
		// Subqueries cannot use the optimized path.
		if me, ok := re.Expr.(*metricExpr); !ok || me.IsEmpty() || re.ForSubquery() {
			return nil, nil
		}
		// e = metricExpr[d]
		fe := &funcExpr{
			Name: "default_rollup",
			Args: []expr{re},
		}
		nrf := getRollupFunc(fe.Name)
		return fe, nrf
	}
	fe, ok := e.(*funcExpr)
	if !ok {
		return nil, nil
	}
	nrf := getRollupFunc(fe.Name)
	if nrf == nil {
		// Not a rollup func - cannot optimize.
		return nil, nil
	}
	// Inspect the arg at the rollup position of the func.
	rollupArgIdx := getRollupArgIdx(fe.Name)
	arg := fe.Args[rollupArgIdx]
	if me, ok := arg.(*metricExpr); ok {
		if me.IsEmpty() {
			return nil, nil
		}
		// e = rollupFunc(metricExpr)
		return &funcExpr{
			Name: fe.Name,
			Args: []expr{me},
		}, nrf
	}
	if re, ok := arg.(*rollupExpr); ok {
		if me, ok := re.Expr.(*metricExpr); !ok || me.IsEmpty() || re.ForSubquery() {
			return nil, nil
		}
		// e = rollupFunc(metricExpr[d])
		return fe, nrf
	}
	return nil, nil
}
|
||||
|
||||
func evalExprs(ec *EvalConfig, es []expr) ([][]*timeseries, error) {
|
||||
var rvs [][]*timeseries
|
||||
for _, e := range es {
|
||||
rv, err := evalExpr(ec, e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rvs = append(rvs, rv)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
// evalRollupFuncArgs evaluates all the args for the rollup function fe.
// The arg at the rollup position is NOT evaluated - it is normalized into
// a *rollupExpr (returned as re) and passed through to the rollup machinery;
// every other arg is evaluated to time series.
func evalRollupFuncArgs(ec *EvalConfig, fe *funcExpr) ([]interface{}, *rollupExpr, error) {
	var re *rollupExpr
	rollupArgIdx := getRollupArgIdx(fe.Name)
	args := make([]interface{}, len(fe.Args))
	for i, arg := range fe.Args {
		if i == rollupArgIdx {
			// Keep the rollup arg unevaluated - the rollup func consumes it directly.
			re = getRollupExprArg(arg)
			args[i] = re
			continue
		}
		ts, err := evalExpr(ec, arg)
		if err != nil {
			return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %s", i+1, fe.AppendString(nil), err)
		}
		args[i] = ts
	}
	return args, re, nil
}
|
||||
|
||||
func getRollupExprArg(arg expr) *rollupExpr {
|
||||
re, ok := arg.(*rollupExpr)
|
||||
if !ok {
|
||||
// Wrap non-rollup arg into rollupExpr.
|
||||
return &rollupExpr{
|
||||
Expr: arg,
|
||||
}
|
||||
}
|
||||
if !re.ForSubquery() {
|
||||
// Return standard rollup if it doesn't contain subquery.
|
||||
return re
|
||||
}
|
||||
me, ok := re.Expr.(*metricExpr)
|
||||
if !ok {
|
||||
// arg contains subquery.
|
||||
return re
|
||||
}
|
||||
// Convert me[w:step] -> default_rollup(me)[w:step]
|
||||
reNew := *re
|
||||
reNew.Expr = &funcExpr{
|
||||
Name: "default_rollup",
|
||||
Args: []expr{
|
||||
&rollupExpr{Expr: me},
|
||||
},
|
||||
}
|
||||
return &reNew
|
||||
}
|
||||
|
||||
// evalRollupFunc evaluates the rollup function rf (named name) over re.
// iafc, when non-nil, enables incremental aggregation on top of the rollup;
// it is valid only when re wraps a plain metricExpr (not a subquery).
// An `offset` modifier shifts the evaluation window back and the resulting
// timestamps forward, so callers see the requested time range.
func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
	ecNew := ec
	var offset int64
	if len(re.Offset) > 0 {
		var err error
		offset, err = DurationValue(re.Offset, ec.Step)
		if err != nil {
			return nil, err
		}
		// Evaluate over the shifted-back range; timestamps are shifted forward below.
		ecNew = newEvalConfig(ec)
		ecNew.Start -= offset
		ecNew.End -= offset
		ecNew.Start, ecNew.End = AdjustStartEnd(ecNew.Start, ecNew.End, ecNew.Step)
	}
	var rvs []*timeseries
	var err error
	if me, ok := re.Expr.(*metricExpr); ok {
		rvs, err = evalRollupFuncWithMetricExpr(ecNew, name, rf, me, iafc, re.Window)
	} else {
		if iafc != nil {
			logger.Panicf("BUG: iafc must be nil for rollup %q over subquery %q", name, re.AppendString(nil))
		}
		rvs, err = evalRollupFuncWithSubquery(ecNew, name, rf, re)
	}
	if err != nil {
		return nil, err
	}
	if offset != 0 && len(rvs) > 0 {
		// Make a copy of timestamps, since they may be used in other values.
		srcTimestamps := rvs[0].Timestamps
		dstTimestamps := append([]int64{}, srcTimestamps...)
		for i := range dstTimestamps {
			dstTimestamps[i] += offset
		}
		// All series from one rollup share a single timestamps slice.
		for _, ts := range rvs {
			ts.Timestamps = dstTimestamps
		}
	}
	return rvs, nil
}
|
||||
|
||||
// evalRollupFuncWithSubquery evaluates the rollup function rf over the
// subquery re.Expr: the inner expression is evaluated on a finer (or explicit)
// step over an extended range, then the rollup is applied per series in parallel.
func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr) ([]*timeseries, error) {
	// Do not use rollupResultCacheV here, since it works only with metricExpr.
	var step int64
	if len(re.Step) > 0 {
		var err error
		step, err = DurationValue(re.Step, ec.Step)
		if err != nil {
			return nil, err
		}
	} else {
		// No explicit subquery step - inherit the outer step.
		step = ec.Step
	}
	var window int64
	if len(re.Window) > 0 {
		var err error
		window, err = DurationValue(re.Window, ec.Step)
		if err != nil {
			return nil, err
		}
	}

	// Extend the range back so the first outer point has enough inner points.
	ecSQ := newEvalConfig(ec)
	ecSQ.Start -= window + maxSilenceInterval + step
	ecSQ.Step = step
	if err := ValidateMaxPointsPerTimeseries(ecSQ.Start, ecSQ.End, ecSQ.Step); err != nil {
		return nil, err
	}
	ecSQ.Start, ecSQ.End = AdjustStartEnd(ecSQ.Start, ecSQ.End, ecSQ.Step)
	tssSQ, err := evalExpr(ecSQ, re.Expr)
	if err != nil {
		return nil, err
	}

	// All output series share a single timestamps slice on the outer grid.
	sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step)
	preFunc, rcs := getRollupConfigs(name, rf, ec.Start, ec.End, ec.Step, window, sharedTimestamps)
	tss := make([]*timeseries, 0, len(tssSQ)*len(rcs))
	var tssLock sync.Mutex
	removeMetricGroup := !rollupFuncsKeepMetricGroup[name]
	doParallel(tssSQ, func(tsSQ *timeseries, values []float64, timestamps []int64) ([]float64, []int64) {
		// Reuse the worker's scratch slices for the NaN-stripped copy.
		values, timestamps = removeNanValues(values[:0], timestamps[:0], tsSQ.Values, tsSQ.Timestamps)
		preFunc(values, timestamps)
		for _, rc := range rcs {
			var ts timeseries
			doRollupForTimeseries(rc, &ts, &tsSQ.MetricName, values, timestamps, sharedTimestamps, removeMetricGroup)
			// tss is shared across workers - guard appends with the lock.
			tssLock.Lock()
			tss = append(tss, &ts)
			tssLock.Unlock()
		}
		return values, timestamps
	})
	return tss, nil
}
|
||||
|
||||
// doParallel runs f over every time series in tss using up to GOMAXPROCS
// worker goroutines. f receives per-worker scratch slices and must return
// them (possibly reallocated) so the next call on the same worker reuses them.
// doParallel returns only after all work has been processed.
func doParallel(tss []*timeseries, f func(ts *timeseries, values []float64, timestamps []int64) ([]float64, []int64)) {
	concurrency := runtime.GOMAXPROCS(-1)
	if concurrency > len(tss) {
		// No point in more workers than work items.
		concurrency = len(tss)
	}
	workCh := make(chan *timeseries, concurrency)
	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			defer wg.Done()
			// Scratch slices are owned by this worker and reused across calls.
			var tmpValues []float64
			var tmpTimestamps []int64
			for ts := range workCh {
				tmpValues, tmpTimestamps = f(ts, tmpValues, tmpTimestamps)
			}
		}()
	}
	for _, ts := range tss {
		workCh <- ts
	}
	// Closing the channel lets the workers drain and exit.
	close(workCh)
	wg.Wait()
}
|
||||
|
||||
func removeNanValues(dstValues []float64, dstTimestamps []int64, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||
hasNan := false
|
||||
for _, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
hasNan = true
|
||||
}
|
||||
}
|
||||
if !hasNan {
|
||||
// Fast path - no NaNs.
|
||||
dstValues = append(dstValues, values...)
|
||||
dstTimestamps = append(dstTimestamps, timestamps...)
|
||||
return dstValues, dstTimestamps
|
||||
}
|
||||
|
||||
// Slow path - remove NaNs.
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues = append(dstValues, v)
|
||||
dstTimestamps = append(dstTimestamps, timestamps[i])
|
||||
}
|
||||
return dstValues, dstTimestamps
|
||||
}
|
||||
|
||||
// Counters exposing rollup result cache effectiveness:
// full hit (nothing to fetch), partial hit (tail must be fetched), miss.
var (
	rollupResultCacheFullHits    = metrics.NewCounter(`vm_rollup_result_cache_full_hits_total`)
	rollupResultCachePartialHits = metrics.NewCounter(`vm_rollup_result_cache_partial_hits_total`)
	rollupResultCacheMiss        = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
)
|
||||
|
||||
// evalRollupFuncWithMetricExpr evaluates the rollup function rf over series
// matching me. Results are served from rollupResultCacheV where possible;
// only the uncached tail [start..ec.End] is fetched from storage. Estimated
// memory for the rollup is reserved via the shared rollup memory limiter.
func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me *metricExpr, iafc *incrementalAggrFuncContext, windowStr string) ([]*timeseries, error) {
	if me.IsEmpty() {
		// Empty selector matches nothing - return a NaN series.
		return evalNumber(ec, nan), nil
	}
	var window int64
	if len(windowStr) > 0 {
		var err error
		window, err = DurationValue(windowStr, ec.Step)
		if err != nil {
			return nil, err
		}
	}

	// Search for partial results in cache.
	tssCached, start := rollupResultCacheV.Get(name, ec, me, iafc, window)
	if start > ec.End {
		// The result is fully cached.
		rollupResultCacheFullHits.Inc()
		return tssCached, nil
	}
	if start > ec.Start {
		rollupResultCachePartialHits.Inc()
	} else {
		rollupResultCacheMiss.Inc()
	}

	// Fetch the remaining part of the result.
	// The range is extended by window+maxSilenceInterval back and one step
	// forward so the rollup has enough raw samples at the edges.
	sq := &storage.SearchQuery{
		MinTimestamp: start - window - maxSilenceInterval,
		MaxTimestamp: ec.End + ec.Step,
		TagFilterss:  [][]storage.TagFilter{me.TagFilters},
	}
	rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
	if err != nil {
		return nil, err
	}
	rssLen := rss.Len()
	if rssLen == 0 {
		rss.Cancel()
		// Add missing points until ec.End.
		// Do not cache the result, since missing points
		// may be backfilled in the future.
		tss := mergeTimeseries(tssCached, nil, start, ec)
		return tss, nil
	}
	sharedTimestamps := getTimestamps(start, ec.End, ec.Step)
	preFunc, rcs := getRollupConfigs(name, rf, start, ec.End, ec.Step, window, sharedTimestamps)

	// Verify timeseries fit available memory after the rollup.
	// Take into account points from tssCached.
	pointsPerTimeseries := 1 + (ec.End-ec.Start)/ec.Step
	rollupPoints := mulNoOverflow(pointsPerTimeseries, int64(rssLen*len(rcs)))
	rollupMemorySize := mulNoOverflow(rollupPoints, 16)
	rml := getRollupMemoryLimiter()
	if !rml.Get(uint64(rollupMemorySize)) {
		rss.Cancel()
		return nil, fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series; "+
			"possible solutions are: reducing the number of matching time series; switching to node with more RAM; "+
			"increasing -memory.allowedPercent; increasing `step` query arg (%gs)",
			rollupPoints, rssLen*len(rcs), pointsPerTimeseries, float64(ec.Step)/1e3)
	}
	// Release the memory reservation once evaluation completes.
	defer rml.Put(uint64(rollupMemorySize))

	// Evaluate rollup
	removeMetricGroup := !rollupFuncsKeepMetricGroup[name]
	var tss []*timeseries
	if iafc != nil {
		tss, err = evalRollupWithIncrementalAggregate(iafc, rss, rcs, preFunc, sharedTimestamps, removeMetricGroup)
	} else {
		tss, err = evalRollupNoIncrementalAggregate(rss, rcs, preFunc, sharedTimestamps, removeMetricGroup)
	}
	if err != nil {
		return nil, err
	}
	// Merge with the cached prefix and store the merged result back in the cache.
	tss = mergeTimeseries(tssCached, tss, start, ec)
	rollupResultCacheV.Put(name, ec, me, iafc, window, tss)

	return tss, nil
}
|
||||
|
||||
// rollupMemoryLimiter caps the total memory reserved for in-flight rollups;
// it is lazily initialized exactly once via rollupMemoryLimiterOnce.
var (
	rollupMemoryLimiter     memoryLimiter
	rollupMemoryLimiterOnce sync.Once
)
|
||||
|
||||
// getRollupMemoryLimiter returns the shared rollup memory limiter,
// initializing its budget to a quarter of the allowed process memory
// on first use.
func getRollupMemoryLimiter() *memoryLimiter {
	rollupMemoryLimiterOnce.Do(func() {
		rollupMemoryLimiter.MaxSize = uint64(memory.Allowed()) / 4
	})
	return &rollupMemoryLimiter
}
|
||||
|
||||
// evalRollupWithIncrementalAggregate applies rcs to every fetched series and
// folds each rollup result straight into iafc, so full per-series results are
// never materialized. Returns the finalized aggregate series.
func evalRollupWithIncrementalAggregate(iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64, removeMetricGroup bool) ([]*timeseries, error) {
	err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
		preFunc(rs.Values, rs.Timestamps)
		// A single pooled timeseries is reused for every rollup config.
		ts := getTimeseries()
		defer putTimeseries(ts)
		for _, rc := range rcs {
			ts.Reset()
			doRollupForTimeseries(rc, ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps, removeMetricGroup)
			iafc.updateTimeseries(ts, workerID)

			// ts.Timestamps points to sharedTimestamps. Zero it, so it can be re-used.
			ts.Timestamps = nil
			ts.denyReuse = false
		}
	})
	if err != nil {
		return nil, err
	}
	tss := iafc.finalizeTimeseries()
	return tss, nil
}
|
||||
|
||||
// evalRollupNoIncrementalAggregate applies rcs to every fetched series in
// parallel and collects one output series per (input series, rollup config).
func evalRollupNoIncrementalAggregate(rss *netstorage.Results, rcs []*rollupConfig,
	preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64, removeMetricGroup bool) ([]*timeseries, error) {
	tss := make([]*timeseries, 0, rss.Len()*len(rcs))
	var tssLock sync.Mutex
	err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
		preFunc(rs.Values, rs.Timestamps)
		for _, rc := range rcs {
			var ts timeseries
			doRollupForTimeseries(rc, &ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps, removeMetricGroup)
			// tss is shared across workers - guard appends with the lock.
			tssLock.Lock()
			tss = append(tss, &ts)
			tssLock.Unlock()
		}
	})
	if err != nil {
		return nil, err
	}
	return tss, nil
}
|
||||
|
||||
// doRollupForTimeseries computes rc over (valuesSrc, timestampsSrc) into
// tsDst, copying the source metric name, tagging the result with the rollup
// name (if any) and optionally dropping the metric group. tsDst ends up
// sharing sharedTimestamps, so it is marked denyReuse.
func doRollupForTimeseries(rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName, valuesSrc []float64, timestampsSrc []int64,
	sharedTimestamps []int64, removeMetricGroup bool) {
	tsDst.MetricName.CopyFrom(mnSrc)
	if len(rc.TagValue) > 0 {
		// Distinguish multi-output rollups such as rollup_candlestick.
		tsDst.MetricName.AddTag("rollup", rc.TagValue)
	}
	if removeMetricGroup {
		tsDst.MetricName.ResetMetricGroup()
	}
	tsDst.Values = rc.Do(tsDst.Values[:0], valuesSrc, timestampsSrc)
	tsDst.Timestamps = sharedTimestamps
	// Timestamps are shared across series - the slice must not be recycled.
	tsDst.denyReuse = true
}
|
||||
|
||||
// getRollupConfigs builds the rollup configs for the given rollup func name
// plus a preFunc applied to raw (values, timestamps) before any rollup runs.
// Counter-reset removal and the rollup_rate/rollup_increase pre-transforms
// are chained in preFunc; multi-output rollups expand into several configs
// distinguished by TagValue.
func getRollupConfigs(name string, rf rollupFunc, start, end, step, window int64, sharedTimestamps []int64) (func(values []float64, timestamps []int64), []*rollupConfig) {
	preFunc := func(values []float64, timestamps []int64) {}
	if rollupFuncsRemoveCounterResets[name] {
		preFunc = func(values []float64, timestamps []int64) {
			removeCounterResets(values)
		}
	}
	newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
		return &rollupConfig{
			TagValue:        tagValue,
			Func:            rf,
			Start:           start,
			End:             end,
			Step:            step,
			Window:          window,
			MayAdjustWindow: rollupFuncsMayAdjustWindow[name],
			Timestamps:      sharedTimestamps,
		}
	}
	appendRollupConfigs := func(dst []*rollupConfig) []*rollupConfig {
		dst = append(dst, newRollupConfig(rollupMin, "min"))
		dst = append(dst, newRollupConfig(rollupMax, "max"))
		dst = append(dst, newRollupConfig(rollupAvg, "avg"))
		return dst
	}
	var rcs []*rollupConfig
	switch name {
	case "rollup":
		rcs = appendRollupConfigs(rcs)
	case "rollup_rate", "rollup_deriv":
		// Chain the derivative transform after any existing pre-transform.
		preFuncPrev := preFunc
		preFunc = func(values []float64, timestamps []int64) {
			preFuncPrev(values, timestamps)
			derivValues(values, timestamps)
		}
		rcs = appendRollupConfigs(rcs)
	case "rollup_increase", "rollup_delta":
		preFuncPrev := preFunc
		preFunc = func(values []float64, timestamps []int64) {
			preFuncPrev(values, timestamps)
			deltaValues(values)
		}
		rcs = appendRollupConfigs(rcs)
	case "rollup_candlestick":
		rcs = append(rcs, newRollupConfig(rollupFirst, "open"))
		rcs = append(rcs, newRollupConfig(rollupLast, "close"))
		rcs = append(rcs, newRollupConfig(rollupMin, "low"))
		rcs = append(rcs, newRollupConfig(rollupMax, "high"))
	default:
		// Single-output rollup - run rf itself.
		rcs = append(rcs, newRollupConfig(rf, ""))
	}
	return preFunc, rcs
}
|
||||
|
||||
// bbPool holds reusable byte buffers for marshaling metric names.
var bbPool bytesutil.ByteBufferPool
|
||||
|
||||
func evalNumber(ec *EvalConfig, n float64) []*timeseries {
|
||||
var ts timeseries
|
||||
ts.denyReuse = true
|
||||
timestamps := ec.getSharedTimestamps()
|
||||
values := make([]float64, len(timestamps))
|
||||
for i := range timestamps {
|
||||
values[i] = n
|
||||
}
|
||||
ts.Values = values
|
||||
ts.Timestamps = timestamps
|
||||
return []*timeseries{&ts}
|
||||
}
|
||||
|
||||
func evalString(ec *EvalConfig, s string) []*timeseries {
|
||||
rv := evalNumber(ec, nan)
|
||||
rv[0].MetricName.MetricGroup = append(rv[0].MetricName.MetricGroup[:0], s...)
|
||||
return rv
|
||||
}
|
||||
|
||||
func evalTime(ec *EvalConfig) []*timeseries {
|
||||
rv := evalNumber(ec, nan)
|
||||
timestamps := rv[0].Timestamps
|
||||
values := rv[0].Values
|
||||
for i, ts := range timestamps {
|
||||
values[i] = float64(ts) * 1e-3
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
func mulNoOverflow(a, b int64) int64 {
|
||||
if math.MaxInt64/b < a {
|
||||
// Overflow
|
||||
return math.MaxInt64
|
||||
}
|
||||
return a * b
|
||||
}
|
||||
248
app/vmselect/promql/exec.go
Normal file
248
app/vmselect/promql/exec.go
Normal file
@@ -0,0 +1,248 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// logSlowQueryDuration is the threshold above which executed queries are logged.
var logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging")

// slowQueries counts queries that exceeded -search.logSlowQueryDuration.
var slowQueries = metrics.NewCounter(`vm_slow_queries_total`)
|
||||
|
||||
// ExpandWithExprs expands WITH expressions inside q and returns the resulting
|
||||
// PromQL without WITH expressions.
|
||||
func ExpandWithExprs(q string) (string, error) {
|
||||
e, err := parsePromQLWithCache(q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf := e.AppendString(nil)
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Exec executes q for the given ec.
// A temporary extra point is appended past ec.End so rate/deriv/increase/delta
// can compute their last value; it is stripped before returning. When
// isFirstPointOnly is set, only the first point of each series is kept.
func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result, error) {
	if *logSlowQueryDuration > 0 {
		startTime := time.Now()
		defer func() {
			d := time.Since(startTime)
			if d >= *logSlowQueryDuration {
				logger.Infof("slow query according to -search.logSlowQueryDuration=%s: duration=%s, start=%d, end=%d, step=%d, query=%q",
					*logSlowQueryDuration, d, ec.Start/1000, ec.End/1000, ec.Step/1000, q)
				slowQueries.Inc()
			}
		}()
	}

	ec.validate()

	e, err := parsePromQLWithCache(q)
	if err != nil {
		return nil, err
	}

	// Add an additional point to the end. This point is used
	// in calculating the last value for rate, deriv, increase
	// and delta funcs.
	ec.End += ec.Step

	rv, err := evalExpr(ec, e)
	if err != nil {
		return nil, err
	}

	// Remove the additional point at the end.
	for _, ts := range rv {
		ts.Values = ts.Values[:len(ts.Values)-1]

		// ts.Timestamps may be shared between timeseries, so truncate it with len(ts.Values) instead of len(ts.Timestamps)-1
		ts.Timestamps = ts.Timestamps[:len(ts.Values)]
	}
	// Restore the caller-visible End.
	ec.End -= ec.Step

	if isFirstPointOnly {
		// Remove all the points except the first one from every time series.
		for _, ts := range rv {
			ts.Values = ts.Values[:1]
			ts.Timestamps = ts.Timestamps[:1]
		}
	}

	maySort := maySortResults(e, rv)
	result, err := timeseriesToResult(rv, maySort)
	if err != nil {
		return nil, err
	}
	return result, err
}
|
||||
|
||||
func maySortResults(e expr, tss []*timeseries) bool {
|
||||
if len(tss) > 100 {
|
||||
// There is no sense in sorting a lot of results
|
||||
return false
|
||||
}
|
||||
fe, ok := e.(*funcExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
switch fe.Name {
|
||||
case "sort", "sort_desc":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func timeseriesToResult(tss []*timeseries, maySort bool) ([]netstorage.Result, error) {
|
||||
tss = removeNaNs(tss)
|
||||
result := make([]netstorage.Result, len(tss))
|
||||
m := make(map[string]struct{}, len(tss))
|
||||
bb := bbPool.Get()
|
||||
for i, ts := range tss {
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
if _, ok := m[string(bb.B)]; ok {
|
||||
return nil, fmt.Errorf(`duplicate output timeseries: %s%s`, ts.MetricName.MetricGroup, stringMetricName(&ts.MetricName))
|
||||
}
|
||||
m[string(bb.B)] = struct{}{}
|
||||
|
||||
rs := &result[i]
|
||||
rs.MetricNameMarshaled = append(rs.MetricNameMarshaled[:0], bb.B...)
|
||||
rs.MetricName.CopyFrom(&ts.MetricName)
|
||||
rs.Values = append(rs.Values[:0], ts.Values...)
|
||||
rs.Timestamps = append(rs.Timestamps[:0], ts.Timestamps...)
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
|
||||
if maySort {
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return string(result[i].MetricNameMarshaled) < string(result[j].MetricNameMarshaled)
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func removeNaNs(tss []*timeseries) []*timeseries {
|
||||
rvs := tss[:0]
|
||||
for _, ts := range tss {
|
||||
allNans := true
|
||||
for _, v := range ts.Values {
|
||||
if !math.IsNaN(v) {
|
||||
allNans = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if allNans {
|
||||
// Skip timeseries with all NaNs.
|
||||
continue
|
||||
}
|
||||
rvs = append(rvs, ts)
|
||||
}
|
||||
for i := len(rvs); i < len(tss); i++ {
|
||||
// Zero unused time series, so GC could reclaim them.
|
||||
tss[i] = nil
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
|
||||
func parsePromQLWithCache(q string) (expr, error) {
|
||||
pcv := parseCacheV.Get(q)
|
||||
if pcv == nil {
|
||||
e, err := parsePromQL(q)
|
||||
pcv = &parseCacheValue{
|
||||
e: e,
|
||||
err: err,
|
||||
}
|
||||
parseCacheV.Put(q, pcv)
|
||||
}
|
||||
if pcv.err != nil {
|
||||
return nil, pcv.err
|
||||
}
|
||||
return pcv.e, nil
|
||||
}
|
||||
|
||||
// parseCacheV caches parsed PromQL expressions keyed by the query string.
// Cache effectiveness is exposed via vm_cache_* metrics registered here.
var parseCacheV = func() *parseCache {
	pc := &parseCache{
		m: make(map[string]*parseCacheValue),
	}
	metrics.NewGauge(`vm_cache_requests_total{type="promql/parse"}`, func() float64 {
		return float64(pc.Requests())
	})
	metrics.NewGauge(`vm_cache_misses_total{type="promql/parse"}`, func() float64 {
		return float64(pc.Misses())
	})
	metrics.NewGauge(`vm_cache_entries{type="promql/parse"}`, func() float64 {
		return float64(pc.Len())
	})
	return pc
}()
|
||||
|
||||
// parseCacheMaxLen limits the number of cached parsed queries;
// exceeding it triggers eviction in (*parseCache).Put.
const parseCacheMaxLen = 10e3

// parseCacheValue holds the outcome of parsing a single query -
// either the parsed expression or the parse error.
type parseCacheValue struct {
	e   expr
	err error
}

// parseCache is a concurrency-safe cache of parsed PromQL queries
// with request/miss counters for metrics.
type parseCache struct {
	m  map[string]*parseCacheValue
	mu sync.RWMutex

	// requests and misses are updated atomically.
	requests uint64
	misses   uint64
}
|
||||
|
||||
// Requests returns the total number of cache lookups.
func (pc *parseCache) Requests() uint64 {
	return atomic.LoadUint64(&pc.requests)
}
|
||||
|
||||
// Misses returns the number of cache lookups that found no entry.
func (pc *parseCache) Misses() uint64 {
	return atomic.LoadUint64(&pc.misses)
}
|
||||
|
||||
// Len returns the current number of cached entries.
func (pc *parseCache) Len() uint64 {
	pc.mu.RLock()
	n := len(pc.m)
	pc.mu.RUnlock()
	return uint64(n)
}
|
||||
|
||||
// Get returns the cached parse result for q, or nil on a miss.
// Request/miss counters are updated as a side effect.
func (pc *parseCache) Get(q string) *parseCacheValue {
	atomic.AddUint64(&pc.requests, 1)

	pc.mu.RLock()
	pcv := pc.m[q]
	pc.mu.RUnlock()

	if pcv == nil {
		atomic.AddUint64(&pc.misses, 1)
	}
	return pcv
}
|
||||
|
||||
// Put stores pcv for q. When the cache exceeds parseCacheMaxLen, roughly 10%
// of entries are evicted first (map iteration order makes the eviction
// effectively random).
func (pc *parseCache) Put(q string, pcv *parseCacheValue) {
	pc.mu.Lock()
	overflow := len(pc.m) - parseCacheMaxLen
	if overflow > 0 {
		// Remove 10% of items from the cache.
		overflow = int(float64(len(pc.m)) * 0.1)
		for k := range pc.m {
			delete(pc.m, k)
			overflow--
			if overflow <= 0 {
				break
			}
		}
	}
	pc.m[q] = pcv
	pc.mu.Unlock()
}
|
||||
4286
app/vmselect/promql/exec_test.go
Normal file
4286
app/vmselect/promql/exec_test.go
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user