Compare commits

...

180 Commits

Author SHA1 Message Date
Aliaksandr Valialkin
095feeee41 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-12 16:23:16 +03:00
Aliaksandr Valialkin
9dd493363c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-07 15:34:32 +03:00
Aliaksandr Valialkin
d964b04efd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-07 15:33:09 +03:00
Aliaksandr Valialkin
ec01a188fd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-07 15:25:52 +03:00
Aliaksandr Valialkin
40112df441 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-05 19:24:37 +03:00
Aliaksandr Valialkin
9e74fe3145 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-04 13:11:51 +03:00
Aliaksandr Valialkin
2c22e168f5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-01 12:38:34 +03:00
Aliaksandr Valialkin
5747b78f6f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-28 12:31:27 +03:00
Aliaksandr Valialkin
d9166e899e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-28 12:17:18 +03:00
Aliaksandr Valialkin
38699170c9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-24 19:22:42 +02:00
Aliaksandr Valialkin
5b4f7bbc0c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-18 19:54:43 +02:00
Aliaksandr Valialkin
db85f4a1cb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-18 17:59:12 +02:00
Aliaksandr Valialkin
780b2a139a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-17 20:11:56 +02:00
Aliaksandr Valialkin
9d2805320b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-16 13:40:03 +02:00
Aliaksandr Valialkin
e636cab272 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-16 12:59:06 +02:00
Aliaksandr Valialkin
90a1502335 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-03 19:31:14 +02:00
Aliaksandr Valialkin
f8a05d4ada Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-22 21:12:18 +02:00
Aliaksandr Valialkin
ae64c2db61 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-22 21:10:53 +02:00
Aliaksandr Valialkin
37a4347a37 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-14 18:32:41 +02:00
Aliaksandr Valialkin
20cdb879e7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-14 17:56:09 +02:00
Aliaksandr Valialkin
7917486d78 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-14 17:52:50 +02:00
Aliaksandr Valialkin
107607bf47 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-07 18:34:47 +02:00
Aliaksandr Valialkin
78b028064f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-02 23:58:11 +02:00
Aliaksandr Valialkin
db286fdd73 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-25 15:33:35 +02:00
Aliaksandr Valialkin
e8ff658b2e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-23 13:24:13 +02:00
Aliaksandr Valialkin
e1668e7441 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-18 23:29:16 +02:00
Aliaksandr Valialkin
0d0469cc80 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-18 22:44:04 +02:00
Aliaksandr Valialkin
8d6d4e8033 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-18 22:38:35 +02:00
Aliaksandr Valialkin
b894f25f21 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-17 15:51:07 +02:00
Aliaksandr Valialkin
b6bae2f05f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-07 12:56:47 +02:00
Aliaksandr Valialkin
9e15858baf vendor: make vendor-update 2022-01-07 12:37:58 +02:00
Aliaksandr Valialkin
3f5b1084eb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-07 12:36:24 +02:00
Aliaksandr Valialkin
c2e9be96a7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-20 19:11:26 +02:00
Aliaksandr Valialkin
a72dadb8f4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-20 13:53:03 +02:00
Aliaksandr Valialkin
08219faf8d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-17 20:21:56 +02:00
Aliaksandr Valialkin
288620ca40 lib/storage: initial support for multi-level downsampling
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36

Based on https://github.com/valyala/VictoriaMetrics/pull/203
2021-12-15 16:42:44 +02:00
Aliaksandr Valialkin
2847c84a7b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-15 16:41:56 +02:00
Aliaksandr Valialkin
6a64823581 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-14 19:57:21 +02:00
Aliaksandr Valialkin
b94e986710 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-02 15:03:46 +02:00
Aliaksandr Valialkin
a29565d1bd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-11-08 15:48:09 +02:00
Aliaksandr Valialkin
39332cfc5c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-11-08 13:56:29 +02:00
Aliaksandr Valialkin
d07d2811d4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-10-22 19:41:51 +03:00
Aliaksandr Valialkin
206e451cae Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-10-08 17:56:22 +03:00
Aliaksandr Valialkin
307034fc2f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-10-08 16:10:22 +03:00
Aliaksandr Valialkin
c149132b14 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-23 22:55:56 +03:00
Aliaksandr Valialkin
6dd7a90c7c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-22 01:49:36 +03:00
Aliaksandr Valialkin
dc5507754f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-20 15:22:36 +03:00
Aliaksandr Valialkin
c68663deee Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-20 14:55:21 +03:00
Aliaksandr Valialkin
114a40e63f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-15 18:26:16 +03:00
Aliaksandr Valialkin
163f2a46fd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-15 18:18:59 +03:00
Aliaksandr Valialkin
375c46cb1f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-01 17:13:09 +03:00
Aliaksandr Valialkin
bb2d1128b8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-01 16:37:40 +03:00
Aliaksandr Valialkin
479b9da827 vendor: make vendor-update 2021-09-01 12:53:52 +03:00
Aliaksandr Valialkin
62857fc30e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-01 12:49:13 +03:00
Aliaksandr Valialkin
253315b1fe Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-19 10:35:13 +03:00
Aliaksandr Valialkin
efe6e30008 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-18 22:07:32 +03:00
Aliaksandr Valialkin
bc2512abdd docs/CHANGELOG.md: update urls to Prometheus 2.29 release
Previously these urls were pointing to rc0 release
2021-08-16 14:52:23 +03:00
Aliaksandr Valialkin
a07f8017ba docs/CHANGELOG.md: typo fix: satureated -> saturated 2021-08-16 14:51:04 +03:00
Aliaksandr Valialkin
cf70b766eb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-15 23:52:55 +03:00
Aliaksandr Valialkin
b00732074c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-15 23:51:06 +03:00
Aliaksandr Valialkin
8df8c414de Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-07-15 14:05:30 +03:00
Aliaksandr Valialkin
ce844238a4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-25 13:30:19 +03:00
Aliaksandr Valialkin
452720c5dc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-25 13:24:42 +03:00
Aliaksandr Valialkin
bbca1740c1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-18 10:55:54 +03:00
Aliaksandr Valialkin
e1c85395eb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-11 13:03:20 +03:00
Aliaksandr Valialkin
b348114dab Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-11 13:00:09 +03:00
Aliaksandr Valialkin
bb54e34dc5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-09 20:43:33 +03:00
Aliaksandr Valialkin
e0d0b9447e vendor: make vendor-update 2021-06-09 20:43:19 +03:00
Aliaksandr Valialkin
fae6e4fc85 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-09 19:10:11 +03:00
Aliaksandr Valialkin
e49bf9bc73 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-05-24 16:03:14 +03:00
Aliaksandr Valialkin
a142390014 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-05-01 10:05:48 +03:00
Aliaksandr Valialkin
bceb8082f6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-30 10:11:24 +03:00
Aliaksandr Valialkin
276969500e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-24 01:42:54 +03:00
Aliaksandr Valialkin
030e3a63f2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-21 11:00:07 +03:00
Aliaksandr Valialkin
1c5e0564af lib/promscrape: create a single swosFunc per scrape_config 2021-04-08 09:31:55 +03:00
Aliaksandr Valialkin
b8300338f0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-08 01:03:03 +03:00
Aliaksandr Valialkin
660c3c7251 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-08 00:54:19 +03:00
Aliaksandr Valialkin
80ba07dc95 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-30 15:41:16 +03:00
Aliaksandr Valialkin
11ded82e60 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-29 19:15:52 +03:00
Aliaksandr Valialkin
558b390ebc vendor: make vendor-update 2021-03-25 20:57:46 +02:00
Aliaksandr Valialkin
343f444e87 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-25 19:15:08 +02:00
Aliaksandr Valialkin
16884c20c0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-17 02:05:46 +02:00
Aliaksandr Valialkin
7d44cdd8ce Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-15 22:44:24 +02:00
Aliaksandr Valialkin
5d2394ad9b vendor: make vendor-update 2021-03-09 11:51:21 +02:00
Aliaksandr Valialkin
8582fba4b1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-09 11:46:39 +02:00
Aliaksandr Valialkin
b045f506f2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-03 11:51:32 +02:00
Aliaksandr Valialkin
6197440bb9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-02 21:46:03 +02:00
Aliaksandr Valialkin
966e9c227a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-27 01:48:33 +02:00
Aliaksandr Valialkin
edb2ab7d8e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-27 00:25:01 +02:00
Aliaksandr Valialkin
0ad887fd4d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-26 23:00:40 +02:00
Aliaksandr Valialkin
d5dde7f6b1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-18 19:14:41 +02:00
Aliaksandr Valialkin
a54ca9bd8f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-18 15:47:41 +02:00
Aliaksandr Valialkin
3588687f84 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-18 14:53:19 +02:00
Aliaksandr Valialkin
687eb4ab00 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-16 22:29:45 +02:00
Aliaksandr Valialkin
b04fece006 lib/promscrape/discovery/kubernetes: add __meta_kubernetes_endpoints_label_* and __meta_kubernetes_endpoints_annotation_* labels to role: endpoints
This syncs kubernetes SD with Prometheus 2.25
See 617c56f55a
2021-02-15 02:50:46 +02:00
Aliaksandr Valialkin
d0c364d93d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-15 01:45:39 +02:00
Aliaksandr Valialkin
63c88d8ea2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-03 23:52:43 +02:00
Aliaksandr Valialkin
dc6636e2b2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-03 12:30:44 +02:00
Aliaksandr Valialkin
c13f1d99e0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-01 20:19:50 +02:00
Aliaksandr Valialkin
079888f719 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-27 01:12:45 +02:00
Aliaksandr Valialkin
b68264b4f5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-22 12:10:57 +02:00
Aliaksandr Valialkin
aed049f660 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-13 13:55:45 +02:00
Aliaksandr Valialkin
7fcc0a1ef0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-13 12:55:45 +02:00
Aliaksandr Valialkin
48951073c4 lib/backup: increase backup chunk size from 128MB to 1GB
This should reduce costs for object storage API calls by 8x. See https://cloud.google.com/storage/pricing#operations-pricing
2021-01-13 12:15:38 +02:00
Aliaksandr Valialkin
d0dfcb72b4 vendor: make vendor-update 2021-01-13 12:09:54 +02:00
Nikolay
4cf7a55808 fixes tmpBlockFile remove on prometheus search error (#109) 2021-01-13 11:53:11 +02:00
Aliaksandr Valialkin
d72fc60108 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-13 11:41:15 +02:00
Aliaksandr Valialkin
0b92e18047 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-11 21:16:32 +02:00
Aliaksandr Valialkin
aa8ea16160 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-11 13:12:44 +02:00
Nikolay
f5e70f0ab9 adds multiple match args support for prometheusSearch (#106)
it merges results according to Prometheus ChainedSeriesMerge.
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1001
2021-01-11 13:06:54 +02:00
Aliaksandr Valialkin
9e10d5083e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-11 13:04:58 +02:00
Aliaksandr Valialkin
30c2d75815 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-08 00:26:00 +02:00
Aliaksandr Valialkin
0e80f3f45a go.mod: add missing dependency on github.com/oklog/ulid
This is a follow up for a5583ddaff
2021-01-07 23:44:50 +02:00
Aliaksandr Valialkin
6e3cbae0b3 app/vmstorage/promdb: code prettifying after a5583ddaff 2021-01-07 23:30:19 +02:00
Nikolay
a5583ddaff adds period compaction to prometheus data (#105)
* adds period compaction to prometheus data
and filtering for datapoints outside retention period

* lint fix

* adds custom retention func

* fixes compaction,
fixes search query adjustment
2021-01-07 22:55:35 +02:00
Aliaksandr Valialkin
5db9e82e54 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-29 11:45:16 +02:00
Aliaksandr Valialkin
80676cf1fd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-28 12:09:22 +02:00
Aliaksandr Valialkin
ba4c49dde6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-27 14:10:59 +02:00
Aliaksandr Valialkin
35e5e8ff1e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-27 13:32:54 +02:00
Aliaksandr Valialkin
4cdbc4642d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-25 17:41:24 +02:00
Aliaksandr Valialkin
23c0fb1efc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-24 17:21:19 +02:00
Aliaksandr Valialkin
441d3e4b3f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-24 12:50:11 +02:00
Aliaksandr Valialkin
a0ea5777f0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-24 11:49:03 +02:00
Aliaksandr Valialkin
fb006fc6c0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-21 08:44:04 +02:00
Aliaksandr Valialkin
8593358965 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-19 16:43:03 +02:00
Aliaksandr Valialkin
d0311b7fe5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-15 22:44:35 +02:00
Aliaksandr Valialkin
4edd38a906 Merge remote-tracking branch 'public/pmm-6401-read-prometheus-data-files' into pmm-6401-read-prometheus-data-files 2020-12-15 14:35:38 +02:00
Aliaksandr Valialkin
56054f4eb7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-15 14:33:39 +02:00
Nikolay
0ff0787797 adds custom apiPathLinks for victoria-metrics / api help (#968)
* adds custom apiPathLinks for victoria-metrics / api help

* adds custom paths for PMM
2020-12-15 14:20:51 +02:00
Aliaksandr Valialkin
f9c706e186 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-15 12:43:44 +02:00
Aliaksandr Valialkin
d74d22460c .github/workflows/main.yml: set GO111MODULE=off when installing auxiliary tools via go install 2020-12-15 01:01:22 +02:00
Aliaksandr Valialkin
d1193c87a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-14 20:21:19 +02:00
Aliaksandr Valialkin
4f311e5827 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-14 14:20:52 +02:00
Aliaksandr Valialkin
142e6b6ecf Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-14 11:50:16 +02:00
Aliaksandr Valialkin
1b4ef473b9 .github/ISSUE_TEMPLATE/bug_report.md: add a link to upgrade procedure 2020-12-11 22:08:59 +02:00
Aliaksandr Valialkin
8beb1f9519 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-11 21:29:22 +02:00
Aliaksandr Valialkin
501fd8efd9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-11 12:10:21 +02:00
Aliaksandr Valialkin
45f2ba2572 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-05 14:57:17 +02:00
Aliaksandr Valialkin
cb2342029e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-05 13:24:28 +02:00
Aliaksandr Valialkin
ff0088ceec Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-26 02:07:16 +02:00
Aliaksandr Valialkin
afe6d2e736 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-25 23:03:44 +02:00
Aliaksandr Valialkin
e1a6262302 lib/fs: replace fs.OpenReaderAt with fs.MustOpenReaderAt
All the callers for fs.OpenReaderAt expect that the file will be opened.
So it is better to log fatal error inside fs.MustOpenReaderAt instead of leaving this to the caller.
2020-11-23 09:55:41 +02:00
Aliaksandr Valialkin
f000a10cd0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-23 09:41:15 +02:00
Aliaksandr Valialkin
4aee6ef4c0 vendor: make vendor-update 2020-11-19 19:07:10 +02:00
Aliaksandr Valialkin
f4dfacd493 vendor: update prometheus dependency 2020-11-19 19:00:46 +02:00
Aliaksandr Valialkin
fb2d4e56ce Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-19 18:56:02 +02:00
Aliaksandr Valialkin
36b748dfc7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-16 21:06:21 +02:00
Aliaksandr Valialkin
c625dc5b96 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-10 00:28:50 +02:00
Aliaksandr Valialkin
e32620afa1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-10 00:21:55 +02:00
Aliaksandr Valialkin
3f298272a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-08 13:41:41 +02:00
Aliaksandr Valialkin
7a473798b7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-07 01:05:52 +02:00
Aliaksandr Valialkin
00ce906d97 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-05 17:14:27 +02:00
Aliaksandr Valialkin
41c9565aa1 update github.com/prometheus/prometheus from v1.8.2-0.20200911110723-e83ef207b6c2 to v1.8.2-0.20201029103703-63be30dceed9 2020-11-05 02:55:46 +02:00
Aliaksandr Valialkin
56303aee5b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-05 02:51:08 +02:00
Aliaksandr Valialkin
8d8e2ccf5f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-04 19:10:41 +02:00
Aliaksandr Valialkin
8772cb617c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-03 14:15:45 +02:00
Aliaksandr Valialkin
65fbfc5cbc vendor: add missing files 2020-11-02 22:01:42 +02:00
Aliaksandr Valialkin
1b389674c0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-02 21:58:57 +02:00
Aliaksandr Valialkin
98529e16ee Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-02 02:12:19 +02:00
Aliaksandr Valialkin
1b112405a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-02 00:47:34 +02:00
Aliaksandr Valialkin
8bbc83e85e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-17 12:13:56 +03:00
Aliaksandr Valialkin
8349140744 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-16 15:19:09 +03:00
Aliaksandr Valialkin
4dc13754d8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-13 18:37:48 +03:00
Aliaksandr Valialkin
83b7eb8ca6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-09 12:36:16 +03:00
Aliaksandr Valialkin
e5ef3288dd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-08 19:27:30 +03:00
Aliaksandr Valialkin
e7f2907138 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-07 21:40:45 +03:00
Aliaksandr Valialkin
757c5cfbe0 Merge branch 'pmm-6401-read-prometheus-data-files' of github.com:valyala/VictoriaMetrics into pmm-6401-read-prometheus-data-files 2020-10-06 19:07:38 +03:00
Aliaksandr Valialkin
317ddb84b9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-06 16:18:01 +03:00
Aliaksandr Valialkin
2b1d0510fa Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-06 16:12:25 +03:00
Aliaksandr Valialkin
40d2f6fee4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-05 18:14:51 +03:00
Aliaksandr Valialkin
9fbb84d5c2 app/vmselect/promql: fill gaps on graphs for range_* and running_* functions
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/806
2020-10-02 13:58:19 +03:00
Aliaksandr Valialkin
bdaa9a91f3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-01 19:31:26 +03:00
Aliaksandr Valialkin
1a91da35be Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-30 09:58:18 +03:00
Aliaksandr Valialkin
f85be226bb vendor: make vendor-update 2020-09-30 08:58:52 +03:00
Aliaksandr Valialkin
8df5a3c5f6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-30 08:55:57 +03:00
Aliaksandr Valialkin
9d3eb3f4b8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-24 20:49:16 +03:00
Aliaksandr Valialkin
2cd48959d4 lib/storage: correctly use maxBlockSize in various checks
Previously `maxBlockSize` was multiplied by 8 in certain checks. This is unnecessary.
2020-09-24 20:17:51 +03:00
Aliaksandr Valialkin
8fc8874db4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-23 23:25:34 +03:00
Aliaksandr Valialkin
ff1cbb524e app/vmselect: obtain labels and label values from Prometheus storage inside netstorage package 2020-09-23 22:43:01 +03:00
Aliaksandr Valialkin
a70df4bd83 PMM-6401 Initial implementation for reading data from Prometheus files 2020-09-23 14:26:39 +03:00
11 changed files with 616 additions and 33 deletions

View File

@@ -28,8 +28,16 @@ var (
"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+
"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag")
downsamplingPeriods = flagutil.NewArray("downsampling.period", "Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs "+
"to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details")
)
// custom api help links [["/api","doc"]] without http.pathPrefix.
var customAPIPathList = [][]string{
{"/graph/explore", "explore metrics grafana page"},
{"/graph/d/prometheus-advanced/advanced-data-exploration", "PMM grafana dashboard"},
}
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
@@ -51,7 +59,10 @@ func main() {
logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr)
startTime := time.Now()
storage.SetDedupInterval(*minScrapeInterval)
err := storage.SetDownsamplingPeriods(*downsamplingPeriods, *minScrapeInterval)
if err != nil {
logger.Fatalf("cannot parse -downsampling.period: %s", err)
}
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
vmselect.Init()
vminsert.Init()
@@ -101,6 +112,10 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
{"api/v1/status/top_queries", "top queries"},
{"api/v1/status/active_queries", "active queries"},
})
for _, p := range customAPIPathList {
p, doc := p[0], p[1]
fmt.Fprintf(w, "<a href=%q>%s</a> - %s<br/>", p, p, doc)
}
return true
}
if vminsert.RequestHandler(w, r) {
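
The customAPIPathList loop above renders one anchor tag per [path, description] pair on the root help page. A minimal standalone sketch of the same rendering (hypothetical handler and port, not part of this diff), assuming the same pairs:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// Hypothetical standalone copy of the [path, description] pairs added in this diff.
var customAPIPathList = [][]string{
	{"/graph/explore", "explore metrics grafana page"},
	{"/graph/d/prometheus-advanced/advanced-data-exploration", "PMM grafana dashboard"},
}

func helpHandler(w http.ResponseWriter, r *http.Request) {
	for _, p := range customAPIPathList {
		path, doc := p[0], p[1]
		// Same formatting as the loop in requestHandler above.
		fmt.Fprintf(w, "<a href=%q>%s</a> - %s<br/>", path, path, doc)
	}
}

func main() {
	http.HandleFunc("/", helpHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}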

View File

@@ -13,6 +13,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
@@ -266,6 +267,12 @@ var gomaxprocs = cgroup.AvailableCPUs()
type packedTimeseries struct {
metricName string
brs []blockRef
pd *promData
}
type promData struct {
values []float64
timestamps []int64
}
type unpackWorkItem struct {
@@ -467,11 +474,46 @@ func (pts *packedTimeseries) Unpack(dst *Result, tbf *tmpBlocksFile, tr storage.
if firstErr != nil {
return firstErr
}
dedupInterval := storage.GetDedupInterval()
if pts.pd != nil {
// Add data from Prometheus to dst.
// It usually has smaller timestamps than the data from sbs, so put it first.
//
// There is no need in check for fetchData, since this is already checked when initializing pts.pd.
dst.Values = append(dst.Values, pts.pd.values...)
dst.Timestamps = append(dst.Timestamps, pts.pd.timestamps...)
}
dedupInterval := storage.GetDedupInterval(tr.MinTimestamp)
mergeSortBlocks(dst, sbs, dedupInterval)
if pts.pd != nil {
if !sort.IsSorted(dst) {
sort.Sort(dst)
}
pts.pd = nil
}
return nil
}
// sort.Interface implementation for Result
// Len implements sort.Interface
func (r *Result) Len() int {
return len(r.Timestamps)
}
// Less implements sort.Interface
func (r *Result) Less(i, j int) bool {
timestamps := r.Timestamps
return timestamps[i] < timestamps[j]
}
// Swap implements sort.Interface
func (r *Result) Swap(i, j int) {
timestamps := r.Timestamps
values := r.Values
timestamps[i], timestamps[j] = timestamps[j], timestamps[i]
values[i], values[j] = values[j], values[i]
}
func getSortBlock() *sortBlock {
v := sbPool.Get()
if v == nil {
@@ -621,6 +663,14 @@ func GetLabelsOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) (
labels[i] = "__name__"
}
}
// Merge labels obtained from Prometheus storage.
promLabels, err := promdb.GetLabelNamesOnTimeRange(tr, deadline)
if err != nil {
return nil, fmt.Errorf("cannot obtain labels from Prometheus storage: %w", err)
}
labels = mergeStrings(labels, promLabels)
// Sort labels like Prometheus does
sort.Strings(labels)
return labels, nil
@@ -686,11 +736,40 @@ func GetLabels(deadline searchutils.Deadline) ([]string, error) {
labels[i] = "__name__"
}
}
// Merge labels obtained from Prometheus storage.
promLabels, err := promdb.GetLabelNames(deadline)
if err != nil {
return nil, fmt.Errorf("cannot obtain labels from Prometheus storage: %w", err)
}
labels = mergeStrings(labels, promLabels)
// Sort labels like Prometheus does
sort.Strings(labels)
return labels, nil
}
func mergeStrings(a, b []string) []string {
if len(a) == 0 {
return b
}
if len(b) == 0 {
return a
}
m := make(map[string]struct{}, len(a)+len(b))
for _, s := range a {
m[s] = struct{}{}
}
for _, s := range b {
m[s] = struct{}{}
}
result := make([]string, 0, len(m))
for s := range m {
result = append(result, s)
}
return result
}
// GetLabelValuesOnTimeRange returns label values for the given labelName on the given tr
// until the given deadline.
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
@@ -705,6 +784,14 @@ func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline
if err != nil {
return nil, fmt.Errorf("error during label values search on time range for labelName=%q: %w", labelName, err)
}
// Merge label values obtained from Prometheus storage.
promLabelValues, err := promdb.GetLabelValuesOnTimeRange(labelName, tr, deadline)
if err != nil {
return nil, fmt.Errorf("cannot obtain label values on time range for %q from Prometheus storage: %w", labelName, err)
}
labelValues = mergeStrings(labelValues, promLabelValues)
// Sort labelValues like Prometheus does
sort.Strings(labelValues)
return labelValues, nil
@@ -748,6 +835,14 @@ func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string,
if err != nil {
return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
}
// Merge label values obtained from Prometheus storage.
promLabelValues, err := promdb.GetLabelValues(labelName, deadline)
if err != nil {
return nil, fmt.Errorf("cannot obtain label values for %q from Prometheus storage: %w", labelName, err)
}
labelValues = mergeStrings(labelValues, promLabelValues)
// Sort labelValues like Prometheus does
sort.Strings(labelValues)
return labelValues, nil
@@ -1086,6 +1181,26 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
return nil, fmt.Errorf("cannot finalize temporary file: %w", err)
}
// Fetch data from promdb.
pm := make(map[string]*promData)
err = promdb.VisitSeries(sq, fetchData, deadline, func(metricName []byte, values []float64, timestamps []int64) {
pd := pm[string(metricName)]
if pd == nil {
if _, ok := m[string(metricName)]; !ok {
orderedMetricNames = append(orderedMetricNames, string(metricName))
}
pd = &promData{}
pm[string(metricName)] = pd
}
pd.values = append(pd.values, values...)
pd.timestamps = append(pd.timestamps, timestamps...)
})
if err != nil {
putTmpBlocksFile(tbf)
putStorageSearch(sr)
return nil, fmt.Errorf("error when searching in Prometheus data: %w", err)
}
var rss Results
rss.tr = tr
rss.fetchData = fetchData
@@ -1095,6 +1210,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
pts[i] = packedTimeseries{
metricName: metricName,
brs: m[metricName],
pd: pm[metricName],
}
}
rss.packedTimeseries = pts
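
Because samples read from the Prometheus TSDB are appended to dst before mergeSortBlocks runs, the combined Result may end up unsorted, which is why Result now implements sort.Interface. A self-contained sketch (simplified result type and made-up sample data, not the project's code) showing how Swap keeps each value paired with its timestamp:

package main

import (
	"fmt"
	"sort"
)

// result mirrors the Timestamps/Values layout of netstorage.Result.
type result struct {
	Timestamps []int64
	Values     []float64
}

func (r *result) Len() int           { return len(r.Timestamps) }
func (r *result) Less(i, j int) bool { return r.Timestamps[i] < r.Timestamps[j] }
func (r *result) Swap(i, j int) {
	// Keep each value paired with its timestamp while sorting.
	r.Timestamps[i], r.Timestamps[j] = r.Timestamps[j], r.Timestamps[i]
	r.Values[i], r.Values[j] = r.Values[j], r.Values[i]
}

func main() {
	// Prometheus samples (older) appended next to VictoriaMetrics samples (newer).
	r := &result{
		Timestamps: []int64{1000, 4000, 2000, 3000},
		Values:     []float64{1, 4, 2, 3},
	}
	if !sort.IsSorted(r) {
		sort.Sort(r)
	}
	fmt.Println(r.Timestamps, r.Values) // [1000 2000 3000 4000] [1 2 3 4]
}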

View File

@@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
@@ -112,6 +113,8 @@ func InitWithoutMetrics(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
sizeBytes := tm.SmallSizeBytes + tm.BigSizeBytes
logger.Infof("successfully opened storage %q in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
*DataPath, time.Since(startTime).Seconds(), partsCount, blocksCount, rowsCount, sizeBytes)
promdb.Init(retentionPeriod.Msecs)
}
// Storage is a storage.
@@ -255,6 +258,7 @@ func Stop() {
logger.Infof("gracefully closing the storage at %s", *DataPath)
startTime := time.Now()
WG.WaitAndBlock()
promdb.MustClose()
Storage.MustClose()
logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())

View File

@@ -0,0 +1,276 @@
package promdb
import (
"context"
"flag"
"fmt"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/go-kit/kit/log"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/pkg/labels"
promstorage "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
)
var prometheusDataPath = flag.String("prometheusDataPath", "", "Optional path to readonly historical Prometheus data")
var prometheusRetentionMsecs int64
// Init must be called after flag.Parse and before using the package.
//
// See also MustClose.
func Init(retentionMsecs int64) {
if promDB != nil {
logger.Fatalf("BUG: it looks like MustOpenPromDB is called multiple times without MustClosePromDB call")
}
prometheusRetentionMsecs = retentionMsecs
if *prometheusDataPath == "" {
return
}
l := log.LoggerFunc(func(a ...interface{}) error {
logger.Infof("%v", a)
return nil
})
opts := tsdb.DefaultOptions()
opts.RetentionDuration = retentionMsecs
// Set max block duration to 10% of retention period or 31 days
// according to https://prometheus.io/docs/prometheus/latest/storage/#compaction
maxBlockDuration := int64((31 * 24 * time.Hour) / time.Millisecond)
if maxBlockDuration > retentionMsecs/10 {
maxBlockDuration = retentionMsecs / 10
}
if maxBlockDuration < opts.MinBlockDuration {
maxBlockDuration = opts.MinBlockDuration
}
opts.MaxBlockDuration = maxBlockDuration
// Custom delete function is needed, because Prometheus by default doesn't delete
// blocks outside the retention if no new blocks are created with samples with the current timestamps.
// See https://github.com/prometheus/prometheus/blob/997bb7134fcfd7279f250e183e78681e48a56aff/tsdb/db.go#L1116
opts.BlocksToDelete = func(blocks []*tsdb.Block) map[ulid.ULID]struct{} {
m := make(map[ulid.ULID]struct{})
minRetentionTime := time.Now().Unix()*1000 - retentionMsecs
for _, block := range blocks {
meta := block.Meta()
// delete block marked for deletion by compaction code.
if meta.Compaction.Deletable {
m[meta.ULID] = struct{}{}
continue
}
if block.MaxTime() < minRetentionTime {
m[meta.ULID] = struct{}{}
}
}
return m
}
pdb, err := tsdb.Open(*prometheusDataPath, l, nil, opts)
if err != nil {
logger.Panicf("FATAL: cannot open Prometheus data at -prometheusDataPath=%q: %s", *prometheusDataPath, err)
}
promDB = pdb
logger.Infof("successfully opened historical Prometheus data at -prometheusDataPath=%q with retentionMsecs=%d", *prometheusDataPath, retentionMsecs)
}
// MustClose must be called on graceful shutdown.
//
// Package functionality cannot be used after this call.
func MustClose() {
if *prometheusDataPath == "" {
return
}
if promDB == nil {
logger.Panicf("BUG: it looks like MustClosePromDB is called without MustOpenPromDB call")
}
if err := promDB.Close(); err != nil {
logger.Panicf("FATAL: cannot close promDB: %s", err)
}
promDB = nil
logger.Infof("successfully closed historical Prometheus data at -prometheusDataPath=%q", *prometheusDataPath)
}
var promDB *tsdb.DB
// GetLabelNamesOnTimeRange returns label names.
func GetLabelNamesOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
d := time.Unix(int64(deadline.Deadline()), 0)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
q, err := promDB.Querier(ctx, tr.MinTimestamp, tr.MaxTimestamp)
if err != nil {
return nil, err
}
defer mustCloseQuerier(q)
names, _, err := q.LabelNames()
// Make full copy of names, since they cannot be used after q is closed.
names = copyStringsWithMemory(names)
return names, err
}
// GetLabelNames returns label names.
func GetLabelNames(deadline searchutils.Deadline) ([]string, error) {
tr := storage.TimeRange{
MinTimestamp: 0,
MaxTimestamp: time.Now().UnixNano() / 1e6,
}
return GetLabelNamesOnTimeRange(tr, deadline)
}
// GetLabelValuesOnTimeRange returns values for the given labelName on the given tr.
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
d := time.Unix(int64(deadline.Deadline()), 0)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
q, err := promDB.Querier(ctx, tr.MinTimestamp, tr.MaxTimestamp)
if err != nil {
return nil, err
}
defer mustCloseQuerier(q)
values, _, err := q.LabelValues(labelName)
// Make full copy of values, since they cannot be used after q is closed.
values = copyStringsWithMemory(values)
return values, err
}
// GetLabelValues returns values for the given labelName.
func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string, error) {
tr := storage.TimeRange{
MinTimestamp: 0,
MaxTimestamp: time.Now().UnixNano() / 1e6,
}
return GetLabelValuesOnTimeRange(labelName, tr, deadline)
}
func copyStringsWithMemory(a []string) []string {
result := make([]string, len(a))
for i, s := range a {
result[i] = string(append([]byte{}, s...))
}
return result
}
// SeriesVisitor is called by VisitSeries for each matching time series.
//
// The caller shouldn't hold references to metricName, values and timestamps after returning.
type SeriesVisitor func(metricName []byte, values []float64, timestamps []int64)
// VisitSeries calls f for each series found in the pdb.
//
// If fetchData is false, then empty values and timestamps are passed to f.
func VisitSeries(sq *storage.SearchQuery, fetchData bool, deadline searchutils.Deadline, f SeriesVisitor) error {
if *prometheusDataPath == "" {
return nil
}
d := time.Unix(int64(deadline.Deadline()), 0)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
minTime, maxTime := getSearchTimeRange(sq)
q, err := promDB.Querier(ctx, minTime, maxTime)
if err != nil {
return err
}
defer mustCloseQuerier(q)
var seriesSet []promstorage.SeriesSet
for _, tf := range sq.TagFilterss {
ms, err := convertTagFiltersToMatchers(tf)
if err != nil {
return fmt.Errorf("cannot convert tag filters to matchers: %w", err)
}
s := q.Select(false, nil, ms...)
seriesSet = append(seriesSet, s)
}
ss := promstorage.NewMergeSeriesSet(seriesSet, promstorage.ChainedSeriesMerge)
var (
mn storage.MetricName
metricName []byte
values []float64
timestamps []int64
)
for ss.Next() {
s := ss.At()
convertPromLabelsToMetricName(&mn, s.Labels())
metricName = mn.SortAndMarshal(metricName[:0])
values = values[:0]
timestamps = timestamps[:0]
if fetchData {
it := s.Iterator()
for it.Next() {
ts, v := it.At()
values = append(values, v)
timestamps = append(timestamps, ts)
}
if err := it.Err(); err != nil {
return fmt.Errorf("error when iterating Prometheus series: %w", err)
}
}
f(metricName, values, timestamps)
}
return ss.Err()
}
func getSearchTimeRange(sq *storage.SearchQuery) (int64, int64) {
maxTime := sq.MaxTimestamp
minTime := sq.MinTimestamp
minRetentionTime := time.Now().Unix()*1000 - prometheusRetentionMsecs
if maxTime < minRetentionTime {
maxTime = minRetentionTime
}
if minTime < minRetentionTime {
minTime = minRetentionTime
}
return minTime, maxTime
}
func convertPromLabelsToMetricName(dst *storage.MetricName, labels []labels.Label) {
dst.Reset()
for _, label := range labels {
if label.Name == "__name__" {
dst.MetricGroup = append(dst.MetricGroup[:0], label.Value...)
} else {
dst.AddTag(label.Name, label.Value)
}
}
}
func convertTagFiltersToMatchers(tfs []storage.TagFilter) ([]*labels.Matcher, error) {
ms := make([]*labels.Matcher, 0, len(tfs))
for _, tf := range tfs {
var mt labels.MatchType
if tf.IsNegative {
if tf.IsRegexp {
mt = labels.MatchNotRegexp
} else {
mt = labels.MatchNotEqual
}
} else {
if tf.IsRegexp {
mt = labels.MatchRegexp
} else {
mt = labels.MatchEqual
}
}
key := string(tf.Key)
if key == "" {
key = "__name__"
}
value := string(tf.Value)
m, err := labels.NewMatcher(mt, key, value)
if err != nil {
return nil, err
}
ms = append(ms, m)
}
return ms, nil
}
func mustCloseQuerier(q promstorage.Querier) {
if err := q.Close(); err != nil {
logger.Panicf("FATAL: cannot close querier: %s", err)
}
}
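
getSearchTimeRange clamps the requested time range so the Prometheus querier is never asked for data older than the configured retention. A self-contained sketch of the same clamping (hypothetical 90-day query against a 30-day retention, not the project's code):

package main

import (
	"fmt"
	"time"
)

// clampToRetention mirrors the logic of getSearchTimeRange above:
// both ends of the requested range are raised to the minimum retention time.
func clampToRetention(minTime, maxTime, retentionMsecs int64) (int64, int64) {
	minRetentionTime := time.Now().UnixMilli() - retentionMsecs
	if maxTime < minRetentionTime {
		maxTime = minRetentionTime
	}
	if minTime < minRetentionTime {
		minTime = minRetentionTime
	}
	return minTime, maxTime
}

func main() {
	retentionMsecs := (30 * 24 * time.Hour).Milliseconds()
	day := (24 * time.Hour).Milliseconds()
	now := time.Now().UnixMilli()
	// A query asking for the last 90 days is clamped to roughly the last 30 days.
	minT, maxT := clampToRetention(now-90*day, now, retentionMsecs)
	fmt.Printf("%.0f days\n", float64(maxT-minT)/float64(day)) // 30 days
}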

go.mod (14 changed lines)
View File

@@ -14,9 +14,16 @@ require (
github.com/aws/aws-sdk-go v1.43.37
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/go-kit/kit v0.12.0
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.6
github.com/klauspost/compress v1.15.1
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/oklog/ulid v1.3.1
github.com/prometheus/common v0.33.0 // indirect
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/urfave/cli/v2 v2.4.0
github.com/valyala/fastjson v1.6.3
@@ -37,9 +44,6 @@ require (
cloud.google.com/go/iam v0.3.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-kit/log v0.2.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -48,15 +52,11 @@ require (
github.com/googleapis/gax-go/v2 v2.3.0 // indirect
github.com/googleapis/go-type-adapters v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.33.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect

View File

@@ -161,7 +161,8 @@ func (b *Block) deduplicateSamplesDuringMerge() {
// Nothing to dedup.
return
}
dedupInterval := GetDedupInterval()
maxTimestamp := srcTimestamps[len(srcTimestamps)-1]
dedupInterval := GetDedupInterval(maxTimestamp)
if dedupInterval <= 0 {
// Deduplication is disabled.
return

View File

@@ -1,27 +1,7 @@
package storage
import (
"time"
)
// SetDedupInterval sets the deduplication interval, which is applied to raw samples during data ingestion and querying.
//
// De-duplication is disabled if dedupInterval is 0.
//
// This function must be called before initializing the storage.
func SetDedupInterval(dedupInterval time.Duration) {
globalDedupInterval = dedupInterval.Milliseconds()
}
// GetDedupInterval returns the dedup interval in milliseconds, which has been set via SetDedupInterval.
func GetDedupInterval() int64 {
return globalDedupInterval
}
var globalDedupInterval int64
func isDedupEnabled() bool {
return globalDedupInterval > 0
return len(downsamplingPeriods) > 0
}
// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in millseconds.

lib/storage/downsampling.go (new file, 123 added lines)
View File

@@ -0,0 +1,123 @@
package storage
import (
"fmt"
"sort"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/metricsql"
)
// SetDownsamplingPeriods configures downsampling.
//
// The function must be called before opening or creating any storage.
func SetDownsamplingPeriods(periods []string, dedupInterval time.Duration) error {
dsps, err := parseDownsamplingPeriods(periods)
if err != nil {
return err
}
dedupIntervalMs := dedupInterval.Milliseconds()
if dedupIntervalMs > 0 {
if len(dsps) > 0 && dsps[len(dsps)-1].Offset == 0 {
return fmt.Errorf("-dedup.minScrapeInterval=%s cannot be used if -downsampling.period=%s contains zero offset", dedupInterval, periods)
}
// Deduplication is a special case of downsampling with zero offset.
dsps = append(dsps, DownsamplingPeriod{
Offset: 0,
Interval: dedupIntervalMs,
})
}
downsamplingPeriods = dsps
return nil
}
// DownsamplingPeriod describes downsampling period
type DownsamplingPeriod struct {
// Offset in milliseconds from the current time when the downsampling with the given interval must be applied
Offset int64
// Interval for downsampling - only a single sample is left per each interval
Interval int64
}
// String implements interface
func (dsp DownsamplingPeriod) String() string {
offset := time.Duration(dsp.Offset) * time.Millisecond
interval := time.Duration(dsp.Interval) * time.Millisecond
return fmt.Sprintf("%s:%s", offset, interval)
}
func (dsp *DownsamplingPeriod) parse(s string) error {
idx := strings.Index(s, ":")
if idx <= 0 {
return fmt.Errorf("incorrect format for downsampling period: %s, want `offset:interval` format", s)
}
offsetStr, intervalStr := s[:idx], s[idx+1:]
interval, err := metricsql.DurationValue(intervalStr, 0)
if err != nil {
return fmt.Errorf("incorrect interval: %s format for downsampling interval: %s err: %w", intervalStr, s, err)
}
offset, err := metricsql.DurationValue(offsetStr, 0)
if err != nil {
return fmt.Errorf("incorrect duration: %s format for downsampling offset: %s err: %w", offsetStr, s, err)
}
dsp.Interval = interval
dsp.Offset = offset
// sanity check
if offset > 0 && interval > offset {
return fmt.Errorf("downsampling interval=%d cannot exceed offset=%d", dsp.Interval, dsp.Offset)
}
return nil
}
var downsamplingPeriods []DownsamplingPeriod
// GetDedupInterval returns dedup interval, which must be applied to samples with the given timestamp.
func GetDedupInterval(timestamp int64) int64 {
dsp := getDownsamplingPeriod(timestamp)
return dsp.Interval
}
// getDownsamplingPeriod returns downsampling period, which must be used for the given timestamp
func getDownsamplingPeriod(timestamp int64) DownsamplingPeriod {
offset := int64(fasttime.UnixTimestamp())*1000 - timestamp
for _, dsp := range downsamplingPeriods {
if offset >= dsp.Offset {
return dsp
}
}
return DownsamplingPeriod{}
}
func parseDownsamplingPeriods(periods []string) ([]DownsamplingPeriod, error) {
if len(periods) == 0 {
return nil, nil
}
var dsps []DownsamplingPeriod
for _, period := range periods {
var dsp DownsamplingPeriod
if err := dsp.parse(period); err != nil {
return nil, fmt.Errorf("cannot parse downsampling period %q: %w", period, err)
}
dsps = append(dsps, dsp)
}
sort.Slice(dsps, func(i, j int) bool {
return dsps[i].Offset > dsps[j].Offset
})
dspPrev := dsps[0]
// sanity checks.
for _, dsp := range dsps[1:] {
if dspPrev.Interval <= dsp.Interval {
return nil, fmt.Errorf("prev downsampling interval %d must be bigger than the next interval %d", dspPrev.Interval, dsp.Interval)
}
if dspPrev.Offset == dsp.Offset {
return nil, fmt.Errorf("duplicate downsampling offset: %d", dsp.Offset)
}
if dspPrev.Interval%dsp.Interval != 0 {
return nil, fmt.Errorf("downsamping intervals must be multiples; prev: %d, current: %d", dspPrev.Interval, dsp.Interval)
}
dspPrev = dsp
}
return dsps, nil
}
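
getDownsamplingPeriod assumes downsamplingPeriods is sorted by offset in descending order and returns the first period whose offset does not exceed the sample's age; GetDedupInterval simply returns that period's interval. A self-contained sketch of the selection (hypothetical periods 60d:15m and 30d:1m, plus a 30s dedup interval appended as a zero-offset period, as SetDownsamplingPeriods does; not the project's code):

package main

import (
	"fmt"
	"time"
)

// period mirrors DownsamplingPeriod: Offset and Interval in milliseconds.
type period struct {
	Offset   int64
	Interval int64
}

// Sorted by Offset in descending order, as parseDownsamplingPeriods guarantees.
var periods = []period{
	{Offset: (60 * 24 * time.Hour).Milliseconds(), Interval: (15 * time.Minute).Milliseconds()}, // 60d:15m
	{Offset: (30 * 24 * time.Hour).Milliseconds(), Interval: time.Minute.Milliseconds()},        // 30d:1m
	{Offset: 0, Interval: (30 * time.Second).Milliseconds()},                                    // dedup as zero-offset downsampling
}

// intervalFor mirrors getDownsamplingPeriod/GetDedupInterval above:
// pick the first period whose offset is covered by the sample's age.
func intervalFor(timestamp int64) int64 {
	age := time.Now().UnixMilli() - timestamp
	for _, p := range periods {
		if age >= p.Offset {
			return p.Interval
		}
	}
	return 0
}

func main() {
	now := time.Now().UnixMilli()
	day := (24 * time.Hour).Milliseconds()
	fmt.Println(intervalFor(now - 90*day)) // 900000 (15m: sample older than 60d)
	fmt.Println(intervalFor(now - 45*day)) // 60000  (1m: sample older than 30d)
	fmt.Println(intervalFor(now - 1*day))  // 30000  (30s: dedup interval)
}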

View File

@@ -0,0 +1,62 @@
package storage
import (
"strings"
"testing"
)
func TestParseDownsamplingPeriodsFailure(t *testing.T) {
f := func(name string, src []string) {
t.Helper()
t.Run(name, func(t *testing.T) {
if _, err := parseDownsamplingPeriods(src); err == nil {
t.Fatalf("want fail for input: %s", strings.Join(src, ","))
}
})
}
f("empty duration", []string{"15d"})
f("empty interval", []string{":1m"})
f("incorrect duration decrease", []string{"30d:15h", "60d:1h"})
f("duplicate offset", []string{"30d:15h", "30d:1h"})
f("duplicate interval", []string{"60d:1h", "30d:1h"})
f("not multiple intervals", []string{"90d:12h", "60:9h", "30d:7h"})
}
func TestParseDownsamplingPeriodsSuccess(t *testing.T) {
f := func(name string, src []string, expected []DownsamplingPeriod) {
t.Helper()
t.Run(name, func(t *testing.T) {
dsps, err := parseDownsamplingPeriods(src)
if err != nil {
t.Fatalf("cannot parse downsampling configuration for: %s, err: %s", strings.Join(src, ","), err)
}
assertDownsamplingPeriods(t, expected, dsps)
})
}
f("one period", []string{"30d:1m"}, []DownsamplingPeriod{
{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
})
f("three periods", []string{"15d:30s", "30d:1m", "60d:15m"}, []DownsamplingPeriod{
{Offset: 60 * 24 * 3600 * 1000, Interval: 15 * 60 * 1000},
{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
{Offset: 15 * 24 * 3600 * 1000, Interval: 30 * 1000},
})
f("with the same divider periods", []string{"15d:1m", "30d:7m", "60d:14m", "90d:28m"}, []DownsamplingPeriod{
{Offset: 90 * 24 * 3600 * 1000, Interval: 28 * 60 * 1000},
{Offset: 60 * 24 * 3600 * 1000, Interval: 14 * 60 * 1000},
{Offset: 30 * 24 * 3600 * 1000, Interval: 7 * 60 * 1000},
{Offset: 15 * 24 * 3600 * 1000, Interval: 60 * 1000},
})
}
func assertDownsamplingPeriods(t *testing.T, want, got []DownsamplingPeriod) {
t.Helper()
if len(want) != len(got) {
t.Fatalf("len mismatch, want: %d, got: %d", len(want), len(got))
}
for i := 0; i < len(want); i++ {
if want[i] != got[i] {
t.Fatalf("want period: %s, got period: %s, idx: %d", want[i], got[i], i)
}
}
}

View File

@@ -358,6 +358,12 @@ func (mn *MetricName) String() string {
return fmt.Sprintf("%s{%s}", mnCopy.MetricGroup, tagsStr)
}
// SortAndMarshal sorts mn tags and then marshals them to dst.
func (mn *MetricName) SortAndMarshal(dst []byte) []byte {
mn.sortTags()
return mn.Marshal(dst)
}
// Marshal appends marshaled mn to dst and returns the result.
//
// mn.sortTags must be called before calling this function

View File

@@ -1075,7 +1075,7 @@ func (pt *partition) runFinalDedup() error {
func (pt *partition) getRequiredDedupInterval() (int64, int64) {
pws := pt.GetParts(nil)
defer pt.PutParts(pws)
dedupInterval := GetDedupInterval()
dedupInterval := GetDedupInterval(pt.tr.MaxTimestamp)
minDedupInterval := getMinDedupInterval(pws)
return dedupInterval, minDedupInterval
}
@@ -1184,7 +1184,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro
}
bsrs = nil
ph.MinDedupInterval = GetDedupInterval()
ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
if err := ph.writeMinDedupInterval(tmpPartPath); err != nil {
return fmt.Errorf("cannot store min dedup interval for part %q: %w", tmpPartPath, err)
}