Compare commits

...

332 Commits

Author SHA1 Message Date
Aliaksandr Valialkin
b596228765 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-27 17:36:37 -08:00
Aliaksandr Valialkin
d0f9a5d4c4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-27 15:39:19 -08:00
Aliaksandr Valialkin
472a9360e6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-27 14:19:26 -08:00
Aliaksandr Valialkin
b00fcad604 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-26 12:28:11 -08:00
Aliaksandr Valialkin
3d755041c3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-24 18:59:30 -08:00
Aliaksandr Valialkin
e22a9d6ba6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-24 17:55:04 -08:00
Aliaksandr Valialkin
9d7dc73038 vendor: make vendor-update 2023-02-24 17:33:28 -08:00
Aliaksandr Valialkin
63d9048990 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-24 17:32:15 -08:00
Aliaksandr Valialkin
8db1fd2f78 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-24 17:17:34 -08:00
Aliaksandr Valialkin
8f0afc656e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-24 13:49:32 -08:00
Aliaksandr Valialkin
be94882ada Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-23 19:27:31 -08:00
Aliaksandr Valialkin
ff990ab0c5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-20 20:00:43 -08:00
Aliaksandr Valialkin
5c8a01aecc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-18 22:44:46 -08:00
Aliaksandr Valialkin
2ce4d04d8e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-13 11:11:49 -08:00
Aliaksandr Valialkin
b026ebe91e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-11 20:54:02 -08:00
Aliaksandr Valialkin
c2b724d3ab Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-11 14:43:19 -08:00
Aliaksandr Valialkin
e4a61581e1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-11 12:53:25 -08:00
Aliaksandr Valialkin
a38bf70679 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-11 12:09:55 -08:00
Aliaksandr Valialkin
7b41c9ac72 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-11 01:07:13 -08:00
Aliaksandr Valialkin
c1d42f3288 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-11 00:33:44 -08:00
Aliaksandr Valialkin
4167344edb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-09 19:13:40 -08:00
Aliaksandr Valialkin
44e388ee6a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-09 15:07:22 -08:00
Aliaksandr Valialkin
b8ab0b2f31 .github/workflows: remove unneeded workflows 2023-02-09 14:28:01 -08:00
Aliaksandr Valialkin
dcc4b84319 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-09 14:26:43 -08:00
Aliaksandr Valialkin
37f48cdaa5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-09 14:07:32 -08:00
Aliaksandr Valialkin
a39140baef Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-09 13:08:34 -08:00
Aliaksandr Valialkin
30c0a37032 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-01 13:04:00 -08:00
Aliaksandr Valialkin
32e46ea35f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-02-01 13:02:21 -08:00
Aliaksandr Valialkin
6faaefef7b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-27 11:38:43 -08:00
Aliaksandr Valialkin
5cd89aaaa1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-27 00:05:34 -08:00
Aliaksandr Valialkin
3a21fde0f3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-26 23:54:43 -08:00
Aliaksandr Valialkin
274627943e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-25 09:23:08 -08:00
Aliaksandr Valialkin
21140318cc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-24 09:33:54 -08:00
Aliaksandr Valialkin
3f5bc2adce Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-18 14:06:05 -08:00
Aliaksandr Valialkin
a5975c31c2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-18 12:02:26 -08:00
Aliaksandr Valialkin
fad61eafc1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-18 01:42:05 -08:00
Aliaksandr Valialkin
30453af768 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-18 01:14:19 -08:00
Aliaksandr Valialkin
7737321133 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-18 00:01:57 -08:00
Aliaksandr Valialkin
a2ab1f0ec9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-17 21:49:03 -08:00
Aliaksandr Valialkin
a092df3f84 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-12 01:13:39 -08:00
Aliaksandr Valialkin
c3f178aa53 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-11 01:34:37 -08:00
Aliaksandr Valialkin
393e7636be Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-10 16:23:24 -08:00
Aliaksandr Valialkin
ebc200846c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2023-01-10 16:11:27 -08:00
Aliaksandr Valialkin
0158237875 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-20 14:52:33 -08:00
Aliaksandr Valialkin
be5bbb7ba7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-19 13:37:52 -08:00
Aliaksandr Valialkin
b79f02de21 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-14 17:54:17 -08:00
Aliaksandr Valialkin
ac58ab9664 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-14 12:59:54 -08:00
Aliaksandr Valialkin
0613ac5d02 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-11 03:24:33 -08:00
Aliaksandr Valialkin
22e48e6517 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-11 02:08:00 -08:00
Aliaksandr Valialkin
1f0432b5c1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-05 23:21:20 -08:00
Aliaksandr Valialkin
079953b4ea Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-12-02 19:18:34 -08:00
Aliaksandr Valialkin
d92da32041 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-29 21:47:16 -08:00
Aliaksandr Valialkin
8548650c2d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-25 20:13:43 -08:00
Aliaksandr Valialkin
2dd82e8355 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-17 01:33:16 +02:00
Aliaksandr Valialkin
bf0b5602d0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-11 01:28:54 +02:00
Aliaksandr Valialkin
e25d05f992 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-11 01:25:29 +02:00
Aliaksandr Valialkin
5ce8fa8b10 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-10 14:13:54 +02:00
Aliaksandr Valialkin
881f22ca62 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-07 15:00:11 +02:00
Aliaksandr Valialkin
38294e2f17 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-11-05 11:12:14 +02:00
Aliaksandr Valialkin
2d909f4979 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-29 02:58:19 +03:00
Aliaksandr Valialkin
0821298471 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-28 22:16:15 +03:00
Aliaksandr Valialkin
fa5cda60d9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-28 14:25:57 +03:00
Aliaksandr Valialkin
700eb5bb1d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-28 00:33:10 +03:00
Aliaksandr Valialkin
70bcc97d1c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-26 14:57:17 +03:00
Aliaksandr Valialkin
0074539441 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-26 01:11:17 +03:00
Aliaksandr Valialkin
fe0ab3840f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-25 17:55:02 +03:00
Aliaksandr Valialkin
c4fc87f8b8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-24 21:30:41 +03:00
Aliaksandr Valialkin
8e3198ba29 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-24 18:04:48 +03:00
Aliaksandr Valialkin
6c7c0790a0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-24 16:32:43 +03:00
Aliaksandr Valialkin
33343695a9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-23 14:09:38 +03:00
Aliaksandr Valialkin
db553f12bc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-23 14:02:45 +03:00
Aliaksandr Valialkin
07fe2c5361 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-21 15:03:12 +03:00
Aliaksandr Valialkin
22e87b0088 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-21 01:11:40 +03:00
Aliaksandr Valialkin
f105e2e8c3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-18 20:55:52 +03:00
Aliaksandr Valialkin
20414b3038 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-14 15:31:43 +03:00
Aliaksandr Valialkin
fcb7ef68f8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-07 03:41:08 +03:00
Aliaksandr Valialkin
626142ab90 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-07 03:16:29 +03:00
Aliaksandr Valialkin
fd1b8be2e5 go.mod: go mod tidy 2022-10-07 01:21:34 +03:00
Aliaksandr Valialkin
d39ba2536e app/victoria-metrics: flagutil.NewArray -> flagutil.NewArrayString after c1fa9828b3 2022-10-07 01:16:52 +03:00
Aliaksandr Valialkin
e2c4578751 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-10-07 01:15:41 +03:00
Aliaksandr Valialkin
6ad7b0619c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-30 18:42:04 +03:00
Aliaksandr Valialkin
3a15bc761b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-30 13:21:21 +03:00
Aliaksandr Valialkin
bd79706eb3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-26 18:03:30 +03:00
Aliaksandr Valialkin
e69fb9f3cf vendor: make vendor-update 2022-09-26 16:40:54 +03:00
Aliaksandr Valialkin
1a9cb85647 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-26 16:36:50 +03:00
Aliaksandr Valialkin
a80f0c9f42 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-21 11:32:46 +03:00
Aliaksandr Valialkin
4db1d24973 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-19 15:31:39 +03:00
Aliaksandr Valialkin
1c9f5b3580 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-14 17:54:06 +03:00
Aliaksandr Valialkin
9682c23786 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-08 19:02:16 +03:00
Aliaksandr Valialkin
bd2bb272f0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-02 22:01:12 +03:00
Aliaksandr Valialkin
6111abd0e6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-09-02 21:49:51 +03:00
Aliaksandr Valialkin
3f3f664b76 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-31 05:05:04 +03:00
Aliaksandr Valialkin
d1c6fb74fc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-31 02:35:29 +03:00
Aliaksandr Valialkin
b9668d5294 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-31 02:32:32 +03:00
Aliaksandr Valialkin
96160000e0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-30 12:40:52 +03:00
Aliaksandr Valialkin
28e961e511 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-24 01:26:12 +03:00
Aliaksandr Valialkin
628e87e727 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-22 00:40:04 +03:00
Aliaksandr Valialkin
3600c97ad7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-21 19:17:23 +03:00
Aliaksandr Valialkin
bb154f8829 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-18 01:31:49 +03:00
Aliaksandr Valialkin
d2e293b5c9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-15 01:43:51 +03:00
Aliaksandr Valialkin
e80ddbebd4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-08 19:59:28 +03:00
Aliaksandr Valialkin
bdd4940140 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-08 14:06:18 +03:00
Aliaksandr Valialkin
a8fee2d9b6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-08 13:56:56 +03:00
Aliaksandr Valialkin
2dbbf51ea9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-07 22:49:37 +03:00
Aliaksandr Valialkin
cd5cc4ec81 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-04 18:04:51 +03:00
Aliaksandr Valialkin
549d430907 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-08-02 13:33:59 +03:00
Aliaksandr Valialkin
69aef55ae7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-21 21:21:04 +03:00
Aliaksandr Valialkin
274145af2d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-21 20:23:41 +03:00
Aliaksandr Valialkin
c444f7e2b9 docs/Cluster-VictoriaMetrics.md: update after fe68bb3ba7 2022-07-21 20:21:58 +03:00
Aliaksandr Valialkin
10f41ea5f9 all: follow-up after 46f803fa7a
Add -pushmetrics.* command-line flags to all the VictoriaMetrics apps
2022-07-21 20:14:27 +03:00
Aliaksandr Valialkin
46f803fa7a all: add ability to push internal metrics to remote storage system specified via -pushmetrics.url 2022-07-21 19:49:52 +03:00
Aliaksandr Valialkin
ffe9bd248c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-14 16:18:18 +03:00
Aliaksandr Valialkin
151286f5a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-14 11:04:42 +03:00
Aliaksandr Valialkin
77a1af4f7f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-14 00:56:50 +03:00
Aliaksandr Valialkin
c83ff99e0d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-13 18:07:46 +03:00
Aliaksandr Valialkin
4a0c9a1069 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-11 18:22:49 +03:00
Aliaksandr Valialkin
2fd56ddb38 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-07 20:38:21 +03:00
Aliaksandr Valialkin
b42e5627fb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-07 02:37:06 +03:00
Aliaksandr Valialkin
57375e72fa Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-06 13:50:59 +03:00
Aliaksandr Valialkin
0746766d95 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-06 13:31:28 +03:00
Aliaksandr Valialkin
6712a8269c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-06 13:04:08 +03:00
Aliaksandr Valialkin
4e20ea4b59 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-07-04 12:17:00 +03:00
Aliaksandr Valialkin
44dfb2ec0d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-30 20:20:19 +03:00
Aliaksandr Valialkin
e7b4e657a1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-28 20:22:11 +03:00
Aliaksandr Valialkin
cd91c29243 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-28 13:26:58 +03:00
Aliaksandr Valialkin
8b8e547dc8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-23 19:33:36 +03:00
Aliaksandr Valialkin
34a6b1fa3b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-22 13:20:58 +03:00
Aliaksandr Valialkin
af37ec8020 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-21 15:50:01 +03:00
Aliaksandr Valialkin
fff8ff946f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-21 14:02:43 +03:00
Aliaksandr Valialkin
fdccca238a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-20 18:11:39 +03:00
Aliaksandr Valialkin
1b24afec36 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-20 17:42:27 +03:00
Aliaksandr Valialkin
cacd3d6f6d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-19 23:05:31 +03:00
Aliaksandr Valialkin
8632b8200e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-15 18:42:09 +03:00
Aliaksandr Valialkin
0445ad59db Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-14 13:29:48 +03:00
Aliaksandr Valialkin
f7b52b64a3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-14 12:26:32 +03:00
Aliaksandr Valialkin
7fc62feddc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-09 13:33:07 +03:00
Aliaksandr Valialkin
0ea0168d98 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-06-04 01:13:48 +03:00
Aliaksandr Valialkin
3dec16702a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-23 11:00:31 +03:00
Aliaksandr Valialkin
993ecbb141 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-21 02:27:04 +03:00
Aliaksandr Valialkin
35eb512efa Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-20 15:03:38 +03:00
Aliaksandr Valialkin
7f01217c3c Makefile: explicitly specify go1.17 compatibility when running go mod tidy at make vendor-update
This is needed because go1.17 is the minimum supported version of Go,
which is needed for building VictoriaMetrics
2022-05-20 14:41:09 +03:00
Aliaksandr Valialkin
2398b4a10a vendor: make vendor-update 2022-05-20 14:40:09 +03:00
Aliaksandr Valialkin
5a60387eea Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-20 14:34:02 +03:00
Aliaksandr Valialkin
2685992ca9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-07 02:02:31 +03:00
Aliaksandr Valialkin
ee63748753 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-05 11:01:55 +03:00
Aliaksandr Valialkin
620b0d11b7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-05 10:33:08 +03:00
Aliaksandr Valialkin
316cac2c0b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-05 00:16:55 +03:00
Aliaksandr Valialkin
9eb61e67af Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-05 00:02:14 +03:00
Aliaksandr Valialkin
a7333a7380 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-05-02 22:06:17 +03:00
Aliaksandr Valialkin
ee5bd20157 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-29 19:37:07 +03:00
Aliaksandr Valialkin
d713bdec20 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-29 14:23:04 +03:00
Aliaksandr Valialkin
6a5d6244d4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-20 22:55:51 +03:00
Aliaksandr Valialkin
095feeee41 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-12 16:23:16 +03:00
Aliaksandr Valialkin
9dd493363c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-07 15:34:32 +03:00
Aliaksandr Valialkin
d964b04efd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-07 15:33:09 +03:00
Aliaksandr Valialkin
ec01a188fd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-07 15:25:52 +03:00
Aliaksandr Valialkin
40112df441 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-05 19:24:37 +03:00
Aliaksandr Valialkin
9e74fe3145 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-04 13:11:51 +03:00
Aliaksandr Valialkin
2c22e168f5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-04-01 12:38:34 +03:00
Aliaksandr Valialkin
5747b78f6f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-28 12:31:27 +03:00
Aliaksandr Valialkin
d9166e899e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-28 12:17:18 +03:00
Aliaksandr Valialkin
38699170c9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-24 19:22:42 +02:00
Aliaksandr Valialkin
5b4f7bbc0c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-18 19:54:43 +02:00
Aliaksandr Valialkin
db85f4a1cb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-18 17:59:12 +02:00
Aliaksandr Valialkin
780b2a139a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-17 20:11:56 +02:00
Aliaksandr Valialkin
9d2805320b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-16 13:40:03 +02:00
Aliaksandr Valialkin
e636cab272 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-16 12:59:06 +02:00
Aliaksandr Valialkin
90a1502335 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-03-03 19:31:14 +02:00
Aliaksandr Valialkin
f8a05d4ada Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-22 21:12:18 +02:00
Aliaksandr Valialkin
ae64c2db61 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-22 21:10:53 +02:00
Aliaksandr Valialkin
37a4347a37 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-14 18:32:41 +02:00
Aliaksandr Valialkin
20cdb879e7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-14 17:56:09 +02:00
Aliaksandr Valialkin
7917486d78 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-14 17:52:50 +02:00
Aliaksandr Valialkin
107607bf47 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-07 18:34:47 +02:00
Aliaksandr Valialkin
78b028064f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-02-02 23:58:11 +02:00
Aliaksandr Valialkin
db286fdd73 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-25 15:33:35 +02:00
Aliaksandr Valialkin
e8ff658b2e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-23 13:24:13 +02:00
Aliaksandr Valialkin
e1668e7441 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-18 23:29:16 +02:00
Aliaksandr Valialkin
0d0469cc80 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-18 22:44:04 +02:00
Aliaksandr Valialkin
8d6d4e8033 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-18 22:38:35 +02:00
Aliaksandr Valialkin
b894f25f21 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-17 15:51:07 +02:00
Aliaksandr Valialkin
b6bae2f05f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-07 12:56:47 +02:00
Aliaksandr Valialkin
9e15858baf vendor: make vendor-update 2022-01-07 12:37:58 +02:00
Aliaksandr Valialkin
3f5b1084eb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2022-01-07 12:36:24 +02:00
Aliaksandr Valialkin
c2e9be96a7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-20 19:11:26 +02:00
Aliaksandr Valialkin
a72dadb8f4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-20 13:53:03 +02:00
Aliaksandr Valialkin
08219faf8d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-17 20:21:56 +02:00
Aliaksandr Valialkin
288620ca40 lib/storage: initial support for multi-level downsampling
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/36

Based on https://github.com/valyala/VictoriaMetrics/pull/203
2021-12-15 16:42:44 +02:00
Aliaksandr Valialkin
2847c84a7b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-15 16:41:56 +02:00
Aliaksandr Valialkin
6a64823581 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-14 19:57:21 +02:00
Aliaksandr Valialkin
b94e986710 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-12-02 15:03:46 +02:00
Aliaksandr Valialkin
a29565d1bd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-11-08 15:48:09 +02:00
Aliaksandr Valialkin
39332cfc5c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-11-08 13:56:29 +02:00
Aliaksandr Valialkin
d07d2811d4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-10-22 19:41:51 +03:00
Aliaksandr Valialkin
206e451cae Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-10-08 17:56:22 +03:00
Aliaksandr Valialkin
307034fc2f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-10-08 16:10:22 +03:00
Aliaksandr Valialkin
c149132b14 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-23 22:55:56 +03:00
Aliaksandr Valialkin
6dd7a90c7c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-22 01:49:36 +03:00
Aliaksandr Valialkin
dc5507754f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-20 15:22:36 +03:00
Aliaksandr Valialkin
c68663deee Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-20 14:55:21 +03:00
Aliaksandr Valialkin
114a40e63f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-15 18:26:16 +03:00
Aliaksandr Valialkin
163f2a46fd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-15 18:18:59 +03:00
Aliaksandr Valialkin
375c46cb1f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-01 17:13:09 +03:00
Aliaksandr Valialkin
bb2d1128b8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-01 16:37:40 +03:00
Aliaksandr Valialkin
479b9da827 vendor: make vendor-update 2021-09-01 12:53:52 +03:00
Aliaksandr Valialkin
62857fc30e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-09-01 12:49:13 +03:00
Aliaksandr Valialkin
253315b1fe Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-19 10:35:13 +03:00
Aliaksandr Valialkin
efe6e30008 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-18 22:07:32 +03:00
Aliaksandr Valialkin
bc2512abdd docs/CHANGELOG.md: update urls to Prometheus 2.29 release
Previously these urls were pointing to rc0 release
2021-08-16 14:52:23 +03:00
Aliaksandr Valialkin
a07f8017ba docs/CHANGELOG.md: typo fix: satureated -> saturated 2021-08-16 14:51:04 +03:00
Aliaksandr Valialkin
cf70b766eb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-15 23:52:55 +03:00
Aliaksandr Valialkin
b00732074c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-08-15 23:51:06 +03:00
Aliaksandr Valialkin
8df8c414de Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-07-15 14:05:30 +03:00
Aliaksandr Valialkin
ce844238a4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-25 13:30:19 +03:00
Aliaksandr Valialkin
452720c5dc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-25 13:24:42 +03:00
Aliaksandr Valialkin
bbca1740c1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-18 10:55:54 +03:00
Aliaksandr Valialkin
e1c85395eb Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-11 13:03:20 +03:00
Aliaksandr Valialkin
b348114dab Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-11 13:00:09 +03:00
Aliaksandr Valialkin
bb54e34dc5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-09 20:43:33 +03:00
Aliaksandr Valialkin
e0d0b9447e vendor: make vendor-update 2021-06-09 20:43:19 +03:00
Aliaksandr Valialkin
fae6e4fc85 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-06-09 19:10:11 +03:00
Aliaksandr Valialkin
e49bf9bc73 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-05-24 16:03:14 +03:00
Aliaksandr Valialkin
a142390014 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-05-01 10:05:48 +03:00
Aliaksandr Valialkin
bceb8082f6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-30 10:11:24 +03:00
Aliaksandr Valialkin
276969500e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-24 01:42:54 +03:00
Aliaksandr Valialkin
030e3a63f2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-21 11:00:07 +03:00
Aliaksandr Valialkin
1c5e0564af lib/promscrape: create a single swosFunc per scrape_config 2021-04-08 09:31:55 +03:00
Aliaksandr Valialkin
b8300338f0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-08 01:03:03 +03:00
Aliaksandr Valialkin
660c3c7251 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-04-08 00:54:19 +03:00
Aliaksandr Valialkin
80ba07dc95 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-30 15:41:16 +03:00
Aliaksandr Valialkin
11ded82e60 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-29 19:15:52 +03:00
Aliaksandr Valialkin
558b390ebc vendor: make vendor-update 2021-03-25 20:57:46 +02:00
Aliaksandr Valialkin
343f444e87 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-25 19:15:08 +02:00
Aliaksandr Valialkin
16884c20c0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-17 02:05:46 +02:00
Aliaksandr Valialkin
7d44cdd8ce Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-15 22:44:24 +02:00
Aliaksandr Valialkin
5d2394ad9b vendor: make vendor-update 2021-03-09 11:51:21 +02:00
Aliaksandr Valialkin
8582fba4b1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-09 11:46:39 +02:00
Aliaksandr Valialkin
b045f506f2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-03 11:51:32 +02:00
Aliaksandr Valialkin
6197440bb9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-03-02 21:46:03 +02:00
Aliaksandr Valialkin
966e9c227a Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-27 01:48:33 +02:00
Aliaksandr Valialkin
edb2ab7d8e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-27 00:25:01 +02:00
Aliaksandr Valialkin
0ad887fd4d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-26 23:00:40 +02:00
Aliaksandr Valialkin
d5dde7f6b1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-18 19:14:41 +02:00
Aliaksandr Valialkin
a54ca9bd8f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-18 15:47:41 +02:00
Aliaksandr Valialkin
3588687f84 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-18 14:53:19 +02:00
Aliaksandr Valialkin
687eb4ab00 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-16 22:29:45 +02:00
Aliaksandr Valialkin
b04fece006 lib/promscrape/discovery/kubernetes: add __meta_kubernetes_endpoints_label_* and __meta_kubernetes_endpoints_annotation_* labels to role: endpoints
This syncs kubernetes SD with Prometheus 2.25
See 617c56f55a
2021-02-15 02:50:46 +02:00
Aliaksandr Valialkin
d0c364d93d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-15 01:45:39 +02:00
Aliaksandr Valialkin
63c88d8ea2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-03 23:52:43 +02:00
Aliaksandr Valialkin
dc6636e2b2 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-03 12:30:44 +02:00
Aliaksandr Valialkin
c13f1d99e0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-02-01 20:19:50 +02:00
Aliaksandr Valialkin
079888f719 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-27 01:12:45 +02:00
Aliaksandr Valialkin
b68264b4f5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-22 12:10:57 +02:00
Aliaksandr Valialkin
aed049f660 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-13 13:55:45 +02:00
Aliaksandr Valialkin
7fcc0a1ef0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-13 12:55:45 +02:00
Aliaksandr Valialkin
48951073c4 lib/backup: increase backup chunk size from 128MB to 1GB
This should reduce costs for object storage API calls by 8x. See https://cloud.google.com/storage/pricing#operations-pricing
2021-01-13 12:15:38 +02:00
Aliaksandr Valialkin
d0dfcb72b4 vendor: make vendor-update 2021-01-13 12:09:54 +02:00
Nikolay
4cf7a55808 fixes tmpBlockFile remove on prometheus search error (#109) 2021-01-13 11:53:11 +02:00
Aliaksandr Valialkin
d72fc60108 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-13 11:41:15 +02:00
Aliaksandr Valialkin
0b92e18047 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-11 21:16:32 +02:00
Aliaksandr Valialkin
aa8ea16160 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-11 13:12:44 +02:00
Nikolay
f5e70f0ab9 adds multiple match args support for prometheusSearch, (#106)
it merges result according to prometheus ChainedSeriesMerge.
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1001
2021-01-11 13:06:54 +02:00
Aliaksandr Valialkin
9e10d5083e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-11 13:04:58 +02:00
Aliaksandr Valialkin
30c2d75815 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2021-01-08 00:26:00 +02:00
Aliaksandr Valialkin
0e80f3f45a go.mod: add missing dependency on github.com/oklog/ulid
This is a follow up for a5583ddaff
2021-01-07 23:44:50 +02:00
Aliaksandr Valialkin
6e3cbae0b3 app/vmstorage/promdb: code prettifying after a5583ddaff 2021-01-07 23:30:19 +02:00
Nikolay
a5583ddaff adds period compaction to prometheus data (#105)
* adds period compaction to prometheus data
and filtering for datapoints outside retention period

* lint fix

* adds custom retention func

* fixes compaction,
fixes search query adjustment
2021-01-07 22:55:35 +02:00
Aliaksandr Valialkin
5db9e82e54 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-29 11:45:16 +02:00
Aliaksandr Valialkin
80676cf1fd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-28 12:09:22 +02:00
Aliaksandr Valialkin
ba4c49dde6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-27 14:10:59 +02:00
Aliaksandr Valialkin
35e5e8ff1e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-27 13:32:54 +02:00
Aliaksandr Valialkin
4cdbc4642d Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-25 17:41:24 +02:00
Aliaksandr Valialkin
23c0fb1efc Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-24 17:21:19 +02:00
Aliaksandr Valialkin
441d3e4b3f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-24 12:50:11 +02:00
Aliaksandr Valialkin
a0ea5777f0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-24 11:49:03 +02:00
Aliaksandr Valialkin
fb006fc6c0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-21 08:44:04 +02:00
Aliaksandr Valialkin
8593358965 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-19 16:43:03 +02:00
Aliaksandr Valialkin
d0311b7fe5 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-15 22:44:35 +02:00
Aliaksandr Valialkin
4edd38a906 Merge remote-tracking branch 'public/pmm-6401-read-prometheus-data-files' into pmm-6401-read-prometheus-data-files 2020-12-15 14:35:38 +02:00
Aliaksandr Valialkin
56054f4eb7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-15 14:33:39 +02:00
Nikolay
0ff0787797 adds custom apiPathLinks for victoria-metrics / api help (#968)
* adds custom apiPathLinks for victoria-metrics / api help

* adds custom paths for PMM
2020-12-15 14:20:51 +02:00
Aliaksandr Valialkin
f9c706e186 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-15 12:43:44 +02:00
Aliaksandr Valialkin
d74d22460c .github/workflows/main.yml: set GO111MODULE=off when installing auxiliary tools via go install 2020-12-15 01:01:22 +02:00
Aliaksandr Valialkin
d1193c87a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-14 20:21:19 +02:00
Aliaksandr Valialkin
4f311e5827 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-14 14:20:52 +02:00
Aliaksandr Valialkin
142e6b6ecf Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-14 11:50:16 +02:00
Aliaksandr Valialkin
1b4ef473b9 .github/ISSUE_TEMPLATE/bug_report.md: add a link to upgrade procedure 2020-12-11 22:08:59 +02:00
Aliaksandr Valialkin
8beb1f9519 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-11 21:29:22 +02:00
Aliaksandr Valialkin
501fd8efd9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-11 12:10:21 +02:00
Aliaksandr Valialkin
45f2ba2572 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-05 14:57:17 +02:00
Aliaksandr Valialkin
cb2342029e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-12-05 13:24:28 +02:00
Aliaksandr Valialkin
ff0088ceec Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-26 02:07:16 +02:00
Aliaksandr Valialkin
afe6d2e736 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-25 23:03:44 +02:00
Aliaksandr Valialkin
e1a6262302 lib/fs: replace fs.OpenReaderAt with fs.MustOpenReaderAt
All the callers for fs.OpenReaderAt expect that the file will be opened.
So it is better to log fatal error inside fs.MustOpenReaderAt instead of leaving this to the caller.
2020-11-23 09:55:41 +02:00
Aliaksandr Valialkin
f000a10cd0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-23 09:41:15 +02:00
Aliaksandr Valialkin
4aee6ef4c0 vendor: make vendor-update 2020-11-19 19:07:10 +02:00
Aliaksandr Valialkin
f4dfacd493 vendor: update prometheus dependency 2020-11-19 19:00:46 +02:00
Aliaksandr Valialkin
fb2d4e56ce Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-19 18:56:02 +02:00
Aliaksandr Valialkin
36b748dfc7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-16 21:06:21 +02:00
Aliaksandr Valialkin
c625dc5b96 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-10 00:28:50 +02:00
Aliaksandr Valialkin
e32620afa1 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-10 00:21:55 +02:00
Aliaksandr Valialkin
3f298272a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-08 13:41:41 +02:00
Aliaksandr Valialkin
7a473798b7 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-07 01:05:52 +02:00
Aliaksandr Valialkin
00ce906d97 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-05 17:14:27 +02:00
Aliaksandr Valialkin
41c9565aa1 update github.com/prometheus/prometheus from v1.8.2-0.20200911110723-e83ef207b6c2 to v1.8.2-0.20201029103703-63be30dceed9 2020-11-05 02:55:46 +02:00
Aliaksandr Valialkin
56303aee5b Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-05 02:51:08 +02:00
Aliaksandr Valialkin
8d8e2ccf5f Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-04 19:10:41 +02:00
Aliaksandr Valialkin
8772cb617c Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-03 14:15:45 +02:00
Aliaksandr Valialkin
65fbfc5cbc vendor: add missing files 2020-11-02 22:01:42 +02:00
Aliaksandr Valialkin
1b389674c0 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-02 21:58:57 +02:00
Aliaksandr Valialkin
98529e16ee Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-02 02:12:19 +02:00
Aliaksandr Valialkin
1b112405a8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-11-02 00:47:34 +02:00
Aliaksandr Valialkin
8bbc83e85e Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-17 12:13:56 +03:00
Aliaksandr Valialkin
8349140744 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-16 15:19:09 +03:00
Aliaksandr Valialkin
4dc13754d8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-13 18:37:48 +03:00
Aliaksandr Valialkin
83b7eb8ca6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-09 12:36:16 +03:00
Aliaksandr Valialkin
e5ef3288dd Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-08 19:27:30 +03:00
Aliaksandr Valialkin
e7f2907138 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-07 21:40:45 +03:00
Aliaksandr Valialkin
757c5cfbe0 Merge branch 'pmm-6401-read-prometheus-data-files' of github.com:valyala/VictoriaMetrics into pmm-6401-read-prometheus-data-files 2020-10-06 19:07:38 +03:00
Aliaksandr Valialkin
317ddb84b9 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-06 16:18:01 +03:00
Aliaksandr Valialkin
2b1d0510fa Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-06 16:12:25 +03:00
Aliaksandr Valialkin
40d2f6fee4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-05 18:14:51 +03:00
Aliaksandr Valialkin
9fbb84d5c2 app/vmselect/promql: fill gaps on graphs for range_* and running_* functions
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/806
2020-10-02 13:58:19 +03:00
Aliaksandr Valialkin
bdaa9a91f3 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-10-01 19:31:26 +03:00
Aliaksandr Valialkin
1a91da35be Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-30 09:58:18 +03:00
Aliaksandr Valialkin
f85be226bb vendor: make vendor-update 2020-09-30 08:58:52 +03:00
Aliaksandr Valialkin
8df5a3c5f6 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-30 08:55:57 +03:00
Aliaksandr Valialkin
9d3eb3f4b8 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-24 20:49:16 +03:00
Aliaksandr Valialkin
2cd48959d4 lib/storage: correctly use maxBlockSize in various checks
Previously `maxBlockSize` has been multiplied by 8 in certain checks. This is unnecessary.
2020-09-24 20:17:51 +03:00
Aliaksandr Valialkin
8fc8874db4 Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files 2020-09-23 23:25:34 +03:00
Aliaksandr Valialkin
ff1cbb524e app/vmselect: obtain labels and label values from Prometheus storage inside netstorage package 2020-09-23 22:43:01 +03:00
Aliaksandr Valialkin
a70df4bd83 PMM-6401 Initial implementation for reading data from Prometheus files 2020-09-23 14:26:39 +03:00
25 changed files with 1126 additions and 114 deletions

View File

@@ -1,48 +0,0 @@
# nightly-build: builds and publishes nightly-tagged images once a day.
name: nightly-build
on:
  schedule:
    # Daily at 2:48am
    - cron: '48 2 * * *'
permissions:
  contents: read
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Setup Go
        uses: actions/setup-go@main
        with:
          go-version: 1.20.1
        id: go
      - name: Setup docker scan
        run: |
          mkdir -p ~/.docker/cli-plugins && \
          curl https://github.com/docker/scan-cli-plugin/releases/latest/download/docker-scan_linux_amd64 -L -s -S -o ~/.docker/cli-plugins/docker-scan &&\
          chmod +x ~/.docker/cli-plugins/docker-scan
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Code checkout
        uses: actions/checkout@master
      - uses: actions/cache@v3
        with:
          path: gocache-for-docker
          key: gocache-docker-${{ runner.os }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.mod') }}
      - name: build & publish
        run: |
          docker scan --severity=medium --login --token "$SNYK_TOKEN" --accept-license
          LATEST_TAG=nightly PKG_TAG=nightly make publish
        env:
          SNYK_TOKEN: ${{ secrets.SNYK_AUTH_TOKEN }}

View File

@@ -1,33 +0,0 @@
# wiki: mirrors docs/ into the VictoriaMetrics wiki repo on pushes to master.
name: wiki
on:
  push:
    paths:
      - 'docs/*'
    branches:
      - master
permissions:
  contents: read
jobs:
  build:
    permissions:
      contents: write # for Git to git push
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: publish
        shell: bash
        env:
          TOKEN: ${{secrets.CI_TOKEN}}
        run: |
          git clone https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git wiki
          cp -r docs/* wiki
          cd wiki
          git config --local user.email "info@victoriametrics.com"
          git config --local user.name "Vika"
          git add .
          git commit -m "update wiki pages"
          remote_repo="https://vika:${TOKEN}@github.com/VictoriaMetrics/VictoriaMetrics.wiki.git"
          git push "${remote_repo}"
          cd ..
          rm -rf wiki

View File

@@ -35,8 +35,16 @@ var (
"The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. "+
"Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
"Smaller intervals increase disk IO load. Minimum supported value is 1s")
downsamplingPeriods = flagutil.NewArrayString("downsampling.period", "Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs "+
"to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details")
)
// customAPIPathList contains extra [path, description] pairs, which are
// rendered as additional links on the API help page. Paths are given
// without http.pathPrefix.
var customAPIPathList = [][]string{
	{"/graph/explore", "explore metrics grafana page"},
	{"/graph/d/prometheus-advanced/advanced-data-exploration", "PMM grafana dashboard"},
}
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
@@ -59,7 +67,10 @@ func main() {
logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr)
startTime := time.Now()
storage.SetDedupInterval(*minScrapeInterval)
err := storage.SetDownsamplingPeriods(*downsamplingPeriods, *minScrapeInterval)
if err != nil {
logger.Fatalf("cannot parse -downsampling.period: %s", err)
}
storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
vmselect.Init()
@@ -113,6 +124,10 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
{"api/v1/status/top_queries", "top queries"},
{"api/v1/status/active_queries", "active queries"},
})
for _, p := range customAPIPathList {
p, doc := p[0], p[1]
fmt.Fprintf(w, "<a href=%q>%s</a> - %s<br/>", p, p, doc)
}
return true
}
if vminsert.RequestHandler(w, r) {

View File

@@ -13,6 +13,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
@@ -338,6 +339,12 @@ var gomaxprocs = cgroup.AvailableCPUs()
type packedTimeseries struct {
metricName string
brs []blockRef
pd *promData
}
type promData struct {
values []float64
timestamps []int64
}
type unpackWork struct {
@@ -450,9 +457,21 @@ func (pts *packedTimeseries) Unpack(dst *Result, tbf *tmpBlocksFile, tr storage.
putSortBlocksHeap(sbh)
return err
}
dedupInterval := storage.GetDedupInterval()
if pts.pd != nil {
// Add data from Prometheus to dst.
// It usually has smaller timestamps than the data from sbs, so put it first.
dst.Values = append(dst.Values, pts.pd.values...)
dst.Timestamps = append(dst.Timestamps, pts.pd.timestamps...)
}
dedupInterval := storage.GetDedupInterval(tr.MinTimestamp)
mergeSortBlocks(dst, sbh, dedupInterval)
putSortBlocksHeap(sbh)
if pts.pd != nil {
if !sort.IsSorted(dst) {
sort.Sort(dst)
}
pts.pd = nil
}
return nil
}
@@ -569,6 +588,27 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
return dst, firstErr
}
// sort.Interface implementation for Result, ordering samples by timestamp.

// Len implements sort.Interface.
func (r *Result) Len() int {
	return len(r.Timestamps)
}

// Less implements sort.Interface.
func (r *Result) Less(i, j int) bool {
	return r.Timestamps[i] < r.Timestamps[j]
}

// Swap implements sort.Interface.
func (r *Result) Swap(i, j int) {
	r.Timestamps[i], r.Timestamps[j] = r.Timestamps[j], r.Timestamps[i]
	r.Values[i], r.Values[j] = r.Values[j], r.Values[i]
}
func getSortBlock() *sortBlock {
v := sbPool.Get()
if v == nil {
@@ -806,6 +846,15 @@ func LabelNames(qt *querytracer.Tracer, sq *storage.SearchQuery, maxLabelNames i
if err != nil {
return nil, fmt.Errorf("error during labels search on time range: %w", err)
}
// Merge labels obtained from Prometheus storage.
promLabels, err := promdb.GetLabelNamesOnTimeRange(tr, deadline)
if err != nil {
return nil, fmt.Errorf("cannot obtain labels from Prometheus storage: %w", err)
}
qt.Printf("get %d label names from Prometheus storage", len(promLabels))
labels = mergeStrings(labels, promLabels)
// Sort labels like Prometheus does
sort.Strings(labels)
qt.Printf("sort %d labels", len(labels))
@@ -877,14 +926,44 @@ func LabelValues(qt *querytracer.Tracer, labelName string, sq *storage.SearchQue
}
labelValues, err := vmstorage.SearchLabelValuesWithFiltersOnTimeRange(qt, labelName, tfss, tr, maxLabelValues, sq.MaxMetrics, deadline.Deadline())
if err != nil {
return nil, fmt.Errorf("error during label values search on time range for labelName=%q: %w", labelName, err)
return nil, fmt.Errorf("error during label values search on time range: %w", err)
}
// Merge label values obtained from Prometheus storage.
promLabelValues, err := promdb.GetLabelValuesOnTimeRange(labelName, tr, deadline)
if err != nil {
return nil, fmt.Errorf("cannot obtain label values on time range for %q from Prometheus storage: %w", labelName, err)
}
qt.Printf("get %d label values from Prometheus storage", len(promLabelValues))
labelValues = mergeStrings(labelValues, promLabelValues)
// Sort labelValues like Prometheus does
sort.Strings(labelValues)
qt.Printf("sort %d label values", len(labelValues))
return labelValues, nil
}
// mergeStrings returns the union of a and b with duplicates removed.
//
// If either input is empty, the other is returned as-is (without dedup).
// The order of the merged result is unspecified; callers sort it afterwards.
func mergeStrings(a, b []string) []string {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	seen := make(map[string]struct{}, len(a)+len(b))
	for _, s := range a {
		seen[s] = struct{}{}
	}
	for _, s := range b {
		seen[s] = struct{}{}
	}
	merged := make([]string, 0, len(seen))
	for s := range seen {
		merged = append(merged, s)
	}
	return merged
}
// GraphiteTagValues returns tag values for the given tagName until the given deadline.
func GraphiteTagValues(qt *querytracer.Tracer, tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, error) {
qt = qt.NewChild("get graphite tag values for tagName=%s, filter=%s, limit=%d", tagName, filter, limit)
@@ -1217,6 +1296,26 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
}
qt.Printf("fetch unique series=%d, blocks=%d, samples=%d, bytes=%d", len(m), blocksRead, samples, tbf.Len())
// Fetch data from promdb.
pm := make(map[string]*promData)
err = promdb.VisitSeries(sq, deadline, func(metricName []byte, values []float64, timestamps []int64) {
pd := pm[string(metricName)]
if pd == nil {
if _, ok := m[string(metricName)]; !ok {
orderedMetricNames = append(orderedMetricNames, string(metricName))
}
pd = &promData{}
pm[string(metricName)] = pd
}
pd.values = append(pd.values, values...)
pd.timestamps = append(pd.timestamps, timestamps...)
})
if err != nil {
putTmpBlocksFile(tbf)
putStorageSearch(sr)
return nil, fmt.Errorf("error when searching in Prometheus data: %w", err)
}
var rss Results
rss.tr = tr
rss.deadline = deadline
@@ -1225,6 +1324,7 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
pts[i] = packedTimeseries{
metricName: metricName,
brs: m[metricName].brs,
pd: pm[metricName],
}
}
rss.packedTimeseries = pts

View File

@@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
@@ -124,6 +125,8 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
logger.Infof("successfully opened storage %q in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
*DataPath, time.Since(startTime).Seconds(), partsCount, blocksCount, rowsCount, sizeBytes)
registerStorageMetrics(Storage)
promdb.Init(retentionPeriod.Msecs)
}
// Storage is a storage.
@@ -238,6 +241,7 @@ func Stop() {
logger.Infof("gracefully closing the storage at %s", *DataPath)
startTime := time.Now()
WG.WaitAndBlock()
promdb.MustClose()
stopStaleSnapshotsRemover()
Storage.MustClose()
logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())

View File

@@ -0,0 +1,264 @@
package promdb
import (
"context"
"flag"
"fmt"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/go-kit/kit/log"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/model/labels"
promstorage "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
// prometheusDataPath points the package at an optional read-only Prometheus TSDB directory.
// When empty, the package is a no-op (see Init and VisitSeries).
var prometheusDataPath = flag.String("prometheusDataPath", "", "Optional path to readonly historical Prometheus data")

// prometheusRetentionMsecs is set once by Init and bounds how far back searches may look.
var prometheusRetentionMsecs int64
// Init must be called after flag.Parse and before using the package.
//
// It opens the read-only historical Prometheus data at -prometheusDataPath
// (if set) with the given retention period in milliseconds.
//
// See also MustClose.
func Init(retentionMsecs int64) {
	if promDB != nil {
		// Fixed message: it previously referenced non-existent MustOpenPromDB/MustClosePromDB.
		logger.Fatalf("BUG: it looks like Init is called multiple times without MustClose call")
	}
	prometheusRetentionMsecs = retentionMsecs
	if *prometheusDataPath == "" {
		// Historical Prometheus data isn't configured - the package stays a no-op.
		return
	}
	// Route tsdb's log output through the VictoriaMetrics logger.
	l := log.LoggerFunc(func(a ...interface{}) error {
		logger.Infof("%v", a)
		return nil
	})
	opts := tsdb.DefaultOptions()
	opts.RetentionDuration = retentionMsecs
	// Set max block duration to 10% of retention period or 31 days
	// according to https://prometheus.io/docs/prometheus/latest/storage/#compaction
	maxBlockDuration := int64((31 * 24 * time.Hour) / time.Millisecond)
	if maxBlockDuration > retentionMsecs/10 {
		maxBlockDuration = retentionMsecs / 10
	}
	if maxBlockDuration < opts.MinBlockDuration {
		maxBlockDuration = opts.MinBlockDuration
	}
	opts.MaxBlockDuration = maxBlockDuration
	// Custom delete function is needed, because Prometheus by default doesn't delete
	// blocks outside the retention if no new blocks are created with samples with the current timestamps.
	// See https://github.com/prometheus/prometheus/blob/997bb7134fcfd7279f250e183e78681e48a56aff/tsdb/db.go#L1116
	opts.BlocksToDelete = func(blocks []*tsdb.Block) map[ulid.ULID]struct{} {
		m := make(map[ulid.ULID]struct{})
		minRetentionTime := time.Now().Unix()*1000 - retentionMsecs
		for _, block := range blocks {
			meta := block.Meta()
			// delete block marked for deletion by compaction code.
			if meta.Compaction.Deletable {
				m[meta.ULID] = struct{}{}
				continue
			}
			if block.MaxTime() < minRetentionTime {
				m[meta.ULID] = struct{}{}
			}
		}
		return m
	}
	pdb, err := tsdb.Open(*prometheusDataPath, l, nil, opts, nil)
	if err != nil {
		logger.Panicf("FATAL: cannot open Prometheus data at -prometheusDataPath=%q: %s", *prometheusDataPath, err)
	}
	promDB = pdb
	logger.Infof("successfully opened historical Prometheus data at -prometheusDataPath=%q with retentionMsecs=%d", *prometheusDataPath, retentionMsecs)
}
// MustClose must be called on graceful shutdown.
//
// Package functionality cannot be used after this call.
func MustClose() {
	if *prometheusDataPath == "" {
		// Init opened nothing - nothing to close.
		return
	}
	if promDB == nil {
		// Fixed message: it previously referenced non-existent MustClosePromDB/MustOpenPromDB.
		logger.Panicf("BUG: it looks like MustClose is called without Init call")
	}
	if err := promDB.Close(); err != nil {
		logger.Panicf("FATAL: cannot close promDB: %s", err)
	}
	promDB = nil
	logger.Infof("successfully closed historical Prometheus data at -prometheusDataPath=%q", *prometheusDataPath)
}
// promDB is the opened historical Prometheus TSDB; it stays nil when
// -prometheusDataPath isn't set (see Init).
var promDB *tsdb.DB
// GetLabelNamesOnTimeRange returns label names from the historical Prometheus
// data on the given tr.
//
// It returns (nil, nil) when -prometheusDataPath isn't set.
func GetLabelNamesOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
	if promDB == nil {
		// No historical Prometheus data is configured. Without this guard the
		// promDB.Querier call below panics, since callers invoke this function
		// unconditionally (unlike VisitSeries, which already had a guard).
		return nil, nil
	}
	d := time.Unix(int64(deadline.Deadline()), 0)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	q, err := promDB.Querier(ctx, tr.MinTimestamp, tr.MaxTimestamp)
	if err != nil {
		return nil, err
	}
	defer mustCloseQuerier(q)
	names, _, err := q.LabelNames()
	// Make full copy of names, since they cannot be used after q is closed.
	names = copyStringsWithMemory(names)
	return names, err
}
// GetLabelValuesOnTimeRange returns values for the given labelName on the given tr.
//
// It returns (nil, nil) when -prometheusDataPath isn't set.
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
	if promDB == nil {
		// No historical Prometheus data is configured. Without this guard the
		// promDB.Querier call below panics, since callers invoke this function
		// unconditionally (unlike VisitSeries, which already had a guard).
		return nil, nil
	}
	d := time.Unix(int64(deadline.Deadline()), 0)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	q, err := promDB.Querier(ctx, tr.MinTimestamp, tr.MaxTimestamp)
	if err != nil {
		return nil, err
	}
	defer mustCloseQuerier(q)
	values, _, err := q.LabelValues(labelName)
	// Make full copy of values, since they cannot be used after q is closed.
	values = copyStringsWithMemory(values)
	return values, err
}
// copyStringsWithMemory returns a deep copy of a, so the returned strings
// do not share underlying memory with the originals.
func copyStringsWithMemory(a []string) []string {
	result := make([]string, len(a))
	for i := range a {
		b := make([]byte, len(a[i]))
		copy(b, a[i])
		result[i] = string(b)
	}
	return result
}
// SeriesVisitor is called by VisitSeries for each matching time series.
//
// The caller shouldn't hold references to metricName, values and timestamps after returning.
type SeriesVisitor func(metricName []byte, values []float64, timestamps []int64)

// VisitSeries calls f for each series found in the pdb.
//
// Series matching any of sq.TagFilterss are merged via ChainedSeriesMerge
// before being visited. It is a no-op when -prometheusDataPath isn't set.
func VisitSeries(sq *storage.SearchQuery, deadline searchutils.Deadline, f SeriesVisitor) error {
	if *prometheusDataPath == "" {
		// Historical Prometheus data isn't configured - nothing to visit.
		return nil
	}
	d := time.Unix(int64(deadline.Deadline()), 0)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	// Clamp the requested time range to the configured retention period.
	minTime, maxTime := getSearchTimeRange(sq)
	q, err := promDB.Querier(ctx, minTime, maxTime)
	if err != nil {
		return err
	}
	defer mustCloseQuerier(q)
	// Each tag filter set selects its own series set; they are merged below.
	var seriesSet []promstorage.SeriesSet
	for _, tf := range sq.TagFilterss {
		ms, err := convertTagFiltersToMatchers(tf)
		if err != nil {
			return fmt.Errorf("cannot convert tag filters to matchers: %w", err)
		}
		s := q.Select(false, nil, ms...)
		seriesSet = append(seriesSet, s)
	}
	ss := promstorage.NewMergeSeriesSet(seriesSet, promstorage.ChainedSeriesMerge)
	// Buffers are reused across series to reduce allocations; this is why
	// SeriesVisitor forbids holding references after the callback returns.
	var (
		mn         storage.MetricName
		metricName []byte
		values     []float64
		timestamps []int64
	)
	var it chunkenc.Iterator
	for ss.Next() {
		s := ss.At()
		convertPromLabelsToMetricName(&mn, s.Labels())
		metricName = mn.SortAndMarshal(metricName[:0])
		values = values[:0]
		timestamps = timestamps[:0]
		it = s.Iterator(it)
		for {
			typ := it.Next()
			if typ == chunkenc.ValNone {
				// The iterator is drained.
				break
			}
			if typ != chunkenc.ValFloat {
				// Skip unsupported values
				continue
			}
			ts, v := it.At()
			values = append(values, v)
			timestamps = append(timestamps, ts)
		}
		if err := it.Err(); err != nil {
			return fmt.Errorf("error when iterating Prometheus series: %w", err)
		}
		f(metricName, values, timestamps)
	}
	return ss.Err()
}
// getSearchTimeRange returns the search time range from sq, with both ends
// clamped so they never precede the start of the retention period.
func getSearchTimeRange(sq *storage.SearchQuery) (int64, int64) {
	// Data older than this timestamp is outside the retention period.
	minRetentionTime := time.Now().Unix()*1000 - prometheusRetentionMsecs
	minTime := sq.MinTimestamp
	maxTime := sq.MaxTimestamp
	if minTime < minRetentionTime {
		minTime = minRetentionTime
	}
	if maxTime < minRetentionTime {
		maxTime = minRetentionTime
	}
	return minTime, maxTime
}
// convertPromLabelsToMetricName resets dst and fills it from the given
// Prometheus labels; the __name__ label becomes dst.MetricGroup, every
// other label becomes an ordinary tag.
func convertPromLabelsToMetricName(dst *storage.MetricName, labels []labels.Label) {
	dst.Reset()
	for i := range labels {
		l := &labels[i]
		if l.Name == "__name__" {
			dst.MetricGroup = append(dst.MetricGroup[:0], l.Value...)
			continue
		}
		dst.AddTag(l.Name, l.Value)
	}
}
// convertTagFiltersToMatchers converts VictoriaMetrics tag filters into
// Prometheus label matchers. An empty filter key addresses the metric
// name and is mapped to __name__.
func convertTagFiltersToMatchers(tfs []storage.TagFilter) ([]*labels.Matcher, error) {
	ms := make([]*labels.Matcher, 0, len(tfs))
	for _, tf := range tfs {
		var mt labels.MatchType
		switch {
		case tf.IsNegative && tf.IsRegexp:
			mt = labels.MatchNotRegexp
		case tf.IsNegative:
			mt = labels.MatchNotEqual
		case tf.IsRegexp:
			mt = labels.MatchRegexp
		default:
			mt = labels.MatchEqual
		}
		name := string(tf.Key)
		if name == "" {
			name = "__name__"
		}
		m, err := labels.NewMatcher(mt, name, string(tf.Value))
		if err != nil {
			return nil, err
		}
		ms = append(ms, m)
	}
	return ms, nil
}
// mustCloseQuerier closes q and panics on failure, since a close error
// here indicates an unrecoverable storage problem.
func mustCloseQuerier(q promstorage.Querier) {
	err := q.Close()
	if err == nil {
		return
	}
	logger.Panicf("FATAL: cannot close querier: %s", err)
}

13
go.mod
View File

@@ -19,11 +19,18 @@ require (
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.5
github.com/cespare/xxhash/v2 v2.2.0
github.com/cheggaaa/pb/v3 v3.1.2
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/fatih/color v1.14.1 // indirect
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.7.0
github.com/influxdata/influxdb v1.11.0
github.com/klauspost/compress v1.16.0
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/oklog/ulid v1.3.1
github.com/prometheus/common v0.40.0 // indirect
github.com/prometheus/prometheus v0.42.0
github.com/urfave/cli/v2 v2.24.4
github.com/valyala/fastjson v1.6.4
@@ -64,10 +71,8 @@ require (
github.com/aws/aws-sdk-go-v2/service/sts v1.18.5 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/fatih/color v1.14.1 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
@@ -82,19 +87,15 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.40.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect

2
go.sum
View File

@@ -173,6 +173,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=

View File

@@ -160,7 +160,8 @@ func (b *Block) deduplicateSamplesDuringMerge() {
// Nothing to dedup.
return
}
dedupInterval := GetDedupInterval()
maxTimestamp := srcTimestamps[len(srcTimestamps)-1]
dedupInterval := GetDedupInterval(maxTimestamp)
if dedupInterval <= 0 {
// Deduplication is disabled.
return

View File

@@ -1,27 +1,7 @@
package storage
import (
"time"
)
// SetDedupInterval sets the deduplication interval, which is applied to raw samples during data ingestion and querying.
//
// De-duplication is disabled if dedupInterval is 0.
//
// This function must be called before initializing the storage.
func SetDedupInterval(dedupInterval time.Duration) {
globalDedupInterval = dedupInterval.Milliseconds()
}
// GetDedupInterval returns the dedup interval in milliseconds, which has been set via SetDedupInterval.
func GetDedupInterval() int64 {
return globalDedupInterval
}
var globalDedupInterval int64
func isDedupEnabled() bool {
return globalDedupInterval > 0
return len(downsamplingPeriods) > 0
}
// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in milliseconds.

123
lib/storage/downsampling.go Normal file
View File

@@ -0,0 +1,123 @@
package storage
import (
"fmt"
"sort"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/metricsql"
)
// SetDownsamplingPeriods configures downsampling.
//
// The function must be called before opening or creating any storage.
func SetDownsamplingPeriods(periods []string, dedupInterval time.Duration) error {
	parsed, err := parseDownsamplingPeriods(periods)
	if err != nil {
		return err
	}
	if ms := dedupInterval.Milliseconds(); ms > 0 {
		// Deduplication is a special case of downsampling with zero offset,
		// so an explicitly configured zero-offset period conflicts with it.
		if n := len(parsed); n > 0 && parsed[n-1].Offset == 0 {
			return fmt.Errorf("-dedup.minScrapeInterval=%s cannot be used if -downsampling.period=%s contains zero offset", dedupInterval, periods)
		}
		parsed = append(parsed, DownsamplingPeriod{
			Offset:   0,
			Interval: ms,
		})
	}
	downsamplingPeriods = parsed
	return nil
}
// DownsamplingPeriod describes a single downsampling period.
type DownsamplingPeriod struct {
	// Offset is the distance in milliseconds back from the current time
	// starting from which downsampling with Interval must be applied.
	Offset int64
	// Interval is the downsampling interval in milliseconds - only a single
	// sample is left per each interval.
	Interval int64
}

// String implements fmt.Stringer; it renders the period in `offset:interval` form.
func (dsp DownsamplingPeriod) String() string {
	return fmt.Sprintf("%s:%s",
		time.Duration(dsp.Offset)*time.Millisecond,
		time.Duration(dsp.Interval)*time.Millisecond)
}
// parse fills dsp from s, which must be in `offset:interval` form,
// e.g. `30d:1m`. The offset part must be non-empty.
func (dsp *DownsamplingPeriod) parse(s string) error {
	sepIdx := strings.Index(s, ":")
	if sepIdx <= 0 {
		// Either the separator is missing or the offset part is empty.
		return fmt.Errorf("incorrect format for downsampling period: %s, want `offset:interval` format", s)
	}
	offsetStr := s[:sepIdx]
	intervalStr := s[sepIdx+1:]
	interval, err := metricsql.DurationValue(intervalStr, 0)
	if err != nil {
		return fmt.Errorf("incorrect interval: %s format for downsampling interval: %s err: %w", intervalStr, s, err)
	}
	offset, err := metricsql.DurationValue(offsetStr, 0)
	if err != nil {
		return fmt.Errorf("incorrect duration: %s format for downsampling offset: %s err: %w", offsetStr, s, err)
	}
	dsp.Interval = interval
	dsp.Offset = offset
	// sanity check: for non-zero offsets the interval must fit into the offset.
	if offset > 0 && interval > offset {
		return fmt.Errorf("downsampling interval=%d cannot exceed offset=%d", dsp.Interval, dsp.Offset)
	}
	return nil
}
// downsamplingPeriods holds the configured downsampling periods sorted by
// Offset in descending order. It is set via SetDownsamplingPeriods, which
// must be called before opening or creating any storage.
var downsamplingPeriods []DownsamplingPeriod
// GetDedupInterval returns dedup interval in milliseconds, which must be applied to samples with the given timestamp.
//
// Zero is returned when no downsampling period covers the timestamp,
// i.e. deduplication is disabled for it.
func GetDedupInterval(timestamp int64) int64 {
dsp := getDownsamplingPeriod(timestamp)
return dsp.Interval
}
// getDownsamplingPeriod returns the downsampling period, which must be used
// for samples with the given timestamp (in milliseconds).
//
// The zero DownsamplingPeriod is returned when no configured period covers
// the timestamp.
func getDownsamplingPeriod(timestamp int64) DownsamplingPeriod {
	nowMs := int64(fasttime.UnixTimestamp()) * 1000
	age := nowMs - timestamp
	// downsamplingPeriods is sorted by Offset in descending order,
	// so the first match is the period with the biggest qualifying offset.
	for _, p := range downsamplingPeriods {
		if age >= p.Offset {
			return p
		}
	}
	return DownsamplingPeriod{}
}
// parseDownsamplingPeriods parses the given `offset:interval` strings into
// downsampling periods sorted by Offset in descending order.
//
// It returns (nil, nil) for empty input. An error is returned if any entry
// cannot be parsed or the resulting set violates the sanity checks below.
func parseDownsamplingPeriods(periods []string) ([]DownsamplingPeriod, error) {
	if len(periods) == 0 {
		return nil, nil
	}
	dsps := make([]DownsamplingPeriod, 0, len(periods))
	for _, period := range periods {
		var dsp DownsamplingPeriod
		if err := dsp.parse(period); err != nil {
			return nil, fmt.Errorf("cannot parse downsampling period %q: %w", period, err)
		}
		dsps = append(dsps, dsp)
	}
	// Order by Offset descending, so lookups can return the first match.
	sort.Slice(dsps, func(i, j int) bool {
		return dsps[i].Offset > dsps[j].Offset
	})
	// Sanity checks: offsets must be unique, intervals must strictly decrease
	// together with offsets, and each bigger interval must be a multiple of
	// the next smaller one, so downsampled data stays aggregatable.
	dspPrev := dsps[0]
	for _, dsp := range dsps[1:] {
		if dspPrev.Interval <= dsp.Interval {
			return nil, fmt.Errorf("prev downsampling interval %d must be bigger than the next interval %d", dspPrev.Interval, dsp.Interval)
		}
		if dspPrev.Offset == dsp.Offset {
			return nil, fmt.Errorf("duplicate downsampling offset: %d", dsp.Offset)
		}
		if dspPrev.Interval%dsp.Interval != 0 {
			return nil, fmt.Errorf("downsampling intervals must be multiples; prev: %d, current: %d", dspPrev.Interval, dsp.Interval)
		}
		dspPrev = dsp
	}
	return dsps, nil
}

View File

@@ -0,0 +1,62 @@
package storage
import (
"strings"
"testing"
)
// TestParseDownsamplingPeriodsFailure verifies that parseDownsamplingPeriods
// rejects malformed or inconsistent period configurations.
func TestParseDownsamplingPeriodsFailure(t *testing.T) {
// f runs a named subtest that expects parseDownsamplingPeriods(src) to fail.
f := func(name string, src []string) {
t.Helper()
t.Run(name, func(t *testing.T) {
if _, err := parseDownsamplingPeriods(src); err == nil {
t.Fatalf("want fail for input: %s", strings.Join(src, ","))
}
})
}
f("empty duration", []string{"15d"})
f("empty interval", []string{":1m"})
f("incorrect duration decrease", []string{"30d:15h", "60d:1h"})
f("duplicate offset", []string{"30d:15h", "30d:1h"})
f("duplicate interval", []string{"60d:1h", "30d:1h"})
// NOTE(review): "60:9h" looks like a typo for "60d:9h" — it still fails,
// but possibly for a reason other than the non-multiple intervals — confirm.
f("not multiple intervals", []string{"90d:12h", "60:9h", "30d:7h"})
}
// TestParseDownsamplingPeriodsSuccess verifies that valid configurations are
// parsed into the expected periods, sorted by offset in descending order.
// Expected values are spelled out in milliseconds (days * 24 * 3600 * 1000).
func TestParseDownsamplingPeriodsSuccess(t *testing.T) {
// f runs a named subtest that parses src and compares the result to expected.
f := func(name string, src []string, expected []DownsamplingPeriod) {
t.Helper()
t.Run(name, func(t *testing.T) {
dsps, err := parseDownsamplingPeriods(src)
if err != nil {
t.Fatalf("cannot parse downsampling configuration for: %s, err: %s", strings.Join(src, ","), err)
}
assertDownsamplingPeriods(t, expected, dsps)
})
}
f("one period", []string{"30d:1m"}, []DownsamplingPeriod{
{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
})
// Input is unsorted by offset; the parser must sort descending.
f("three periods", []string{"15d:30s", "30d:1m", "60d:15m"}, []DownsamplingPeriod{
{Offset: 60 * 24 * 3600 * 1000, Interval: 15 * 60 * 1000},
{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
{Offset: 15 * 24 * 3600 * 1000, Interval: 30 * 1000},
})
// Intervals 1m/7m/14m/28m: each bigger interval is a multiple of the next smaller one.
f("with the same divider periods", []string{"15d:1m", "30d:7m", "60d:14m", "90d:28m"}, []DownsamplingPeriod{
{Offset: 90 * 24 * 3600 * 1000, Interval: 28 * 60 * 1000},
{Offset: 60 * 24 * 3600 * 1000, Interval: 14 * 60 * 1000},
{Offset: 30 * 24 * 3600 * 1000, Interval: 7 * 60 * 1000},
{Offset: 15 * 24 * 3600 * 1000, Interval: 60 * 1000},
})
}
// assertDownsamplingPeriods fails the test if got differs from want
// in length or in any element (order-sensitive comparison).
func assertDownsamplingPeriods(t *testing.T, want, got []DownsamplingPeriod) {
t.Helper()
if len(want) != len(got) {
t.Fatalf("len mismatch, want: %d, got: %d", len(want), len(got))
}
for i := 0; i < len(want); i++ {
if want[i] != got[i] {
t.Fatalf("want period: %s, got period: %s, idx: %d", want[i], got[i], i)
}
}
}

View File

@@ -367,6 +367,12 @@ func (mn *MetricName) String() string {
return fmt.Sprintf("%s{%s}", mnCopy.MetricGroup, tagsStr)
}
// SortAndMarshal sorts mn tags and then marshals them to dst.
//
// It returns dst with the marshaled representation appended, satisfying
// the precondition of Marshal that tags are sorted.
func (mn *MetricName) SortAndMarshal(dst []byte) []byte {
mn.sortTags()
return mn.Marshal(dst)
}
// Marshal appends marshaled mn to dst and returns the result.
//
// mn.sortTags must be called before calling this function

View File

@@ -1255,7 +1255,7 @@ func (pt *partition) runFinalDedup() error {
func (pt *partition) getRequiredDedupInterval() (int64, int64) {
pws := pt.GetParts(nil, false)
defer pt.PutParts(pws)
dedupInterval := GetDedupInterval()
dedupInterval := GetDedupInterval(pt.tr.MaxTimestamp)
minDedupInterval := getMinDedupInterval(pws)
return dedupInterval, minDedupInterval
}
@@ -1511,7 +1511,7 @@ func (pt *partition) mergePartsInternal(tmpPartPath string, bsw *blockStreamWrit
return nil, fmt.Errorf("cannot merge parts to %q: %w", tmpPartPath, err)
}
if tmpPartPath != "" {
ph.MinDedupInterval = GetDedupInterval()
ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
if err := ph.writeMinDedupInterval(tmpPartPath); err != nil {
return nil, fmt.Errorf("cannot store min dedup interval: %w", err)
}

22
vendor/github.com/go-kit/kit/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

160
vendor/github.com/go-kit/kit/log/README.md generated vendored Normal file
View File

@@ -0,0 +1,160 @@
# package log
**Deprecation notice:** The core Go kit log packages (log, log/level, log/term, and
log/syslog) have been moved to their own repository at github.com/go-kit/log.
The corresponding packages in this directory remain for backwards compatibility.
Their types alias the types and their functions call the functions provided by
the new repository. Using either import path should be equivalent. Prefer the
new import path when practical.
______
`package log` provides a minimal interface for structured logging in services.
It may be wrapped to encode conventions, enforce type-safety, provide leveled
logging, and so on. It can be used for both typical application log events,
and log-structured data streams.
## Structured logging
Structured logging is, basically, conceding to the reality that logs are
_data_, and warrant some level of schematic rigor. Using a stricter,
key/value-oriented message format for our logs, containing contextual and
semantic information, makes it much easier to get insight into the
operational activity of the systems we build. Consequently, `package log` is
of the strong belief that "[the benefits of structured logging outweigh the
minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
Migrating from unstructured to structured logging is probably a lot easier
than you'd expect.
```go
// Unstructured
log.Printf("HTTP server listening on %s", addr)
// Structured
logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
```
## Usage
### Typical application logging
```go
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)
logger.Log("question", "what is the meaning of life?", "answer", 42)
// Output:
// question="what is the meaning of life?" answer=42
```
### Contextual Loggers
```go
func main() {
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "instance_id", 123)
logger.Log("msg", "starting")
NewWorker(log.With(logger, "component", "worker")).Run()
NewSlacker(log.With(logger, "component", "slacker")).Run()
}
// Output:
// instance_id=123 msg=starting
// instance_id=123 component=worker msg=running
// instance_id=123 component=slacker msg=running
```
### Interact with stdlib logger
Redirect stdlib logger to Go kit logger.
```go
import (
"os"
stdlog "log"
kitlog "github.com/go-kit/kit/log"
)
func main() {
logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
stdlog.Print("I sure like pie")
}
// Output:
// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
```
Or, if, for legacy reasons, you need to pipe all of your logging through the
stdlib log package, you can redirect Go kit logger to the stdlib logger.
```go
logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
logger.Log("legacy", true, "msg", "at least it's something")
// Output:
// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
```
### Timestamps and callers
```go
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
logger.Log("msg", "hello")
// Output:
// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
```
## Levels
Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level).
## Supported output formats
- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
- JSON
## Enhancements
`package log` is centered on the one-method Logger interface.
```go
type Logger interface {
Log(keyvals ...interface{}) error
}
```
This interface, and its supporting code like is the product of much iteration
and evaluation. For more details on the evolution of the Logger interface,
see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
a talk by [Chris Hines](https://github.com/ChrisHines).
Also, please see
[#63](https://github.com/go-kit/kit/issues/63),
[#76](https://github.com/go-kit/kit/pull/76),
[#131](https://github.com/go-kit/kit/issues/131),
[#157](https://github.com/go-kit/kit/pull/157),
[#164](https://github.com/go-kit/kit/issues/164), and
[#252](https://github.com/go-kit/kit/pull/252)
to review historical conversations about package log and the Logger interface.
Value-add packages and suggestions,
like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
are of course welcome. Good proposals should
- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
- Be friendly to packages that accept only an unadorned log.Logger.
## Benchmarks & comparisons
There are a few Go logging benchmarks and comparisons that include Go kit's package log.
- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log

118
vendor/github.com/go-kit/kit/log/doc.go generated vendored Normal file
View File

@@ -0,0 +1,118 @@
// Package log provides a structured logger.
//
// Deprecated: Use github.com/go-kit/log instead.
//
// Structured logging produces logs easily consumed later by humans or
// machines. Humans might be interested in debugging errors, or tracing
// specific requests. Machines might be interested in counting interesting
// events, or aggregating information for off-line processing. In both cases,
// it is important that the log messages are structured and actionable.
// Package log is designed to encourage both of these best practices.
//
// Basic Usage
//
// The fundamental interface is Logger. Loggers create log events from
// key/value data. The Logger interface has a single method, Log, which
// accepts a sequence of alternating key/value pairs, which this package names
// keyvals.
//
// type Logger interface {
// Log(keyvals ...interface{}) error
// }
//
// Here is an example of a function using a Logger to create log events.
//
// func RunTask(task Task, logger log.Logger) string {
// logger.Log("taskID", task.ID, "event", "starting task")
// ...
// logger.Log("taskID", task.ID, "event", "task complete")
// }
//
// The keys in the above example are "taskID" and "event". The values are
// task.ID, "starting task", and "task complete". Every key is followed
// immediately by its value.
//
// Keys are usually plain strings. Values may be any type that has a sensible
// encoding in the chosen log format. With structured logging it is a good
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Contextual Loggers
//
// A contextual logger stores keyvals that it includes in all log events.
// Building appropriate contextual loggers reduces repetition and aids
// consistency in the resulting log output. With, WithPrefix, and WithSuffix
// add context to a logger. We can use With to improve the RunTask example.
//
// func RunTask(task Task, logger log.Logger) string {
// logger = log.With(logger, "taskID", task.ID)
// logger.Log("event", "starting task")
// ...
// taskHelper(task.Cmd, logger)
// ...
// logger.Log("event", "task complete")
// }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. Passing the contextual logger to taskHelper
// enables each log event created by taskHelper to include the task.ID even
// though taskHelper does not have access to that value. Using contextual
// loggers this way simplifies producing log output that enables tracing the
// life cycle of individual tasks. (See the Contextual example for the full
// code of the above snippet.)
//
// Dynamic Contextual Values
//
// A Valuer function stored in a contextual logger generates a new value each
// time an event is logged. The Valuer example demonstrates how this feature
// works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
// DefaultCaller. A common logger initialization sequence that ensures all log
// entries contain a timestamp and source location looks like this:
//
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
// Applications with multiple goroutines want each log event written to the
// same logger to remain separate from other log events. Package log provides
// two simple solutions for concurrent safe logging.
//
// NewSyncWriter wraps an io.Writer and serializes each call to its Write
// method. Using a SyncWriter has the benefit that the smallest practical
// portion of the logging logic is performed within a mutex, but it requires
// the formatting Logger to make only one call to Write per log event.
//
// NewSyncLogger wraps any Logger and serializes each call to its Log method.
// Using a SyncLogger has the benefit that it guarantees each log event is
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
// if err := fmtlogger.Log(keyvals...); err != nil {
// panic(err)
// }
// return nil
// })
package log

15
vendor/github.com/go-kit/kit/log/json_logger.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
package log
import (
"io"
"github.com/go-kit/log"
)
// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
// single JSON object. Each log event produces no more than one call to
// w.Write. The passed Writer must be safe for concurrent use by multiple
// goroutines if the returned Logger will be used concurrently.
func NewJSONLogger(w io.Writer) Logger {
return log.NewJSONLogger(w)
}

51
vendor/github.com/go-kit/kit/log/log.go generated vendored Normal file
View File

@@ -0,0 +1,51 @@
package log
import (
"github.com/go-kit/log"
)
// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies or retains any of its elements must make a copy first.
type Logger = log.Logger
// ErrMissingValue is appended to keyvals slices with odd length to substitute
// the missing value.
var ErrMissingValue = log.ErrMissingValue
// With returns a new contextual logger with keyvals prepended to those passed
// to calls to Log. If logger is also a contextual logger created by With,
// WithPrefix, or WithSuffix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
return log.With(logger, keyvals...)
}
// WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
return log.WithPrefix(logger, keyvals...)
}
// WithSuffix returns a new contextual logger with keyvals appended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithSuffix(logger Logger, keyvals ...interface{}) Logger {
return log.WithSuffix(logger, keyvals...)
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc = log.LoggerFunc

15
vendor/github.com/go-kit/kit/log/logfmt_logger.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
package log
import (
"io"
"github.com/go-kit/log"
)
// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
// logfmt format. Each log event produces no more than one call to w.Write.
// The passed Writer must be safe for concurrent use by multiple goroutines if
// the returned Logger will be used concurrently.
func NewLogfmtLogger(w io.Writer) Logger {
return log.NewLogfmtLogger(w)
}

8
vendor/github.com/go-kit/kit/log/nop_logger.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
package log
import "github.com/go-kit/log"
// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger {
return log.NewNopLogger()
}

54
vendor/github.com/go-kit/kit/log/stdlib.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
package log
import (
"io"
"github.com/go-kit/log"
)
// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
// designed to be passed to a Go kit logger as the writer, for cases where
// it's necessary to redirect all Go kit log output to the stdlib logger.
//
// If you have any choice in the matter, you shouldn't use this. Prefer to
// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
type StdlibWriter = log.StdlibWriter
// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It will extract date/timestamps, filenames, and
// messages, and place them under relevant keys.
type StdlibAdapter = log.StdlibAdapter
// StdlibAdapterOption sets a parameter for the StdlibAdapter.
type StdlibAdapterOption = log.StdlibAdapterOption
// TimestampKey sets the key for the timestamp field. By default, it's "ts".
func TimestampKey(key string) StdlibAdapterOption {
return log.TimestampKey(key)
}
// FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption {
return log.FileKey(key)
}
// MessageKey sets the key for the actual log message. By default, it's "msg".
func MessageKey(key string) StdlibAdapterOption {
return log.MessageKey(key)
}
// Prefix configures the adapter to parse a prefix from stdlib log events. If
// you provide a non-empty prefix to the stdlib logger, then your should provide
// that same prefix to the adapter via this option.
//
// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to
// true if you want to include the parsed prefix in the msg.
func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption {
return log.Prefix(prefix, joinPrefixToMsg)
}
// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
// logger. It's designed to be passed to log.SetOutput.
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
return log.NewStdlibAdapter(logger, options...)
}

37
vendor/github.com/go-kit/kit/log/sync.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
package log
import (
"io"
"github.com/go-kit/log"
)
// SwapLogger wraps another logger that may be safely replaced while other
// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
// will discard all log events without error.
//
// SwapLogger serves well as a package global logger that can be changed by
// importers.
type SwapLogger = log.SwapLogger
// NewSyncWriter returns a new writer that is safe for concurrent use by
// multiple goroutines. Writes to the returned writer are passed on to w. If
// another write is already in progress, the calling goroutine blocks until
// the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
// interface {
// Fd() uintptr
// }
func NewSyncWriter(w io.Writer) io.Writer {
return log.NewSyncWriter(w)
}
// NewSyncLogger returns a logger that synchronizes concurrent use of the
// wrapped logger. When multiple goroutines use the SyncLogger concurrently
// only one goroutine will be allowed to log to the wrapped logger at a time.
// The other goroutines will block until the logger is available.
func NewSyncLogger(logger Logger) Logger {
return log.NewSyncLogger(logger)
}

52
vendor/github.com/go-kit/kit/log/value.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package log
import (
"time"
"github.com/go-kit/log"
)
// A Valuer generates a log value. When passed to With, WithPrefix, or
// WithSuffix in a value element (odd indexes), it represents a dynamic
// value which is re-evaluated with each log event.
type Valuer = log.Valuer
// Timestamp returns a timestamp Valuer. It invokes the t function to get the
// time; unless you are doing something tricky, pass time.Now.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer {
return log.Timestamp(t)
}
// TimestampFormat returns a timestamp Valuer with a custom time format. It
// invokes the t function to get the time to format; unless you are doing
// something tricky, pass time.Now. The layout string is passed to
// Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
return log.TimestampFormat(t, layout)
}
// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
func Caller(depth int) Valuer {
return log.Caller(depth)
}
var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp = log.DefaultTimestamp
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC = log.DefaultTimestampUTC
// DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With.
DefaultCaller = log.DefaultCaller
)

3
vendor/modules.txt vendored
View File

@@ -265,6 +265,9 @@ github.com/fatih/color
# github.com/felixge/httpsnoop v1.0.3
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/go-kit/kit v0.12.0
## explicit; go 1.17
github.com/go-kit/kit/log
# github.com/go-kit/log v0.2.1
## explicit; go 1.17
github.com/go-kit/log