Compare commits

...

766 Commits

Author SHA1 Message Date
Haley Wang
7d7d17d192 add changelog 2025-02-10 14:08:32 +08:00
Evgeny Kuzin
0a8b4281e5 fix race using the same list from 2 goroutines 2025-02-07 11:55:45 -05:00
Aliaksandr Valialkin
7a7f188133 deployment/docker: update VictoriaLogs Docker image tag from v1.4.0-victorialogs to v1.5.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.5.0-victorialogs
2025-01-13 07:34:33 +01:00
Aliaksandr Valialkin
3e00fae3f4 docs/VictoriaLogs/CHANGELOG.md: cut v1.5.0-victorialogs release 2025-01-13 07:28:08 +01:00
Roman Khavronenko
ee3c0c6a87 make: bump golangci-lint to v1.63.4
The new version has additional checks and reduced resource consumption, so
it doesn't time out for our internal repos.

To make the linter happy, I addressed the "redefinition of the built-in
function" lint error.

----
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-13 07:18:04 +01:00
Aliaksandr Valialkin
cf7ea78588 lib/logstorage: format pipe: add frequently used formatters
- url encoding / decoding with <urlencode:field> and <urldecode:field>
- base64 encoding / decoding with <base64encode:field> and <base64decode:field>
- hex encoding / decoding with <hexencode:field> and <hexdecode:field>
- hex encoding for integers with <hexnumencode:field> and <hexnumdecode:field>
2025-01-13 07:08:43 +01:00
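
For illustration, a minimal Go sketch of what these formatters compute (an approximation: the exact escaping rules in lib/logstorage may differ, e.g. in how spaces are URL-encoded):

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	v := "a/b c"
	// <urlencode:field> / <urldecode:field>
	enc := url.QueryEscape(v)
	dec, _ := url.QueryUnescape(enc)
	fmt.Println(enc, dec) // "a%2Fb+c" "a/b c"

	// <base64encode:field> / <base64decode:field>
	b64 := base64.StdEncoding.EncodeToString([]byte(v))
	raw, _ := base64.StdEncoding.DecodeString(b64)
	fmt.Println(b64, string(raw))

	// <hexencode:field> / <hexdecode:field>
	hx := hex.EncodeToString([]byte(v))
	orig, _ := hex.DecodeString(hx)
	fmt.Println(hx, string(orig))

	// <hexnumencode:field> / <hexnumdecode:field> for integer values
	n := uint64(51966)
	hn := strconv.FormatUint(n, 16) // "cafe"
	back, _ := strconv.ParseUint(hn, 16, 64)
	fmt.Println(hn, back)
}
```
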
Aliaksandr Valialkin
186aa3bb0e lib/logstorage: explicitly pass statsFunc to statsProcessor methods
This allows reducing the state of every statsProcessor by removing the pointer to the corresponding statsFunc.
For example, this reduces the statsCountProcessor size by 2x.
2025-01-13 04:49:39 +01:00
Aliaksandr Valialkin
e368f687a7 lib/logstorage: stats pipe: stop finalizeStats() as soon as the query is canceled
Previously finalizeStats() for some functions such as count_uniq() could run for long periods
of time after the query was canceled, since stopCh wasn't propagated to finalizeStats().
2025-01-13 03:38:09 +01:00
Aliaksandr Valialkin
0214aa328e lib/logstorage: stats pipe: use integer group keys if stats by(...) contains a single field with integer values
This reduces memory usage and improves performance, since access to a map with integer keys
is faster than access to a map with string keys.
2025-01-13 03:22:00 +01:00
Aliaksandr Valialkin
dd919eeee6 lib/logstorage: count_uniq and count_uniq_hash stats functions: avoid converting integer values to strings
Previously integer values were converted to strings before being passed to the `updateState()` function at `count_uniq`
and `count_uniq_hash`. Later such values were converted back to integers in order to track them via an integer map of unique values.

This commit avoids the int -> string -> int conversion. Instead, it passes integers directly to the integer map of unique values.
This improves performance of the `count_uniq` and `count_uniq_hash` functions even further.
2025-01-13 02:45:28 +01:00
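
A rough Go sketch of the idea behind this optimization (hypothetical simplified state, not the actual statsProcessor): integers go straight into an int64-keyed map, avoiding the int -> string -> int round trip:

```go
package main

import (
	"fmt"
	"strconv"
)

// uniqTracker keeps integer values in an int64-keyed map and falls back to a
// string-keyed map only for non-integer values.
type uniqTracker struct {
	ints map[int64]struct{}
	strs map[string]struct{}
}

func newUniqTracker() *uniqTracker {
	return &uniqTracker{
		ints: make(map[int64]struct{}),
		strs: make(map[string]struct{}),
	}
}

func (u *uniqTracker) add(v string) {
	if n, err := strconv.ParseInt(v, 10, 64); err == nil {
		u.ints[n] = struct{}{} // fast path: no string keys, cheaper hashing
		return
	}
	u.strs[v] = struct{}{}
}

func main() {
	u := newUniqTracker()
	for _, v := range []string{"1", "2", "2", "foo"} {
		u.add(v)
	}
	fmt.Println(len(u.ints) + len(u.strs)) // 3 unique values
}
```
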
Aliaksandr Valialkin
3f22d06b0c lib/logstorage: add value_type filter to LogsQL
This filter can be used when debugging and exploring logs in order to better understand
which value types are used for storing particular log fields.

The `value_type` filter complements `block_stats` pipe.
2025-01-12 22:21:39 +01:00
Aliaksandr Valialkin
b812de236b lib/logstorage: run make fmt after e610edf045 2025-01-12 03:17:57 +01:00
Aliaksandr Valialkin
40f56fa93b vendor: run make vendor-update
Add exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a to go.mod
according to https://github.com/googleapis/google-cloud-go/issues/11283#issuecomment-2558515586 .

This fixes the following strange issue on `make vendor-update`:

cloud.google.com/go/storage imports
        google.golang.org/grpc/stats/opentelemetry: ambiguous import: found package google.golang.org/grpc/stats/opentelemetry in multiple modules:
        google.golang.org/grpc v1.69.0 (/go/pkg/mod/google.golang.org/grpc@v1.69.0/stats/opentelemetry)
        google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a (/go/pkg/mod/google.golang.org/grpc/stats/opentelemetry@v0.0.0-20240907200651-3ffb98b2c93a)
2025-01-12 03:14:52 +01:00
Aliaksandr Valialkin
e610edf045 lib/logstorage: improve performance for math pipe
- Pass the calculated results to the next pipe in float64 columns.
  Previously the results were converted to string columns. This could slow down further calculations.

- Use custom optimized logic for processing numeric columns, which are passed to the math pipe.
  Previously all the input columns were converted to strings and then converted to float64
  before math pipe calculations.

- Initialize the newly added columns at blockResult as soon as they are added.
  This improves performance when a big number of columns is calculated by the math pipe.
2025-01-12 03:01:47 +01:00
Aliaksandr Valialkin
764955b61c lib/logstorage: track integer values in integer maps when counting the number of unique values at count_uniq stats function
Previously integer values were tracked in string maps. Now every input value is parsed as an integer.
On success the parsed integer is tracked via specialized maps, which hold only integers.
This reduces CPU usage and memory usage in the general case.
2025-01-12 03:01:46 +01:00
Aliaksandr Valialkin
e3d31a371a lib/logstorage: avoid copying column name inside blockSearch.getColumnHeader() and blockSearch.getConstColumnValue()
Use the column name attached to the corresponding part. The lifetime of this column name exceeds the blockSearch lifetime,
so it is safe to use it here.

This is a follow-up for 8d968acd0a
2025-01-12 03:01:46 +01:00
Aliaksandr Valialkin
df723a4870 lib/logstorage: automatically detect columns with int64 values and store them as packed 8-byte int64 values
Previously columns with negative int64 values were stored either as float64 or string,
depending on whether the negative int64 values were bigger or smaller than -2^53.
If the integer values were smaller than -2^53, then they were stored as strings, since float64 cannot
hold such values without precision loss. Now such values are stored as int64.
This should improve compression ratio and query performance over columns with negative int64 values.
2025-01-12 03:01:46 +01:00
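
A hedged Go sketch of the storage idea: a column whose values all parse as int64 can be packed as fixed 8-byte integers (the encoding here is illustrative, not the actual on-disk format):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"strconv"
)

// packInt64Column packs column values as fixed 8-byte little-endian int64s,
// returning false if any value is not an int64 (the column then stays as
// float64 or string).
func packInt64Column(values []string) ([]byte, bool) {
	buf := make([]byte, 0, 8*len(values))
	for _, v := range values {
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, false
		}
		buf = binary.LittleEndian.AppendUint64(buf, uint64(n))
	}
	return buf, true
}

func main() {
	// -9007199254740993 is smaller than -2^53, so float64 cannot hold it
	// exactly, but the packed int64 representation keeps it lossless.
	packed, ok := packInt64Column([]string{"-9007199254740993", "42"})
	fmt.Println(len(packed), ok) // 16 true: 8 bytes per value
}
```
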
Aliaksandr Valialkin
bd00e3a735 lib/logstorage: make sure that the automatic conversion of field values to float64 is lossless
Previously field values could be automatically converted to float64 with precision loss.
This could lead to unexpected results when querying such field values.
For example, "10007199254740992" was incorrectly represented as 10007199254740993.
This commit prevents such lossy conversions when storing field values.

While at it, prevent int64 overflow in the tryParseBytes and tryParseDuration functions,
which are used for parsing constants in queries for byte sizes and durations.
Now these functions return 1<<63-1 (the maximum int64 value) for constants exceeding
this value. Previously they could return arbitrary garbage for such constants.
2025-01-12 03:01:45 +01:00
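
Both guards can be sketched in Go as follows (function names are hypothetical): an integer is converted to float64 only when the round trip is exact, and constant multiplications saturate at the maximum int64 value:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// tryParseFloat64Exact converts an integer string to float64 only when the
// conversion is lossless. It is conservative: float64 represents every
// integer in [-2^53, 2^53] exactly, so anything outside is rejected.
func tryParseFloat64Exact(s string) (float64, bool) {
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, false
	}
	const maxExact = int64(1) << 53
	if n > maxExact || n < -maxExact {
		return 0, false
	}
	return float64(n), true
}

// mulSaturated mirrors the overflow handling for byte-size/duration
// constants: results exceeding int64 saturate at 1<<63-1 instead of
// wrapping into garbage. Assumes v >= 0 and multiplier > 0.
func mulSaturated(v, multiplier int64) int64 {
	if v > math.MaxInt64/multiplier {
		return math.MaxInt64
	}
	return v * multiplier
}

func main() {
	_, ok := tryParseFloat64Exact("10007199254740992")
	fmt.Println(ok)                        // false: float64 would round it
	fmt.Println(mulSaturated(1<<62, 1024)) // 9223372036854775807
}
```
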
Aliaksandr Valialkin
e794582f31 app/vlinsert/insertutils: avoid excess copying of lines at LineReader.buf
1. Do not copy every line from LineReader.buf to LineReader.Line - just refer to the line inside LineReader.buf.
2. Do not copy the next found line to the beginning of LineReader.buf - just track the next line's start index with LineReader.bufOffset.

This reduces memory copying when many lines are read into LineReader.buf by a single read() syscall.
2025-01-12 03:01:45 +01:00
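
A simplified Go sketch of the approach (not the actual insertutils code): the current line aliases the read buffer, and bufOffset tracks where the next line starts, so the buffer is compacted at most once per refill instead of once per line:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

type LineReader struct {
	r         io.Reader
	buf       []byte
	bufOffset int    // start of the not-yet-consumed part of buf
	Line      []byte // aliases buf; valid only until the next NextLine call
}

func (lr *LineReader) NextLine() bool {
	for {
		if i := bytes.IndexByte(lr.buf[lr.bufOffset:], '\n'); i >= 0 {
			lr.Line = lr.buf[lr.bufOffset : lr.bufOffset+i]
			lr.bufOffset += i + 1
			return true
		}
		// Compact the leftover tail once, then refill with a single read().
		lr.buf = append(lr.buf[:0], lr.buf[lr.bufOffset:]...)
		lr.bufOffset = 0
		chunk := make([]byte, 16*1024)
		n, err := lr.r.Read(chunk)
		lr.buf = append(lr.buf, chunk[:n]...)
		if n == 0 && err != nil {
			return false // EOF; handling of a trailing partial line omitted
		}
	}
}

func main() {
	lr := &LineReader{r: strings.NewReader("foo\nbar\n")}
	for lr.NextLine() {
		fmt.Printf("%s\n", lr.Line)
	}
}
```
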
Roman Khavronenko
7cab4fd30d app/vmselect/promql: account for staleness when populating realPrevValue (#8002)
When vmselect processes a rollup function it fetches all the raw samples
on the requested `start-end` interval of the query. It then loops through
the raw samples, picks ranges of samples based on the provided `step`
interval and invokes the rollup function for each of the picked ranges of
samples.

During this processing, vmselect always populates the `realPrevValue`
field with the closest previous raw sample value before the picked range
of samples. This `realPrevValue` is used by rollup functions like
increase_pure or delta to decide whether the counter change happened or
not. For example, we get the counter value == 1. If we've seen this
counter before and its value was also 1 - then no change happened. If we
didn't see it before, then this counter should have started with value=0
and we need to account for `1-0=1` change. All this is required to deal
with situations when scrapes are missing or `step` is too small.

However, vmselect doesn't check how "old" the `realPrevValue` is. In
other words, it doesn't respect the staleness interval when picking it.
As a result, depending on the `start` and `end` params, vmselect can use a
`realPrevValue` which is a couple of hours old, where the gap is unlikely
to be a temporary scrape failure. As a result, some increases can be
incorrectly ignored by vmselect.

This change makes sure that vmselect doesn't populate `realPrevValue`
with samples that are older than staleness interval.

-------------------

To reproduce, create a dataset with one metric `foo` which has samples
with value=1 on interval of couple of hours and resolution 15s, and a
gap for an hour in the middle:
<img width="769" alt="image"
src="https://github.com/user-attachments/assets/a39b2740-b741-45f8-ad18-093b7c57c3b3"
/>

Then run `increase(foo[1m])` expression on this time range (disable
cache):
<img width="1472" alt="image"
src="https://github.com/user-attachments/assets/463cece1-f359-4c75-a96c-60092a31cab2"
/>

As a result, there will be one increase at the beginning of the series,
and no increase after the gap. Then change the time range so it starts
in the middle of the gap:
<img width="1505" alt="image"
src="https://github.com/user-attachments/assets/f4a460c3-9fd1-4ec7-ab47-15e716ec1019"
/>

Now, there is an increase>0 because the `realPrevValue` wasn't
populated. This is wrong, because it hides the increase of the series.

With the fix, the original increase query on full time range should show
2 increases:
<img width="1492" alt="image"
src="https://github.com/user-attachments/assets/aa9d8a6b-7b22-41f6-9eb9-83b3113a6982"
/>

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-10 16:45:44 +01:00
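
The core of the fix can be sketched in a few lines of Go (names are illustrative, not the actual vmselect internals):

```go
package vmselect // illustrative placement

// pickRealPrevValue returns the value of the closest previous raw sample,
// but only if that sample is not older than the staleness interval relative
// to the start of the current window; otherwise the series is treated as
// restarted, so rollup functions like increase/delta assume a 0 start.
func pickRealPrevValue(prevValue float64, prevTimestamp, windowStart, stalenessIntervalMs int64) (float64, bool) {
	if windowStart-prevTimestamp > stalenessIntervalMs {
		return 0, false // the previous sample is stale; do not reuse it
	}
	return prevValue, true
}
```
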
Zakhar Bessarab
3333135bc0 docs/CHANGELOG.md: cut v1.109.0
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2025-01-10 18:53:33 +04:00
Zakhar Bessarab
1db1841b20 app/{vmselect,vlselect}: run make vmui-update vmui-logs-update
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2025-01-10 18:53:33 +04:00
Aliaksandr Valialkin
f7ce191482 docs/VictoriaLogs/README.md: add "Profiling" chapter
This chapter can be referenced from GitHub issues when CPU or memory profiles need to be collected
in order to investigate high CPU and/or RAM usage in VictoriaLogs.
2025-01-10 14:22:08 +01:00
Aliaksandr Valialkin
96ea222780 LICENSE: update the current year from 2024 to 2025 2025-01-10 14:19:11 +01:00
Zakhar Bessarab
03c0d9a672 app/vmselect/promql: set tenant information for numbers
Since
44b071296d
the `evalNumber` function no longer updates MetricName tenancy information.
This leads to a mismatch in metric names between the query result and the
evaluated number for all tenants other than 0:0.

For example, query `count(up) or 0` will return different results for
tenants 0:0 and 1:1 (assuming up is present for both tenants):
- tenant 0:0 - will only contain result of `count(up)`
- tenant 1:1 - will return both `count(up)` and `0` since metric names
will not be matched

This restores setting the tenancy information on the metric name for
single-tenant queries.

Related issue:

 https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7987
---
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2025-01-10 13:04:38 +01:00
Nikolay
e9f86af7f5 lib/storage: add a hint for merge about type of parts in merge (#7998)
The hint allows choosing the type of cache to be used for index search:
- in-memory parts store recently ingested samples and should use the
main cache. This improves ingestion speed and the cache hit ratio for
queries accessing recently ingested samples.
- merges of file parts are performed in the background; using a separate
cache avoids polluting the main cache with irrelevant
entries.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7182

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2025-01-10 16:01:39 +04:00
Nikolay
9ada784983 lib/storage: make finalDedup schedule interval configurable
This commit makes the interval for checking whether the final dedup
process for historical data should be started configurable. It allows
spreading resource utilisation of multiple vmstorage/vmsingle instances
in time, since final dedup may add extra pressure on disks and backup
systems and make the cluster less stable. Storage unconditionally adds 25%
jitter to the provided value; this should simplify configuration management
in the Kubernetes ecosystem, because Kubernetes application pods must have
the same configuration.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7880


---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2025-01-10 10:46:46 +01:00
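
A possible shape of the jitter logic in Go (a sketch under the assumption that the jitter is additive and random per process):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// addJitter spreads periodic work across identically configured instances:
// each process extends the base interval by a random amount of up to 25%,
// so replicas with the same config do not start final dedup simultaneously.
func addJitter(d time.Duration) time.Duration {
	return d + time.Duration(rand.Int63n(int64(d)/4+1))
}

func main() {
	base := time.Hour
	fmt.Println(addJitter(base)) // somewhere in [1h, 1h15m)
}
```
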
f41gh7
a83ee2b3f1 github/workflows: set GOGC=10 for unit tests
It reduces memory usage during test execution and makes test runs more reliable,
since they sometimes crash with OOM on small GitHub runners.

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2025-01-10 10:45:41 +01:00
Github Actions
2564f10d98 Automatic update helm docs from VictoriaMetrics/helm-charts@117506e (#7996)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2025-01-10 05:16:54 +01:00
Andrii Chubatiuk
0871770634 victorialogs: upgraded datadog extension version in compose to one which supports custom endpoint configuration (#7989)
Recently a new Datadog extension was released, which adds custom endpoint
configuration.

2025-01-09 10:44:36 +04:00
Zakhar Bessarab
51b21dfd57 app/vmalert/notifier: fix rendering of Alertmanager notification body
commit c7fc0d0d2f enabled skipping alerts
in case there are no labels present for an alert. This made the clause which
was adding a comma for the JSON list incorrect, as it is not possible to
determine whether the next alert will be skipped or not.

This fix renders all alert labels in advance, allowing to properly format the
JSON payload for the Alertmanager notification.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7985


Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2025-01-08 19:02:20 +01:00
Zhu Jiekun
276989716f lib/promscrape: add Marathon service discovery
This commit adds support for [Marathon](https://mesosphere.github.io/marathon/)
service discovery to the scrape configuration. 

The following flag is introduced:
```
  -promscrape.marathonSDCheckInterval duration
          Interval for checking for changes in Marathon service discovery. This works only if marathon_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#marathon_sd_configs for details  (default 30s)
```

The service discovery can be configured like this:
```yaml
scrape_configs:
- job_name: marathon_job
  marathon_sd_configs:
  - servers:
    - "..."
    - "..."
```
See:
b555d94d1a/docs/sd_configs.md (marathon_sd_configs)

related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6642


---------

Co-authored-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2025-01-08 18:57:22 +01:00
Artem Fetishev
6cb3c0cac8 Update VictoriaLogs FAQ: add a section about max log record length (#7984)
### Describe Your Changes

There has been a question in our public Slack on whether the length
limit of a log record is going to be changed. See:
https://victoriametrics.slack.com/archives/C05UNTPAEDN/p1736156255119689

This PR documents the max length and explains why it has been chosen.
This FAQ section could serve as an answer to more questions like this.

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2025-01-08 15:33:38 +01:00
cuiweiyuan
d064e14933 chore: fix function name in comment (#7926)
### Describe Your Changes

 fix function name in comment

Signed-off-by: cuiweiyuan <cuiweiyuan@aliyun.com>
2025-01-08 13:58:22 +01:00
Afolabi Badmos
77b0fcfdd9 vmauth: fix bug in discovering ipv6 addresses (#7955)
### Describe Your Changes

Fixes an error in `vmauth` when discovering IPv6 addresses.

`vmauth` attempts to [slice till
`:`](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmauth/auth_config.go#L397)
in the discovered addresses without accounting for IPv6. This causes it
to fail in IPv6-only environments.

```sh
$ nslookup vmselect.ns.svc.cluster.local

...
Name: vmselect.ns.svc.cluster.local
Address: 2600:dead:beef:dead:beef::8
```

```sh
$ kubectl logs -f vmauth

...
error: dial tcp: lookup 2600: no such host
```


---------

Co-authored-by: f41gh7 <nik@victoriametrics.com>
2025-01-08 16:51:13 +04:00
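
Slicing an address up to the first `:` yields `2600` for the IPv6 address above. The standard library already handles bracketed IPv6 addresses correctly; a sketch of the safer approach:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// net.JoinHostPort brackets IPv6 hosts, and net.SplitHostPort undoes it,
	// so no manual ':' slicing is needed.
	addr := net.JoinHostPort("2600:dead:beef:dead:beef::8", "8481")
	fmt.Println(addr) // [2600:dead:beef:dead:beef::8]:8481

	host, port, err := net.SplitHostPort(addr)
	fmt.Println(host, port, err) // 2600:dead:beef:dead:beef::8 8481 <nil>
}
```
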
Yury Molodov
ee7fe11fd2 vmui/logs: add autocomplete support for LogsQL (#6949)
### Describe Your Changes

This pull request adds support for autocomplete in LogsQL queries. The
new feature provides suggestions for field names, field values, and pipe
names as you type.


---------

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2025-01-08 11:36:37 +01:00
Andrii Chubatiuk
4c26fb6fe5 docs: make badges in docs clickable (#7960)
### Describe Your Changes

added links to badges and made them clickable at
docs.victoriametrics.com

2025-01-08 08:58:43 +01:00
Github Actions
fc135094b3 Automatic update operator docs from VictoriaMetrics/operator@028ab8b (#7981)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2025-01-08 07:45:54 +01:00
Roman Khavronenko
5d42f21abd docs: mention publicly available playgrounds (#7977)
The point of the new section is to highlight publicly available
playgrounds for users. All of them were mentioned in other parts of the
documentation, but we didn't have all of them in one place before.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-07 22:16:50 +01:00
hagen1778
28eeabded1 docs: rm extra new lines as they bring no value
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-07 13:52:35 +01:00
hagen1778
b6910cfff7 docs: make vmui related pages below the vmui parent page
This change only updates the hierarchy of pages within the readme.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-07 13:48:39 +01:00
Zhu Jiekun
8938ef398c docs: clarify extra resource is needed for downsampling/retention filter (#7974)
### Describe Your Changes

clarify extra resource is needed when downsampling with filter(s) or
retention filter(s) is applied

2025-01-07 12:30:19 +01:00
hagen1778
df2b75fa81 docs: fix markdown typos
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-07 09:18:25 +01:00
hagen1778
857734c66c docs: fix markdown typo
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2025-01-07 09:06:45 +01:00
Github Actions
bedc0c0f8f Automatic update helm docs from VictoriaMetrics/helm-charts@7004727 (#7958)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2025-01-03 19:06:18 +04:00
YuDong Tang
5a41bdf329 app/select: add command-line flag -search.maxBinaryOpPushdownLabelValues
### Describe Your Changes

Binary operations like `exprFirst op exprSecond` in VictoriaMetrics are
performed in the following way:
1. Execute exprFirst.
2. Extract **common label filters** from the result of step 1.
3. Apply these common label filters to `exprSecond` and execute it, in
order to retrieve fewer time series from vmstorage nodes.

In step 2, only labels with fewer than `100` (hard-coded) values could be
used as a **common label filter** (e.g. `{common_lb=~"v1|v2|...|v100"}`).

In our scenarios, a label (take the `instance` label as an example) can
have thousands of candidate values. Although this brings more pressure to
vmstorage nodes, it's still beneficial to use labels with more than 100
values as a filter in `exprSecond`, given enough vmstorage resources.
After adjusting the value from `100` to `10000`, our query round-trip time
dropped significantly from 5s to 2s.

This pull request changes the hard-coded value into a configurable flag.
2025-01-03 13:20:50 +01:00
Github Actions
bf5d0dd245 Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@7f94969 (#7911) 2025-01-03 14:58:46 +04:00
Github Actions
1cec37b0f5 Automatic update operator docs from VictoriaMetrics/operator@5992757 (#7954)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2025-01-02 18:46:11 +01:00
f41gh7
c40c25b03c docs/changelog: properly mention vminsert changes
storageNode sorting should be a BUGFIX, since previously vminsert performed sorting and this behaviour was changed.
Also, this change only affects the OSS version.
2025-01-02 17:53:57 +01:00
kiriklo
82badc3dd5 app/vmselect/promql: improve performance of parseCache on systems with many CPU cores
### Describe Your Changes

The parse cache is a pretty simple cache implementation: just a
standard map guarded by a mutex.
A map with a mutex has poor performance overall; in addition, when the cache
overflow occurs, the whole cache locks until 1k elements have been
deleted (now it's 10% of the 10000 max elements in the cache). To avoid this
bottleneck and improve cache performance on systems with many CPU
cores, while keeping it rather simple, we can implement the cache with
per-bucket locks like it's done in fastcache. The logic and API remain the
same. So now each bucket will have a map with approximately 78 elements
(with 128 buckets), overflow will now occur per bucket, and only about 7
elements need to be deleted.
Because exec_test.go has about 10k lines of code, it's better to move
the cache into a separate file in order to add tests and benchmarks for it,
which it currently lacks.

```
goos: windows
goarch: amd64
pkg: github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql
cpu: 11th Gen Intel(R) Core(TM) i9-11900K @ 3.50GHz

Current cache implementation performance on 8 cores:
BenchmarkCachePutNoOverFlow-8               1932            618372 ns/op             253 B/op          0 allocs/op
BenchmarkCacheGetNoOverflow-8               6547            211527 ns/op               0 B/op          0 allocs/op
BenchmarkCachePutGetNoOverflow-8            1873            621718 ns/op             261 B/op          0 allocs/op
BenchmarkCachePutOverflow-8                 2262            464328 ns/op              32 B/op          0 allocs/op
BenchmarkCachePutGetOverflow-8              1764            655866 ns/op              38 B/op          0 allocs/op

New cache implementation performance on 8 cores:
BenchmarkCachePutNoOverFlow-8              10408            111412 ns/op               0 B/op          0 allocs/op
BenchmarkCacheGetNoOverflow-8              22407             52809 ns/op               0 B/op          0 allocs/op
BenchmarkCachePutGetNoOverflow-8            6583            168088 ns/op               0 B/op          0 allocs/op
BenchmarkCachePutOverflow-8                 9822            117212 ns/op               2 B/op          0 allocs/op
BenchmarkCachePutGetOverflow-8              6481            175952 ns/op               3 B/op          0 allocs/op

Current cache implementation performance on 16 cores:
BenchmarkCachePutNoOverFlow-16              2331            475307 ns/op             218 B/op          0 allocs/op
BenchmarkCacheGetNoOverflow-16              6069            196905 ns/op               0 B/op          0 allocs/op
BenchmarkCachePutGetNoOverflow-16           1870            644236 ns/op             262 B/op          0 allocs/op
BenchmarkCachePutOverflow-16                2296            509279 ns/op              34 B/op          0 allocs/op
BenchmarkCachePutGetOverflow-16             1726            671510 ns/op              45 B/op          0 allocs/op

New cache implementation performance on 16 cores:
BenchmarkCachePutNoOverFlow-16             13549             82413 ns/op               0 B/op          0 allocs/op
BenchmarkCacheGetNoOverflow-16             30274             38997 ns/op               0 B/op          0 allocs/op
BenchmarkCachePutGetNoOverflow-16           8512            126239 ns/op               0 B/op          0 allocs/op
BenchmarkCachePutOverflow-16               13884             88124 ns/op               1 B/op          0 allocs/op
BenchmarkCachePutGetOverflow-16             7903            131299 ns/op               3 B/op          0 allocs/op
```
From the benchmarks above, we can see that the new implementation is ~5
times faster than the old one.


---------
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2025-01-02 17:43:23 +01:00
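
A condensed Go sketch of the per-bucket locking scheme described above (the bucket count matches the description; the hash, types and eviction policy are illustrative):

```go
package main

import (
	"hash/fnv"
	"sync"
)

type parsedQuery struct{} // stands in for the parsed MetricsQL expression

const bucketsCount = 128
const maxBucketLen = 10000 / bucketsCount // ~78 entries per bucket

type cacheBucket struct {
	mu sync.Mutex
	m  map[string]*parsedQuery
}

type parseCache struct {
	buckets [bucketsCount]cacheBucket
}

func (c *parseCache) bucket(key string) *cacheBucket {
	h := fnv.New64a()
	h.Write([]byte(key))
	return &c.buckets[h.Sum64()%bucketsCount]
}

func (c *parseCache) Get(key string) *parsedQuery {
	b := c.bucket(key)
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.m[key]
}

func (c *parseCache) Put(key string, pq *parsedQuery) {
	b := c.bucket(key)
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.m == nil {
		b.m = make(map[string]*parsedQuery)
	}
	// Overflow is handled per bucket: only ~10% of one small bucket is
	// evicted, instead of locking the whole cache while dropping 1k entries.
	if len(b.m) >= maxBucketLen {
		evicted := 0
		for k := range b.m {
			delete(b.m, k)
			if evicted++; evicted >= maxBucketLen/10 {
				break
			}
		}
	}
	b.m[key] = pq
}

func main() {}
```
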
Alex Gustafsson
43ded688f7 Add open containers source label to Dockerfiles (#7893)
### Describe Your Changes

In order for third-party tooling to identify the source repository of
VictoriaMetrics, add the org.opencontainers.image label to the
Dockerfiles. This enables a whole suite of tools that scan container
images to further correlate data with the source code.

The lack of these annotations can be identified using docker:

```shell
docker pull victoriametrics/victoria-metrics
docker inspect victoriametrics/victoria-metrics
```

```jsonc
// ...
"Labels": null
// ...
```

If we try an image that has the annotations, we'll see more output.

```shell
docker pull traefik
docker image inspect traefik
```

```jsonc
// ...
"Labels": {
    "org.opencontainers.image.description": "A modern reverse-proxy",
    "org.opencontainers.image.documentation": "https://docs.traefik.io",
    "org.opencontainers.image.source": "https://github.com/traefik/traefik",
    "org.opencontainers.image.title": "Traefik",
    "org.opencontainers.image.url": "https://traefik.io",
    "org.opencontainers.image.vendor": "Traefik Labs",
    "org.opencontainers.image.version": "v3.2.3"
}
// ...
```

2025-01-02 20:43:17 +04:00
Hui Wang
661420fe85 dashboard/vmagent: change metric for Persistent Queue panel
consistently use `vmagent_remotewrite_pending_data_bytes`  on vmagent dashboard to represent persistent queue size.

`vmagent_remotewrite_pending_data_bytes =
vm_persistentqueue_bytes_pending + pendingInmemoryBytes`
According to the panel description, `vmagent_remotewrite_pending_data_bytes`
is more accurate:
>Persistent queue size shows the size of pending samples in bytes which
haven't been flushed to remote storage yet.

And we already use `vmagent_remotewrite_pending_data_bytes` in two other
panels.

44d2205136/dashboards/vmagent.json (L7132)
2025-01-02 13:04:40 +01:00
Andrii Chubatiuk
7aab967447 Makefile: cspell makefile refactor
- removed absolute paths to run without Docker
- set cspell to the default entrypoint value
- set the cspell config path instead of copying and removing cspell.json
2025-01-02 12:52:24 +01:00
Hui Wang
afb07034ed app/vmalert: fix the auto-generated metrics ALERTS and ALERTS_FOR_STATE
Previously, since the labels slice was reused for both `ALERTS` and
`ALERTS_FOR_STATE`, the metrics might have incorrect labels, affecting the
restore process. The fix was tested under `TestAlertingRule_Exec:
"for-pending=>empty"`.

The bug was introduced in
282f13cf11.
Affected versions: v1.106.1, v1.107...v1.108.x

related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7796
2025-01-02 12:51:05 +01:00
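
The underlying Go pitfall is easy to reproduce (simplified Label type; the real code uses vmalert's internal label types):

```go
package main

import "fmt"

type Label struct{ Name, Value string }

func main() {
	// Appending to the same reused slice for two metrics makes both results
	// share one backing array, so the second append overwrites the first.
	base := make([]Label, 0, 4)
	alerts := append(base, Label{"__name__", "ALERTS"})
	forState := append(base, Label{"__name__", "ALERTS_FOR_STATE"})
	fmt.Println(alerts[0].Value, forState[0].Value) // both ALERTS_FOR_STATE

	// The fix: build each metric's labels in its own slice.
	alerts = append([]Label{}, Label{"__name__", "ALERTS"})
	forState = append([]Label{}, Label{"__name__", "ALERTS_FOR_STATE"})
	fmt.Println(alerts[0].Value, forState[0].Value) // ALERTS ALERTS_FOR_STATE
}
```
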
f41gh7
44d2205136 app/vmalert: properly format datasource URL for enterprise group.tenant
In the Enterprise version of vmalert, `group` supports the `tenant` field.

The `tenant` field value must be added to the `datasource` URL as a part of the path prefix.

But VictoriaLogs can obtain tenant information only from `headers`, and a defined `tenant` breaks requests to the `VictoriaLogs` datasource.

 This commit properly checks `datasourceType` and skips adding the path prefix if `datasourceType` is `vlogs`.

---------
Co-authored-by: Nikolay <nik@victoriametrics.com>
2024-12-30 15:42:29 +01:00
f41gh7
b226318f9e app/vmstorage: allow to override the default unique time series limit
Previously vmstorage ignored limit values from the vmselect component.

Overriding the limit has been prohibited since v1.105.0, with
85f60237e2.

This breaks the original intent of the -search.maxUniqueTimeseries command-line flag, which was added to vmselect nodes in commit b843f0e: to be able to override the default vmstorage limit on the number of unique time series from different subsets of vmselect nodes.

The behavior should be the following:

* If the -search.maxUniqueTimeseries command-line flag isn't set at either vmselect or vmstorage nodes, then the limit on the number of unique time series must be automatically detected at vmstorage nodes (see "vmstorage: automatically adjust -search.maxUniqueTimeseries max value"). This simplifies configuration of a VictoriaMetrics cluster for the typical case.

* If the -search.maxUniqueTimeseries command-line flag is explicitly set at a vmstorage node, then it must be used as the limit on the number of unique time series, without automatic detection of the limit. An explicitly set limit at a vmstorage node cannot be exceeded by the limit from vmselect nodes.
* If the -search.maxUniqueTimeseries command-line flag is explicitly set at a vmselect node, then it must override the automatically detected limit at the vmstorage node. For example, if the vmselect node provides a limit which exceeds the automatically detected limit at the vmstorage node, then the limit from the vmselect node must be applied during query execution at the vmstorage node. This will allow properly executing queries from the subset of vmselect nodes for the reporting queries described above.

related issue:
 https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7852
2024-12-30 15:20:52 +01:00
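
The described precedence rules boil down to roughly the following (a sketch with illustrative names, not the actual vmstorage code):

```go
package storage // illustrative placement

// effectiveMaxUniqueTimeseries resolves the limit on unique time series for
// a query at a vmstorage node. storageFlag and selectLimit are 0 when unset.
func effectiveMaxUniqueTimeseries(storageFlag, selectLimit, autoDetected int) int {
	if storageFlag > 0 {
		// An explicitly set vmstorage limit is a hard cap: vmselect may
		// only lower it, never exceed it.
		if selectLimit > 0 && selectLimit < storageFlag {
			return selectLimit
		}
		return storageFlag
	}
	if selectLimit > 0 {
		// An explicit vmselect limit overrides the auto-detected one, even
		// when it exceeds it (needed for reporting queries).
		return selectLimit
	}
	return autoDetected
}
```
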
Github Actions
30999204c9 Automatic update operator docs from VictoriaMetrics/operator@5e0854a (#7938)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-30 14:06:27 +01:00
Zhu Jiekun
ffddfa1f94 app/vmctl: properly handle influx series without tags
### Describe Your Changes

Previously, vmctl expected that a tag must exist for each measurement, but
that's actually not necessary.


f16a58f14c/app/vmctl/influx/influx.go (L183-L186)

This pull request fixes it by removing the check. The influx series
`measurement1_value1{}` will be represented as:
```go
Series{
  Measurement: "measurement1",
  Field:       "value1",
  LabelPairs:  []LabelPair{},
  EmptyTags:   []string{},
}
```
and can be searched with the following query:
```sql
select "value1" from "measurement1"
``` 

 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7921
2024-12-26 20:39:06 +01:00
f41gh7
fc336bbf20 app/vminsert: properly ingest influx metrics
Commit 71bb9fc0d0 introduced a regression.
If labels were empty and relabeling was not configured, the influx ingestion handler
performed an early exit due to the TryPrepareLabels call.
 Due to micro-optimisations for this protocol, this check was not valid,
since it didn't take into account metricName, which is added later, and skipped the metrics line.

 This commit removes the `TryPrepareLabels` function call from this path and inlines it instead.
It properly tracks the empty-labels path.

 It also adds an initial tests implementation for data ingestion protocols.

 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7933

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-26 12:14:42 +01:00
f41gh7
e0b2c1c4f5 docs/changelog: removes duplicate record for maxIngestionRate feature
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-24 19:40:44 +01:00
Github Actions
5afbee5f6f Automatic update operator docs from VictoriaMetrics/operator@43f5554 (#7923)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-24 19:31:27 +01:00
Github Actions
51459196f9 Automatic update helm docs from VictoriaMetrics/helm-charts@1f33c21 (#7929)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-24 19:30:43 +01:00
Dima Shur
7941877233 docs: changed typo in label (enterpriSe instead of enterpriZe) (#7925)
### Describe Your Changes

Fixed typo in contributing.md (enterpriZe -> enterpriSe in the label
name)

2024-12-24 15:51:35 +04:00
Phuong Le
f303081304 vminsert: sort the storage nodes during initialization (#7899)
Fixes #7898
2024-12-23 19:41:17 +01:00
Ted Possible
a84628f701 app/vminsert: support for rate limiting number of samples/sec with -maxIngestionRate
This commit adds a feature to limit the sample ingestion rate globally across ingestion protocols.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7377
2024-12-23 17:37:30 +01:00
Github Actions
f823a225ac Automatic update operator docs from VictoriaMetrics/operator@471f183 (#7916)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-23 16:48:49 +01:00
Andrii Chubatiuk
79f1a37ee6 vlinsert: take into account order of msgfields to have predictable _msg field selection in case of multiple matches (#7784)
### Describe Your Changes

Currently, if multiple msgFields are present in a log row, it's not
obvious which field is selected as the _msg field. With this PR, the order
of msgField values, defined either via headers or query arg params,
defines the priority of these values.

2024-12-23 10:10:02 +01:00
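
The selection logic is essentially a first-match walk over the user-supplied order; a sketch with a hypothetical signature (the real code operates on parsed log rows):

```go
package main

import "fmt"

// selectMsgField returns the value of the first msgField that is present
// and non-empty in the row, so the priority follows the user-supplied order
// instead of an arbitrary map-iteration order.
func selectMsgField(row map[string]string, msgFields []string) string {
	for _, f := range msgFields {
		if v := row[f]; v != "" {
			return v
		}
	}
	return ""
}

func main() {
	row := map[string]string{"message": "hello", "msg": "hi"}
	fmt.Println(selectMsgField(row, []string{"msg", "message"})) // hi
}
```
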
Andrii Chubatiuk
f9cd408ca9 datadog-serverless: fixed metrics and logs ingestion from Datadog serverless extensions for AWS and GCP (#7769)
fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7761

### Describe Your Changes

- the Datadog /api/v2/logs API supports a message field in JSON format, which
is not documented and is used by the serverless extension. This PR allows the
message field to be of both string and object type. It also adds support for
the undocumented timestamp field
- added `-datadog.streamFields` and `-datadog.ignoreFields` flags to
configure default stream fields for Datadog logs, where there's no
alternative option to pass extra headers and query args
- added ingestion of `max` and `min` values of data ingested using the
`datadogsketches` API, which is also actively used by serverless
extensions
- use the default `.` separator instead of `_` for sketches metric names
while metrics are not sanitized
2024-12-23 09:57:48 +01:00
Aliaksandr Valialkin
c2811d8d11 docs/VictoriaLogs/LogsQL.md: fix a link to count_uniq_hash stats function docs
It must be consistent with the other stats functions

This is a follow-up for de0ae735aa
2024-12-22 14:39:27 +01:00
Aliaksandr Valialkin
8d981b15c9 deployment: update VictoriaLogs Docker image from v1.3.2-victorialogs to v1.4.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.4.0-victorialogs
2024-12-22 14:36:49 +01:00
Aliaksandr Valialkin
58f09fe3f8 docs/VictoriaLogs/CHANGELOG.md: cut v1.4.0-victorialogs release 2024-12-22 14:31:31 +01:00
Aliaksandr Valialkin
afd926a0b0 lib/logstorage: limit the maximum number of logs and/or log streams, which can be passed to stream_context pipe
This should prevent excess usage of CPU, RAM and other resources when too many logs
are passed to the 'stream_context' pipe.

It is expected that 'stream_context' pipe results are investigated by humans, who cannot inspect
surrounding logs for millions of initial logs. That's why it is OK to limit the number of logs
and/or log streams, which can be passed to 'stream_context' pipe.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7766
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7903
2024-12-22 14:28:50 +01:00
Aliaksandr Valialkin
204c102342 app/vlselect/vmui: run make vmui-logs-update after the commit 1fbc2c0db1
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7288
2024-12-22 13:53:45 +01:00
Aliaksandr Valialkin
c5949af9e8 lib/logstorage: reduce memory allocations when splitting in(...) values into tokens and calculating hashes for these tokens
While at it, reduce memory allocations at Storage.getFieldValuesNoHits and make it more scalable on multi-CPU systems.

This improves performance of the in(<query>) filter when the <query> returns a big number of values.
2024-12-22 13:13:44 +01:00
Aliaksandr Valialkin
5dc0413bc0 lib/logstorage: allow specifying hits column name in the top pipe via top ... hits as <column_name> syntax 2024-12-22 11:23:19 +01:00
Aliaksandr Valialkin
f919783de9 lib/logstorage: uncomment accidentally commented tests at 60f9f44150 2024-12-22 02:20:57 +01:00
Aliaksandr Valialkin
60f9f44150 lib/logstorage: reduce memory allocations at stats and top pipes
Use a chunked allocator in order to reduce memory allocations. It allocates objects from slices of up to 64Kb in size.
This improves performance for the `stats` and `top` pipes by up to 2x when they are applied to a big number of `by (...)` groups.

Also parallelize execution of the `count_uniq`, `count_uniq_hash` and `uniq_values` stats functions,
so they are executed faster on hosts with many CPU cores when applied to fields with a big number
of unique values.
2024-12-22 02:13:02 +01:00
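
A generic Go sketch of a chunked allocator (the actual allocator in lib/logstorage may differ in layout and pooling):

```go
package main

import (
	"fmt"
	"unsafe"
)

const chunkSizeBytes = 64 * 1024

// chunkedAllocator hands out objects carved from 64Kb slabs, so millions of
// tiny allocations cost only a few hundred calls into the Go allocator.
type chunkedAllocator[T any] struct {
	chunk []T
}

func (a *chunkedAllocator[T]) alloc() *T {
	if len(a.chunk) == 0 {
		var zero T
		n := chunkSizeBytes / int(unsafe.Sizeof(zero))
		if n < 1 {
			n = 1
		}
		a.chunk = make([]T, n)
	}
	p := &a.chunk[0]
	a.chunk = a.chunk[1:]
	return p
}

func main() {
	var a chunkedAllocator[[2]uint64]
	x, y := a.alloc(), a.alloc()
	fmt.Println(x != y) // true: distinct objects from the same slab
}
```
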
Github Actions
0fcbe8fdae Automatic update Grafana datasource docs from VictoriaMetrics/victoriametrics-datasource@b18583c (#7910) 2024-12-21 09:42:48 -08:00
Github Actions
458b602938 Automatic update Grafana datasource docs from VictoriaMetrics/victoriametrics-datasource@cbff3fa (#7909) 2024-12-21 09:31:07 -08:00
Aliaksandr Valialkin
471f1d0a09 lib/logstorage: fixed a typo in blockResult.reset()
The commit 4599429f51 improperly set br.cs to nil,
while it should have set br.bs to nil instead. This resulted in excess memory allocations
at br.csInit() and br.csInitFast().
2024-12-21 13:39:25 +01:00
hagen1778
7f80c1633f docs: mention filebeat version requirement for vlogs integration
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-20 16:21:13 +01:00
Yury Molodov
186b00df6b vmui: add export button for raw query data (#7828)
### Describe Your Changes

1. Added the ability to export data from the `Raw Query` page and import
exported data to the `Query Analyzer` page (related issue #7628).
2. Added a `Title` input field; the `Title` is displayed when importing
data on the `Query Analyzer` page.
3. Implemented `Markdown` support for comments in exported data.  
4. Updated the styling of the `Query Analyzer` page.  
5. Fixed an issue where the `Upload JSON` button on the `Query Analyzer`
page was only clickable on the button text (now clickable on the entire
button area).
6. Added a tooltip with `Deduplication` information on the `Raw Query`
page (related to issue #7763).

<details>
  <summary>Screenshots</summary>
  
#### Data export and `Markdown` preview

<img width="400"
src="https://github.com/user-attachments/assets/bbab31bb-81d3-4335-98c3-d01c8786bde4"/>
<img width="400"
src="https://github.com/user-attachments/assets/3cfd9938-b518-45d6-8ded-e3e7e6ab9299"/>

#### `Query Analyzer` page displaying data from `Raw Query`

<img width="900"
src="https://github.com/user-attachments/assets/008e0e93-92f2-4c25-a20e-3cee90a03397"/>

#### Viewing stats and comments on the `Query Analyzer` page  
    
<img width="600"
src="https://github.com/user-attachments/assets/18bfbba1-a11c-420e-84f2-78229ac7bd25"/>

#### Viewing stats data from the `Query` page

<img width="900"
src="https://github.com/user-attachments/assets/0f7a3009-9fb5-4727-b0c4-257aa196a9c1"/>

#### Tooltip on the `Raw Query` page  

<img width="900"
src="https://github.com/user-attachments/assets/400f86e7-f362-4307-8b1d-24af3c67020e"/>
  
</details>

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-12-20 15:51:06 +01:00
Github Actions
4205ae3011 Automatic update helm docs from VictoriaMetrics/helm-charts@feb0675 (#7897)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-20 15:27:51 +01:00
Daria Karavaieva
491028774a docs/vmanomaly: popup deprecated_from and available_from for all docs (#7905)
### Describe Your Changes
added deprecated_from and available_from popups in vmanomaly docs

2024-12-20 15:24:16 +01:00
Mathias Palmersheim
565b79c9ca docs: update vmalert+victorialogs doc with multitenant recording (#7779)
### Describe Your Changes
 
- Adds Headers to FAQ questions in vmalert for Victorialogs
- Adds FAQ for multitenant recording rules described in #7656

---------

Co-authored-by: Haley Wang <haley@victoriametrics.com>
2024-12-20 15:02:00 +01:00
Aliaksandr Valialkin
5478cc61c2 lib/cgroup: add missing initialization of gogc variable inside SetGOGC
This is a follow-up for 79c08ecac4

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7902
2024-12-20 14:56:59 +01:00
Aliaksandr Valialkin
79c08ecac4 lib/cgroup: use the default GOGC=100 for the most of VictoriaMetrics components
Historically some VictoriaMetrics components were optimized for a low rate of memory allocations.
These are: vmagent, single-node VictoriaMetrics and vmstorage. These components benefit from a low
GOGC value, since this allows reducing their memory usage in steady state on typical workloads.

Other VictoriaMetrics components aren't optimized for a reduced rate of memory allocations.
This results in increased CPU usage spent on garbage collection (GC) in these components,
since it must be triggered at a higher rate. See https://tip.golang.org/doc/gc-guide#GOGC for details.

These components do not use too much memory, so it is OK to increase GOGC for these components
from 30 to 100; this won't affect most users.

Keep GOGC to 30 only for vmagent, single-node VictoriaMetrics and vmstorage components.
See 077193d87c and 54b9e1d3cb .

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7902
2024-12-20 14:48:28 +01:00
hagen1778
f47fd83e54 docs: add example with dots in label name to vlogs rules
This change adds an example of how to use labels with `.` dots
in rule annotations.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-20 14:07:48 +01:00
Aliaksandr Valialkin
9c39bac565 lib/logstorage: fix improper sorting of numeric fields when they are stored as const values at sort pipe
Numeric fields can be stored as const values in a block of logs. In this case the `sort` pipe
was incorrectly comparing such values as strings instead of numbers. This resulted in incorrect
sort results: for example, 123 was smaller than 2. Fix this by removing the incorrect case
for comparing const fields.

While at it, replace lessString() with strings.LessNatural() in sortBlockLess.
This improves sorting performance a bit, since the sortBlockLess function already tried
comparing numeric values, and it doesn't need to spend CPU time on such a comparison again inside the lessString() call.
The commit 42c9183281 was incorrect in replacing strings.LessNatural() with lessString()
inside the sortBlockLess() function.
2024-12-20 13:26:20 +01:00
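
The corrected comparison order can be sketched like this (the string fallback here stands in for the natural-order comparison used by the real code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lessValues compares two field values the way the sort pipe intends:
// numerically when both parse as numbers, by string order otherwise.
func lessValues(a, b string) bool {
	fa, errA := strconv.ParseFloat(a, 64)
	fb, errB := strconv.ParseFloat(b, 64)
	if errA == nil && errB == nil {
		return fa < fb // "2" < "123" numerically, unlike byte-wise compare
	}
	// Stand-in for natural ordering (strings.LessNatural in the commit text).
	return strings.Compare(a, b) < 0
}

func main() {
	fmt.Println(lessValues("2", "123")) // true
	fmt.Println(lessValues("abc", "b")) // true
}
```
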
Roman Khavronenko
1042f07498 docs: update OTEL guide (#7887)
* simplify wording
* update styles
* remove extra info about go application details. The details are likely
not needed and we didn't have details for the rolling-dice app anyway. So
keep it simple for consistency and brevity.
* update navigation for simplicity sake
* fix typos

follow-up after
40b47601d1

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-19 15:13:03 +01:00
Nikolay
79a595c6d0 app/vmauth: properly log host at debugInfo function (#7886)
vmauth started to use request.Host after commit
f4776fec1b for `src_hosts` routing rules.

This commit adds http.Request.Host to the debugInfo output in order to
be consistent with routing logic.

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-19 15:04:37 +01:00
Andrii Chubatiuk
40b47601d1 docs/guides/otel: added logs integration, updated old otel dependencies
### Describe Your Changes

- added VictoriaLogs to OpenTelemetry guide
- updated deprecated dependencies
- added deltatocumulative processor to example and deltatemporality
selector to one of examples to use for counters by default
- added exponential histograms to example

---
Signed-off-by: Andrii Chubatiuk <andrew.chubatiuk@gmail.com>
2024-12-19 12:32:41 +01:00
Zakhar Bessarab
6bfcbe66f7 docs/release-guide: add a note about versioning in helm charts and ansible
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-12-19 12:28:32 +01:00
f41gh7
94118c63f6 docs: update VM apps version to v1.108.1
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-19 12:25:37 +01:00
f41gh7
9605d73809 CHANGELOG.md: cut v1.108.1 release 2024-12-18 23:34:58 +01:00
f41gh7
3237c64ef3 make vmui-update 2024-12-18 23:08:22 +01:00
Yury Molodov
1fbc2c0db1 vmui: fix cursor reset in query input
Fix cursor reset in query input field. 

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7288.
2024-12-18 22:30:08 +01:00
Nikolay
71bb9fc0d0 app/vminsert: properly apply relabeling at ingestion
A regression was introduced in 564e6ea024
after implementing:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6928

The ctx.Labels array could be incorrectly updated, and changes made to it by
relabeling rules could be lost.
E.g. ctx.Labels was passed to the WriteDataPoint function as a slice copy, but
the results of relabeling only changed the actual slice at ctx.Labels.

This commit replaces the implicit relabeling call with an explicit
`TryPrepareLabels` function call.
It also reduces code diffs with the cluster version and adds integration tests.

 related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7865

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-12-18 22:27:51 +01:00
Github Actions
0210f4ebd2 Automatic update operator docs from VictoriaMetrics/operator@9b337c1 (#7879)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-18 16:27:37 +01:00
Andrii Chubatiuk
891ad8f202 app/vlinsert: loki healthcheck endpoint (#7864)
### Describe Your Changes

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7824

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-12-18 14:59:44 +01:00
Github Actions
e501640f44 Automatic update Grafana datasource docs from VictoriaMetrics/victoriametrics-datasource@d830b2a (#7855) 2024-12-18 12:27:15 +01:00
Daria Karavaieva
21082405ec docs/vmanomaly: add version popup (#7860)
### Describe Your Changes

Added `available_from` popup into documentation of vmanomaly

2024-12-18 12:26:42 +01:00
Github Actions
094a5ab58f Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@b8fd925 (#7862) 2024-12-18 12:26:11 +01:00
hagen1778
bbc84fa119 docs: add requirements to commit message to contributing
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-18 12:22:20 +01:00
Github Actions
9d1a72aca8 Automatic update operator docs from VictoriaMetrics/operator@5c3657d (#7871)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-18 12:13:58 +01:00
Dmytro Kozlov
05d3db248b deployment/docker: rename victorialogs-datasource to victoriametrics-logs-datasource (#7874)
### Describe Your Changes

Renamed victorialogs-datasource to victoriametrics-logs-datasource.

We prepared the victorialogs Grafana plugin for signing and updated the
plugin ID. This action requires updating configs in our ops repository.

Please check this
[release](https://github.com/VictoriaMetrics/victorialogs-datasource/releases/tag/v0.13.0)
and https://github.com/VictoriaMetrics/victorialogs-datasource/pull/161
with changes

2024-12-18 11:41:16 +01:00
Github Actions
59d739ff0b Automatic update helm docs from VictoriaMetrics/helm-charts@3b7bfbd (#7876)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-18 11:40:39 +01:00
hagen1778
b54d10be63 docs: port LTS changelog
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-17 20:20:19 +01:00
Aliaksandr Valialkin
524f0e8d8b lib/logstorage: eliminate memory allocations when finalizing per-group values calculated by stats pipe
This improves query performance a bit when `stats by (...)` returns millions of individual `by (...)` groups
2024-12-17 15:17:01 +01:00
Roman Khavronenko
72419834af docs: add missing resource usage limits (#7856)

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-17 15:02:54 +01:00
Aliaksandr Valialkin
e6b7d25ab4 app/vlselect: allow passing arbitrary LogsQL filters to extra_filters and extra_stream_filters query args
While at it, allow passing an array of string values per JSON entry in extra_filters and extra_stream_filters.
For example, `extra_filters={"foo":["bar","baz"]}` is converted into the `foo:in("bar", "baz")` extra filter,
while `extra_stream_filters={"foo":["bar","baz"]}` is converted into the `{foo=~"bar|baz"}` extra filter.

This should simplify creating faceted search when multiple values for a single log field must be selected.
This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7365#issuecomment-2447964259

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5542
2024-12-17 13:02:13 +01:00
Daria Karavaieva
ac124cf5aa docs/vmanomaly: deprecate Overview page (#7812)
### Describe Your Changes

- Deprecate Overview page in Anomaly Detection docs
- Add service description to `README.md`
- Move Licensing information to Quickstart page

2024-12-17 12:45:44 +01:00
Aliaksandr Valialkin
3d7f8377f7 lib/logstorage: do not return log fields with the same constant value across all the selected logs from facets pipe
Such log fields do not give any useful information during log exploration.
They just clutter the output of the `facets` pipe. So it is better to drop such fields by default.

If these fields are needed, then the `keep_const_fields` option can be added to the `facets` pipe.
2024-12-17 12:23:00 +01:00
Mathias Palmersheim
4992e083f0 fixed #7804 Added NoSelfMonitoringMetrics rule (#7805)
### Describe Your Changes

fixes #7804 by adding an alert for a missing uptime metric in vmanomaly

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-16 10:00:29 -06:00
hagen1778
71a9fb16f7 deployment/docker: fix typo after d86788e9a2
Thanks to @Haleygo for pointing it out here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7843#issuecomment-2545949268

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-16 16:37:21 +01:00
Artem Fetishev
7e7d029de1 docs: fix typo in keyConcepts.md (#7844)
Fix a typo and simplify the statement


### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-16 16:34:33 +01:00
Github Actions
983f30c326 Automatic update helm docs from VictoriaMetrics/helm-charts@c486483 (#7840)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-16 16:17:06 +01:00
Artem Fetishev
efd8098b0b docs: update instant query description in key concepts (#7842)
### Describe Your Changes

Update docs to reflect the changes introduced in #7767 to fix #5796

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-12-16 16:14:54 +01:00
Dima Shur
d86788e9a2 deployment/docker: set vmalert --remoteWrite.url to port 8429 (vmagent) (#7843)
### Describe Your Changes

Updated docker-compose.yml: set -remoteWrite.url to port 8429 so it
corresponds to the documentation.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-16 16:13:45 +01:00
Aliaksandr Valialkin
a87ad250d0 docs/VictoriaLogs/data-ingestion/README.md: add missing of 2024-12-16 15:01:01 +01:00
hagen1778
bf84de3c6b docs: move change from c6f6302ca4 to #tip
The change was mistakenly put to the released version of VM

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-16 14:20:32 +01:00
Aliaksandr Valialkin
7ec8ea8301 docs/VictoriaLogs/data-ingestion/Vector.md: improve docs a bit
- Remove the Loki sink, since it brings more trouble when users try to use it with Vector.
  For example, it encodes all the log fields as a JSON string and puts it into the "message" field.
  This results in storing the "message" field with a JSON string containing all the log fields
  in VictoriaLogs. This is not what is expected - every log field must be stored as a separate field
  according to https://docs.victoriametrics.com/victorialogs/keyconcepts/

- Remove the 'mode: bulk' option from the Elasticsearch sink configuration, since this is the default value,
  so there is no need to set it explicitly.

- Add 'compression: gzip' to all the config examples, since compression reduces the used network bandwidth by 4-5 times,
  while not increasing CPU usage too much on either the Vector or VictoriaLogs side. So it is better to enable compression in the config examples.

- Mention the HTTP parameters accepted by VictoriaLogs data ingestion APIs in the examples for both the Elasticsearch and JSON line protocols (a config sketch follows).
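A minimal Vector sink sketch reflecting these recommendations, assuming the Elasticsearch-compatible ingestion endpoint of VictoriaLogs (the address is a placeholder):

```
sinks:
  vlogs:
    type: elasticsearch
    endpoints:
      - http://victorialogs:9428/insert/elasticsearch/
    # 'mode: bulk' is omitted, since it is the default value
    compression: gzip  # cuts network bandwidth usage by ~4-5x
```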
2024-12-16 13:52:35 +01:00
Artem Fetishev
c6f6302ca4 Fix inconsistent treatment of millisecond-precision time for instant queries (#7767)
### Describe Your Changes

This PR fixes #5796. See the points 6 and 7 in `Steps to reproduce`:

> Now let's set time to only 5ms past the timestamp of the first point,
since even 199ms worked for the second point. Surprise, the point isn't
returned 💥:
>
> ```curl -s $VMQURL -d 'query=series1' -d 'time=1707123456705' -d
'step=1ms' | grep 10 # nothing!```
>
> But, 4ms works: 🤨🤔
>
> ```curl -s $VMQURL -d 'query=series1' -d 'time=1707123456704' -d
'step=1ms' | grep 10 # found```

This happens because the actual step becomes 5ms due to jitter being
applied. The fix is to not apply jitter if the scrape interval was not
detected (the case when vmstorage returns only one result). In this case
the scrape interval is set to `5m+step`.

An integration test has been added to check the steps to reproduce and
then to confirm that fix works. Note that the cluster tests are
currently disabled because the fix is not in cluster branch yet.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-12-16 13:24:52 +01:00
Github Actions
87100e55cc Automatic update helm docs from VictoriaMetrics/helm-charts@ec141b8 (#7836)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-16 12:51:04 +01:00
Roman Khavronenko
c464d4484f lib/storage: update dedup tests
* update misleading comments about preferring NaNs on intervals. NaNs
are only preferred on timestamp conflicts
* add conflicting timestamps to the benchmark test. Previously,
benchmark wasn't checking the timestamp conflict code branch. The
updated results after
c0fcfd6b97
are the following:
```
benchstat old.txt new.txt

goos: darwin
goarch: arm64
pkg: github.com/VictoriaMetrics/VictoriaMetrics/lib/storage
cpu: Apple M4 Pro
                                                       │   old.txt    │               new.txt                │
                                                       │    sec/op    │    sec/op     vs base                │
DeduplicateSamples/minScrapeInterval=3s-14               889.7n ± ∞ ¹   904.3n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamples/minScrapeInterval=4s-14               735.9n ± ∞ ¹   748.7n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamples/minScrapeInterval=10s-14              637.7n ± ∞ ¹   659.3n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=3s-14    838.8n ± ∞ ¹   810.4n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=4s-14    765.2n ± ∞ ¹   735.1n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=10s-14   673.1n ± ∞ ¹   622.4n ± ∞ ¹       ~ (p=1.000 n=1) ²
geomean                                                  751.7n         741.0n        -1.42%
```


---
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-16 12:50:41 +01:00
f41gh7
91f858ee1e docs: bump last VM versions
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-16 12:19:51 +01:00
f41gh7
da0d57e4b6 CHANGELOG.md: cut v1.108.0 release 2024-12-16 12:12:02 +01:00
hagen1778
fa621b384e docs: mention deprecation of metric names in update notes
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-16 11:20:32 +01:00
f41gh7
02fedb8585 docs/changelog: add missing PR links
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-13 12:08:16 +01:00
f41gh7
04d19a2200 make vmui-update 2024-12-13 12:01:03 +01:00
f41gh7
e612877fe7 app/vmselect: respect -search.skipSlowReplicas when -globalReplicationFactor > 1
Previously, a cluster with the following vmselect configuration:

./bin/vmselect
  -storageNode=gr1/:8211,gr1/:8212
  -storageNode=gr2/:8213,gr2/:8214
  -search.skipSlowReplicas=true
  -globalReplicationFactor=2

Here we have two vmstorage groups and -globalReplicationFactor=2, which effectively means that "every ingested sample is replicated across multiple vmstorage groups". Hence, gr1 and gr2 contain identical data sets. When -search.skipSlowReplicas=true is set, vmselect is expected to return the result as soon as at least one storage group has returned the full result.
Previously, -search.skipSlowReplicas was ignored at the storage group level. It was only respected within a group (with the -replicationFactor flag).

This commit fixes global replication for skipSlowReplicas.

To ensure that the fix works and does not break anything,
replication tests have been added. For checking the fix for
skipping slow replicas, see `testGroupSkipSlowReplicas()`.

To emulate storage groups, the integration test creates a cluster with
multilevel vminsert. The L1 inserts are group-level inserts, each writes
to its own group of vmstorages. The L2 vminsert is a global vminsert
that writes replicated to the L1 vminserts.

To enable multilevel inserts changes in apptest framework and
`lib/ingestserver/clusternative/server.go` were necessary.

related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6924

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-12-13 11:59:03 +01:00
Zhu Jiekun
43181b67b1 discovery/dockerswarm: add missing service labels to tasks discovery role
Previously, service labels weren't attached when `role: tasks` is set,
because the `addServicesLabels` function is shared by `role: tasks` and
`role: services`, and it returns nothing when `vip.Addr` is invalid
or empty.

In Prometheus, even if `vip.Addr` is empty, common service
labels are attached via [a standalone
function](f10c3454e9/discovery/moby/services.go (L129)),
which offers:
- `__meta_dockerswarm_service_id`: the id of the service.
- `__meta_dockerswarm_service_name`: the name of the service.
- `__meta_dockerswarm_service_mode`: the mode of the service.
- `__meta_dockerswarm_service_label_<labelname>`: each label of the
service, with any unsupported characters converted to an underscore.

This PR adds an `addServicesLabelsForTask` function to replace the usage of
`addServicesLabels` when `role: tasks` is set. This function offers the
common service labels listed above.

related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7800
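For context, a hedged sketch of a scrape config using the tasks role (the Docker socket address is an assumption):

```
scrape_configs:
  - job_name: dockerswarm
    dockerswarm_sd_configs:
      - host: unix:///var/run/docker.sock
        role: tasks
    relabel_configs:
      # with this fix, service labels such as __meta_dockerswarm_service_name
      # are attached to tasks targets too
      - source_labels: [__meta_dockerswarm_service_name]
        target_label: service
```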
2024-12-13 11:28:04 +01:00
Hui Wang
b0ed5b6174 app/vmalert: fixes reload of external templates
Previously, after a configuration reload, the `externalURL` templating function defined in external templates could be lost, since it was added only at the initial `Load` call and was never copied during the template reload process.
External templates for vmalert can be defined via the `-rule.templates` flag.

This commit properly reloads external templates. It no longer copies mutated templates and instead fully reloads them each time there are any changes.
2024-12-13 10:29:19 +01:00
Github Actions
4aeda4b267 Automatic update helm docs from VictoriaMetrics/helm-charts@4b32065 (#7817)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-13 10:04:56 +01:00
Github Actions
20a2822c23 Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@21936b8 (#7814) 2024-12-13 10:04:42 +01:00
hagen1778
1891b74a0a docs: mention required version for multitenancy endpoint in vmalert
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-12 13:01:26 +01:00
Andrei Baidarov
0dc576d3da lib/storage: prefer stale markers over other values on dedup interval
Previously, during de-duplication, staleness markers could be removed due to incorrect logic in
the values equality check.
During the evaluation of a read query, vmselect deduplicates samples using the dedupInterval option. It picks the highest value across all points with the same timestamp next to the border of dedupInterval. The issue is that any comparison with NaN via <, > returns false. This means that the position of NaN in srcValues could affect the result.

This commit adds an additional step that explicitly checks for the staleness marker in the following cases:
1. Deduplication on vmselect
2. Deduplication in vmstorage during merges
3. Deduplication in stream aggregation

The check is performed only for stale markers, because other NaNs are rejected on ingestion
by vmstorage or by stream aggregation.
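A minimal Go sketch of the underlying idea, assuming the Prometheus stale-marker NaN bit pattern (this is not the actual VictoriaMetrics code):

```
package dedup

import "math"

// staleNaN is the special NaN bit pattern used by Prometheus staleness markers.
var staleNaN = math.Float64frombits(0x7ff0000000000002)

func isStaleNaN(v float64) bool {
	return math.Float64bits(v) == math.Float64bits(staleNaN)
}

// pickDedupValue returns the value to keep for two samples sharing a timestamp.
// Any `>` comparison involving NaN yields false, so without the explicit check
// a stale marker could silently lose to a regular value.
func pickDedupValue(cur, next float64) float64 {
	if isStaleNaN(next) {
		return next // prefer the stale marker on timestamp conflicts
	}
	if next > cur {
		return next
	}
	return cur
}
```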

Checking for stale markers in general slows down dedup speed by 3%:
```
 benchstat old.txt new.txt

goos: darwin
goarch: arm64
pkg: github.com/VictoriaMetrics/VictoriaMetrics/lib/storage
cpu: Apple M4 Pro
                                                       │   old.txt    │               new.txt                │
                                                       │    sec/op    │    sec/op     vs base                │
DeduplicateSamples/minScrapeInterval=1s-14               462.8n ± ∞ ¹   425.2n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamples/minScrapeInterval=2s-14               905.6n ± ∞ ¹   903.3n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamples/minScrapeInterval=5s-14               710.0n ± ∞ ¹   698.9n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamples/minScrapeInterval=10s-14              632.7n ± ∞ ¹   638.5n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=1s-14    439.7n ± ∞ ¹   409.9n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=2s-14    908.9n ± ∞ ¹   882.2n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=5s-14    721.2n ± ∞ ¹   684.7n ± ∞ ¹       ~ (p=1.000 n=1) ²
DeduplicateSamplesDuringMerge/minScrapeInterval=10s-14   659.1n ± ∞ ¹   630.6n ± ∞ ¹       ~ (p=1.000 n=1) ²
geomean                                                  659.5n         636.0n        -3.56%
```

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7674
---------
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-12-12 12:34:17 +01:00
dependabot[bot]
88861c66fe build(deps): bump golang.org/x/crypto from 0.29.0 to 0.31.0 (#7807)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from
0.29.0 to 0.31.0.
Commits:
- [`b4f1988`](b4f1988a35) ssh: make the public key cache a 1-entry FIFO cache
- [`7042ebc`](7042ebcbe0) openpgp/clearsign: just use rand.Reader in tests
- [`3e90321`](3e90321ac7) go.mod: update golang.org/x dependencies
- [`8c4e668`](8c4e668694) x509roots/fallback: update bundle
- See the full diff in the [compare view](https://github.com/golang/crypto/compare/v0.29.0...v0.31.0)

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-12 09:07:43 +01:00
hagen1778
1ee5ba8d55 docs: clarify meaning of deduplication for exported data
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7763

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 20:57:35 +01:00
Andrii Chubatiuk
e0ab3fccaf app/vlinsert/syslog: fixed structured data parsing (#7801)
### Describe Your Changes

RFC 5424 doesn't allow structured data to start with whitespace, but
whitespace can be present at the end of this section.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7776
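For illustration, a hedged sketch of an RFC 5424 message exercising this case (all values are made up): the trailing space inside the structured data element is now tolerated, while a leading space after `[` remains invalid:

```
<165>1 2024-12-11T17:08:36Z host.example.com app 1234 ID47 [exampleSDID@32473 iut="3" ] log message text
```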

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-12-11 17:08:36 +01:00
Github Actions
2fe6640193 Automatic update helm docs from VictoriaMetrics/helm-charts@9524e91 (#7793)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-11 17:03:58 +01:00
Yury Molodov
d1ccf205c4 vmui: add more details for "clipboard not supported" error (#7778)
### Describe Your Changes

Added a message for Clipboard API errors with common issues and a link
to the docs. Added a check for secure context, showing a clear error and
a doc link if the context is not secure.

Related issue: #7677

<img width="400"
src="https://github.com/user-attachments/assets/a448d82e-f484-43de-9004-fbd5a57f49a7">
<img width="400"
src="https://github.com/user-attachments/assets/8de97577-89a3-445d-a4bb-a091a4549f39">

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-12-11 17:03:35 +01:00
Aliaksandr Valialkin
b42ed019f5 docs/VictoriaLogs/LogsQL.md: collapse_nums pipe docs: clarify that <N> is a placeholder 2024-12-11 16:34:40 +01:00
Aliaksandr Valialkin
5a41c7f5a5 docs/VictoriaLogs/LogsQL.md: mention that collapse_nums can miss collapsing some numbers or can collapse unexpected numbers
Suggest a solution with replace_regexp() pipe for custom collapsing.
2024-12-11 16:32:36 +01:00
Aliaksandr Valialkin
ec193ef691 docs/VictoriaLogs/querying/vlogscli.md: document \wrap_long_lines option
This is a follow-up for f55791f20b
2024-12-11 15:54:35 +01:00
hagen1778
e669c87af4 docs/changelog: re-order LTS releases for better navigation
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 15:27:38 +01:00
Roman Khavronenko
87c1b2de6f deployment/docker: update base Alpine docker image from 3.20.3 to 3.21.0 (#7798)
See https://alpinelinux.org/posts/Alpine-3.21.0-released.html


### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 11:30:37 +01:00
hagen1778
bcd8d9d6c6 docs: re-order changes by priority
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 11:06:38 +01:00
f41gh7
dbed0de650 lib/timeserieslimits: follow-up for 564e6ea024
Changed the enabled-limit condition to `or` instead of `and`, since labels must be checked if at least one of the limits is defined.

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-11 11:00:27 +01:00
hagen1778
34a730ac65 docs: update wording after 564e6ea024
Mention all related limits and the way to troubleshoot them.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 10:55:07 +01:00
hagen1778
e21bdcdbc7 docs: make wording more transparent for readers
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 10:46:52 +01:00
Hui Wang
9db8e071c4 vmalert-tool: support debug mode for alerting rule (#7788)
Users can enable [debug
mode](https://docs.victoriametrics.com/vmalert/#debug-mode) in
vmalert-tool to check alerting rule evaluation status and write
`alert_rule_test` cases.
2024-12-11 09:49:14 +01:00
hagen1778
1627bcc6cb dashboards: add missing filter by instance to Go scheduling latency panel
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-11 09:03:09 +01:00
Github Actions
5033d05d55 Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@f31bdac (#7790) 2024-12-10 21:23:47 +01:00
hagen1778
5279faf02f deployment: bump victorialogs datasource version
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-10 21:23:08 +01:00
Andrii Chubatiuk
564e6ea024 app/{vminsert,vmagent}: drop time series on exceeding labels limits.
Previously, time series with labels exceeding the configured limits were truncated and written to storage, potentially causing data inconsistency. This could lead to collisions between time series and make it difficult to identify the source due to truncated labels.

This commit changes the behavior:
* Such time series are now rejected outright.
* Rejected time series are logged to stdout, and corresponding counters are incremented.
* Removes the `vm_too_long_label_values_total`, `vm_too_long_label_names_total` and `vm_metrics_with_dropped_labels_total` metrics.
* Adds the new values `too_many_labels`, `too_long_label_name` and `too_long_label_value` to the `reason` label of the `vm_rows_ignored_total` metric (see the example below).
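For example, the reworked counters might look like this in the /metrics output (the counts are illustrative):

```
vm_rows_ignored_total{reason="too_many_labels"} 12
vm_rows_ignored_total{reason="too_long_label_name"} 0
vm_rows_ignored_total{reason="too_long_label_value"} 3
```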

related issues:
- https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6928
- https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7661
2024-12-10 21:19:16 +01:00
Zhu Jiekun
6b48126603 discovery/docker: add match_first_network support for docker_sd_configs
This commit aligns the behaviour of Docker service discovery with the Prometheus implementation.

It adds the following changes:
* introduces a new config param `match_first_network` with a default value of `true`. It uses the first network if the container has multiple networks
defined. This should help avoid duplicate-target collection errors in multi-network setups.

* adds `networks` for containers whose network is linked to other containers via the `network_mode: container:id` setting. This resolves an issue with attached containers, aka `pods`, in Kubernetes.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7398
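A hedged config sketch for the new param (the Docker socket address is an assumption):

```
scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        # use only the first network when a container has several (the default)
        match_first_network: true
```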
2024-12-10 20:15:33 +01:00
Yury Molodov
4a2192431d vmui: prevent accordion collapse on text selection in headers
Prevent accordion from collapsing when selecting text in headers.

Related issue: 
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7742
2024-12-10 20:05:32 +01:00
Yury Molodov
86bc7d5cd1 vmui: fix incorrect message in Table tab
Updated the message in the “Table” tab of the VictoriaMetrics UI. It now
correctly displays the step value based on the actual configuration.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7401
2024-12-10 20:00:23 +01:00
hagen1778
d05fadf988 docs: fix typo in facets example
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-10 15:52:33 +01:00
Hui Wang
e439e40e79 app/vmalert: fix possible template overwritten between rule annotations
The previous commit b09272ccac added a regression, which could lead to template
global state overwrites.

The issue relates to the mechanism by which `vmalert` inherits templates. It has global templates, which can be changed via the `-rule.templates` flag, and local templates defined per labels/annotations for rules and groups.

During labels/annotations templating, the state could be changed via the `define` syntax.

This commit restores the previous behavior with a `Clone` call for templates before templating labels/annotations.

Affected releases:
- v1.106.1
- v1.102.7
- v1.97.12

 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6894
2024-12-10 14:59:40 +01:00
Nikolay
d6f5ba2887 app/vmauth: allow to start with empty auth config file
This commit adds the ability to launch vmauth without a configuration file,
which is a possible use case for operator-based installations.

The operator provides the global resource `VMAuth` and allows creating
`VMUser` objects for it. Eventually the operator creates the configuration for
`VMAuth` based on user-defined selectors for `VMUser`.

Since there are no direct relations between those objects, and any object
can be created on demand by Kubernetes users, it's required that `vmauth`
be able to start with an empty auth config file.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6467
2024-12-10 14:51:11 +01:00
Github Actions
94e4c4e367 Automatic update helm docs from VictoriaMetrics/helm-charts@42d92a7 (#7783)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-10 14:48:29 +01:00
Github Actions
aadd8d5f3a Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@649d972 (#7786) 2024-12-10 14:47:03 +01:00
Dmytro Kozlov
44d8e6a19d deployment/docker: update victorialogs datasource versions to the latest releases (#7787)
### Describe Your Changes
Updated victorialogs-datasource to the latest
[release](https://github.com/VictoriaMetrics/victorialogs-datasource/tree/v0.11.0)
### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-10 13:08:55 +01:00
hagen1778
6b0ae0b79f docs/vmalert: update debug description
* mention that `debug` messages require -loggerLevel=INFO
* rm version requirement, as the mentioned version is pretty old
and it is likely everyone is using a newer version

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-10 10:22:39 +01:00
Nikolay
a51a18403c lib/storage: properly apply dedup.minScrapeInterval
Previously, if only `-dedup.minScrapeInterval` was set without
`-downsampling.period`, the getDownsamplingFilters function
returned an empty result for downsamplingPeriodFilters,
because it didn't take the globalDedup variable into account.

This commit adds a fast path for this case and returns a single
downsampling filter with the global interval value.

In addition, it adds the following changes:

* Removes global state modification in the ParseDownsamplingPeriods
  function, which could lead to data races in vmselect
* Simplifies the logic of the isDedupNeeded function: since the
  downsampling periods without filters are a subset of the downsampling
  periods by filters, there is no need for a len check
* Improves tests by properly resetting the global state of downsampling

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7764
2024-12-09 15:20:22 +01:00
Aliaksandr Valialkin
de0ae735aa lib/logstorage: add count_uniq_hash function to stats pipe
This function calculates the number of unique value hashes. This number is a good approximation
of the number of unique values. The `count_uniq_hash` function uses less memory and works faster
than `count_uniq` when applied to fields with a big number of unique values.
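A usage sketch (the `user_id` field name is hypothetical):

```
_time:5m | stats count_uniq_hash(user_id) as approx_unique_users
```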
2024-12-09 13:29:41 +01:00
Alexander Marshalov
acbe526307 vmbackupmanager: increase min sleep time between scheduling cycles from 0 to 1s to avoid spammed logs. (#807)
* vmbackupmanager: increase min sleep time between scheduling cycles from 0 to 1s to avoid spammed logs.

* Update docs/changelog/CHANGELOG.md

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-09 12:29:25 +01:00
Github Actions
9a6ddb48df Automatic update operator docs from VictoriaMetrics/operator@ee0406d (#7765)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-09 12:21:30 +01:00
Github Actions
7d3e60f7f1 Automatic update helm docs from VictoriaMetrics/helm-charts@4b502fc (#7768)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-09 12:21:01 +01:00
Aliaksandr Valialkin
f54f73033b docs/VictoriaLogs/LogsQL.md: typo fix: remove double with with 2024-12-09 00:37:01 +01:00
Aliaksandr Valialkin
75a2e23b7e deployment: update VictoriaLogs Docker image from v1.3.1-victorialogs to v1.3.2-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.3.2-victorialogs
2024-12-09 00:34:53 +01:00
Aliaksandr Valialkin
6fe079dbfb docs/VictoriaLogs/CHANGELOG.md: cut v1.3.2-victorialogs release 2024-12-09 00:30:59 +01:00
Aliaksandr Valialkin
843fae3419 lib/logstorage: fix possible panic in stream_context pipe
The panic may occur when the surrounding logs for some original log entry are empty.
This is possible when these logs were included in the surrounding logs for the previous original log entry.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7762
2024-12-09 00:24:20 +01:00
Aliaksandr Valialkin
db961f8609 lib/logstorage: add an ability to detect common patterns at collapse_nums pipe
The following patterns are detected:

- `<N>-<N>-<N>-<N>-<N>` is replaced with `<UUID>`.
- `<N>.<N>.<N>.<N>` is replaced with `<IP4>`.
- `<N>:<N>:<N>` is replaced with `<TIME>`. Optional fractional seconds after the time are treated as a part of `<TIME>`.
- `<N>-<N>-<N>` and `<N>/<N>/<N>` are replaced with `<DATE>`.
- `<N>-<N>-<N>T<N>:<N>:<N>` and `<N>-<N>-<N> <N>:<N>:<N>` are replaced with `<DATETIME>`. An optional timezone after the datetime is treated as a part of `<DATETIME>`.
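An illustrative before/after for a made-up log line:

```
request from 10.20.30.40 finished in 25ms at 2024-12-08 20:09:02
request from <IP4> finished in <N>ms at <DATETIME>
```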
2024-12-08 20:09:02 +01:00
Aliaksandr Valialkin
c45451bf69 lib/promutils: properly parse timestamps in microseconds and nanoseconds
This is needed for _time filter in VictoriaLogs, which supports timestamps with nanosecond precision
2024-12-08 20:07:43 +01:00
Aliaksandr Valialkin
30029f1e39 deployment: update VictoriaLogs Docker image from v1.3.0-victorialogs to v1.3.1-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.3.1-victorialogs
2024-12-08 02:20:37 +01:00
Aliaksandr Valialkin
48f395456e lib/logstorage: fix assignment to entry in nil map panic at facets pipe
The panic has been introduced in the commit b4f3861690
2024-12-08 02:16:46 +01:00
Aliaksandr Valialkin
08ce6ef825 deployment: update VictoriaLogs Docker image from v1.2.0-victorialogs to v1.3.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.3.0-victorialogs
2024-12-08 01:49:13 +01:00
Aliaksandr Valialkin
cd10bb585c docs/VictoriaLogs/CHANGELOG.md: cut v1.3.0-victorialogs release 2024-12-08 01:41:41 +01:00
Aliaksandr Valialkin
4ac94db2c7 app/vlogscli: show '<', '>' and '&' as is in JSON output instead of using the corresponding \uXXXX encoding
This improves reading JSON lines with these chars at vlogscli
2024-12-08 01:26:11 +01:00
Aliaksandr Valialkin
65d831a0ee lib/logstorage: add collapse_nums pipe, which replaces decimal and hexadecimal nums in the given log field with <N>
This is useful for detecting patterns across log messages, which differ by various numeric fields,
with the following query:

_time:1h | collapse_nums | top 10 by (_msg)
2024-12-08 01:03:30 +01:00
Aliaksandr Valialkin
48540ac409 app/vlselect: allow passing max_value_len query arg to /select/logsql/facets API
The max_value_len query arg allows controlling the maximum length of values
per log field. If the length is exceeded, then the log field is dropped
from the results, since it contains an incomplete (misleading) set of the most frequently seen field values.
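A hedged usage sketch (the host and the values are assumptions):

```
curl http://localhost:9428/select/logsql/facets \
    -d 'query=_time:1h' \
    -d 'max_value_len=100'
```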
2024-12-07 14:30:07 +01:00
Aliaksandr Valialkin
3cef820cba lib/logstorage: facets pipe: return back ignoring empty values
It is impossible to count all the empty values per every seen field,
since they aren't counted for data blocks which do not contain the given field.
So it is better to ignore empty values in order to reduce the level of confusion
when users see incorrect hits for empty per-field values.
2024-12-07 14:24:39 +01:00
Aliaksandr Valialkin
b4f3861690 lib/logstorage: facets pipe: ignore fields, which contain at least a single value with too big length
It is very confusing to see an incomplete set of values for fields which contain a subset of short values
while the rest of the values are too long. It is better to ignore all the values in such fields.

It is also very confusing if the list of the most frequent values lacks an empty value.
So it is better to count hits for an empty value.
2024-12-07 12:45:42 +01:00
Aliaksandr Valialkin
4c8691450a lib/logstorage: stream_context pipe: reduce the amounts of surrounding logs to check
Do not check surrounding logs before the selected log if `after N` is set,
and do not check logs after the selected log if `before N` is set.

This is a follow-up for 08af80ebe0

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7637
2024-12-07 12:21:22 +01:00
Andrii Chubatiuk
fefa3e7936 deployment/rules: updated sum expressions in alerts to be able to inject cluster labels in helm charts scripts (#7670)
### Describe Your Changes

Many users run k8s-stack in multiple Kubernetes clusters, and to
configure proper routing in alertmanager it's required to support a
`cluster` label in alerting rules. It's now implemented in helm-chart
hack scripts, but it's tricky to decide whether the cluster label should be
added when functions have no `by` expression. Updated existing
alerts to provide the ability to inject the cluster label later.

Also take into account `storage.minFreeDiskSpaceBytes` in the
`DiskRunsOutOfSpace` alerts.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-12-06 15:48:09 +01:00
Aliaksandr Valialkin
08af80ebe0 lib/logstorage: add an ability to change the time window for searching for surrounding logs in the stream_context pipe
Thanks to @worker24h for the idea at https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7637#issuecomment-2523313740
2024-12-06 15:47:52 +01:00
Andrii Chubatiuk
915867fe56 cspell: fixed typos, updated dictionary 2024-12-06 15:29:34 +01:00
Github Actions
786ce2c5b3 Automatic update helm docs from VictoriaMetrics/helm-charts@45df5e5 (#7751)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-06 15:28:55 +01:00
Aliaksandr Valialkin
bddb0e369f lib/logstorage: optimize stream_context pipe over log streams with tens of millions of logs
`stream_context` is implemented in a way which needs scanning all the logs for the selected log streams.
The scan performance is usually fast, since the majority of blocks are skipped because they do not contain
rows with the needed timestamps. But there was a pathological case with `stream_context before N`:

VictoriaLogs usually scans blocks in chronological order. That means that the `before` context logs are constantly
updated with new logs. This requires reading the actual data for the requested log fields from disk.
The workaround is to split the process of obtaining stream context logs into two phases:

1. Select only timestamps for the stream context logs, without selecting other log fields.
   This operation is usually much faster than reading the requested log fields.

2. Select stream context logs for the selected timestamps. This operation is usually fast,
   since the requested number of context logs is usually not so big.

Performance testing of the new algorithm shows up to 30x speed improvement for `stream_context before N`
and up to 5x speed improvement for `stream_context after N` when applied to a log stream with 50M logs.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7637
2024-12-06 15:00:46 +01:00
Github Actions
f322494ca2 Automatic update operator docs from VictoriaMetrics/operator@a5fd092 (#7753)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
Co-authored-by: Hui Wang <haley@victoriametrics.com>
2024-12-06 11:48:10 +08:00
Aliaksandr Valialkin
ceb081a018 deployment: update VictoriaLogs Docker image from v1.1.0-victorialogs to v1.2.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.2.0-victorialogs
2024-12-06 04:42:39 +01:00
Aliaksandr Valialkin
e2fa25ab29 docs/VictoriaLogs/CHANGELOG.md: cut v1.2.0-victorialogs release 2024-12-06 04:35:15 +01:00
Aliaksandr Valialkin
740548ccfc app/vlselect: add /select/logsql/facets endpoint
This endpoint returns the most frequent values per each field seen in the selected logs.
This endpoint is going to be used by VictoriaLogs web UI for faceted search.
2024-12-06 02:41:09 +01:00
Aliaksandr Valialkin
dbec34bafc lib/logstorage: add facets pipe for returning the most frequent values across all the log fields seen in the selected logs 2024-12-06 01:24:15 +01:00
Aliaksandr Valialkin
04796ba249 lib/fs: suggest increasing the limit on the number of open files in the error message when the file cannot be opened by ReaderAt
This should simplify troubleshooting of a too-low limit on the number of open files.
2024-12-06 01:07:30 +01:00
Aliaksandr Valialkin
5c7b044685 lib/fs: suggest possible solutions inside cannot allocate memory errors during failed mmap attempt
This should improve troubleshooting of such errors.
2024-12-06 00:52:18 +01:00
Aliaksandr Valialkin
80c5066ef3 lib/logstorage: properly format math pipe expressions, which contain multiple binary operators with the same priority
Previously such expressions were improperly formatted, which could result
in incorrect calculations at vlogscli.

For example, 'x / (y / z)' was formatted as 'x / y / z',
while 'x - (y + z)' was formatted as 'x - y + z'.
2024-12-05 17:10:47 +01:00
Aliaksandr Valialkin
c3b8da81cd lib/logstorage: add rate and rate_sum stats functions
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7415
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7646
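A usage sketch (the `bytes_sent` field is hypothetical):

```
_time:5m | stats rate() as logs_per_sec, rate_sum(bytes_sent) as bytes_per_sec
```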
2024-12-05 17:10:46 +01:00
f41gh7
8b1fd6a619 docs/changelog: mention vmselect panic bugfix
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-05 15:34:15 +01:00
Aliaksandr Valialkin
b57f8d3cb6 docs/VictoriaLogs/LogsQL.md: add references to first and last pipes from the top pipe description
`top` pipe can be confused with the `first` and `last` pipes, so add references to these pipes from the `top` pipe docs.
This should help users locate the needed pipes.
2024-12-05 13:33:40 +01:00
Hui Wang
f4776fec1b app/vmauth: fix requests routing by host when using `src_hosts`
Requests processed by the built-in HTTP server have the [origin
form](https://datatracker.ietf.org/doc/html/rfc7230#section-5.3) rather
than the absolute form.

So in [Request.URL](https://pkg.go.dev/net/http#Request), fields other than
Path and RawQuery will be empty.
> 	// For server requests, the URL is parsed from the URI
> 	// supplied on the Request-Line as stored in RequestURI.  For
> 	// most requests, fields other than Path and RawQuery will be
> 	// empty. (See RFC 7230, Section 5.3)

Using the `request.Host` field instead to match `src_hosts` fixes the issue and allows routing requests properly.

In addition, it allows users to route requests with a customized `Host` header.
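A hedged vmauth config sketch routing by the Host header (hostnames and backends are placeholders):

```
unauthorized_user:
  url_map:
    - src_hosts:
        - 'app1\.example\.com'
      url_prefix: 'http://backend1:8080/'
    - src_hosts:
        - 'app2\.example\.com'
      url_prefix: 'http://backend2:8080/'
```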
2024-12-05 11:44:59 +01:00
Github Actions
b76c77649d Automatic update operator docs from VictoriaMetrics/operator@a1ef5dd (#7716)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-05 10:00:12 +01:00
Github Actions
cedacf5f5c Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@3bc9782 (#7743) 2024-12-05 09:59:25 +01:00
Github Actions
a2c3b33e42 Automatic update helm docs from VictoriaMetrics/helm-charts@5c35472 (#7747)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-05 09:59:04 +01:00
hagen1778
7d1477e984 deployment: bump victorialogs plugin version to v0.10.0
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-05 09:47:58 +01:00
Aliaksandr Valialkin
02effba767 vendor: update github.com/VictoriaMetrics/metricsql from v0.81.0 to v0.81.1
This fixes a possible `index out of range` panic when the -search.logImplicitConversion
or -search.disableImplicitConversion command-line flags are passed to vmselect
and it tries executing an incorrect query with too small a number of arguments passed
to a rollup function.
2024-12-05 02:39:06 +01:00
Aliaksandr Valialkin
601a25d4e8 deployment: update VictoriaLogs Docker image from v1.0.0-victorialogs to v1.1.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.1.0-victorialogs
2024-12-05 02:02:23 +01:00
Aliaksandr Valialkin
b9b117d149 docs/VictoriaLogs/CHANGELOG.md: cut v1.1.0-victorialogs release 2024-12-05 01:55:11 +01:00
Aliaksandr Valialkin
0b021fa5a7 app/vlselect/vmui: run make vmui-logs-update after the commit 10c42668a1 2024-12-05 01:54:03 +01:00
Aliaksandr Valialkin
b43fcc0cf8 lib/logstorage: add tests, which verify that offset and limit pipes cannot be used in /select/logsql/stats_query_range
`offset` and `limit` pipes cannot be applied individually per every step on the [start ... end] time range,
so they must be disallowed at /select/logsql/stats_query_range.

This is a follow-up for 534371031e
2024-12-05 01:49:39 +01:00
Aliaksandr Valialkin
534371031e lib/logstorage: add first and last pipes
The `first N by (field)` pipe is a shorthand for `sort by (field) limit N`,
while the `last N by (field)` pipe is a shorthand for `sort by (field) desc limit N`.

While at it, add support for partitioning sort results by log groups and applying
individual limit per each group.

For example, the following query returns up to 3 logs per each host with the biggest value
for the `request_duration` field:

_time:5m | last 3 by (request_duration) partition by (host)

This query is equivalent to the following one:

_time:5m | sort by (request_duration) desc limit 3 partition by (host)

Automatically add the `partition by (_time)` into the `sort`, `first` and `last` pipes
used in the query to `/select/logsql/stats_query_range` API.
This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7699
2024-12-05 01:42:03 +01:00
Aliaksandr Valialkin
0602d60047 deployment/docker: update Go builder from Go1.23.3 to Go1.23.4
See https://github.com/golang/go/issues?q=milestone%3AGo1.23.4+label%3ACherryPickApproved
2024-12-04 22:45:20 +01:00
Aliaksandr Valialkin
cdc0db8ad7 lib/logstorage: properly ignore log fields when they are passed via streamFields arg to LogRows.MustAdd()
Previously streamFields were unconditionally added to log stream fields, even if they were listed in the ignoreFields.
Also do not add extraStreamFields to log stream fields if streamFields is non-nil, since this may confuse users.

This is a follow-up for 17b813ba28

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7554
2024-12-04 21:45:06 +01:00
Aliaksandr Valialkin
77430b797d lib/logstorage: add support for uppercase/lowercase transformations for log fields in "| format ..." pipe
This is needed for consistent formatting of some log fields in the same case.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7620#issuecomment-2502170924
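A usage sketch, assuming the `<uc:...>`/`<lc:...>` placeholders follow the same convention as the other format pipe transformations (the `level` field is hypothetical):

```
_time:5m | format "<uc:level>" as level
```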
2024-12-04 14:38:37 +01:00
Artem Fetishev
f2ad481a1f vendor: update metricsql to v0.81.0
This is a follow-up for
https://github.com/VictoriaMetrics/metricsql/pull/37. 

It ports the fix to the VictoriaMetrics repo

Related issue:

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5796


---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-12-04 14:27:09 +01:00
Aliaksandr Valialkin
17b813ba28 app/vlinsert: use default set of log stream fields for Loki and OpenTelemetry protocols if _stream_fields query arg is empty
Loki protocol supports a list of log stream labels - see https://grafana.com/docs/loki/latest/get-started/labels/

OpenTelemetry protocol also supports a list of log stream labels, which are named resource attributes there.
See https://opentelemetry.io/docs/concepts/resources/#semantic-attributes-with-sdk-provided-default-value

Simplify logs' ingestion into VictoriaLogs for these protocols by allowing data ingestion without
the need to specify the _stream_fields query arg or the VL-Stream-Fields HTTP header. In this case the upstream log stream fields
are used during data ingestion. The set of log stream fields can be overridden via the _stream_fields query arg
and via the VL-Stream-Fields HTTP header if needed.

Thanks to @AndrewChubatiuk for the initial idea and implementation at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7554
2024-12-04 13:57:23 +01:00
Aliaksandr Valialkin
6a71921565 lib/logstorage: ignore logs with too many fields instead of trying to store them
The storage isn't designed to work efficiently with logs containing too many log fields.
It is better to emit a warning to the user and ignore such logs instead of trying to store them.
This allows the user to fix the issue ASAP, and won't lead to excess resource usage
on the VictoriaLogs side, such as RAM, CPU, disk IO and disk space.

While at it, ignore too long logs with a size exceeding the maximum block size during data ingestion.
This should prevent possible issues when dealing with such long logs if they were stored in the storage.
Emit a warning in this case, so the user can identify and fix the issue ASAP.

This is a follow-up for 22e6385f56

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7568
2024-12-04 12:18:34 +01:00
Aliaksandr Valialkin
7e924d7ecf app/vlinsert: properly skip too long lines at Elasticsearch bulk import protocol
Previously, a too-long line in the Elasticsearch bulk import protocol resulted in closing
the client stream and ignoring the rest of the log messages in the stream.

Now only the too-long message is properly ignored, while the rest of the log messages
are read successfully.

This is a follow-up for 61e7c77ce25967269192ed2e201f67d8c48b972e
2024-12-04 12:18:32 +01:00
Aliaksandr Valialkin
480a8be48f app/vlinsert: track vl_rows_ingested_total metric in a single place
Previously, the vl_rows_ingested_total metric was tracked individually per each supported data ingestion protocol.
It is better from a maintainability PoV to track this metric consistently in a single place - in the logMessageProcessor.AddRow() function,
in the same way as the vl_bytes_ingested_total metric is tracked.

This is a follow-up for 50bfa689c9
2024-12-04 12:18:30 +01:00
Aliaksandr Valialkin
c58d0549a8 app/vlinsert: continue parsing lines after too long lines in JSON line stream and Elasticsearch bulk import stream
Previously, all the lines after the too-long line in the stream were ignored. This wasn't expected by most users.
2024-12-04 12:18:28 +01:00
hagen1778
cc70b5bb34 docs: fix typo after 9feacf9761
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-04 09:10:54 +01:00
hagen1778
9feacf9761 docs: add version marker for retention/downsampling filters
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-04 09:01:36 +01:00
Fred Navruzov
53d438aab0 docs/vmanomaly: release v1.18.8 (#7734)
### Describe Your Changes

docs/vmanomaly: release v1.18.8

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-03 23:49:55 +02:00
hagen1778
d29260f4e8 docs: update telegraf ingestion examples
* explicitly mention it is using HTTP protocol
* consistently use `victoriametrics_url` placeholder across the docs
* mention v2 influx format in docs
* consistently remove/add extra newlines for better formatting

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-03 20:54:38 +01:00
hagen1778
671ba82894 docs: fix newline typo in vmauth
Extra new line was removed after macros substitution

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-03 15:47:15 +01:00
Github Actions
88fe4ebb34 Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@558d333 (#7725) 2024-12-03 15:44:02 +01:00
Dmytro Kozlov
1b1aef57e0 deployment/docker: update victorialogs datasource versions to the latest releases (#7726)
### Describe Your Changes

Update victorialogs datasource to the latest release
[version](https://github.com/VictoriaMetrics/victorialogs-datasource/releases/tag/v0.9.0)

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-03 15:43:34 +01:00
Artem Fetishev
30b9167965 apptest: Cluster replication tests (#7693)
### Describe Your Changes

Add cluster replication tests. No group replication yet. Some necessary
enhancements to the apptest framework have been done as well. Also other
existing tests were revisitied to take advantage of new QueryOpts added
by @f41gh7 in #7635.

The tests verify the following scenarios:
1.  Data is written to vmstorages multiple times
2. Vmselect deduplicates replicated data
3. Vmselect does not return partial result if it receives responses from
enough replicas
4. Vmselect does not wait for the rest from all replicas (skips slower
ones)

Something similar will be added for storage groups. These tests should
be used to prove that the fix for #6924 works and at the same time does
not break other aspects of replication.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-12-03 12:25:53 +01:00
hagen1778
24a2a4a962 dashboards: mention reserved disk space in descriptions
Follow-up after 57ddb51089

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-03 12:16:36 +01:00
Fred Navruzov
cdf384eb4d docs/vmanomaly - release v1.18.7 (#7719)
### Describe Your Changes

- docs/vmanomaly - release v1.18.7
- modified table markdown for proper rendering on vmanomaly component
pages

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-02 20:32:17 +02:00
f41gh7
c89926bdf7 docs/changelog: fixes date typo 2023->2024
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-12-02 14:27:57 +01:00
hagen1778
33a60f907c docs: bump last LTS versions
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-02 13:07:47 +01:00
hagen1778
5e0db31914 docs: re-order and cleanup changelog items
* sort changes by importance
* cleanup wording in change nots

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-12-02 13:02:23 +01:00
Github Actions
54f1a33a63 Automatic update operator docs from VictoriaMetrics/operator@2fc01c5 (#7714)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-12-02 11:54:29 +01:00
Lauri Tirkkonen
e4525516e2 docs/vlogs: replace reference to fluentbit with vector
the helm chart being referenced switched from fluentbit to vector in
a1aea5d694ff725c350b325205e3372b52242639, so update the docs to match
2024-12-02 11:53:29 +01:00
Github Actions
d3101b075f Automatic update helm docs from VictoriaMetrics/helm-charts@612923a (#7713)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-12-02 11:46:15 +01:00
f41gh7
b4d8d135e9 {docs,deployment}: update vm apps to the latest v1.107.0 release version 2024-12-02 11:09:05 +01:00
f41gh7
12c9aa9bf3 CHANGELOG.md: cut v1.107.0 release 2024-12-02 10:27:51 +01:00
Fred Navruzov
30b61c6d8a docs/vmanomaly - patch release v1.18.6 docs (#7706)
### Describe Your Changes

docs/vmanomaly - patch release v1.18.6 docs

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-12-01 18:20:52 +02:00
Aliaksandr Valialkin
4d39dfc0ee lib/logstorage: print column names to the error log message when the number of unique columns in a block exceeds the limit
This should simplify debugging issues related to a too-big number of columns per block in the future.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7568
2024-11-30 18:23:34 +01:00
Aliaksandr Valialkin
d2cd004710 lib/logstorage: follow-up for 22e6385f56
Make variable names and comments more clear. This should simplify code maintenance in the future.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7568
2024-11-30 18:04:40 +01:00
Nikolay
22e6385f56 lib/logstorage: fixes panic at Block.MustInitFromRows (#7695)
Previously, Block columns weren't properly limited by maxColumnsPerBlock,
and it was possible for more columns to be added per block than
expected - for example, if an ingested log stream has many unique fields
and their sum exceeds maxColumnsPerBlock.
We only enforce the fieldsPerBlock limit during row parsing, which
isn't enough to mitigate this issue. It would also
be very expensive to apply the maxColumnsPerBlock limit during
ingestion, since it requires tracking all possible field tag
combinations.

This commit adds a check for the maxColumnsPerBlock limit during the
MustInitFromRows function call, and it returns the offset of the rows and
timestamps added to the block.
The function caller must create another block and ingest the remaining rows
into it (see the sketch below).

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7568
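A hypothetical sketch of the resulting caller pattern (the names and the exact signature are assumptions, not the actual API):

```
// MustInitFromRows is assumed to return the number of rows actually consumed
// before the maxColumnsPerBlock limit was reached; the caller moves the
// remainder into a fresh block.
for len(rows) > 0 {
	var b Block
	n := b.MustInitFromRows(timestamps, rows)
	flushBlock(&b)
	timestamps, rows = timestamps[n:], rows[n:]
}
```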


### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-11-30 17:40:56 +01:00
Aliaksandr Valialkin
50bfa689c9 app/vlinsert: expose vl_bytes_ingested_total metric
This metric tracks the approximate number of bytes processed when parsing the ingested logs.
The metric is exposed individually for every supported data ingestion protocol. The protocol name
is exposed via the "type" label in order to be consistent with the vl_rows_ingested_total metric.

Thanks to @tenmozes for the initial idea and implementation at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7682

While at it, remove the unneeded "format" label from the vl_rows_ingested_total metric.
The "type" label should be enough for encoding the data ingestion format.
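
As a rough sketch (not the actual vlinsert code), such a per-protocol counter can be maintained with the github.com/VictoriaMetrics/metrics package; the protocol names and wiring here are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/VictoriaMetrics/metrics"
)

// bytesIngested returns the per-protocol counter; the "type" label carries
// the ingestion protocol name, mirroring vl_rows_ingested_total.
func bytesIngested(protocol string) *metrics.Counter {
	return metrics.GetOrCreateCounter(
		fmt.Sprintf(`vl_bytes_ingested_total{type=%q}`, protocol))
}

func main() {
	// Simulate parsing a 512-byte jsonline payload and a 128-byte loki payload.
	bytesIngested("jsonline").Add(512)
	bytesIngested("loki").Add(128)

	// Expose all registered counters in Prometheus text format.
	metrics.WritePrometheus(os.Stdout, false)
}
```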
2024-11-30 17:25:57 +01:00
Aliaksandr Valialkin
f8cb2cf1a0 app/vmctl/testdata: fix tests broken after updating Prometheus dependencies in the commit 7c40b95224
This is a follow-up for 765ce1b181

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7700
2024-11-29 22:46:40 +01:00
f41gh7
6d0420b454 make docs-update-version 2024-11-29 17:45:18 +01:00
f41gh7
5bf2d6a689 make vmui-update 2024-11-29 17:43:55 +01:00
f41gh7
765ce1b181 app/vmctl: follow-up after vendor-update
Comment out broken tests for the remote_read integration test.
 Prometheus broke library compatibility, so the tests need to be rewritten.
 The test structure and format should also be revisited and improved according to our test code style.

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-29 14:45:18 +01:00
f41gh7
036f33de48 app/vmagent: follow-up 430163d and 680b8c2
Removes the global defaultAuthToken, since it's no longer needed.
It was added as a fallback for the 'remoteWrite.multitenantURL' feature,
which was deprecated in v1.102 and removed.

 Also updates the newRemoteWriteCtxs function so that it no longer accepts auth.Token.
This was also a part of the removed feature.

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-29 14:36:59 +01:00
Nikolay
430163d01a app/vmagent: fixes multitenant token parse
Previously, vmagent produced a parsing error for the 'multitenant' auth token
value in the following cases:
* data ingestion with multitenant endpoints enabled
* data scraping at promscrape

 This is inconsistent with the other VictoriaMetrics components,
since 'multitenant' is a well-known token value for multitenancy via
labels, and vmagent is intended to be compatible with vminsert ingestion
endpoints.

 This commit replaces NewToken with the NewTokenPossibleMultitenant function
for token parsing. It allows the 'multitenant' value and makes
token values consistent across all components.
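
A simplified sketch of the idea, assuming the documented accountID[:projectID] token format (the real parsing lives in lib/auth; the lowercase names mark this as a sketch):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Token identifies a tenant as accountID[:projectID].
type Token struct {
	AccountID uint32
	ProjectID uint32
}

func newToken(s string) (*Token, error) {
	parts := strings.Split(s, ":")
	if len(parts) > 2 {
		return nil, fmt.Errorf("unexpected token format: %q", s)
	}
	accountID, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		return nil, fmt.Errorf("cannot parse accountID from %q: %w", s, err)
	}
	t := &Token{AccountID: uint32(accountID)}
	if len(parts) == 2 {
		projectID, err := strconv.ParseUint(parts[1], 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse projectID from %q: %w", s, err)
		}
		t.ProjectID = uint32(projectID)
	}
	return t, nil
}

// newTokenPossibleMultitenant treats the special "multitenant" value as
// "no fixed tenant" (nil token) instead of a parse error.
func newTokenPossibleMultitenant(s string) (*Token, error) {
	if s == "multitenant" {
		return nil, nil
	}
	return newToken(s)
}

func main() {
	for _, s := range []string{"42:7", "multitenant"} {
		t, err := newTokenPossibleMultitenant(s)
		fmt.Println(s, "->", t, err)
	}
}
```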

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7694
2024-11-29 14:04:07 +01:00
Aliaksandr Valialkin
7c40b95224 vendor: run make vendor-update 2024-11-29 13:48:50 +01:00
f41gh7
cd89923e3c app/vmauth: fixes ip_filters typo at example_config
The url_map section cannot have ip_filters; only the top-level config section
has it.
2024-11-29 11:01:00 +01:00
f41gh7
f404e0b3a5 app/vmauth: fixes ip_filters init for unauthorized_user
Previously, ip_filters wasn't properly initialized for this part of the config.
As a result, requests matching this section bypassed the filters.

 This commit properly initializes `ip_filters`.
2024-11-29 11:01:00 +01:00
f41gh7
1d4a4eb8b0 app/vmauth: add ip_filter deny metrics
Previously, all requests rejected by `ip_filter` were silently aborted.

This commit adds new metrics:
* vmauth_user_ip_denies_total
* vmauth_global_ip_denies_total
* vmauth_unauthorized_user_ip_denies_total

 It adds observability to this feature and allows measuring rejected requests.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6883

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-29 11:00:32 +01:00
Github Actions
30903d9361 Automatic update helm docs from VictoriaMetrics/helm-charts@f5f1b9c (#7691)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-29 10:30:34 +01:00
Hui Wang
000a918f38 app/vmalert-tool: print an error message if no rule group is found under 'rule_files'
Both vmalert and vmalert-tool support multiple `rule_files` and can use a
directory in place of a file, so it's OK if some files don't contain any rule
group. But vmalert-tool should warn the user if no rule group is found
in any of the `rule_files`.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7663
2024-11-29 10:29:04 +01:00
Nikolay
9c35807368 app/vmauth: add removeXFFHTTPHeaderValue flag
Previously, there was no option to replace the value of the `X-Forwarded-For`
HTTP header; it was only possible to remove it completely. That is not a good
solution, since the backend may require this information, while using the
direct value of this header is insecure and requires complex knowledge of the
infrastructure on the backend side (see articles on spoofing X-Forwarded-For).

This commit adds a new flag that replaces the content of the `X-Forwarded-For`
HTTP header with the current `RemoteAddress` of the client that sent the
request.
It should be used if `vmauth` is directly exposed to the internet.
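
A minimal net/http sketch of the idea (illustrative, not the actual vmauth implementation):

```go
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
)

// setXFFFromRemoteAddr overwrites any client-supplied X-Forwarded-For value
// with the remote address of the TCP connection, so the backend cannot be
// fooled by a forged header.
func setXFFFromRemoteAddr(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ip, _, err := net.SplitHostPort(r.RemoteAddr)
		if err != nil {
			ip = r.RemoteAddr
		}
		r.Header.Set("X-Forwarded-For", ip)
		next.ServeHTTP(w, r)
	})
}

func main() {
	backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "client ip: %s\n", r.Header.Get("X-Forwarded-For"))
	})
	log.Fatal(http.ListenAndServe(":8080", setXFFFromRemoteAddr(backend)))
}
```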

 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6883

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-29 10:25:47 +01:00
Nikolay
92512cbe54 app/vmauth: add real_ip_header to ip_filters
This commit allows vmauth to obtain the client IP address from HTTP headers.
The main scenario for it is vmauth located behind a reverse proxy.

 It adds both global and per-user configuration settings: the -httpRealIPHeader flag and the `real_ip_header` config option.

vmauth tries to obtain the IP from the header if this setting is set. If the header does not exist, vmauth falls back to `remoteAddress`.
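
A small sketch of the lookup-with-fallback logic (the header name and function are illustrative):

```go
package main

import (
	"fmt"
	"net"
	"net/http"
)

// clientIP returns the client IP from the configured header when present,
// falling back to the connection's remote address otherwise.
func clientIP(r *http.Request, realIPHeader string) string {
	if realIPHeader != "" {
		if v := r.Header.Get(realIPHeader); v != "" {
			return v
		}
	}
	ip, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return r.RemoteAddr
	}
	return ip
}

func main() {
	r := &http.Request{
		Header:     http.Header{"X-Real-Ip": []string{"203.0.113.7"}},
		RemoteAddr: "10.0.0.1:54321",
	}
	fmt.Println(clientIP(r, "X-Real-IP")) // 203.0.113.7
	fmt.Println(clientIP(r, ""))          // 10.0.0.1
}
```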

The commit also updates incorrect benchmarks and aligns the test package naming for ip_filters.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6883

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-28 14:55:35 +01:00
Yury Molodov
dec9a2f023 vmui: fix predefined panels
### Describe Your Changes

- Fixes the handling of the `showLegend` flag.  
- Fixes the handling of `alias`.  
- Adds support for alias templates, allowing dynamic substitutions like
`{{label_name}}`.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7565
2024-11-28 13:47:37 +01:00
Andrii Chubatiuk
0f06d2f072 deployment/docker: use vector for local setup (#7681) 2024-11-28 13:25:25 +01:00
Github Actions
9c6a4d915d Automatic update helm docs from VictoriaMetrics/helm-charts@a5bc12b (#7685)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-28 13:25:10 +01:00
Fred Navruzov
6fa3283d04 docs/vmanomaly - release 1.18.5 (#7684)
2024-11-28 13:34:45 +02:00
Nikolay
20d5314833 app/vmauth: adds dryRun flag for config validation
This flag only validates the configuration file pointed to by the `auth.config`
flag.

 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7505

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-27 13:51:48 +01:00
Yury Molodov
10c42668a1 vmui/logs: optimize memory consumption (#7524)
### Describe Your Changes

- **Memory Optimization**: Reduced memory consumption on the "Group" and
"JSON" tabs by approximately 30%.

- **Table Pagination**: Added pagination to the "Table" view with an
option to select the number of rows displayed (from 10 to 1000 items per
page, with a default of 1000). This change significantly reduced memory
usage by approximately 75%.

Related to #7185

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-27 13:49:06 +01:00
Andrii Chubatiuk
60d587f55b docs/vmgateway: added to documentation a case, when vm_access claim value is of string type 2024-11-27 11:13:59 +01:00
Viet Hung Nguyen
466cbee433 docs: fix rule unittest rule_files point to alerts.yaml (#7664)
### Describe Your Changes

Fix wrong path to rules_file in vmalert-tool doc

Signed-off-by: Viet Hung Nguyen <hvn@familug.org>
Co-authored-by: Hui Wang <haley@victoriametrics.com>
2024-11-27 08:04:21 +01:00
Hui Wang
cdbe69e62b docs: add multitenant endpoints usage for vmalert multitenancy (#7665)
2024-11-27 08:01:11 +01:00
Zhu Jiekun
7374a8813d lib/promscrape/discovery: properly apply the resource_group filter for Azure service discovery
Previously, this filter did not apply to virtual
machine scale sets, causing all virtual machines to be discovered.

 This commit conditionally adds the `resource_group` filter for Azure service discovery on virtual
machine scale sets.

 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7630.
2024-11-26 19:06:43 +01:00
Nikolay
bb99ddf957 apptest: adds cluster test for multitenant API requests
This commit adds an integration test for the multitenancy-via-labels feature -
https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy-via-labels

It also extends the current test models in order to:
- accept float timestamps returned from /api/v1/query_range and the query
API
https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmselect/prometheus/util.qtpl#L43
- accept arbitrary query URL params for requests; this simplifies testing
of the different VM API extensions


---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-26 19:05:03 +01:00
Andrii Chubatiuk
9cfdbc582f refactoring: change prompb to prompbmarshal everywhere internal series transformations happen (#7409)
### Describe Your Changes

Doing similar changes for both vmagent and vminsert (like the one in
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7399) ends up
with almost the same implementation in each of the packages instead of having
this shared code in one place. One of the reasons is the same Timeseries
and Labels structures in the different prompb and prompbmarshal packages.
My proposal is to use structures from the prompb package only to
marshal/unmarshal sent/received data, and to use only structures from the
prompbmarshal package for internal transformations.

Another example where it already helps to simplify code is the streaming
aggregation pipeline for vmsingle (now it first marshals
prompb.Timeseries to storage.MetricRow, and then, if streaming aggregation
or deduplication is enabled, it unmarshals all the series back, but to
prompbmarshal.Timeseries).

2024-11-26 12:45:17 +01:00
Roman Khavronenko
8ab1261750 app/vmauth: dump requests that failed the route rules to stderr (#7649)
Additional info from the dump can be used to debug routing rules.

https://pkg.go.dev/net/http/httputil#DumpRequest

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-26 10:36:27 +01:00
Artem Navoiev
b1324360b8 docs: update formatting on cloud billing page
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-26 09:52:53 +01:00
matty
c3fa806b2f docs: Bump LTS versions (#7658)
### Describe Your Changes

LTS versions are out of date, linking to old releases. This updates them
to the latest.

2024-11-26 09:45:06 +01:00
Hui Wang
786a10835b alerts-vmalert: reserve rule name for description (#7659) 2024-11-26 09:22:00 +01:00
Artem Navoiev
3adc7abf8f docs: add cloud billing page (#7655)

Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-25 11:03:15 -08:00
Github Actions
2962e35c0b Automatic update helm docs from VictoriaMetrics/helm-charts@ebc6d67 (#7653)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-25 18:07:27 +01:00
Andrei Baidarov
727bc02a5c vmagent: set up a timeout for tcp connection establishment during k8s discovery
Previously, the default dial timeout was used for the Kubernetes API server connection.

 This commit switches it to the custom dialer used by all the VictoriaMetrics components, which has a lower connection timeout (30s by default).


 Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7127

---------
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2024-11-25 18:02:09 +01:00
hagen1778
71d774f76d docs: re-structure multitenancy docs
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-25 13:10:30 +01:00
hagen1778
530b731101 docs: update otel docs
* cross-link related doc chapters and guides about otel
* mention different URL format for cluster version of VM

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-25 13:10:14 +01:00
hagen1778
ec81deb7e8 dashboards: fix Ingestion row for vmagent dashboard
Previously, clicking on the Ingestion row could result in a visual blip.
Re-ordering panels within the row seems to fix it.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-25 12:40:56 +01:00
hagen1778
0e6731323a docs: update link for tsdb stats word
The link was pointing to itself, which doesn't make sense.
Changed it to point to https://docs.victoriametrics.com/url-examples/#apiv1statustsdb

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-25 12:27:26 +01:00
Nikolay
bdac00f674 app/vmselect: fixes multitenant cache init
Previously, the multitenant cache was initialized before the flag.Parse call. This
didn't allow changing the cache expiration value, so the default value was
always used.

 This commit initializes the cache the first time it is used (see the sketch below).

 This commit also adds small cache improvements:
* a chore for cache cleanup: it now uses the common pattern for in-place item
filtering
* fail a cache request fast if the item is already expired
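
A minimal sketch of the lazy-initialization pattern; the flag name and cache shape here are hypothetical:

```go
package main

import (
	"flag"
	"fmt"
	"sync"
	"time"
)

// Hypothetical flag standing in for the cache expiration setting.
var cacheExpiration = flag.Duration("cacheExpireDuration", 5*time.Minute,
	"how long cached items are kept")

var (
	cacheOnce sync.Once
	cache     map[string]time.Time
)

// getCache initializes the cache on first use, i.e. after flag.Parse has
// already run, so the configured expiration value is actually respected.
func getCache() map[string]time.Time {
	cacheOnce.Do(func() {
		fmt.Println("initializing cache with expiration", *cacheExpiration)
		cache = make(map[string]time.Time)
	})
	return cache
}

func main() {
	flag.Parse() // runs before the cache is ever touched
	getCache()["tenant-0"] = time.Now().Add(*cacheExpiration)
}
```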

---------
Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-25 11:51:30 +01:00
Andrei Baidarov
037808dad5 app/vmselect: fix panic/incorrect tenant in key
This is a follow-up after 3120dc2

- Consistently use the key for rollupCache: in multitenant mode, cache keys use different authTokens. Previously this could lead to a panic in rare cases when the cache state was inconsistent.
- Do not share the `err` variable across goroutines for the `processBlock` function, as it could lead to data races (see the sketch below).
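
A small sketch of the race-free pattern for collecting errors from concurrent workers (illustrative, not the actual vmselect code):

```go
package main

import (
	"fmt"
	"sync"
)

// processBlocks runs processBlock concurrently. Each goroutine writes to
// its own error slot instead of sharing a single err variable, which
// would be a data race.
func processBlocks(blocks []int, processBlock func(int) error) error {
	errs := make([]error, len(blocks))
	var wg sync.WaitGroup
	for i, b := range blocks {
		wg.Add(1)
		go func(i, b int) {
			defer wg.Done()
			errs[i] = processBlock(b) // per-goroutine slot, no race
		}(i, b)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := processBlocks([]int{1, 2, 3}, func(b int) error {
		if b == 2 {
			return fmt.Errorf("block %d failed", b)
		}
		return nil
	})
	fmt.Println(err)
}
```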

Related issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7549
---------
Signed-off-by: Andrei Baidarov <abaidarov@yandex.ru>
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2024-11-25 11:50:49 +01:00
Hui Wang
508bafced3 vmalert-tool: exit immediately with error message if no test file found in -files (#7627) 2024-11-25 11:43:50 +01:00
Github Actions
0b2b96422d Automatic update helm docs from VictoriaMetrics/helm-charts@6a5b14b (#7642)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-25 10:08:51 +01:00
dmitry-shur
2328656f87 docs: fixed typo in FAQ (#7643)
2024-11-25 09:43:00 +01:00
Github Actions
9d5ef1cdd1 Automatic update helm docs from VictoriaMetrics/helm-charts@a582432 (#7641)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-25 09:10:51 +01:00
hagen1778
c9837de9cd docs: add vmui section to cluster docs
It looks like users get confused by vmui availability in vmselect.
Adding a note about it to the cluster docs should improve the situation.

See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7631

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-24 14:18:26 +01:00
Artem Navoiev
6c6e469bfb docs: cloud mention silver level in support
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-23 11:08:18 +01:00
Hui Wang
6ff1de89a9 vmalert: fix alert states restoration (#7624)
Previously, when an alert got resolved shortly before the vmalert
process shut down, this could result in false alerts.

This change switches vmalert to using a MetricsQL function during alert state restore, which makes
state restoration incompatible with PromQL.

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-22 09:11:31 +01:00
Artem Fetishev
f2d1f0716b apptest: add tests for stale nans in instant query (#7621)
### Describe Your Changes

These are the integration tests that confirm that instant queries may
return stale NaNs when the query contains a rollup function.

The bug was reported at #5806. There is also a fix: #7275. The tests in
this PR will be used to confirm that the fix works.

Some test refactoring has been done along the way. Sorry, couldn't
resist.

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-21 19:39:17 +01:00
Github Actions
4319d9f2b0 Automatic update Grafana datasource docs from VictoriaMetrics/victoriametrics-datasource@a5e2cb4 (#7622) 2024-11-21 19:32:27 +01:00
Dmytro Kozlov
5a97d512c0 deployment/docker: update victoriametrics datasource versions to the latest releases (#7623)
### Describe Your Changes

Updated victoriametrics-datasource to the newest
[release](https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/tag/v0.10.3)
version

2024-11-21 19:32:04 +01:00
hagen1778
a15fcac1b6 docs: update differences between vmagent and Prometheus in FAQ
* mention stream aggregation
* rm statement that Prometheus can only pull data, which is not true anymore
* mention absence of backfilling limitations

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-21 16:30:59 +01:00
Ivan Yurochko
5dd879cd17 lib/streamaggr: add ignore_first_sample_interval param for streamaggr cfg (#7313)
### Describe Your Changes

By default, aggregated output in streaming aggregation
takes a staleness interval into account and only starts sending the first
samples after the staleness interval passes. We have a use case where we
prefer to start sending data as soon as we have any. This adds an option to
configure when the first samples are sent (see the sketch below).
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7116
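
Under the assumption that the option simply suppresses output until the configured interval has elapsed since startup, the gating logic could look like this sketch:

```go
package main

import (
	"fmt"
	"time"
)

// shouldFlush reports whether an aggregated sample may be emitted yet:
// samples are suppressed until ignoreFirstSampleInterval has passed
// since the aggregator started.
func shouldFlush(start, now time.Time, ignoreFirstSampleInterval time.Duration) bool {
	return now.Sub(start) >= ignoreFirstSampleInterval
}

func main() {
	start := time.Now()
	fmt.Println(shouldFlush(start, start.Add(10*time.Second), time.Minute)) // false
	fmt.Println(shouldFlush(start, start.Add(2*time.Minute), time.Minute))  // true
}
```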

---------

Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-11-21 16:20:22 +01:00
Github Actions
84b4b5f3e5 Automatic update helm docs from VictoriaMetrics/helm-charts@b0095d3 (#7618)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-21 16:04:37 +01:00
hagen1778
a4b3ce9641 docs: mention Raw Query tab in vmui
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7024
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-21 16:03:05 +01:00
Yury Molodov
cd0ad293fe vmui: add Raw Query tab (#7575)
### Describe Your Changes

1. **Add new `Raw Query` tab**  
A new `Raw Query` tab has been added to the
[vmui](https://docs.victoriametrics.com/#vmui) interface for displaying
raw data. The tab uses the `/api/v1/export` API endpoint. Related issue:
[#7024](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7024)


2. **Fix rendering of isolated points on the graph**  
Previously, isolated points (not connected to other points on the left
or right) were not visible on the graph. Now, they are rendered
correctly.
 

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-21 15:52:50 +01:00
Nikolay
bb399518db app/vmselect: properly return binary pow function result (#7619)
Previously, for `^` aka pow function calls, VictoriaMetrics returned `1`
if the left arg was NaN. For example, the query `(hour()==2)^1` returned 1
for the NaN values produced by the hour() == 2 function. This added
non-existent datapoints to the time series.

This commit ports the bugfix from the `metricsql` package and adds a test for it.
Now VictoriaMetrics
correctly returns `NaN` for such cases.
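
A minimal illustration of the NaN propagation (not the actual metricsql code); for instance, plain math.Pow returns 1 for a NaN base when the exponent is 0, which is one way a filtered-out NaN can silently become a real value:

```go
package main

import (
	"fmt"
	"math"
)

// powWithNaN propagates NaN from either operand before falling back to
// math.Pow, so series filtered out to NaN stay filtered out.
func powWithNaN(left, right float64) float64 {
	if math.IsNaN(left) || math.IsNaN(right) {
		return math.NaN()
	}
	return math.Pow(left, right)
}

func main() {
	nan := math.NaN()
	fmt.Println(math.Pow(nan, 0))   // 1 - the surprising case
	fmt.Println(powWithNaN(nan, 0)) // NaN - what the fix ensures for `^`
}
```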

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7359

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-21 15:16:28 +01:00
Hui Wang
1bd927e3fe vmalert: remove deprecated cmd-line flags -datasource.lookback, `datasource.queryTimeAlignment` and `remoteRead.ignoreRestoreErrors` (#6779)

Those flags were all deprecated before
[v1.101.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.101.0).
2024-11-21 13:58:09 +01:00
Artem Fetishev
3383589fd1 lib/storage: confirm that changing retention period can cause previous indexDB deletion (#7569)
### Describe Your Changes

Add test cases proving that it is possible to lose indexDB after
changing the retention period. See #7609

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-21 10:44:21 +01:00
Github Actions
f07574a78e Automatic update operator docs from VictoriaMetrics/operator@b3e19a2 (#7608)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-11-21 09:48:14 +01:00
Github Actions
689196048f Automatic update helm docs from VictoriaMetrics/helm-charts@8a9669e (#7616)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-21 09:47:46 +01:00
Artem Fetishev
61532930e6 Makefile: remove -v flag from integration tests
This is a follow-up for 49fe403af1

This commit disables the verbosity in integration
tests after confirming that the tests run in both the master and cluster
branches.


Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-20 23:47:32 +01:00
Will Jordan
a19a4f34ff lib/tenantmetrics: improves CounterMap performance with large numbers of tenants
Previously, the map for storing tenant metrics was re-created for each newly ingested tenant. This had a significant performance impact on systems with a large number of tenants.

 This commit addresses the issue by changing how tenant metric records are created in the map. Instead of re-creating the map, it uses the `sync.Map` primitive (see the sketch below).
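
A condensed sketch of the sync.Map-based approach (simplified; the real CounterMap lives in lib/tenantmetrics):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// TenantID identifies a tenant.
type TenantID struct{ AccountID, ProjectID uint32 }

// CounterMap holds one counter per tenant. sync.Map avoids re-creating
// (copying) the whole map whenever a new tenant shows up, which is what
// made the old approach slow with many tenants.
type CounterMap struct{ m sync.Map }

// Get returns the counter for the given tenant, creating it on first use.
func (cm *CounterMap) Get(t TenantID) *atomic.Uint64 {
	if v, ok := cm.m.Load(t); ok {
		return v.(*atomic.Uint64)
	}
	v, _ := cm.m.LoadOrStore(t, &atomic.Uint64{})
	return v.(*atomic.Uint64)
}

func main() {
	var cm CounterMap
	cm.Get(TenantID{0, 0}).Add(3)
	cm.Get(TenantID{1, 0}).Add(5)
	fmt.Println(cm.Get(TenantID{0, 0}).Load(), cm.Get(TenantID{1, 0}).Load())
}
```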

Benchmark results:

```
goos: linux
goarch: amd64
pkg: github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics
cpu: AMD Ryzen 9 5900X 12-Core Processor
                                            │ lib/tenantmetrics/orig.bench │     lib/tenantmetrics/new.bench     │
                                            │            sec/op            │    sec/op     vs base               │
CounterMapGrowth/n=100,nProcs=GOMAXPROCS-24                  1943.2µ ±  5%   248.0µ ± 11%  -87.24% (p=0.001 n=7)
CounterMapGrowth/n=100-24                                    434.63µ ±  5%   98.82µ ± 16%  -77.26% (p=0.001 n=7)
CounterMapGrowth/n=1000-24                                   32.719m ± 20%   1.425m ±  5%  -95.65% (p=0.001 n=7)
CounterMapGrowth/n=10000-24                                 3653.60m ±  5%   18.00m ±  2%  -99.51% (p=0.001 n=7)
geomean                                                       17.83m         890.4µ        -95.00%
```

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7482

---
Co-authored-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-20 18:42:47 +01:00
Dmytro Kozlov
93c63d77c0 deployment/docker: update victorialogs datasource versions to the latest releases (#7604)
### Describe Your Changes

Updated the victorialogs data source version to the v0.8.0 release

2024-11-20 16:38:20 +01:00
dependabot[bot]
8735fb12fb build(deps): bump codecov/codecov-action from 4 to 5 (#7545)
Bumps
[codecov/codecov-action](https://github.com/codecov/codecov-action) from
4 to 5.
Release notes (condensed from codecov/codecov-action's releases): v5 of the
Codecov GitHub Action uses the Codecov Wrapper to encapsulate the CLI, which
helps the Action get updates quicker. The release also coincides with the
opt-out feature for tokens for public repositories, allowing contributors to
upload coverage reports without access to the Codecov token.

The following arguments have been deprecated: `file` (in favor of `files`)
and `plugin` (in favor of `plugins`). The following arguments have been added:
`binary`, `gcov_args`, `gcov_executable`, `gcov_ignore`, `gcov_include`,
`report_type`, `skip_validation`, `swift_project`. Their usage is described in
the action.yml file at
https://github.com/codecov/codecov-action/blob/main/action.yml .

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-20 16:37:54 +01:00
Alexander Frolov
7454d938cc app/vmagent: respect Pushgateway protocol in multi-tenant vmagent handler (#7571)
### Describe Your Changes

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3636 for
multi-tenant vmagent handler


---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-20 16:34:25 +01:00
Artem Fetishev
49fe403af1 apptest: typical cluster configuration for business logic tests
Add the ability to create a simple cluster configuration for tests that
do not verify cluster-specific behavior but instead are focused on
business logic, such as the API surface or MetricsQL. For such
tests this cluster configuration will be enough in most cases.

Cluster-specific tests should continue creating custom configurations.

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-20 16:30:55 +01:00
Hui Wang
580fb3ad85 doc: clarify rule group type (#7595)
address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7585
2024-11-20 16:23:13 +01:00
Renato Monteiro
4485877a83 docs: fix small grammatical error in CONTRIBUTING.md (#7596) 2024-11-20 16:22:42 +01:00
Github Actions
a8997e97de Automatic update operator docs from VictoriaMetrics/operator@c2b7a5a (#7598)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-11-20 16:21:49 +01:00
Github Actions
d3316b333d Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@c4d6669 (#7602) 2024-11-20 16:21:12 +01:00
Hui Wang
71f521fc0c vmalert: revert the default value of -remoteWrite.maxQueueSize from `1_000_000` to `100_000` (#7570)

It was bumped in
[v1.104.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.104.0),
which increases memory usage and is not needed for most setups. See
[this
issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7471).
2024-11-20 16:20:51 +01:00
Github Actions
6bf49aef03 Automatic update operator docs from VictoriaMetrics/operator@1e7c2c5 (#7582)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: Haleygo <39937150+Haleygo@users.noreply.github.com>
2024-11-19 16:40:51 +01:00
Github Actions
a8c5035d3d Automatic update helm docs from VictoriaMetrics/helm-charts@131846f (#7581)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-19 16:40:20 +01:00
Mathias Palmersheim
43a45cf4e3 Updated vmanomaly product link (#7580)
Fixes #7579
2024-11-18 15:56:48 -06:00
Fred Navruzov
a534df6cf3 docs/vmanomaly: add self-monitoring section (#7558)
### Describe Your Changes

- Added self-monitoring guide for `vmanomaly`.
- Added cross-referencing on other pages.
- Slight improvements in wording on related pages
- Update references to v1.18.4
- [x] publish Grafana dashboard to
https://grafana.com/orgs/victoriametrics/dashboards:
https://grafana.com/grafana/dashboards/22337-victoriametrics-vmanomaly/

@AndrewChubatiuk , JFYI if it somehow impacts your work on supporting
`vmanomaly` in operator.

2024-11-18 20:14:46 +02:00
JAYICE
a696ef21df docs: correct the wrong description of vector support in the docs (#7566)
### Describe Your Changes

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7557

Vector only supports OpenTelemetry as a source.
2024-11-18 16:30:43 +01:00
f41gh7
d4d3ec877e {deployment/,docs/}: point app versions to the latest v1.106.1 release
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-18 15:07:30 +01:00
f41gh7
4ade6f9d25 docs/changelog: mention 1.106.1,1.102.7 and 1.97.12 releases
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-11-18 14:33:04 +01:00
Hui Wang
37f1c76e71 add vlogs type of rule in example (#7548) 2024-11-18 14:28:16 +08:00
f41gh7
4b74baf696 CHANGELOG.md: cut v1.106.1 release
remove upcoming v1.107.0 release from changelog
2024-11-15 23:38:06 +01:00
f41gh7
d5f52adf3d make vmui-update 2024-11-15 19:21:51 +01:00
f41gh7
0abefae46c CHANGELOG.md: cut v1.107.0 release 2024-11-15 18:06:48 +01:00
Zakhar Bessarab
11af902b22 lib/storage/downsampling: handle dedup separately from downsampling rules with filters
Previously, dedup was added as a downsampling rule with a 0s offset to all downsampling rules with filters. That enforced a metric name lookup even in cases where it is not needed.
For example, the following configuration: `-dedup.minScrapeInterval=10s -downsampling.period={__name__=~"node.*"}:1h:1m` would be parsed as: `{__name__=~"node.*"}:1h:1m {}:0s:10s`

This commit changes this logic and treats dedup as a separate case. This allows performing metric name lookups only when the timestamp of the current partition is eligible for some of the downsampling filters. Newer parts will not trigger a metric name lookup and will apply deduplication directly.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7440
---------
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2024-11-15 17:56:47 +01:00
Nikolay
1985110de2 lib/storage: properly check for minMissingTimestamps
After the changes in commit 787b9cd, the minimal-timestamps check for extDB was performed without the context of the index search prefix.
It worked fine for the single-node version, but the cluster version uses a different prefix for
metricID search requests. This may lead to incomplete results if the minimal missing timestamp was cached
for a tenant with different ingestion patterns.

 A minimal reproducible case is:
- metrics were ingested for tenants 0 and 1
- at some point in time, metrics ingestion for tenant 1 stopped
- index records have the following timestamps layout:
 tenant 0:  1,2,3,4,5,6
 tenant 1:  1,2,3,4
- after indexDB rotation, containsTimeRange lookups may produce
  incorrect results:
 a time range request for tenant 1 - 5:6 caches 5 as the min timestamp;
 a request for the same or a smaller time range for tenant 0 now returns
empty results.

A second case:
- requests for a tenant without metrics always update the atomic value with an incorrect minimal time range for the other tenants.

 This commit replaces the single atomic with a map of search prefix keys (see the sketch below). It has a slight performance overhead,
but works consistently for the cluster version. minMissingTimestamp is cached by the prefix search key, which includes the tenantID.

 Since the map is only populated at runtime, it doesn't hold unused tenants for queries.
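
A rough sketch of caching the minimal missing timestamp per search-prefix key instead of in a single shared atomic (the semantics here are simplified for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

// missingTSCache remembers, per index search prefix (which includes the
// tenantID), the minimal timestamp known to be missing from the index.
type missingTSCache struct {
	mu sync.Mutex
	m  map[string]int64
}

func newMissingTSCache() *missingTSCache {
	return &missingTSCache{m: make(map[string]int64)}
}

// setMinMissing records ts as the minimal missing timestamp for prefix.
func (c *missingTSCache) setMinMissing(prefix string, ts int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if cur, ok := c.m[prefix]; !ok || ts < cur {
		c.m[prefix] = ts
	}
}

// isMissing reports whether ts is known to be missing for prefix.
func (c *missingTSCache) isMissing(prefix string, ts int64) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	minTS, ok := c.m[prefix]
	return ok && ts >= minTS
}

func main() {
	c := newMissingTSCache()
	c.setMinMissing("tenant1:metricID", 5)
	fmt.Println(c.isMissing("tenant1:metricID", 6)) // true
	fmt.Println(c.isMissing("tenant0:metricID", 6)) // false: keys are per tenant
}
```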

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7417
2024-11-15 16:25:13 +01:00
Andrei Baidarov
479ae93e04 app/vmselect: fixes possible panics for multitenant queries
This commit fixes a panic for multitenant requests and empty storage node responses for the tenants API.

 It also optimizes `populateSqTenantTokensIfNeeded` function calls by making them only once per query request. Previously, it was incorrectly called multiple times, once per storage node request.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7549
---------
Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2024-11-15 16:22:30 +01:00
Aliaksandr Valialkin
fc0b6c62fe docs: refer to the https://itnext.io/how-do-open-source-solutions-for-logs-work-elasticsearch-loki-and-victorialogs-9f7097ecbc2f in the appropriate places 2024-11-15 14:29:01 +01:00
Aliaksandr Valialkin
47a52f5f40 docs/Articles.md: fix broken link to Open-sourcing VictoriaMetrics article 2024-11-15 14:15:13 +01:00
andriibeee
5d85968659 app/vmauth: fix unauthorized_user routing inconsistency
This commit makes vmauth respect the unauthorized_user routing config for
requests that, despite having an Authorization header, failed to
authorize successfully.

 It covers the following use-cases:
- vmauth is used as a load balancer and must forward requests as is; there are no authorization configs.
- vmauth has an authorization config, but it must forward requests with invalid credential tokens to some other backend.

related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7543

---------
Signed-off-by: Andrii <andriibeee@gmail.com>
2024-11-15 12:28:25 +01:00
Fred Navruzov
a335ed23c7 docs/vmanomaly - patch release 1.18.3 (#7540)
2024-11-14 19:59:13 +02:00
Hui Wang
18afeff742 app/vmalert: fix flaky ut TestRecordingRule_Exec
The order of stale metrics can't be controlled in a recording rule, so
only two time series are used.
2024-11-14 15:30:39 +01:00
hagen1778
6b903d79a9 dashboards: rename datapoints to logs for vlogs dashboard
`Logs` has a clearer meaning than `datapoints` in this case.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-14 07:14:39 -07:00
Hui Wang
b09272ccac app/vmalert: improve performance when rules produce large volumes of results
1. Avoid storing the last evaluation results outside of rules; check for
stale time series as soon as possible.
2. Remove the duplicated template `Clone()`.

This pull request primarily reduces memory usage when rules produce
large volumes of results, as seen in
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6894.
The CPU time spent on garbage collection remains high and may be
addressed in a separate PR.
2024-11-14 12:23:39 +01:00
Zhu Jiekun
0a6d58b4ca app/vmctl: add command-line flag list to doc
This commit adds documentation for the **Command-line flags** of vmctl.

---
Related issue: 
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7521
2024-11-14 11:23:48 +01:00
Hui Wang
304996bc08 docs/vmalert: clarify some vmalert flags
Some flags are shared between datasourceURL and remoteReadURL, and some
flags are not valid when VictoriaLogs is the datasource.
2024-11-14 11:21:35 +01:00
Crazygit
5eb6a0f9da docs/vlogs: fixes typo for VL-Extra-Field
`VL-Extra-Field` should be changed to `VL-Extra-Fields` according to the code.
2024-11-14 11:20:30 +01:00
Artem Navoiev
1cf5cf05db docs: vmbackupmanager specify schedule
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-14 10:43:37 +01:00
Artem Navoiev
e5b4812d77 dashboards: fix read queries graph
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-14 10:24:29 +01:00
Fred Navruzov
5696a087b8 docs/vmanomaly - release 1.18.2 (#7533)
2024-11-13 18:52:20 +02:00
hagen1778
02e5fb81c5 dashboards: make dashboards-sync after 683f8c2780
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-12 17:58:05 -07:00
hagen1778
6fdc111fd2 dashboards: set Y-min to 0 for stats panel with range queries
Setting Y-min to 0 gives a better understanding of changes, as it shows the
absolute change. Otherwise, the panel shows the relative change and could give
a false impression of the changes.

Other panels in dashboards are either instant (no historical data displayed),
or already set to Y-min: 0.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-12 17:56:17 -07:00
Aliaksandr Valialkin
7e02cb484c deployment/docker: update VictoriaLogs from v0.42.0-victorialogs to v1.0.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.0.0-victorialogs
2024-11-12 19:15:57 +01:00
Aliaksandr Valialkin
99607e2f3b docs/VictoriaLogs/CHANGELOG.md: mention that VictoriaLogs gained all the planned features 2024-11-12 19:07:09 +01:00
Aliaksandr Valialkin
1bf58b2f13 docs/VictoriaLogs: cut v1.0.0-victorialogs 2024-11-12 18:31:59 +01:00
Aliaksandr Valialkin
4837dc6e09 docs/VictoriaLogs/querying/vlogscli.md: replace vlogsql with vlogscli everywhere in the docs
`vlogsql` was the initial name of the command-line utility for interactive querying of logs.
Later it was renamed to `vlogscli`.

Thanks to @grinapo for the bugreport at https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7512
2024-11-12 18:21:00 +01:00
Aliaksandr Valialkin
33a4f275b1 app/vmauth: properly inherit user-level options at url_map when url_prefix isn't set at the user level
The following user-level options must be unconditionally inherited by url_map, since this is what most users expect:

- retry_status_codes
- load_balancing_policy
- drop_src_path_prefix_parts
- discover_backend_ips

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7519
2024-11-12 17:53:19 +01:00
Hui Wang
32b89447ae dashboards: add file label filter to vmalert dashboard panels (#7515)
Previously, metrics from groups with the same name but in different
files could be mixed in the results.

e.g. the evaluation time
[here](https://grafana.maas.victoriametrics.com/d/LzldHAVnz/victoriametrics-vmalert?orgId=1&var-ds=PE8D8DB4BEE4E4B22&var-job=All&var-instance=All&var-file=%2Fetc%2Fvmalert%2Fconfig%2Fvm-per-tenant-rulefiles-0%2Fmaas-tenant-1011-maas-1011-vm-health.yaml&var-group=All&var-topk=5&editPanel=23)
is the total for multiple groups from different tenants.
2024-11-12 09:00:39 -07:00
Github Actions
254d9e2729 Automatic update helm docs from VictoriaMetrics/helm-charts@ae2dabc (#7522)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-12 09:00:19 -07:00
Github Actions
8b0f5b2315 Automatic update operator docs from VictoriaMetrics/operator@593d523 (#7510)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: Amper <495795+Amper@users.noreply.github.com>
2024-11-12 08:59:51 -07:00
Zhu Jiekun
58ae3772fe vmctl: add docs migrating from cluster to single with --vm-native-disable-binary-protocol (#7520)
### Describe Your Changes

when migrating from cluster to single, the native export API attaches the
`vm_account_id` and `vm_project_id` labels.
`--vm-native-disable-binary-protocol` is a workaround to remove them,
which has been available since v1.93.0 but was not documented in the vmctl doc.

See: https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4716

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-12 08:59:04 -07:00
Artem Fetishev
58dae07b7a tests: cover key concepts with more tests (#7516)
More key concept tests

-  Verify how the time range points are calculated
-  Verify that a range query is equivalent to many instant queries

Fix docs accordingly.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-12 08:57:54 -07:00
Aliaksandr Valialkin
83fc33af89 app/vmauth: simplify the logic for the fix at a0a154511a
The fix at a0a154511a looks too complicated and fragile:

- It moves buMin initialization to a place which is far from its usage.
- It embeds unclear logic for selecting the proper buMin when it is broken
  into an unrelated loop.

The actual fix should be clearer:

$ git diff 95acca6b52 -- app/vmauth/

-               if n := bu.concurrentRequests.Load(); n < minRequests {
+               if n := bu.concurrentRequests.Load(); n < minRequests || buMin.isBroken() {

This should simplify further maintenance of this code.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7489
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3061
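A minimal Go sketch of the selection loop this diff touches; the type and field names here are illustrative stand-ins, not the actual vmauth code:

```go
package sketch

import "sync/atomic"

// backendURL is an illustrative stand-in for the actual vmauth type.
type backendURL struct {
	concurrentRequests atomic.Int64
	broken             atomic.Bool
}

func (bu *backendURL) isBroken() bool { return bu.broken.Load() }

// Pick the backend with the fewest concurrent requests; the
// `|| buMin.isBroken()` condition is the one-line fix quoted above.
func getLeastLoadedBackendURL(bus []*backendURL) *backendURL {
	buMin := bus[0]
	minRequests := buMin.concurrentRequests.Load()
	for _, bu := range bus[1:] {
		if n := bu.concurrentRequests.Load(); n < minRequests || buMin.isBroken() {
			buMin = bu
			minRequests = n
		}
	}
	return buMin
}
```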
2024-11-12 16:43:07 +01:00
Fred Navruzov
47b7487b5f docs: vmanomaly - release 1.18.1 (#7517)
### Describe Your Changes

doc updates for vmanomaly v1.18.1

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-12 10:20:29 +02:00
Github Actions
7e72848ab3 Automatic update helm docs from VictoriaMetrics/helm-charts@27cf093 (#7498)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-10 16:55:33 -07:00
Roman Khavronenko
6857736f04 docs: recommend using available_from in CONTRIBUTING and release guides (#7493)

### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-10 16:54:46 -07:00
Roman Khavronenko
564c309b11 docs: use available_from shortcode for versioning features (#7492)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-10 16:54:28 -07:00
Aliaksandr Valialkin
0f2233aef3 Add --delete option to rsync command, so it removes non-existing files from the destination
- The --delete option must be passed to rsync during backups,
since otherwise the backup may contain superfluous files after the second run of rsync,
because these files may have already been removed at the source by background merge.

- The --delete option is also needed when restoring from a backup in order to remove superfluous files
from the destination directory. Otherwise these files may lead to inconsistent data in VictoriaLogs.
2024-11-10 10:10:29 +02:00
Aliaksandr Valialkin
cc908122bd docs/VictoriaLogs: remove support for alerting from the roadmap, since it has already been implemented in commit 68bad22fd2
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7255
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6706
2024-11-09 00:09:21 +01:00
Aliaksandr Valialkin
bd106de2b2 deployment/docker: update VictoriaLogs Docker image from v0.41.0-victorialogs to v0.42.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.42.0-victorialogs
2024-11-08 23:51:56 +01:00
Aliaksandr Valialkin
23aa1897c6 deployment: update Go builder from Go1.23.1 to Go1.23.3
See https://github.com/golang/go/issues?q=milestone%3AGo1.23.2+label%3ACherryPickApproved
and https://github.com/golang/go/issues?q=milestone%3AGo1.23.3+label%3ACherryPickApproved
2024-11-08 23:40:33 +01:00
Zhu Jiekun
4602752003 docs: [VictoriaLogs] FAQ add logs without msg field (#7487)
### Describe Your Changes

Add FAQs to VictoriaLogs:
- I want to ingest logs without a message field; is that possible?
- What if my logs have multiple message field candidates?

Preview:
https://github.com/VictoriaMetrics/VictoriaMetrics/blob/docs/VL-FAQ-empty-msg/docs/VictoriaLogs/FAQ.md#i-want-to-ingest-logs-without-message-field-is-that-possible

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-08 23:32:56 +01:00
Aliaksandr Valialkin
0eb3a0a902 docs/VictoriaLogs/CHANGELOG.md: cut v0.42.0-victorialogs 2024-11-08 23:29:18 +01:00
Aliaksandr Valialkin
546bf7d579 lib/logstorage: properly skip filtered out dict values when calculating uniq_values, min, max, row_min and row_max stats functions
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7458
2024-11-08 23:21:21 +01:00
Aliaksandr Valialkin
2f1ce74d97 lib/logstorage: properly clone field values at values stats function
Previously field values weren't properly cloned, which could lead to garbage output for `values` stats function

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7458
2024-11-08 23:16:42 +01:00
Aliaksandr Valialkin
9b766d3e32 lib/logstorage: simplify the code for uniq_values stats function a bit
Move the repeated check for an empty value into statsUniqValuesProcessor.updateState() function.
This allows removing duplicate code for this check from the statsUniqValuesProcessor.updateState() call sites.
2024-11-08 22:52:34 +01:00
Aliaksandr Valialkin
342f84c569 app/vlinsert/loki: show the original request body on parse errors
This should simplify debugging.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7490
2024-11-08 22:00:58 +01:00
Aliaksandr Valialkin
b7c290ea5e docs/VictoriaLogs/CHANGELOG.md: refer to the issue related to adding fields to Syslog logs
This is a follow-up for cd60a4c589
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7354
2024-11-08 21:50:14 +01:00
Aliaksandr Valialkin
4f0bec6f03 app/vlinsert/syslog: allow changing the default set of log fields to use as stream fields during syslog data ingestion
Thanks to @AndrewChubatiuk for the initial implementation at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7488
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7480

See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields
2024-11-08 21:21:08 +01:00
Aliaksandr Valialkin
cd60a4c589 app/vlinsert/syslog: add an ability to drop and add fields during data ingestion via Syslog protocol
See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields
and https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields
2024-11-08 20:57:59 +01:00
Aliaksandr Valialkin
63c76b9b27 lib/logstorage: support for [label1=value1 ... labelN=valueN] syntax inside syslog messages for adding arbitrary labels (fields) to log entries 2024-11-08 19:57:22 +01:00
Aliaksandr Valialkin
f55791f20b app/vlogscli: allow toggling wrapping long lines with \wrap_long_lines command 2024-11-08 17:08:50 +01:00
Roman Khavronenko
2febd00bb3 tests: couple applications and test suit (#7476)
* make test suite responsible for stopping apps
* reuse test suite fields to simplify function signatures

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>

(cherry picked from commit e60cce54a8)
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-08 17:06:55 +01:00
Aliaksandr Valialkin
62e6c9bd6f docs/VictoriaLogs/README.md: add Security chapter
It is inspired by https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7428#discussion_r1831555716
2024-11-08 16:43:55 +01:00
Aliaksandr Valialkin
a44787372f app/vlselect/logsql: add an ability to delay returning matching logs from live tailing via offset query arg
By default the delay equals 1 second.

While at it, document refresh_interval query arg at /select/logsql/tail endpoint.

Thanks to @Fusl for the idea and the initial implementation at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7428
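A hedged Go sketch of consuming this endpoint; the address, query text and argument values are illustrative (9428 is the default VictoriaLogs port):

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Tail logs matching the query, delaying results via the offset
	// query arg described above and polling per refresh_interval.
	params := url.Values{}
	params.Set("query", "error")
	params.Set("offset", "5s")
	params.Set("refresh_interval", "2s")
	resp, err := http.Get("http://localhost:9428/select/logsql/tail?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text()) // each line is a matching log entry
	}
}
```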
2024-11-08 16:43:55 +01:00
Aliaksandr Valialkin
e5537bc64d lib/logstorage: properly take into account the end query arg when calculating time range for _time:duration filters 2024-11-08 16:43:54 +01:00
Aliaksandr Valialkin
a98fb495c6 lib/logstorage: allow specifying _time filter offset without time range
This is useful when building graphs on time ranges in the past.
2024-11-08 16:43:54 +01:00
Aliaksandr Valialkin
66b2987f49 lib/logstorage: optimize query immediately after its parsing
This eliminates possible bugs related to forgotten Query.Optimize() calls.

This also allows removing optimize() function from pipe interface.

While at it, drop filterNoop inside filterAnd.
2024-11-08 16:43:54 +01:00
Aliaksandr Valialkin
0550093802 app/vlselect/logsql: call Query.Optimize() inside parseCommonArgs(), which is called at every /select/logsql/* endpoint.
This reduces the probability of forgotten call to Query.Optimize().
2024-11-08 16:43:53 +01:00
Aliaksandr Valialkin
5a6531b329 lib/logstorage: add an ability to add prefix to resulting query field names in join pipe
See https://docs.victoriametrics.com/victorialogs/logsql/#join-pipe
2024-11-08 16:43:53 +01:00
Andrii Chubatiuk
30dd4cdc0d docs: add make command for docs version update (#7430)
### Describe Your Changes

added a make target which updates the `{{% available_from "#" %}}` shortcode
to `{{% available_from "$(PKG_TAG)" %}}` if PKG_TAG matches the expression
`v.*`. The `{{% available_from %}}` shortcode was introduced in
https://github.com/VictoriaMetrics/vmdocs/pull/89 to show a reference in the
changelog to the version since which a feature has been available

related issue
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7376

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-08 16:35:08 +01:00
nemobis
690328028d docs: Update RELEX Oy figures (#7491)
Update figures for the existing RELEX Oy case study.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Co-authored-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-08 07:13:05 -08:00
hagen1778
8e0fbb0ed2 docs: test available_from shortcode for versioning
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-08 16:06:27 +01:00
hagen1778
acb9f47e8d docs: fix OpenTSDB port for cluster version in API examples
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-08 12:59:30 +01:00
Roman Khavronenko
a0a154511a app/vmauth: properly check for backend health before leastLoaded policy (#7489)
Previously, vmauth could have picked `buMin` as the least loaded backend
without checking its status. As a result, vmauth could have responded to the
user with an error even if there were healthy backends. That could
happen if healthy backends already had a non-zero number of concurrent
requests executing at the moment the least-loaded backend was chosen.

Steps to reproduce:
1. Setup vmauth with two backends: healthy and non-healthy
2. Execute a bunch of concurrent requests against vmauth (i.e. Grafana
dash reload)
3. Observe that some requests fail with a message that all backends
are unavailable

Addresses https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3061
---
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-08 11:45:16 +01:00
Andrii Chubatiuk
b399f1c656 victorialogs: added missing changelog entry about DataDog support (#7464)
### Describe Your Changes

Added missing changelog entry for PR
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5536

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-07 14:26:02 +01:00
Github Actions
af9638c480 Automatic update helm docs from VictoriaMetrics/helm-charts@1d4f6d6 (#7473)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-07 14:24:36 +01:00
Github Actions
156a04e900 Automatic update operator docs from VictoriaMetrics/operator@ee35f90 (#7474)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-11-07 14:24:02 +01:00
Github Actions
39fee4fd98 Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@0b86b62 (#7477) 2024-11-07 14:23:42 +01:00
Github Actions
eb4ace8741 Automatic update Grafana datasource docs from VictoriaMetrics/victoriametrics-datasource@aad9171 (#7478) 2024-11-07 14:23:28 +01:00
Andrii Chubatiuk
d96678a0c3 dashboards/victorialogs: allow multiple values for instance variable (#7465)
### Describe Your Changes

Related issue https://github.com/VictoriaMetrics/helm-charts/issues/1699

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-07 14:22:53 +01:00
hagen1778
f553628f46 tests: fix minor typos
* rename vmsingle test to actually match the mask
* swap got/want arguments, as they were misplaced

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-07 13:27:20 +01:00
Artem Fetishev
7c60d82215 tests: integration tests for vmsingle (#7434)
### Describe Your Changes

This PR continues the implementation of integration tests (#7199). It
adds the support for vm-single:

-    A vmsingle app wrapper has been added
- Sample vmsingle tests that test the VM documentation related to
querying data (#7435)
- The tests use the go-cmp/{cmp,/cmpopts} packages, therefore they have
been added to ./vendor
-    Minor refactoring: data objects have been moved to model.go

Advice on porting things to cluster branch:

- The build rule must include tests that start with TestVmsingle
(similarly to how TestCluster tests are skipped in master branch)
- The build rule must depend on `vmstorage vminsert vmselect` instead of
`victoria-metrics`
- The query_test.go can actually be implemented for cluster as well. To
do this the tests need to be renamed to start with TestCluster and the
tests must instantiate vm{storage,insert,select} instead of vmsingle.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-11-07 12:58:37 +01:00
Phuong Le
4d383fdb9a deployment/docker/base: fix the typo that causes InvalidDefaultArgInFrom 2024-11-07 11:33:03 +01:00
Aliaksandr Valialkin
d1af84a49d docs/VictoriaLogs/LogsQL.md: add missing reference to pipes docs from join pipe docs
This is needed for consistency with other pipe docs
2024-11-06 20:59:25 +01:00
Aliaksandr Valialkin
a549240852 deployment/docker: update VictoriaLogs from v0.40.0-victorialogs to v0.41.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.41.0-victorialogs
2024-11-06 20:26:26 +01:00
Aliaksandr Valialkin
a4ce80b4ea docs/VictoriaLogs/CHANGELOG.md: cut v0.41.0-victorialogs 2024-11-06 20:10:21 +01:00
Aliaksandr Valialkin
bd64c7e3ca app/vlselect/vmui: run make vmui-logs-update after 1e1952acf5
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7344
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7184
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7045
2024-11-06 19:47:28 +01:00
Yury Molodov
1e1952acf5 vmui/logs: add log sorting (#7344)
### Describe Your Changes

add sorting of logs by groups and, within each group, by time in descending
order. See #7184 and #7045

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-11-06 19:33:35 +01:00
Aliaksandr Valialkin
3d75c39ff4 app/vlinsert/loki: follow-up for 3aeb1b96a2
- Disallow more than 3 items in Loki line entry, since it must contain two mandatory entries: timestamp and message,
  plus one optional entry - structured metadata. See https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs

- Update references to structured metadata docs in Loki, in order to simplify further maintenance of the code

- Move the change from bugfix to feature at docs/VictoriaLogs/CHANGELOG.md, since VictoriaLogs never supported
  structured metadata over JSON Loki protocol. The support for structured metadata in protobuf Loki protocol
  has been added in ac06569c49 , which has been included in v0.28.0-victorialogs.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7431
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7432
2024-11-06 19:23:38 +01:00
Aliaksandr Valialkin
42c9183281 docs/VictoriaLogs: properly sort log fields with floating-point numbers 2024-11-06 18:56:13 +01:00
Aliaksandr Valialkin
5ed54ebadf lib/logstorage: add block_stats pipe for analyzing per-block storage stats 2024-11-06 18:55:45 +01:00
Aliaksandr Valialkin
f9e23bf8e3 lib/logstorage: add join pipe for joining multiple query results 2024-11-06 18:53:29 +01:00
Aliaksandr Valialkin
3a5f1019ba app/vlselect: add start_offset query arg for /select/logsql/tail endpoint for returning historical logs before live tailing 2024-11-06 18:53:28 +01:00
Evgeniy Negriy
d27dfac5c6 app/vmselect: fixes graphite function transformRemoveEmptySeries
Previously it incorrectly applied xFilesFactor if its value equals 0.

 This commit properly handles this case and returns the result according to
the Graphite documentation:

`xFilesFactor follows the same semantics as in Whisper storage schemas. Setting it to 0 (the default) means that only a single value in the series needs to be non-null for it to be considered non-empty, setting it to 1 means that all values in the series must be non-null. A setting of 0.5 means that at least half the values in the series must be non-null.`
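Restated as a small Go sketch (illustrative, not the actual vmselect implementation):

```go
// A series is considered non-empty when the fraction of non-null values
// is at least xFilesFactor; 0 means a single non-null value is enough.
func isNonEmptySeries(values []*float64, xFilesFactor float64) bool {
	nonNull := 0
	for _, v := range values {
		if v != nil {
			nonNull++
		}
	}
	if xFilesFactor <= 0 {
		return nonNull > 0
	}
	return float64(nonNull)/float64(len(values)) >= xFilesFactor
}
```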

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Evgeniy Negriy <einegriy@avito.ru>
2024-11-06 17:35:59 +01:00
Zhu Jiekun
3aeb1b96a2 app/vlinsert/loki: properly parse json logs with structured metadata
Loki protocol supports an optional `metadata` object for each ingested line. It's added as the 3rd field of the (ts, msg, metadata) tuple. Previously, the Loki request JSON parsers rejected a log line if the tuple size != 2.

This commit allows the optional tuple field. It is parsed as a JSON object and its entries are added as log metadata fields to the log message stream.
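For illustration, a sketch of such a request sent from Go against the Loki-compatible push path exposed by VictoriaLogs (/insert/loki/api/v1/push); the labels and values are made up:

```go
package main

import (
	"net/http"
	"strings"
)

func main() {
	// A (ts, msg, metadata) tuple with the optional 3rd field holding
	// structured metadata.
	body := `{"streams":[{"stream":{"job":"app"},"values":[
  ["1730000000000000000","user logged in",{"trace_id":"abc123"}]
]}]}`
	resp, err := http.Post("http://localhost:9428/insert/loki/api/v1/push",
		"application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```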


related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7431

---------
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2024-11-06 17:25:05 +01:00
Andrii Chubatiuk
a88f896b43 promql: exclude limit_offset from default by metric name sorting (#7402)
### Describe Your Changes

I don't like this solution, but it works. Other possible solutions are
described in the issue

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7068

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-11-06 15:10:23 +01:00
hagen1778
05f2e9548d docs/victorialogs: recommend using separate installations of vmalert for vm and vl
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-06 14:58:11 +01:00
hagen1778
a5f1764171 docs/victorialogs: clarify usage of -rule.defaultRuleType=vlogs
User experience suggests that examples shouldn't have `-rule.defaultRuleType=vlogs` set,
as it may confuse users who run vmalert with their existing rules or only use
rules from examples for testing purposes.

This change is supposed to remove the confusion by removing `-rule.defaultRuleType=vlogs`
from default recommendations and explicitly specifying `type` on the group level in examples.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-06 14:55:58 +01:00
Roman Khavronenko
0390d58a34 docs: fix typos in vlogs rules examples (#7457)
* fix typos in rules definition. Otherwise, they can't pass validation
* add code types for rendered examples

### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-06 13:55:08 +01:00
Zhu Jiekun
f16a58f14c vmctl: fixed import duplicate data when query result contains multiple series (#7330)
### Describe Your Changes

Fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7301

When querying with a condition like `WHERE a=1` (looking for series A),
InfluxDB can return data with the tag `a=1` (series A) and data with the
tags `a=1,b=1` (series B).

However, series B will be queried later and its data should not be
combined into series A's data.

This PR filters out those series that are not identical to the original query
condition.

For table `example`:
```
// time                           host    region  value
// ----                           ----    ------  -----
// 2024-10-25T02:12:13.469720983Z serverA us_west 0.64
// 2024-10-25T02:12:21.832755213Z serverA us_west 0.75
// 2024-10-25T02:12:32.351876479Z serverA         0.88
// 2024-10-25T02:12:37.766320484Z serverA         0.95
```

The query for series A (`example_value{host="serverA"}`) and result will
be:
```SQL
SELECT * FROM example WHERE host = "serverA"
```
```json
{
	"results": [{
		"statement_id": 0,
		"series": [{
			"name": "cpu",
			"columns": ["time", "host", "region", "value"],
			"values": [
				["2024-10-25T02:12:13.469720983Z", "serverA", "us_west", 0.64],
				["2024-10-25T02:12:21.832755213Z", "serverA", "us_west", 0.75],
				["2024-10-25T02:12:32.351876479Z", "serverA", null, 0.88],
				["2024-10-25T02:12:37.766320484Z", "serverA", null, 0.95]
			]
		}]
	}]
}
```

We need to abandon `values[0]` and `values[1]` because the value of
**unwanted** column `region` is not null.

As for series B (`example_value{host="serverA", region="us_west"}`), no
change is needed since the query already filters out the unwanted rows.
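The filtering logic can be sketched in Go as follows (hypothetical helper, not the actual vmctl code):

```go
// A returned row belongs to the queried series only if every tag column
// outside the original WHERE condition is null.
func belongsToQueriedSeries(columns []string, row []any, queriedTags map[string]bool) bool {
	for i, col := range columns {
		if col == "time" || col == "value" || queriedTags[col] {
			continue
		}
		if row[i] != nil {
			return false // e.g. a non-null "region" means the row belongs to series B
		}
	}
	return true
}
```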

### Note
This is a draft PR for verifying the fix.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-11-06 13:53:49 +01:00
Artem Navoiev
115ac7d0d7 docs: understand your setup size fix formula
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-11-06 12:29:50 +01:00
Artem Fetishev
d212243a0f docs: Fix images used in Query data docs (#7443)
### Describe Your Changes

This is a follow up for #7435. Images need to be updated too:

-   The time is changed from 10 hrs to 08 hrs
-   A missing data point is added to the range query image
-   The source Excalidraw file has been updated as well

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-06 11:58:56 +01:00
Github Actions
021f514842 Automatic update helm docs from VictoriaMetrics/helm-charts@f598096 (#7452)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-06 11:57:34 +01:00
Zhu Jiekun
11d735a91f docs: [puppetdb] add changelog for puppetdb service discovery (#7455)
### Describe Your Changes

Add puppetdb sd to changelog of `v1.106.0` version.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-11-06 09:19:51 +01:00
Github Actions
fa7adcaeaf Automatic update operator docs from VictoriaMetrics/operator@f8ca70f (#7444)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-11-05 19:05:33 +01:00
Github Actions
74a9c6f91c Automatic update helm docs from VictoriaMetrics/helm-charts@76f3195 (#7447)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: zekker6 <1367798+zekker6@users.noreply.github.com>
2024-11-05 19:05:24 +01:00
Zakhar Bessarab
be677065bc {docker,docs}: update references to latest release
Update references to latest release - v1.106.0.

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-11-05 18:40:13 +01:00
Smaine Kahlouch
5b838b03ec docs(articles): add blog ogenki to third party articles (#7414)
### Describe Your Changes

Adding a blog post that introduces VictoriaMetrics to third party
articles

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Smaine Kahlouch <smainklh@gmail.com>
2024-11-05 18:30:48 +01:00
Zakhar Bessarab
d73e5bdb8b dashboards: add dashboards with victoria-logs datasource (#7424)
### Describe Your Changes

Sync list of dashboards to be provided with Prometheus and
VictoriaMetrics' datasources.
### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-11-05 16:53:14 +01:00
Andrii Chubatiuk
e0930687f1 vlinsert: support datadog logs
This commit adds the following changes:

- Added support to push datadog logs with examples of how to ingest data
using Vector and Fluentbit
- Updated VictoriaLogs examples directory structure to have single
container image for victorialogs, agent (fluentbit, vector, etc) but
multiple configurations for different protocols

Related issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6632
2024-11-05 16:52:35 +01:00
Arie Heinrich
2e8f420d84 docs: spelling fixes (#7420)
### Describe Your Changes

Christmas is early and you get the first present in the shape of
spelling fixes.
Sorry for the big amount :)

### Checklist

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-05 16:44:23 +01:00
Github Actions
397997b2a8 Automatic update helm docs from VictoriaMetrics/helm-charts@f8ad8eb (#7438)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-11-05 16:36:34 +01:00
Artem Fetishev
d311c12dce documentation: Fix query docs
This commit changes the following:
-   The datetime has been fixed so it corresponds to the timestamps in example samples. The datetime now also includes the UTC time zone and has been changed to adhere to the ISO format.
-   The data points in query range result have been fixed to match the inserted data.

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-11-05 15:36:14 +01:00
Zakhar Bessarab
a3401c8d64 docs/changelog: sync latest release
- remove reference to sparse cache as it was reverted in 9f9cc24e4c
- add reference to 1.102.6 and 1.97.11 LTS releases

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-11-05 10:08:36 -03:00
Roman Khavronenko
f52577a1a8 docs: mention VM version when DS filters became available
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-11-05 13:31:27 +01:00
Zakhar Bessarab
371e193279 docs/CHANGELOG.md: cut v1.106.0
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-11-04 11:13:39 -03:00
Zakhar Bessarab
e8adbc9f09 app/{vmselect,vlselect}: run make vmui-update vmui-logs-update
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-11-04 10:59:42 -03:00
Zakhar Bessarab
9f9cc24e4c Revert "lib/mergeset: add sparse indexdb cache (#7269)"
This reverts commit 837d0d136d.
2024-11-04 10:29:14 -03:00
Aliaksandr Valialkin
5733e56e40 docs/VictoriaLogs/querying: refer to vlogscli from docs for /select/logsql/query and /select/logsql/tail HTTP endpoints
These endpoints are easier to query via vlogscli
2024-11-02 12:31:40 +01:00
Aliaksandr Valialkin
37a58677fa docs/VictoriaLogs/data-ingestion: mention Grafana Agent and Grafana Alloy additionally to Promtail as log collectors for Grafana Loki 2024-11-02 11:36:22 +01:00
Aliaksandr Valialkin
c7e242b8ce docs/VictoriaLogs/LogsQL.md: remove misleading information that min and max stats functions work only for numeric values
These functions work for both numeric and string values after ad505a7a9a
2024-11-01 20:25:33 +01:00
Aliaksandr Valialkin
a86df52adf docs/VictoriaLogs/LogsQL.md: typo fix in the link to description for sum_len stats function: sum-len-stats -> sum_len-stats
This is a follow-up for 364f084b43
2024-11-01 20:13:47 +01:00
Aliaksandr Valialkin
4478e48eb6 app/vlinsert: implement the ability to add extra fields to the ingested logs
This can be done via the extra_fields query arg or via the VL-Extra-Fields HTTP header.

See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7354#issuecomment-2448671445
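A hedged Go sketch of using the header; /insert/jsonline is a VictoriaLogs ingestion path, while the `name=value` list encoding of the header value and the field names are assumptions for illustration:

```go
package main

import (
	"net/http"
	"strings"
)

func main() {
	// Ingest a log line and attach extra fields via the VL-Extra-Fields
	// header described above.
	req, err := http.NewRequest("POST", "http://localhost:9428/insert/jsonline",
		strings.NewReader(`{"_msg":"payment processed","level":"info"}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("VL-Extra-Fields", "env=prod,dc=eu-west")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```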
2024-11-01 20:06:17 +01:00
Aliaksandr Valialkin
b3e2db5647 docs/VictoriaLogs/keyConcepts.md: remove duplicate sentence about missing timezone information in the ingested _time field values 2024-11-01 17:20:28 +01:00
Aliaksandr Valialkin
9ba6be4179 lib/logstorage: increase the maximum number of columns per block from 1000 to 2000
This will allow storing wide events with up to 2K fields per event into VictoriaLogs.
While at it, remove the misleading comment that columnsHeader is read in full per each matching block.
This is no longer the case after the improvements made at 202eb429a7 .
Now only the needed columnHeader is read for the column mentioned in the query.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6425#issuecomment-2418337124
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762
2024-11-01 17:00:07 +01:00
Andrii Chubatiuk
5e2cb78cce Updated k8s guides (#7411)
### Describe Your Changes

- updated dependency versions
- removed Helm v2 from docs
- fixed VMAgent configuration

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-11-01 19:13:59 +04:00
f41gh7
5cc2e49297 app/vmgateway: fixes rate limit for multitenant requests
Previously vmgateway returned an error for requests with a multitenant
tenant.

 This commit allows rate limiting multitenant requests and applies the global
rate limit to them.

Currently only queries are supported for rate limiting.

Related issue: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7201

This commit also addresses a gateway start-up crash when datasource.url is not accessible.

Previously vmgateway could crash at start-up with rate limiting enabled if the datasource for metrics
was not available for any reason. That crash may seem expected, but in fact it's not: for instance, the datasource could be in a restart phase.

The crash is replaced with an error log message. This increases the availability of the vmgateway component.

Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-10-31 20:20:02 +01:00
Nikolay
bba08f7846 lib/promscrape: add relabel configs to global section
This commit adds `metric_relabel_configs` and `relabel_configs` fields
into the `global` section of scrape configuration file.

 New fields are used as global relabeling rules for the scrape targets.

 These relabel configs are prepended to the target relabel configs.
This feature is useful to:
* apply global rules to __meta labels from service discovery targets.
* drop noisy labels during scraping.
* mutate labels without affecting metrics ingested via any of the push
protocols.

Related issue
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6966

---------
Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Zhu Jiekun <jiekun@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-31 19:58:22 +01:00
Artem Fetishev
4414f1e2e1 app/victoria-metrics: fixes flaky e2e graphite test
This commit fixes flaky test TestWriteRead/read/graphite/subquery-aggregation in app/victoria-metrics/main_test.go

The test fails when the test execution falls on the first second of a minute,
for example 6:59:00. In all other cases (such as 6:59:01) the test passes.

The test fails because of the way VictoriaMetrics implements sub-queries: it
aligns the time range to the step. The test config does not account for this.

Assuming that the implementation is correct, the fix is to adjust the test
config so that the data is inserted at intervals other than 1m.
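The alignment can be sketched as follows (illustrative, not the actual implementation):

```go
// Truncate a subquery time range to step boundaries, so e.g. 6:59:01
// with a 1m step aligns down to 6:59:00.
func alignToStep(startMs, endMs, stepMs int64) (int64, int64) {
	return startMs - startMs%stepMs, endMs - endMs%stepMs
}
```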

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-10-31 19:53:40 +01:00
hagen1778
06621995bd docs: mention stats object in Prometheus API enhancements
The doc explains the meaning of the fields in the `stats` object.
It also clarifies their purpose.
See related ticket https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7170

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-31 15:09:04 +01:00
Github Actions
955f3660de Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@3ecaebd (#7395) 2024-10-31 14:08:35 +01:00
hagen1778
c5b36138e2 docs: mention #7392 in changelog
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-31 14:08:08 +01:00
Viet Hung Nguyen
21d1385ae1 vmalert-tool: set default interval for unittest input_series (#7392)
### Describe Your Changes
Currently this option is optional, but if the user forgets to set it, it
defaults to 0, which causes unexpected behavior.

This change sets default = evaluation_interval similar to promtool
behavior.


https://prometheus.io/docs/prometheus/2.55/configuration/unit_testing_rules/#test_group
### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Viet Hung Nguyen <hvn@familug.org>
2024-10-31 14:04:50 +01:00
Roman Khavronenko
3f0e2ab3b2 deployment/alerts: add RemoteWriteDroppingData to vmalert rules (#7393)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-31 14:03:08 +01:00
Yury Molodov
bfb55d5f2f vmui: fix the display of the link to vmalert (#7380)
### Describe Your Changes

Fix the issue mentioned in
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7088#issuecomment-2391360368

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-31 13:43:58 +01:00
Aliaksandr Valialkin
24b6e117dd deployment/docker: update VictoriaLogs from v0.39.0-victorialogs to v0.40.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.40.0-victorialogs
2024-10-30 23:43:20 +01:00
Aliaksandr Valialkin
1a7b55009b docs/VictoriaLogs/CHANGELOG.md: cut v0.40.0-victorialogs release 2024-10-30 23:38:06 +01:00
Aliaksandr Valialkin
f88e2ae9fb docs/VictoriaLogs/README.md: follow-up for 0f6b9e9490: consistently use canonical url for Grafana dashboards
See also 0a5ffb3bc1
2024-10-30 23:29:23 +01:00
Aliaksandr Valialkin
0f6b9e9490 docs/VictoriaLogs/README.md: recommend installing Grafana dashboard for VictoriaLogs at Monitoring chapter
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6886
2024-10-30 23:20:36 +01:00
Aliaksandr Valialkin
c5d08d317c lib/logstorage: properly reset cached output fields for extract and extract_regexp pipes after the log entry matches if(...) condition
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7162
2024-10-30 22:29:52 +01:00
Aliaksandr Valialkin
2e635a42d8 lib/logstorage: properly cache replace() and replace_regexp() results for identical adjacent field values
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7162
2024-10-30 22:29:52 +01:00
Dmytro Kozlov
6a6d08d03d deployment/docker: update datasource versions to the latest releases (#7396)
### Describe Your Changes

Updated the versions of the data sources to the latest releases

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-31 01:18:38 +04:00
Aliaksandr Valialkin
f62502a943 docs/VictoriaLogs/querying/README.md: typo fix after f2cd284cf4a7429848fb4be1522483a9b93a43e7: Alls -> All 2024-10-30 19:08:57 +01:00
Aliaksandr Valialkin
7603446850 app/vlselect: add support for extra_filters and extra_stream_filters query args across all the HTTP querying APIs
These query args are going to be used for quick filtering on field values at https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7365
2024-10-30 17:59:49 +01:00
Aliaksandr Valialkin
d2dce13df6 app/vlinsert: typo fix after 16ee470da6 2024-10-30 17:59:49 +01:00
Artem Fetishev
683f8c2780 dashboards: add Restarts panel (#7394)
Reopening PR #7373 from a branch in VictoriaMetrics repo in order to
enable edits and rebase.

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-30 16:44:08 +01:00
hagen1778
6494606924 deployment/alerts: consistently update path to alerting rules
Follow-up after 68bad22fd2

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-30 16:06:57 +01:00
Artem Fetishev
e614367cbf tests: Initial version of integration tests (#7253)
### Describe Your Changes

Related issue: #7199

This is the initial version of the integration tests for cluster. See
`README.md` for details.

Currently cluster only, but it can also be used for vm-single if needed.

The code has been added to the apptest package that resides in the root
directory of the VM codebase. This is done to exclude the integration
tests from regular testing build targets because:

- Most of the test variants do not apply to integration testing (such as
pure or race).
- The integration tests may also be slow because each test must wait for
2 seconds so vmstorage flushes pending content. It may be okay when
there are a few tests, but when there are 100 of them, running the tests will
require much more time, which will affect the developer wait time and CI
workflows.
- Finally, the integration tests may be flaky, especially short term.

An alternative approach would be placing apptest under the app package and
excluding apptest from the packages under test, but that is not trivial.

The integration tests rely on retrieving some application runtime info
from the application logs, namely the application's host:port. Therefore
some changes to lib/httpserver/httpserver.go were necessary, such as
reporting the effective host:port instead of the one from the flag.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
(cherry picked from commit d7b3589dbd)
2024-10-30 15:22:22 +01:00
Aliaksandr Valialkin
25bca94d04 deployment/docker: update VictoriaLogs from v0.38.0-victorialogs to v0.39.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.39.0-victorialogs
2024-10-30 15:11:17 +01:00
Aliaksandr Valialkin
13822f6d11 docs/VictoriaLogs/CHANGELOG.md: cut v0.39.0-victorialogs release 2024-10-30 15:06:00 +01:00
Aliaksandr Valialkin
16ee470da6 app/vlinsert: accept logs with empty _msg field
In this case the _msg field is set to the value specified in the -defaultMsgValue command-line flag.

This should simplify first-time migration to VictoriaLogs from other systems.
2024-10-30 14:59:38 +01:00
Aliaksandr Valialkin
96466562b6 docs/VictoriaLogs/data-ingestion/README.md: formatting fixes after 8b36529b32
- Remove leading whitespace from the first lines in 'HTTP parameters' chapter.
  This whitespace isn't needed for the markdown formatting.

- Add leading whitespace for the second sentence in the list bullet describing AccountID and ProjectID HTTP headers.
  This fixes markdown formatting for this list bullet.
2024-10-30 14:23:54 +01:00
Aliaksandr Valialkin
ed73f8350b app/vlinsert: allow specifying comma-separated list of fields containing log message via _msg_field query arg and VL-Msg-Field HTTP request header
This may be useful when ingesting logs from different sources, which store the log message in different fields.
For example, `_msg_field=message,event.data,some_field` will get log message from the first non-empty field:
`message`, `event.data` and `some_field`.
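A hedged Go sketch; the header name and the comma-separated field list come from the description above, while the /insert/jsonline endpoint and the payload are for illustration:

```go
package main

import (
	"net/http"
	"strings"
)

func main() {
	// Pick the log message from the first non-empty candidate field
	// via the VL-Msg-Field header.
	req, err := http.NewRequest("POST", "http://localhost:9428/insert/jsonline",
		strings.NewReader(`{"event":{"data":"disk is full"},"some_field":"fallback"}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("VL-Msg-Field", "message,event.data,some_field")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```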
2024-10-30 14:17:33 +01:00
Aliaksandr Valialkin
102e9d4f4e lib/logstorage: make sure that the number of output (bloom, values) shards is bigger than zero.
If the number of output (bloom, values) shards is zero, then this may lead to a panic
as shown at https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7391 .

This panic may happen when parts containing only constant fields with distinct values are merged into
an output part with non-constant fields, which should be written to (bloom, values) shards.
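The guard amounts to something like this sketch (illustrative names, not the actual lib/logstorage code):

```go
// Make sure at least one (bloom, values) shard is produced even when
// every merged field turns out to be constant.
func adjustOutputShards(n uint64) uint64 {
	if n == 0 {
		return 1 // zero shards would panic downstream
	}
	return n
}
```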
2024-10-30 13:39:28 +01:00
Dan Dascalescu
258ee93fd1 docs: clarify "single" in Single-server-VictoriaMetrics.md (#7369)
### Describe Your Changes

"Single version" is unclear, since VM is also a single-executable. I
think "single-node" is clearer.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-30 12:55:52 +01:00
danish-mehmood
8dc4e2b5a5 docs: fix typos and format in case study (#7374)
### Describe Your Changes

- made small typo fix in case studies

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-30 12:48:09 +01:00
cangqiaoyuzhuo
45896fb477 chore: fix function name (#7381)
### Describe Your Changes

 fix function name

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-30 12:45:20 +01:00
hagen1778
cfba770c8e docs: consistently update twitter.com to x.com
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7383

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-30 12:44:50 +01:00
Lubov66
702ff923fe docs: replace twitter.com with x.com (#7383)
Replacing the name and link of the social network
2024-10-30 12:41:14 +01:00
Github Actions
b2cf8685e5 Automatic update helm docs from VictoriaMetrics/helm-charts@87ea94d (#7385)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-30 12:39:35 +01:00
hagen1778
2c93353173 docs: add frequently asked questions to vmalert integration with vlogs
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-30 12:38:47 +01:00
hagen1778
b789a9dc83 docs: mark alerting feature as done in VictoriaLogs
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-30 12:20:00 +01:00
hagen1778
77b690ab26 docs: update version placeholder to point to the next version
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-30 12:19:44 +01:00
Artem Navoiev
3473440d26 docs: logs vmalert add identifier
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-30 09:45:28 +01:00
hagen1778
2ac07aa813 docs: rm unused vmanomaly assets
These image copies are present in the vmanomaly subfolder.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-29 18:52:25 +01:00
hagen1778
a49eb7d4f5 docs: add step for updating VM version in docs
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7388

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-29 18:50:40 +01:00
hagen1778
b86b0dd910 docs: update VM versions to the latest version
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7388

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-29 18:48:36 +01:00
Aliaksandr Valialkin
c963d7d10d docs/VictoriaLogs/CHANGELOG.md: remove unneeded with prefix in front of rank at top pipe example 2024-10-29 18:27:40 +01:00
Aliaksandr Valialkin
c43a6ce0eb deployment/docker: update VictoriaLogs from v0.37.0-victorialogs to v0.38.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.38.0-victorialogs
2024-10-29 18:16:16 +01:00
Aliaksandr Valialkin
12223cf5d0 docs/VictoriaLogs/CHANGELOG.md: cut v0.38.0 release 2024-10-29 18:08:36 +01:00
Aliaksandr Valialkin
4f057e5669 app/vlselect/vmui: run make vmui-logs-update after dd89745a34
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7294
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7279
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7290
2024-10-29 18:06:51 +01:00
Aliaksandr Valialkin
7a623c225f lib/logstorage: follow-up for af831a6c906158f371f1b6810706fa0a54b78386
Sync the rank-related code between the top and sort pipes.
2024-10-29 16:44:46 +01:00
Aliaksandr Valialkin
8faee6b446 app/vlogscli: print hint on how to see available commands when starting vlogscli
This should improve the first-time experience with vlogscli
2024-10-29 16:44:46 +01:00
Aliaksandr Valialkin
3c06d083ea lib/logstorage: add an ability to return rank from top pipe results 2024-10-29 16:44:45 +01:00
Aliaksandr Valialkin
7a62eefa34 lib/logstorage: dynamically adjust the number of (bloom, values) shards in a part depending on the number of non-const columns
This allows reducing the amount of data which must be read during queries over logs with a big number of fields (aka "wide events").
This, in turn, improves query performance when the data which needs to be scanned during the query doesn't fit the OS page cache.
2024-10-29 16:44:45 +01:00
Aliaksandr Valialkin
67b4059aa4 docs/VictoriaLogs/README.md: add tuning chapter 2024-10-29 16:44:44 +01:00
Aliaksandr Valialkin
8d968acd0a lib/logstorage: avoid reading columnsHeader data when field_values pipe is applied directly to log filters
This improves performance of the `field_values` pipe when it is applied to a large number of data blocks.
This also improves performance of the /select/logsql/field_values HTTP API.
2024-10-29 16:44:44 +01:00
Hui Wang
68bad22fd2 vmalert: integrate with victorialogs (#7255)
address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6706.
See
https://github.com/VictoriaMetrics/VictoriaMetrics/blob/vmalert-support-vlog-ds/docs/VictoriaLogs/vmalert.md.

Related fix
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7254.

Note: in this pull request, vmalert doesn't support
[backfilling](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/vmalert-support-vlog-ds/docs/VictoriaLogs/vmalert.md#rules-backfilling)
for rules with a customized time filter. It might be added in the
future, see [this
issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7289)
for details.

Feature can be tested with image
`victoriametrics/vmalert:heads-vmalert-support-vlog-ds-0-g420629c-scratch`.

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-29 16:30:39 +01:00
Fred Navruzov
5d73b8b866 docs/vmanomaly - release 1.18.0 (#7378)
### Describe Your Changes

docs/vmanomaly - release 1.18.0

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-28 17:25:24 +02:00
Zakhar Bessarab
4e50d6eed3 lib/storage/partition: prevent panic in case resulting in-memory part is empty after merge (#7329)
It is possible for an in-memory part to be empty if ingested samples are
removed by retention filters. In this case, the data is not discarded
due to retention before creating the in-memory part. After in-memory parts
are merged, the samples are removed, resulting in a completely empty
part at the destination.

 This commit checks the resulting part and skips it if it's empty.
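Sketched in Go (hypothetical names, not the actual lib/storage/partition code):

```go
// A merged in-memory part whose samples were all removed by retention
// filters is skipped instead of being registered.
type inMemoryPart struct{ rowsCount uint64 }

func registerOrSkip(dst []*inMemoryPart, merged *inMemoryPart) []*inMemoryPart {
	if merged.rowsCount == 0 {
		return dst // skip the completely empty part
	}
	return append(dst, merged)
}
```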

---------
Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-27 20:40:13 +01:00
Zhu Jiekun
f06c7e99fe lib/promscrape: adds support for PuppetDB service discovery
This commit adds support for
[PuppetDB](https://www.puppet.com/docs/puppetdb/8/overview.html) service
discovery to the `vmagent` and `victoria-metrics-single` components.

Related issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5744
2024-10-27 20:38:34 +01:00
Andrii Chubatiuk
7e60afb6fc app/vlinsert: adds journald ingestion support
This commit allows ingesting logs in the journald format.

https://www.freedesktop.org/software/systemd/man/latest/systemd-journal-remote.service.html

related issue: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4618
2024-10-27 20:36:33 +01:00
Dan Dascalescu
66971d3141 Fix "loosing" typo in README.md (#7368)
### Checklist

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/)
2024-10-27 00:56:42 -07:00
Zhu Jiekun
cd2222aa95 dashboards: fix query for full ETA vm_free_disk_space_bytes - vm_free_disk_space_limit_bytes (#7355)
### Describe Your Changes

Fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7334

available disk space should be 
```
(vm_free_disk_space_bytes{job=~...} - vm_free_disk_space_limit_bytes{job=~...})
```
instead of 
```
vm_free_disk_space_bytes{job=~...}
```

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-25 15:09:14 +02:00
Hui Wang
0172e65b8d docs: clarify flags -search.maxxxDuration can only be overridden to a smaller value with `timeout` arg (#7227)
2024-10-25 11:11:09 +02:00
Github Actions
cf344f5250 Automatic update Grafana datasource docs from VictoriaMetrics/victoriametrics-datasource@ed19341 (#7352) 2024-10-25 01:18:06 -07:00
Github Actions
2224424136 Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@9e6db28 (#7350) 2024-10-25 01:16:42 -07:00
Yury Molodov
dd89745a34 vmui/logs: fix query and limit update issue (#7294)
### Describe Your Changes

Fixes issues with incorrect updating of query and limit fields, and
resolves the problem where the display tab resets.

Related issue: #7279 and #7290

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-25 09:32:20 +02:00
Zakhar Bessarab
372ce74d62 docs/guides-vmgateway-grafana: update guide (#7347)
### Describe Your Changes

- update to recent versions of components
- add information about the license key
- add an example configuration for remote write with OAuth identity for
vmagent

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-24 23:01:57 +04:00
Andrii Chubatiuk
fc537bea00 lib/promscrape/discovery/kubernetes: support kubernetes native sidecars (#7324)
This commit adds Kubernetes Native Sidecar support.

It's a special type of init container that has restartPolicy == "Always" and continues to run after container initialization.


related issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7287
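
A minimal detection sketch with simplified stand-in types for the Kubernetes API objects (the real SD code uses the full object model):

```go
package kubernetes

// Container is a simplified stand-in for the Kubernetes container spec.
type Container struct {
	Name          string
	RestartPolicy *string
}

// isNativeSidecar reports whether an init container is a native sidecar:
// it has restartPolicy == "Always" and keeps running after pod init.
func isNativeSidecar(initContainer Container) bool {
	return initContainer.RestartPolicy != nil && *initContainer.RestartPolicy == "Always"
}
```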
2024-10-24 17:04:12 +02:00
Zakhar Bessarab
837d0d136d lib/mergeset: add sparse indexdb cache (#7269)
Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7182

- add a separate index cache for searches which might read through large
amounts of random entries. The primary use-case for this is retention and
downsampling filters: when applying filters, background merge needs to
fetch a large amount of random entries, which pollutes the index cache.
Using different caches reduces the effect on memory usage and cache
efficiency of the main cache while still keeping a high cache hit rate. The
separate cache size is 5% of allowed memory.

- reduce the size of the indexdb/dataBlocks cache in order to free memory for
the new sparse cache. The size is reduced by 5% and moved to a separate cache.

- add a separate metricName search which does not cache metric names -
this is needed in order to allow disabling metric name caching when
applying downsampling/retention filters. Applying filters during
background merge accesses random entries; this fills up the cache and does
not provide an actual improvement due to the random access nature.


Merge performance and memory usage stats before and after the change:

- before

![image](https://github.com/user-attachments/assets/485fffbb-c225-47ae-b5c5-bc8a7c57b36e)


- after

![image](https://github.com/user-attachments/assets/f4ba3440-7c1c-4ec1-bc54-4d2ab431eef5)

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-24 15:21:17 +02:00
Andrii Chubatiuk
5fecb77f69 app/vmctl: fix match expression for vm-native protocol with --vm-native-disable-per-metric-migration flag enabled (#7310)
Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7309

### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-24 14:57:58 +02:00
hagen1778
53b7288e0d docs: clarify that auto generated metrics can't be relabeled in scrape config
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-24 12:09:30 +02:00
Github Actions
ec0abe736a Automatic update helm docs from VictoriaMetrics/helm-charts@f3aac52 (#7335)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
Co-authored-by: Hui Wang <haley@victoriametrics.com>
2024-10-24 12:20:06 +08:00
Github Actions
6434fa2c4e Automatic update Grafana datasource docs from VictoriaMetrics/victorialogs-datasource@12fc7d5 (#7340) 2024-10-23 13:02:18 -07:00
Artem Fetishev
6b9f57e5f7 lib/storage: Fix flaky test: TestStorageRotateIndexDB (#7267)
This commit fixes the TestStorageRotateIndexDB flaky test reported at:
#6977. Sample test failure: https://pastebin.com/bTSs8HP1

The test fails because one goroutine adds items to the indexDB table
while another goroutine is closing that table. This may happen if
indexDB rotation happens twice during one Storage.add() operation:
- Storage.add() takes the current indexDB and adds index records to it
- The first indexDB rotation makes the current indexDB a previous one
(still ok at this point)
- The second indexDB rotation removes the indexDB that was current two
rotations earlier. It does this by setting the mustDrop flag to true and
decrementing the ref counter. The ref counter reaches zero, which causes
the underlying indexdb table to release its resources gracefully.
Graceful release assumes that the table is no longer written to. But
Storage.add() still adds items to it.

The solution is to increment the indexDB ref counters while it is used
inside add().
The unit test has been changed a little so that the test fails reliably.
The idea is to make the add() function invocation last much longer,
therefore the test inserts not just one record at a time but thousands
of them.

To see the test fail, just replace the idbsLocked() func with:

```go
func (s *Storage) idbsLocked2() (*indexDB, *indexDB, func()) {
	return s.idbCurr.Load(), s.idbNext.Load(), func() {}
}
```


---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-10-23 11:48:21 +02:00
hagen1778
7e53324f5d docs: clarify that vminsert also supports exponential histogram parsing
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-23 11:47:50 +02:00
Github Actions
5ee3bc98d6 Automatic update helm docs from VictoriaMetrics/helm-charts@6237358 (#7327)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-23 03:27:32 +04:00
Roman Khavronenko
0204ce942d app/vmalert: update -remoteWrite.concurrency and -remoteWrite.flushInterval (#7272)
Auto-adjust the `-remoteWrite.concurrency` cmd-line flag to the number of
available CPU cores in the same way as vmagent does. With this change,
the default behavior of vmalert in high-load installations should
become more resilient. This change also reduces
`-remoteWrite.flushInterval` from `5s` to `2s` to provide better data
freshness.
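
A rough sketch of the auto-adjustment idea; the real code uses a cgroup-aware CPU count rather than runtime.NumCPU(), so treat this as an approximation:

```go
package remotewrite

import "runtime"

// adjustedConcurrency returns the effective -remoteWrite.concurrency:
// an explicit user-provided value wins, otherwise it follows the number
// of available CPU cores. Sketch only, not the exact production formula.
func adjustedConcurrency(flagValue int) int {
	if flagValue > 0 {
		return flagValue
	}
	return runtime.NumCPU()
}
```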


---------
Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Nikolay <nik@victoriametrics.com>
2024-10-22 14:43:55 +02:00
Antoine Deschênes
d656934d22 vmalert: properly set group_name and file fields for recording rules (#7298)
This commit properly adds the `group_name` and `file` fields to the recording rules web API response at `/api/v1/rules`.
Previously these fields were blank.

Related issue https://github.com/victoriaMetrics/victoriaMetrics/issues/7297

Signed-off-by: Antoine Deschênes <antoine.deschenes@linux.com>
2024-10-22 14:13:56 +02:00
Github Actions
ac82b5aea6 Automatic update helm docs from VictoriaMetrics/helm-charts@999d44f (#7316)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-10-22 14:04:48 +02:00
Github Actions
e8c7d6373e Automatic update operator docs from VictoriaMetrics/operator@b357f60 (#7319)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-10-22 14:04:24 +02:00
Fred Navruzov
b17fce3e4b docs/vmanomaly-release-1.17.2 (#7322)
### Describe Your Changes

- release 1.17.2 updates
- added sections on logging and CLI args to docs

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-22 13:42:37 +03:00
Andrii Chubatiuk
7ecf68093f docs: updated cmd flags highlight style (#7312)
### Describe Your Changes

Changed highlight style for cmd flags

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-22 03:00:07 -07:00
f41gh7
50487823ab deployment: bump VM to v1.105.0
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-10-21 23:42:30 +02:00
Zakhar Bessarab
05f6ea621d app/vmselect: add retention and downsampling filters debug pages (#776)
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6304

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-21 09:52:24 +02:00
Github Actions
1cb32ee6c8 Automatic update helm docs from VictoriaMetrics/helm-charts@1e789d9 (#7307)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-19 02:44:35 +04:00
hagen1778
a1882a84fb app/vmui: add missing assets after a710d43a20
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 19:52:25 +02:00
Fred Navruzov
7a538bbe78 docs/vmanomaly: release 1.17.1 (#7302)
### Describe Your Changes

docs/vmanomaly: release 1.17.1

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-18 21:07:24 +04:00
hagen1778
d553d101b2 docs/CHANGELOG.md: cut v1.102.5
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 14:31:24 +02:00
hagen1778
73b073e298 docs/CHANGELOG.md: cut v1.97.10
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 14:30:26 +02:00
hagen1778
361afaec5b docs/CHANGELOG.md: cut v1.105.0
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 14:28:14 +02:00
hagen1778
a710d43a20 app/{vmselect,vlselect}: run make vmui-update vmui-logs-update
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 14:26:47 +02:00
hagen1778
f9c79eba30 docs: re-order changes by priority in log
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 14:24:20 +02:00
Zhu Jiekun
8c50c38a80 vmstorage: auto calculate maxUniqueTimeseries based on resources (#6961)
### Describe Your Changes

Add support for
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6930

Calculate `-search.maxUniqueTimeseries` based on
`-search.maxConcurrentRequests` and the remaining memory if it's **not set**
or **less than or equal to 0**.

The remaining memory is affected by `-memory.allowedPercent`,
`-memory.allowedBytes` and the cgroup memory limit.
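
A hedged sketch of how such an auto-calculation could look; the per-series memory estimate and formula below are assumptions, not the exact production logic:

```go
package vmstorage

// autoMaxUniqueTimeseries derives -search.maxUniqueTimeseries from
// -search.maxConcurrentRequests and the remaining memory when the flag
// is not set or is <= 0.
func autoMaxUniqueTimeseries(flagValue, maxConcurrentRequests int, remainingMemoryBytes int64) int {
	if flagValue > 0 {
		return flagValue // explicitly set by the user
	}
	const bytesPerSeries = 1000 // rough per-series memory estimate (assumption)
	n := int(remainingMemoryBytes / int64(maxConcurrentRequests) / bytesPerSeries)
	if n < 1 {
		n = 1
	}
	return n
}
```
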
### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>

(cherry picked from commit 85f60237e2)
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 14:00:14 +02:00
Andrii Chubatiuk
965a33c893 lib/promscrape: fixed reload on max_scrape_size change (#7282)
### Describe Your Changes

fixed reload on max_scrape_size change
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7260

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 11:35:23 +02:00
Hui Wang
c4fe23794a vmalert: fix blocking hot-reload process if the old rule group hasn't started yet (#7258)
The group
[sleeps](daa7183749/app/vmalert/rule/group.go (L320))
for a random duration before starting the evaluation, and during the sleep
`g.updateCh <- new` is blocked since there is no `<-g.updateCh`
receiver waiting.
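
A simplified sketch of the idea behind the fix: keep draining updateCh during the random start delay so hot-reload never blocks (names are illustrative):

```go
package rule

import (
	"math/rand"
	"time"
)

type groupCfg struct{} // stand-in for the real group configuration

// sleepBeforeStart waits a random duration before the first evaluation,
// but keeps accepting config updates so `g.updateCh <- new` never blocks.
func sleepBeforeStart(updateCh <-chan *groupCfg, apply func(*groupCfg)) {
	delay := time.Duration(rand.Int63n(int64(time.Minute)))
	t := time.NewTimer(delay)
	defer t.Stop()
	for {
		select {
		case cfg := <-updateCh:
			apply(cfg) // accept updates even while sleeping
		case <-t.C:
			return
		}
	}
}
```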

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 11:18:24 +02:00
hagen1778
41e0bbb6d1 docs/vmctl: clarify the meaning of the comment
The comment was ambiguous and not clear to the readers.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-18 09:46:54 +02:00
Aliaksandr Valialkin
025eec2cb0 deployment: update VictoriaLogs from v0.36.0-victorialogs to v0.37.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.37.0-victorialogs
2024-10-18 02:40:13 +02:00
Aliaksandr Valialkin
14e33d93ef app/vlselect/vmui: run make vmui-logs-update after 423df09d7d
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7206
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7117

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7167
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7133
2024-10-18 02:33:41 +02:00
Aliaksandr Valialkin
51cd3ba02b docs/VictoriaLogs/CHANGELOG.md: cut v0.37.0-victorialogs release 2024-10-18 02:32:37 +02:00
Yury Molodov
423df09d7d vmui/logs: add ability to hide hits chart (#7206)
### Describe Your Changes

**Added ability to hide the hits chart**

- Users can now hide or show the hits chart by clicking the "eye" icon
located in the upper-right corner of the chart.
- When the chart is hidden, it will stop sending requests to
`/select/logsql/hits`.
- Upon displaying the chart again, it will automatically refresh. If a
relative time range is set, the chart will update according to the time
period of the logs currently being displayed.

**Hits chart visible:**

![image](https://github.com/user-attachments/assets/577e877b-6417-4b83-8d84-c55e3d39864a)

**Hits chart hidden:**

![image](https://github.com/user-attachments/assets/068b1143-d140-4d72-8d65-663900124f32)

Related issue: #7117

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-10-18 02:30:56 +02:00
Yury Molodov
36a86c3aaf vmui/logs: fix display of hits chart (#7167)
### Describe Your Changes

Fixed the display of hits chart in VictoriaLogs.
See #7133

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-18 02:28:23 +02:00
Aliaksandr Valialkin
064b9a6314 docs/VictoriaLogs/CHANGELOG.md: remove "index.html" trailer from the link to docs for the sake of consistency with other links to docs
This is a follow-up for 3538869942
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7252
2024-10-18 02:26:01 +02:00
Aliaksandr Valialkin
0f24078146 lib/logstorage: use simpler in-memory cache instead of workingsetcache for caching recently ingested _stream values and recently queried set of streams
These caches aren't expected to grow big, so it is OK to use the simplest cache based on sync.Map.
The benefit of this cache compared to workingsetcache is better scalability on systems with many CPU cores,
since it doesn't use mutexes on the fast path.
An additional benefit is lower memory usage on average, since the size of the in-memory cache equals
the working set for the last 3 minutes.

The downside is that there is no upper bound for the cache size, so it may grow big during workload spikes.
But this is very unlikely for typical workloads.
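
A minimal sketch of such a cache, assuming a two-generation rotation so entries outside the recent working set are dropped (the actual implementation differs in details):

```go
package logstorage

import (
	"sync"
	"sync/atomic"
	"time"
)

// cache offers lock-free reads on the fast path via sync.Map. Entries not
// accessed during the last rotation period eventually disappear, so memory
// usage tracks the recent working set.
type cache struct {
	curr atomic.Pointer[sync.Map] // entries touched in the current period
	prev atomic.Pointer[sync.Map] // entries from the previous period
}

func newCache(period time.Duration, stopCh <-chan struct{}) *cache {
	var c cache
	c.curr.Store(&sync.Map{})
	c.prev.Store(&sync.Map{})
	go func() {
		t := time.NewTicker(period)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				// Rotate generations: stale entries are garbage-collected.
				c.prev.Store(c.curr.Load())
				c.curr.Store(&sync.Map{})
			case <-stopCh:
				return
			}
		}
	}()
	return &c
}

func (c *cache) Get(k string) (any, bool) {
	if v, ok := c.curr.Load().Load(k); ok {
		return v, true
	}
	if v, ok := c.prev.Load().Load(k); ok {
		c.curr.Load().Store(k, v) // promote recently used entry
		return v, true
	}
	return nil, false
}

func (c *cache) Set(k string, v any) { c.curr.Load().Store(k, v) }
```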
2024-10-18 02:22:43 +02:00
Aliaksandr Valialkin
8aa144fa74 lib/logstorage: do not persist streamIDCache, since it may go out of sync with partition directories, which can be changed manually between VictoriaLogs restarts
Partition directories can be manually deleted and copied from other sources such as backups or other VictoriaLogs instances.
In this case the persisted cache becomes out of sync with partitions. This can result in missing index entries
during data ingestion or in incorrect results during querying. So it is better not to persist these caches.
This shouldn't hurt VictoriaLogs performance too much just after the restart, since its caches usually contain
small amounts of data, which can be quickly re-populated from the persisted data.
2024-10-18 02:22:43 +02:00
Aliaksandr Valialkin
1892e357c3 lib/logstorage: consistently use "pHits := m[..]" pattern
Consistency improves maintainability of the code a bit.
2024-10-18 02:22:43 +02:00
Aliaksandr Valialkin
2023f017b1 lib/logstorage: optimize performance for queries, which select all the log fields for logs containing hundreds of log fields (aka "wide events")
Unpack the full columnsHeader block instead of unpacking per-column meta-information
when a query which selects all the columns is executed. This improves performance when scanning
logs with a big number of fields.
2024-10-18 02:22:42 +02:00
Aliaksandr Valialkin
78c6fb0883 lib/logstorage: improve performance of top and field_values pipes on systems with many CPU cores
- Parallelize merging of per-CPU results.
- Parallelize writing the results to the next pipe.
2024-10-18 02:22:42 +02:00
Aliaksandr Valialkin
c4b2fdff70 lib/logstorage: optimize 'stats by(...)' calculations for by(...) fields with millions of unique values on multi-CPU systems
- Parallelize merging of per-CPU `stats by(...)` result shards.
- Parallelize writing `stats by(...)` results to the next pipe.
2024-10-18 02:22:41 +02:00
Aliaksandr Valialkin
192c07f76a lib/logstorage: optimize performance for top pipe when it is applied to a field with millions of unique values
- Use parallel merge of per-CPU shard results. This improves merge performance on multi-CPU systems.
- Use topN heap sort of per-shard results. This improves performance when results contain millions of entries (see the sketch below).
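
A sketch of the heap-based topN selection, using container/heap over illustrative (value, hits) pairs:

```go
package logstorage

import "container/heap"

// entry is a (value, hits) pair produced by a per-CPU shard.
type entry struct {
	value string
	hits  uint64
}

// minHeap keeps the entry with the fewest hits on top so it can be
// evicted as soon as a bigger one arrives.
type minHeap []entry

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i].hits < h[j].hits }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(entry)) }
func (h *minHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// topN selects the n entries with the most hits in O(len(es)*log(n)),
// which beats fully sorting millions of entries.
func topN(es []entry, n int) []entry {
	h := make(minHeap, 0, n)
	for _, e := range es {
		if len(h) < n {
			heap.Push(&h, e)
		} else if e.hits > h[0].hits {
			h[0] = e
			heap.Fix(&h, 0)
		}
	}
	return h
}
```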
2024-10-18 02:21:56 +02:00
hagen1778
98fcd95438 docs/vmagent: distinguish between metrics, samples and series
Before, the doc incorrectly used `metric` instead of `sample` or `series`.
This commit aligns the description with https://docs.victoriametrics.com/keyconcepts/#structure-of-a-metric

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 19:21:52 +02:00
Artem Fetishev
d6bafe31d3 docs/troubleshooting: add reduce_mem_usage=1 to export query (#7286)
### Describe Your Changes

When debugging unexpected query results, add reduce_mem_usage=1 param to
export query to preserve duplicates.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-10-17 18:50:25 +02:00
Fred Navruzov
bc65c9f399 docs/vmanomaly: release v1.17.0 (#7285)
### Describe Your Changes

docs/vmanomaly: release v1.17.0

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-17 18:56:04 +03:00
Nikolay
635bdd130b lib/storage: properly unmarshal SearchQuery (#7277)
After adding the multitenant query feature in v1.104.0, SearchQuery wasn't
properly unmarshaled at the bottom vmselect in a multi-level cluster setup.
This resulted in empty query responses.

This commit adds a fallback to the Unmarshal method of SearchQuery to fill
TenantTokens. It allows properly executing search requests
on the vmselect side.

Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7270
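
A simplified sketch of the fallback, with field names reduced to the essentials (the real SearchQuery carries more state):

```go
package storage

// TenantToken identifies a single tenant in a multitenant query.
type TenantToken struct {
	AccountID uint32
	ProjectID uint32
}

// SearchQuery is a simplified stand-in for the real type.
type SearchQuery struct {
	AccountID    uint32
	ProjectID    uint32
	TenantTokens []TenantToken
}

// fillTenantTokensIfNeeded rebuilds TenantTokens from the legacy
// AccountID/ProjectID fields when an older upper-level vmselect didn't
// marshal them, so the bottom vmselect can execute the request.
func (sq *SearchQuery) fillTenantTokensIfNeeded() {
	if len(sq.TenantTokens) == 0 {
		sq.TenantTokens = []TenantToken{{AccountID: sq.AccountID, ProjectID: sq.ProjectID}}
	}
}
```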

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-10-17 10:52:35 -03:00
Zakhar Bessarab
d036063c78 docs/vmbackup: add information about cluster backups (#7244)
### Describe Your Changes

Add more detailed information about performing backups for
VictoriaMetrics cluster setup.
More detailed explanation should help to address questions similar to
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7225

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-17 13:56:23 +02:00
hagen1778
aa6c237603 docs: follow-up after f0d1db81dc
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 13:49:51 +02:00
Roman Khavronenko
05ac508fbf lib/flagutil: rename Duration to RetentionDuration (#7284)
The purpose of this change is to reduce confusion between using
`flag.Duration` and `flagutils.Duration`. The reason is that
`flagutils.Duration` was mistakenly used for cases that required `m`
support. See
ab0d31a7b0

The change in name should clearly indicate the purpose of this data
type.

### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 13:47:48 +02:00
Alexander Frolov
f0d1db81dc lib/flagutil: rm misleading minutes support from flagutil.Duration docs (#7066)
### Describe Your Changes

`flagutil.Duration` docs state that `m` suffix stands for `minute`, but
in fact this suffix is not supported due to ambiguity with `month`

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Alexander Frolov <winningpiece@gmail.com>
2024-10-17 13:29:51 +02:00
Hui Wang
ab0d31a7b0 vmagent: fix type of command-line flag -streamAggr.dedupInterval (#7081)
Previously the unit `m` was not correctly supported.

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 13:27:59 +02:00
Artem Fetishev
ca787c70d1 dashboards: fix vmagent monitoring chart descriptions (#7283)
### Describe Your Changes

Fix vmagent monitoring chart descriptions
### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-10-17 12:12:47 +02:00
Zakhar Bessarab
65e9d19f3c lib/flagutil/dict: properly update default value in case there is no key value set (#7211)
### Describe Your Changes

If a dict flag has only one value without a prefix, it is supposed to
replace the default value.

Previously, when the flag was set to `-flag=2` and the default value in
`NewDictInt` was set to 1, the resulting value for any `flag.Get()` call
would be 1, which is not expected.

This commit updates the default value for the flag in case there is only one
entry for the flag and the entry is a number without a key.

This affects cluster version and specifically `replicationFactor` flag
usage with vmstorage [node
groups](https://docs.victoriametrics.com/cluster-victoriametrics/#vmstorage-groups-at-vmselect).
Previously, the following configuration would effectively be ignored:
```
/path/to/vmselect \
 -replicationFactor=2 \
 -storageNode=g1/host1,g1/host2,g1/host3 \
 -storageNode=g2/host4,g2/host5,g2/host6 \
 -storageNode=g3/host7,g3/host8,g3/host9
```

Changes from this PR will force default value for `replicationFactor`
flag to be set to `2` which is expected as the result of this
configuration.
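
A simplified sketch of the fixed parsing rule (the real flagutil dict parser handles quoting and more types):

```go
package flagutil

import (
	"strconv"
	"strings"
)

// parseDictInt returns per-key values plus the effective default. A single
// keyless entry such as "-replicationFactor=2" replaces the default value,
// so it applies to every vmstorage group.
func parseDictInt(s string, defaultValue int) (map[string]int, int, error) {
	m := make(map[string]int)
	entries := strings.Split(s, ",")
	if len(entries) == 1 && !strings.Contains(entries[0], ":") {
		n, err := strconv.Atoi(entries[0])
		if err != nil {
			return nil, 0, err
		}
		return m, n, nil // the sole keyless value becomes the new default
	}
	for _, e := range entries {
		k, v, _ := strings.Cut(e, ":")
		n, err := strconv.Atoi(v)
		if err != nil {
			return nil, 0, err
		}
		m[k] = n
	}
	return m, defaultValue, nil
}
```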


---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-17 12:05:47 +02:00
hagen1778
23f8ab6f81 docs/contributing: clarify the type of changelog line we expect
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 11:59:55 +02:00
Andrii Chubatiuk
3538869942 vlogs: added basic alerts (#7252)
### Describe Your Changes

Added basic VLogs alerts

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 11:33:06 +02:00
Hui Wang
4984e71da6 vmalert-tool: add more syntax checks for input_series and exp_samples (#7263)
Address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7224,
allowing the use of
```
        exp_samples:
          - labels: '{}'
```
for Prometheus compatibility.

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 11:00:34 +02:00
Hui Wang
c90adf566e vmalert-tool: reduce victoriametrics health check interval (#7256)
address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6970.
This reduces the hard limit on duration for completing the test when
users run vmalert-tool on slow hosts.

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-17 10:51:12 +02:00
Github Actions
c5fb281019 Automatic update helm docs from VictoriaMetrics/helm-charts@845bc1f (#7268)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-10-17 10:42:41 +02:00
Aliaksandr Valialkin
a72e1155b9 docs/VictoriaLogs/CHANGELOG.md: add missing part of the sentence 2024-10-16 20:22:19 +02:00
Aliaksandr Valialkin
677f1cd1be docs/VictoriaLogs/CHANGELOG.md: typo fix: refer the correct endpoints for stats results 2024-10-16 20:19:22 +02:00
Aliaksandr Valialkin
9187ed0648 deployment: update VictoriaLogs Docker image from v0.35.0-victorialogs to v0.36.0-victorialogs 2024-10-16 20:10:05 +02:00
Aliaksandr Valialkin
6ca1b15134 docs/VictoriaLogs/README.md: fix copy-n-paste typo: partitions in VictoriaLogs are per-day, not per-month 2024-10-16 20:08:30 +02:00
Aliaksandr Valialkin
91987763d4 docs/VictoriaLogs/CHANGELOG.md: cut v0.36.0-victorialogs release 2024-10-16 20:00:35 +02:00
Aliaksandr Valialkin
a23aa87282 app/vlselect/vmui: run make vmui-logs-update after the commit 6c9772b101
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7204
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7097
2024-10-16 19:58:14 +02:00
Aliaksandr Valialkin
508e498ae3 lib/logstorage: follow-up for 72941eac36
- Allow dropping metrics if the query result contains at least a single metric.
- Allow copying by(...) fields.
- Disallow overriding by(...) fields via `math` pipe.
- Allow using `format` pipe in stats query. This is useful for constructing some labels from the existing by(...) fields.
- Add more tests.
- Remove the check for time range in the query filter according to https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7254/files#r1803405826

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7254
2024-10-16 19:43:52 +02:00
Hui Wang
72941eac36 victorialogs: add more checks for stats query APIs (#7254)
1. Verify that the field in the [fields
pipe](https://docs.victoriametrics.com/victorialogs/logsql/#fields-pipe)
exists. If not, it generates a metric with an illegal float value "" for
the Prometheus metrics protocol.
2. Check whether multiple time range filters produce a conflicting query time
range, for instance:
```
query: _time: 5m | stats count(),
start: 2024-10-08T10:00:00.806Z,
end: 2024-10-08T12:00:00.806Z,
time: 2024-10-10T10:02:59.806Z
```
must give no result due to the invalid final time range.

---------

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-10-16 19:25:43 +02:00
Aliaksandr Valialkin
202eb429a7 lib/logstorage: refactor storage format to be more efficient for querying wide events
It turns out that VictoriaLogs is frequently used for collecting logs with tens of fields.
For example, a standard Kubernetes setup on top of Filebeat generates more than 20 fields per log.
Such logs are also known as "wide events".

The previous storage format was optimized for logs with a few fields. When at least a single field
was referenced in the query, all the meta-information about all the log fields was unpacked
and parsed per each scanned block during the query. This could require a lot of additional disk IO
and CPU time when logs contain many fields. Resolve this issue by providing a (field -> metainfo_offset)
index per each field in every data block. This index allows reading and extracting only the needed
metainfo for the fields used in the query. This index is stored in columnsHeaderIndexFilename ( columns_header_index.bin ).
This increases performance for queries over wide events by 10x and more.

Another issue was that the data for bloom filters and field values across all the log fields except _msg
was intermixed in two files - fieldBloomFilename ( field_bloom.bin ) and fieldValuesFilename ( field_values.bin ).
This could result in huge disk read IO overhead when some small field was referenced in the query,
since the Operating System usually reads more data than requested. It reads the data from disk
in at least 4KiB blocks (usually the block size is much bigger, in the range 64KiB - 512KiB).
So, if a 512-byte bloom filter or values block is read from the file, then the Operating System
reads up to 512KiB of data from disk, which results in 1000x disk read IO overhead. This overhead isn't visible
for recently accessed data, since this data is usually stored in RAM (aka Operating System page cache),
but this overhead may become very annoying when performing the query over large volumes of data
which aren't present in the OS page cache.

The solution for this issue is to split bloom filters and field values across multiple shards.
This reduces the worst-case disk read IO overhead by at least Nx, where N is the number of shards,
while the disk read IO overhead is completely removed in the best case when the number of columns doesn't exceed N.
Currently the number of shards is 8 - see bloomValuesShardsCount. This solution increases
performance for queries over large volumes of newly ingested data by up to 1000x.

The new storage format is versioned as v1, while the old storage format is versioned as v0.
It is stored in the partHeader.FormatVersion.

Parts with the old storage format are converted into parts with the new storage format during background merge.
It is possible to force a merge by querying the /internal/force_merge HTTP endpoint - see https://docs.victoriametrics.com/victorialogs/#forced-merge .
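
A sketch of how per-field sharding of bloom filters and values could be selected; the hash function below is illustrative, not necessarily what the storage format uses:

```go
package logstorage

import "hash/fnv"

// bloomValuesShardsCount mirrors the shard count mentioned above.
const bloomValuesShardsCount = 8

// bloomValuesShardIdx picks the shard holding the bloom filter and values
// for the given field, so a query touching one small field reads from a
// single small shard instead of one big intermixed file.
func bloomValuesShardIdx(fieldName string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(fieldName))
	return h.Sum64() % bloomValuesShardsCount
}
```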
2024-10-16 17:35:07 +02:00
Roman Khavronenko
1d637667a6 vmui: clarify the info for TotalSeries stat (#7271)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-16 15:15:28 +02:00
rusttech
87910e4fa8 app/vmctl: fixes opentsdb source metric tags
Previously, append was incorrectly used on a pre-allocated slice of labels.

This commit fixes the slice append by allocating a zero-length slice with the needed capacity.

---------

Co-authored-by: Nikolay <nik@victoriametrics.com>
2024-10-16 10:35:17 +02:00
hagen1778
e347d90531 docs: update anchor level to fix menu rendering in changelog
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-15 20:40:58 +02:00
Yury Molodov
86029de0d4 vmui: fix alert display with long messages (#7228)
### Describe Your Changes

Fix `Alert` component to prevent it from overflowing the screen when
displaying long messages.

Related issue: #7207

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-15 16:35:57 +02:00
Yury Molodov
0ff17c3ec4 vmui: add retention and downsampling filters debug pages (#7238)
### Describe Your Changes

- add VMUI pages for filters debug
- add `config.json` file to the root of the application. The file
structure is as follows:
  ```
  {
    "license": {
      "type": "enterprise" or "opensource"
    }
  }
  ```
- refactor navigation configuration files. This refactor enables more
flexible customization of menu elements.

UI:

<details>
<summary>Retention filters debug</summary>

Empty page:

![1723474670](https://github.com/user-attachments/assets/3824bf64-dd22-410a-beb5-9599b8769acd)

Results:

![1723474597](https://github.com/user-attachments/assets/1bc074ba-b6a7-4127-8638-65cb32e04db8)

Example config:

![1723541836](https://github.com/user-attachments/assets/ccdb7f75-4e77-42c4-98be-4bfa7809a3b0)

</details>

<details>
<summary>Downsampling filters debug</summary>

Empty page:

![1723474663](https://github.com/user-attachments/assets/7bbd07bd-adce-440f-ba43-f4218e237280)

Results:

![1723474589](https://github.com/user-attachments/assets/b793ae08-b685-427d-81f1-1c7c532a244a)

Example config:

![1723541828](https://github.com/user-attachments/assets/d2ee4e37-8945-4c0f-a4ca-cff5fe3cfcd2)

</details>

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-15 14:52:00 +02:00
Yury Molodov
6c9772b101 vmui: add the ability to cancel running queries (#7204)
### Describe Your Changes

- Added functionality to cancel running queries on the Explore Logs and
Query pages.
- The loader was changed from a spinner to a top bar within the block.
This still indicates loading, but solves the issue of the spinner
"flickering," especially during graph dragging.

Related issue: #7097


https://github.com/user-attachments/assets/98e59aeb-905b-4b9d-bbb2-688223b22a82

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-15 14:48:40 +02:00
Zakhar Bessarab
a8d8987825 lib/jwt: accept scope encoded as a slice (#790)
Some IDPs encode scope as a slice of strings. Handle this gracefully by encoding the slice back to a string.
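
A minimal sketch of accepting both encodings for the scope claim (the claim layout is an assumption):

```go
package jwt

import (
	"encoding/json"
	"fmt"
	"strings"
)

// scopeValue decodes the "scope" claim from either a string or a slice of
// strings, normalizing the slice back to a space-separated string.
type scopeValue string

func (s *scopeValue) UnmarshalJSON(data []byte) error {
	var str string
	if err := json.Unmarshal(data, &str); err == nil {
		*s = scopeValue(str)
		return nil
	}
	var list []string
	if err := json.Unmarshal(data, &list); err == nil {
		*s = scopeValue(strings.Join(list, " "))
		return nil
	}
	return fmt.Errorf("unexpected scope value: %s", data)
}
```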

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

(cherry picked from commit f61d8c3ebb)

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-15 14:40:46 +02:00
Andrii Chubatiuk
daa7183749 lib/protoparser/influx: enable batch processing by default (#7165)
### Describe Your Changes

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7090

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-15 11:48:40 +02:00
Aliaksandr Valialkin
bac193e50b app/vlselect: do not show empty fields in query results
Empty fields are treated as non-existing fields by the VictoriaLogs data model.
So there is no sense in returning empty fields in query results, since they may mislead and confuse users.
2024-10-14 23:43:58 +02:00
Zhu Jiekun
343463fc0f docs: make all statement of active time series consistent (#7242)
### Describe Your Changes

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7212

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-14 15:38:34 +02:00
Andrii Chubatiuk
41e0b62099 docs/victorialogs: fixed HA examples links (#7249)
### Describe Your Changes

Fixed VictoriaLogs HA examples references in docs

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-14 00:01:44 -07:00
Aliaksandr Valialkin
3c73dbbacc app/vlstorage: add support for forced merge via /internal/force_merge HTTP endpoint 2024-10-13 22:20:31 +02:00
Aliaksandr Valialkin
b4b79a4961 lib/logstorage: make a copy of s.partitions slice when performing queries over the selected partitions
s.partitions can be changed when a new partition is registered or when an old partition is dropped.
This could lead to data races and panics when the s.partitions slice is accessed by concurrently executed queries.

The fix is to make a copy of the selected partitions under s.partitionsLock before performing the query.
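
A simplified sketch of the snapshot-under-lock pattern (the real code also manages partition reference counts):

```go
package logstorage

import "sync"

type partition struct{} // stand-in for the real partition type

type storage struct {
	partitionsLock sync.Mutex
	partitions     []*partition
}

// getPartitionsSnapshot copies s.partitions under the lock, so a query can
// iterate the snapshot safely while partitions are registered or dropped
// concurrently.
func (s *storage) getPartitionsSnapshot() []*partition {
	s.partitionsLock.Lock()
	pts := append([]*partition{}, s.partitions...)
	s.partitionsLock.Unlock()
	return pts
}
```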
2024-10-13 22:14:34 +02:00
Aliaksandr Valialkin
507b206a7d lib/logstorage: move getConstColumnValue() and getColumnHeader() methods from columnsHeader to blockSearch
This localizes the blockSearch.getColumnsHeader() call in block_search.go.
This call is going to be optimized in the next commits in order to avoid
unmarshaling header data for unneeded columns, which weren't requested
by getConstColumnValue() / getColumnHeader().
2024-10-13 14:29:02 +02:00
Aliaksandr Valialkin
279e25e7c8 lib/logstorage: avoid redundant copying of column names and column values for dictionary-encoded columns during querying
Reference the original byte slice with the marshaled columnsHeader for column names and dictionary-encoded column values.
This improves query performance a bit when a big number of blocks with a big number of columns is scanned during the query.
2024-10-13 13:25:38 +02:00
Aliaksandr Valialkin
91f5417572 docs/VictoriaLogs: replace incorrect usage of VictoriaMetrics with VictoriaLogs 2024-10-13 13:01:42 +02:00
Aliaksandr Valialkin
9e48074b59 lib/logstorage: avoid calling columnsHeader.initFromBlockHeader() multiple times for the same blockSearch
This should improve performance when blockSearch.getColumnsHeader() is called multiple times
from different places of the code.
2024-10-13 12:56:12 +02:00
Aliaksandr Valialkin
200d723b9a docs/VictoriaLogs/Roadmap.md: remove OpenTelemetry, Fluentd and Telegraf from the list of data ingestion protocols, which are going to be supported by VictoriaLogs
These protocols are already supported:

- OpenTelemetry protocol - https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4839 ,
  https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6218 and 01430a155c

- Fluentd protocol - https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7098
  and 05a64a8c14

- Telegraf protocol - https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5310 ,
  https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6443 and 1731c0eabf
2024-10-13 11:10:19 +02:00
Aliaksandr Valialkin
867f671cc4 lib/logstorage: make sure that bs.br is non-nil before checking br.bs.bsw.bh.rowsCount there
br.bs may be nil when br contains the block with additional filters applied during pipe calculations.
For example, `* | count() if (error) errors`.
2024-10-12 20:51:29 +02:00
Zakhar Bessarab
2239f5829f docs/victoria-logs: fix typo (#7245)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-12 02:24:51 +04:00
hagen1778
22d3f67908 docs: fix typos in change line
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-11 16:43:50 +02:00
Hui Wang
d3f110373c dashboards: fix description about pending datapoints (#7235)
See [our
playground](https://play-grafana.victoriametrics.com/d/oS7Bi_0Wz_vm/victoriametrics-cluster-vm?orgId=1&var-ds=P996FABE17B5F6D1E&var-job=All&var-job_insert=All&var-job_select=All&var-job_storage=All&var-instance=All)
for reference.
2024-10-11 13:47:14 +02:00
Yury Molodov
c7771b1866 vmui: update dependencies npm (#7209)
### Describe Your Changes

Related issue: #7142

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-11 13:46:15 +02:00
Github Actions
6a738e0b41 Automatic update helm docs from VictoriaMetrics/helm-charts@c584b99 (#7237)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-11 13:45:34 +02:00
Andrii Chubatiuk
9eb0c1fd86 lib/protoparser/opentelemetry: added exponential histograms support (#6354)
### Describe Your Changes

Added OpenTelemetry exponential histogram support. Such histograms are automatically converted into
VictoriaMetrics histograms with `vmrange` buckets.
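
For reference, a sketch of the bucket-boundary math defined by the OpenTelemetry exponential histogram spec, which such a conversion has to evaluate before mapping buckets to `vmrange` labels:

```go
package opentelemetry

import "math"

// expBucketBounds returns the (lower, upper] bounds of bucket index i in an
// OpenTelemetry exponential histogram with the given scale, where
// base = 2^(2^-scale). The bounds can then be rendered as a
// `vmrange="<lower>...<upper>"` label.
func expBucketBounds(scale, i int32) (lower, upper float64) {
	base := math.Pow(2, math.Pow(2, -float64(scale)))
	lower = math.Pow(base, float64(i))
	upper = math.Pow(base, float64(i+1))
	return lower, upper
}
```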

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-11 13:44:52 +02:00
Lauri Tirkkonen
8fe41b2b08 deployment/alerts: fix quoting on DiskRunsOutOfSpace (#7234)
### Describe Your Changes

there's an extra `"` at the end of the dashboard url for this alert;
remove it by making the quoting consistent with other alerts in this
file.

### Checklist

The following checks are **mandatory**:

- [X] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Co-authored-by: Lauri Tirkkonen <lauri@hacktheplanet.fi>
2024-10-11 00:44:18 -07:00
Dmytro Kozlov
95de37de2c docs/victoriametrics-cloud: fix button description (#7221)
### Describe Your Changes

Fixed button name in the cloud docs

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-10 16:18:13 +02:00
Aliaksandr Valialkin
b9a8c1ff3a docs/VictoriaLogs/querying/vlogscli.md: add the list of features of vlogscli 2024-10-10 11:49:10 +02:00
Github Actions
595298ac98 Automatic update helm docs from VictoriaMetrics/helm-charts@c4a0442 (#7223)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-10 01:56:22 -07:00
n4mine
f060b67da5 docs: fix typo in docs/VictoriaLogs/data-ingestion/Vector.md (#7222)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-10 09:45:30 +02:00
Github Actions
d3f4b01001 Automatic update helm docs from VictoriaMetrics/helm-charts@0b610bd (#7220)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-10 09:10:26 +02:00
Dmytro Kozlov
c910c1c6b8 docs/victoriametrics-cloud: update images in the cloud documentation (#7210)
### Describe Your Changes

Update images with updated interface of the cloud solution 

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-10 09:08:32 +02:00
Roman Khavronenko
ca2a08eabe docs: update stream aggregation docs (#7202)
This PR is based on
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6777. The
differences are the following:
* it keeps backward compatibility for links
* it re-structures only the original document file
* it adds a #common-mistakes section, re-phrased

### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
2024-10-09 21:35:51 +02:00
Aliaksandr Valialkin
fe022ed795 app/vlselect/logsql: add missing return from ProcessLiveTailRequest() when the query cannot be live tailed 2024-10-09 16:30:10 +02:00
Aliaksandr Valialkin
baa87b5b36 deployment: update VictoriaLogs from v0.34.0-victorialogs to v0.35.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.35.0-victorialogs
2024-10-09 16:17:51 +02:00
Aliaksandr Valialkin
7b475ed95d lib/logstorage: disallow using pipe names as the first unquoted words in filter pipe
Improperly written pipes could be silently parsed as filter pipe.
For example, the following query:

   * | by (x)

was silently parsed to:

   * | filter "by" x

It is better to return an error, so the user can identify and fix the invalid pipe
instead of silently executing an invalid query with the `filter` pipe.
2024-10-09 16:10:13 +02:00
Aliaksandr Valialkin
252aa792f7 docs/VictoriaLogs: cut v0.35.0 release 2024-10-09 15:55:20 +02:00
Aliaksandr Valialkin
9413b2de91 docs/VictoriaLogs: make vlogscli more visible 2024-10-09 15:54:25 +02:00
Aliaksandr Valialkin
6acf543b90 lib/logstorage: disallow using by as the first word in log filters, since it frequently clashes with the stats by(...) pipe where the stats word is omitted 2024-10-09 15:53:15 +02:00
Aliaksandr Valialkin
ad5d8097da app/vlogscli: add -accountID and -projectID command-line flags for querying the given tenants 2024-10-09 12:56:49 +02:00
Aliaksandr Valialkin
e31625e0b2 app/vlogscli: add support for live tailing 2024-10-09 12:30:17 +02:00
n4mine
344d61da79 docs: fix typo in docs/VictoriaLogs/data-ingestion/Vector.md (#7208)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-09 00:32:36 -07:00
Fred Navruzov
f61d8c3ebb docs/vmanomaly: updates for v1.16.3 (#7203)
### Describe Your Changes

docs/vmanomaly: updates for v1.16.3

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-08 19:24:56 +03:00
kirti purohit
008b649658 vmalert: parse multi doc yaml (#6995)
### Describe Your Changes

This PR adds the feature to parse a multi-doc YAML file, where documents are
separated by `\n---\n`.
The issue is
[6753](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6753)
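
A sketch of multi-document parsing with yaml.Decoder; the Group type below is heavily simplified compared to vmalert's real config structs:

```go
package config

import (
	"bytes"
	"errors"
	"io"

	"gopkg.in/yaml.v2"
)

// Group is a simplified stand-in for a vmalert rule group.
type Group struct {
	Name  string `yaml:"name"`
	Rules []any  `yaml:"rules"`
}

// parseMultiDoc decodes every YAML document in data, where documents are
// separated by "\n---\n", and concatenates their groups.
func parseMultiDoc(data []byte) ([]Group, error) {
	var groups []Group
	dec := yaml.NewDecoder(bytes.NewReader(data))
	for {
		var doc struct {
			Groups []Group `yaml:"groups"`
		}
		if err := dec.Decode(&doc); err != nil {
			if errors.Is(err, io.EOF) {
				break // no more documents in the stream
			}
			return nil, err
		}
		groups = append(groups, doc.Groups...)
	}
	return groups, nil
}
```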

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: kirti purohit <kirti.purohit@hpe.com>
Co-authored-by: kirti purohit <kirti.purohit@hpe.com>
Co-authored-by: Jiekun <jiekun@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-10-08 14:28:32 +02:00
Artem Fetishev
e2c73dc89f app/(vmagent,vmalert)/remotewrite/client: Fix flag docs (#7198)
### Describe Your Changes

The flag docs mention a flag that does not exist (and never existed).
Perhaps that was a typo.

`s/retryMaxInterval/retryMaxTime/g`

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-10-08 13:14:38 +02:00
Aliaksandr Valialkin
03862368b5 deployment: update VictoriaLogs image tag from v0.33.0-victorialogs to v0.34.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.34.0-victorialogs
2024-10-08 12:30:27 +02:00
Aliaksandr Valialkin
6878982c93 docs/VictoriaLogs/CHANGELOG.md: cut v0.34.0 release 2024-10-08 12:21:19 +02:00
Zakhar Bessarab
eefae85450 vmagent: add support of HTTP2 client for Kubernetes SD (#7114)
### Describe Your Changes

Currently, vmagent always uses a separate `http.Client` for every group
watcher in Kubernetes SD. With a high number of group watchers this
leads to a large number of open connections.

This PR adds 2 changes to address this:
- re-use of the existing `http.Client` - in case an `http.Client` is connecting
to the same API server and uses the same parameters, it is re-used
between group watchers
- HTTP2 support - this allows reusing connections more efficiently thanks
to the ability to stream via existing connections.

See this issue for the details and test results -
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5971

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-10-08 10:36:31 +02:00
Zakhar Bessarab
9b6efb5e81 make: add darwin builds for cluster (#7195)
### Describe Your Changes

Add darwin `amd64` and `arm64` builds for the cluster binaries.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>

(cherry picked from commit b9115d6882)
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-08 10:19:58 +02:00
Aliaksandr Valialkin
89686094a0 lib/logstorage: allow special chars in unquoted _stream tag names and values
This simplifies writing _stream filters. For example,

{foo-bar=abc:de}

can be written instead of

{"foo-bar"="abc:de"}
2024-10-07 15:10:03 +02:00
Aliaksandr Valialkin
462b7cd597 lib/logstorage: quote logfmt strings only if they contain special chars, which could break logfmt parsing and/or reading 2024-10-07 14:31:30 +02:00
Roman Khavronenko
ebd393d8b3 app/vmselect/promql: fix seriesFetched update logic (#7181)
### Describe Your Changes

evalInstantRollup could have overreport the number of fetched series if
`offset` checks will result into retry. This change updates fetched
series only if these checks were successful.

It also adds a comment to another potential place of over-reporting
series fetched. It doesn't fix it, because it would require spending
extra resources on such a check, while discrepancy in seriesFetched
doesn't affect calculations in any way.

Probably related to
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7170

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-07 14:27:50 +02:00
Github Actions
5481fa669c Automatic update operator docs from VictoriaMetrics/operator@80f95d2 (#7191)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: Haleygo <39937150+Haleygo@users.noreply.github.com>
2024-10-07 14:26:39 +02:00
Meysam
5e7b3e035b docs: fix typo in vmagent docs (#7192) 2024-10-07 14:26:20 +02:00
Aliaksandr Valialkin
492190885d app/vlogscli: add ability to display query results in logfmt, single-line and multi-line json modes 2024-10-07 12:20:06 +02:00
Artem Navoiev
e144a2b062 docs: change tier menu item name
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-07 12:12:45 +02:00
Aliaksandr Valialkin
2d8785fdf6 app/vlogscli: store incompletely written lines in the history 2024-10-07 10:57:50 +02:00
Aliaksandr Valialkin
6c9e643ea8 app/vlogscli: add \q command for the exit from vlogcli
This should help psql users, who expect `\q` command for the exit
2024-10-07 10:57:49 +02:00
Artem Navoiev
2593f32b63 docs: replace cloud url to be consistent with the other ones (#7194)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-07 10:44:01 +02:00
Aliaksandr Valialkin
daad96b3a5 app/vlogscli: return back sorting result fields by name
This simplifies locating the needed field when the number of fields per returned result is big.
2024-10-07 10:41:48 +02:00
Artem Navoiev
18dd4105be docs: add audit logs page for Cloud (#7193)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).

---------

Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-07 01:38:00 -07:00
Fred Navruzov
5c9bd35eb9 docs/vmanomaly: remove duplicate header in VmWriter docs (#7189)
### Describe Your Changes

docs/vmanomaly: remove duplicate header in VmWriter docs

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-06 09:49:04 -07:00
Fred Navruzov
b2e7b05918 docs/vmanomaly: release 1.16.2 (#7188)
### Describe Your Changes

docs for `vmanomaly`, updated after release 1.16.2

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
2024-10-06 18:54:43 +03:00
Aliaksandr Valialkin
596e4de248 app/vlogscli: preserve the original order of fields in the displayed responses 2024-10-05 21:27:32 +02:00
Github Actions
4d9ad9654f Automatic update helm docs from VictoriaMetrics/helm-charts@65cc293 (#7180)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-10-04 10:50:23 +02:00
Aliaksandr Valialkin
66645c3dff docs/VictoriaLogs/LogsQL.md: remove redundant replace word in examples for conditional replace
Thanks to @p5i for the bugreport at https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7168
2024-10-03 20:26:58 +02:00
dependabot[bot]
155089afbf build(deps-dev): bump rollup from 2.79.1 to 2.79.2 in /app/vmui/packages/vmui (#7131)
Bumps [rollup](https://github.com/rollup/rollup) from 2.79.1 to 2.79.2.

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-03 18:41:48 +02:00
Artem Fetishev
c1cd3e85a7 lib/promscrape: Fix TestClientProxyReadOk flaky test (#7173)
This PR fixes #7062 

For hijacked connections, one has to read from the connection buffer,
but still write directly to the connection. Otherwise, when reading
directly from such connections, the first byte may be lost. This, in
turn, corrupts the ClientHello TLS handshake message, and when the backend
server receives it, it closes the connection and reports the following
error in the log:

```
http: TLS handshake error from 127.0.0.1:33150: tls: first record does not look
like a TLS handshake
```

The first byte may be lost because the underlying HTTP request handler may
read it from the connection and put it into the buffer. As a result,
subsequent connection reads won't see that byte.

- See: https://github.com/golang/go/issues/27408
- The fix is taken from: https://github.com/k3s-io/k3s/pull/6216
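
A minimal sketch of this pattern in Go (hypothetical names, not the actual test code from this PR): reads must drain the buffered reader returned by `Hijack()`, while writes bypass it and go straight to the underlying connection.

```
package main

import (
	"bufio"
	"net"
)

// hijackedConn illustrates the fix described above: reads go through the
// buffered reader returned by http.Hijacker.Hijack(), since it may already
// hold the first byte(s) of the next message (e.g. the start of a TLS
// ClientHello), while writes go directly to the underlying connection.
type hijackedConn struct {
	net.Conn        // Write, Close and deadlines hit the raw connection
	r *bufio.Reader // Read must drain the buffer first
}

func (c *hijackedConn) Read(p []byte) (int, error) {
	return c.r.Read(p)
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()
	// Wrap one side the way a hijacked connection should be wrapped.
	var conn net.Conn = &hijackedConn{Conn: server, r: bufio.NewReader(server)}
	_ = conn
}
```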

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-10-03 18:27:15 +02:00
Aliaksandr Valialkin
364f084b43 lib/logstorage: add len pipe for calculating byte length of log field values 2024-10-03 18:21:10 +02:00
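A possible usage sketch (assuming the `len(<field>) as <result>` syntax from the LogsQL docs), which stores the byte length of each `_msg` value in `msg_len` and shows the 5 longest messages:

    _time:5m | len(_msg) as msg_len | sort by (msg_len) desc | limit 5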
Fred Navruzov
63a9fb34d1 docs/vmanomaly: fix example snippets (#7174)
### Describe Your Changes

Fix typos and improper version references in code snippets of example usage

2024-10-03 18:30:24 +04:00
hagen1778
3f2bfd2ff6 docs: move Retry-After to the 1.104.0 notes
It was mistakenly placed in 1.103.0

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-03 15:23:03 +02:00
hagen1778
7f47713821 docs: add missing -search.maxDeleteSeries to vmselect flags
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-03 14:11:04 +02:00
Zhu Jiekun
c22eae0384 docs: fix anchor in github readme (#7160)
### Describe Your Changes

Fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7155.

Some anchors became invalid after
58e667c895.
This PR fixed them.

2024-10-03 10:39:57 +02:00
hagen1778
4086cef01c docs: rm incorrectly placed bugfix change from v1.103
The change was present in v1.103 by mistake.
In fact, it was released in v1.104
See c193e6d43e

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-03 09:55:07 +02:00
hagen1778
80d4acf2cf docs: add tickets routine to release guide
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-03 09:49:07 +02:00
hagen1778
feba481ac2 docs: re-qualify -search.maxDeleteSeries change into feature
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 21:18:54 +02:00
hagen1778
ce81a86fc2 docs: re-order changes by priority
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 21:15:59 +02:00
hagen1778
41850995d3 docs: rm vm_rows_ignored_total{reason="nan_value"}
It was reverted in 0d4f4b8f7d

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 21:13:53 +02:00
Artem Navoiev
ddfb6db8cf docs: api cloud clarify key value property
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-02 19:46:11 +02:00
Artem Navoiev
5df015b9db docs: api cloud fix the naming
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-02 18:00:43 +02:00
hagen1778
bd84f8a35d docs: bump VM latest LTS releases
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 16:34:06 +02:00
hagen1778
07902baa8e deployment: bump VM to v1.104.0
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 16:33:00 +02:00
hagen1778
4c6b7ce6da docs: mention to test releases before publishing them in release guide
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 16:31:11 +02:00
hagen1778
8592fc3162 docs: add link to docs for multitenant reads
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 16:27:20 +02:00
Fred Navruzov
150ee902fd docs/vmanomaly: patch release 1.16.1 (#7169)
### Describe Your Changes

`vmanomaly` patch release 1.16.1 updates

2024-10-02 18:10:08 +04:00
hagen1778
01bc28eda2 docs: mention version available for multitenant reads
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 14:58:06 +02:00
hagen1778
cd8a478a8d docs: actualize multitenancy docs
Follow-up after https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6346

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 14:35:29 +02:00
hagen1778
36acde1d11 docs: add missing release notes
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 14:20:55 +02:00
Roman Khavronenko
0d4f4b8f7d (app|lib)/vmstorage: do not increment vm_rows_ignored_total on NaNs (#7166)
`vm_rows_ignored_total` is a metric for users to signal about
ingestion issues, such as bad timestamps or parsing errors.
In commit
a5424e95b3
this metric started to increment each time vmstorage received a NaN. But NaN
is a valid value for the Prometheus data model and for the Prometheus metrics
exposition format. Exporters from the Prometheus ecosystem may expose NaNs
as values for metrics, and these values will be delivered to vmstorage
and increment the metric.
Since there is nothing the user can do about this, in contrast to parsing
errors or bad timestamps, there is not much sense in incrementing this
metric. So this commit rolls back the `reason="nan_value"` increments.


Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-02 12:37:27 +02:00
Artem Navoiev
c04c377d09 docs: cloud API remove link to guide as we don't have it
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-02 11:10:38 +02:00
Artem Navoiev
504da7d02b docs: add API doc for cloud (#7164)

Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-10-02 10:58:35 +02:00
Fred Navruzov
dde2a0cb25 docs/vmanomaly - update versions in examples (#7163)
### Describe Your Changes

update `vmanomaly` versions in examples

2024-10-02 11:53:05 +03:00
Fred Navruzov
0e54cfe350 docs/vmanomaly - release 1.16.0 docs (#7159)
### Describe Your Changes

doc updates for vmanomaly v1.16.0

2024-10-02 00:19:14 +02:00
Aliaksandr Valialkin
456aeda605 app/vlogscli: preserve less output
This simplifies log investigation, since it allows copying some text from the previous query output
2024-10-01 21:46:36 +02:00
Aliaksandr Valialkin
3634fefc64 docs/VictoriaLogs/querying/vlogscli.md: typo fixes 2024-10-01 21:10:05 +02:00
f41gh7
776c501cb2 CHANGELOG.md: cut v1.104.0 release 2024-10-01 16:55:04 +02:00
f41gh7
076a1f84e1 vmselect: add support of multi-tenant queries
Added the ability to query data across multiple tenants. See:
VictoriaMetrics/VictoriaMetrics#1434

Currently, the following endpoints work with multi-tenancy (see the example query after this list):
- /prometheus/api/v1/query
- /prometheus/api/v1/query_range
- /prometheus/api/v1/series
- /prometheus/api/v1/labels
- /prometheus/api/v1/label/<label_name>/values
- /prometheus/api/v1/status/active_queries
- /prometheus/api/v1/status/top_queries
- /prometheus/api/v1/status/tsdb
- /prometheus/api/v1/export
- /prometheus/api/v1/export/csv
- /vmui

A note regarding VMUI: endpoints such as `active_queries` and
`top_queries` have been updated to indicate whether a query was
single-tenant or multi-tenant, but the UI needs to be updated to display
this info.
cc: @Loori-R
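
For example, assuming the standard cluster URL layout, an instant query across all tenants could look like this (the `multitenant` path segment is an assumption based on the cluster multitenancy docs, not part of this commit message):

    http://<vmselect>:8481/select/multitenant/prometheus/api/v1/query?query=up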

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: f41gh7 <nik@victoriametrics.com>
2024-10-01 16:49:46 +02:00
Aliaksandr Valialkin
496015aa0e docs/VictoriaLogs/querying/vlogscli.md: provide instructions on how to download and run vlogscli 2024-10-01 16:13:12 +02:00
Aliaksandr Valialkin
b05fbee63d deployment: update VictoriaLogs docker image from v0.32.1-victorialogs to v0.33.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.33.0-victorialogs
2024-10-01 13:57:56 +02:00
Aliaksandr Valialkin
234c81754e docs/VictoriaLogs/CHANGELOG.md: cut v0.33.0-victorialogs release 2024-10-01 13:42:18 +02:00
Roman Khavronenko
d57d8b5e60 docs: add note that otel is not using streaming parsing (#7148)

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-01 13:39:40 +02:00
Aliaksandr Valialkin
a350be48b6 lib/logstorage: do not count dictionary values which have no matching logs in count_uniq stats function
Create blockResultColumn.forEachDictValue* helper functions for visiting matching
dictionary values. These helper functions should prevent counting dictionary values
without matching logs in the future.

This is a follow-up for 0c0f013a60
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7152
2024-10-01 13:34:45 +02:00
Aliaksandr Valialkin
630211cfed app/vlogscli: add interactive command-line tool for querying VictoriaLogs 2024-10-01 12:23:07 +02:00
Aliaksandr Valialkin
61ae077e43 docs/VictoriaLogs/FAQ.md: clarify answers a bit 2024-10-01 12:08:27 +02:00
Roman Khavronenko
ead3250b80 docs: old logo seems not referenced anymore (#7151)
6y old :)

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-10-01 10:26:34 +02:00
Nikolay
fbaa026ae6 dashboards: updates operator dashboard (#7139)
* Replaces deprecated graphs with Timeseries panels
* Adds new latency dashboards for rest client and golang scheduler
* Adds new overview panels
* Adds VM Datasource version of dashboard

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-09-30 15:35:39 +02:00
Artem Navoiev
0b2d3d7752 docs: cloud add support page. remove title as far it's already there
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-09-30 14:45:33 +02:00
Zhu Jiekun
7bb8853a5c feature: [vmagent] Add service discovery support for OVH Cloud VPS and dedicated server (#6160)
### Describe Your Changes
related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6071

#### Added
- Added service discovery support for OVH Cloud:
    - VPS.
    - Dedicated server.

#### Docs
- `CHANGELOG.md`, `sd_configs.md`, `vmagent.md` are updated.

#### Note
- Useful links: 
    - OVH Cloud VPS API: https://eu.api.ovh.com/console/#/vps~GET
- OVH Cloud Dedicated server API:
https://eu.api.ovh.com/console/#/dedicated/server~GET
    - OVH Cloud SDK: https://github.com/ovh/go-ovh
- Prometheus SD:
https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ovhcloud_sd_config

Tested on OVH Cloud VPS and dedicated server.
<img width="1722" alt="image"
src="https://github.com/VictoriaMetrics/VictoriaMetrics/assets/30280396/d3f0adc8-b0ef-423e-9379-8a9b9b0792ee">

<img width="1724" alt="image"
src="https://github.com/VictoriaMetrics/VictoriaMetrics/assets/30280396/18b5b730-3512-4fc0-8b2c-f2450ac550fd">

---
Signed-off-by: Jiekun <jiekun@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-09-30 14:42:46 +02:00
Aliaksandr Valialkin
aafa9262c5 deployment/docker: update VictoriaLogs from v0.32.0-victorialogs to v0.32.1-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.32.1-victorialogs
2024-09-30 14:35:40 +02:00
Aliaksandr Valialkin
82482fca4b docs/VictoriaLogs/CHANGELOG.md: cut v0.32.1-victorialogs release 2024-09-30 14:31:17 +02:00
Artem Navoiev
f5f04c903b docs: cloud add support page (#7143)
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-09-30 14:29:26 +02:00
Hui Wang
664f337c70 stream aggregation: fix possible duplicated aggregation results (#7118)
When ingesting samples with the same labels (duplicated samples, or
samples with the same labels after the `by` or `without` options), they
could register different entries for the same labelset in
LabelsCompressor.
For example, both index 99 and 100 can be assigned to the label `foo=1` in
two concurrent pushes. Then, due to differing label indexes in the encoded
keys, the samples will appear as distinct in aggrState, resulting in
duplicated results after decompressing the label indexes.

fbde238cdc/lib/streamaggr/streamaggr.go (L933)

In this pull request, since we need to store `idxToLabel` first to
ensure the idx can be found after `lc.labelToIdxStore`,
`lc.idxToLabel` could still contain a duplicated entry
[100]="foo=1". But given the low likelihood of this issue and the size
of idxToLabel, it should be fine.
2024-09-30 14:24:59 +02:00
Aliaksandr Valialkin
0c0f013a60 lib/logstorage: skip values with zero hits for 'uniq', 'top' and 'field_values' pipes
See https://github.com/VictoriaMetrics/victorialogs-datasource/issues/72#issuecomment-2352078483
2024-09-30 14:15:07 +02:00
Artem Fetishev
85ea0f80fc Change the default value of the maxDeleteSeries flag to 1 million (#7140)
Change the default value of the maxDeleteSeries flag to 1 million. This
is a follow up for ed5da38ede
---

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2024-09-30 12:40:49 +02:00
f41gh7
758f42fc12 docs: add Update Note for upcoming release changes
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2024-09-30 12:37:30 +02:00
Artem Fetishev
ed5da38ede Introduce a flag for limiting the number of time series to delete (#7091)
### Describe Your Changes

Introduce the `-search.maxDeleteSeries` flag that limits the number of
time series that can be deleted with a single
`/api/v1/admin/tsdb/delete_series` call.

Currently, any number can be deleted and if the number is big (millions)
then the operation may result in unaccounted CPU and memory usage spikes
which in some cases may result in OOM kill (see #7027). The flag limits
the number to 30k by default and the users may override it if needed at
the vmstorage start time.


---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
Co-authored-by: Nikolay <nik@victoriametrics.com>
2024-09-30 10:02:21 +02:00
Alexander Frolov
80a3c410d4 vmselect: ensure default -search.maxConcurrentRequests is non-decreasing (#6996)
### Describe Your Changes

vmselect determines the default value of `-search.maxConcurrentRequests`
by multiplying the number of available CPUs by 2, if and only if that number
is small (to be precise, <= 4). That causes
`-search.maxConcurrentRequests` to decrease at the boundary between these two
cases, as shown below:

| CPUs | MaxConcurrentRequests | MaxConcurrentRequests (original proposal) | MaxConcurrentRequests (updated proposal) |
|--------|--------|--------|--------|
| 1 | 2 | 2 | 2 |
| 2 | 4 (prev+2) | 4 (prev+2) | 4 (prev+2) |
| 3 | 6 (prev+2) | 6 (prev+2) | 6 (prev+2) |
| 4 | 8 (prev+2) | 8 (prev+2) | 8 (prev+2) |
| 5 | 5 __(prev-3)__ | 9 __(prev+1)__ | 10 __(prev+2)__ |
| 6 | 6 (prev+1) | 10 (prev+1) | 12 (prev+2) |
| 7 | 7 (prev+1) | 11 (prev+1) | 14 (prev+2) |
| 8 | 8 (prev+1) | 12 (prev+1) | 16 (prev+2) |

I propose to make the default value non-decreasing.
2024-09-30 09:51:54 +02:00
hagen1778
2df2e9b92f docs: use global dedup flag in vmagent's FlexibleDeduplication docs
Current doc is using per-url deduplication, and users might use this example
when they have more than 1 remoteWrite URL. Which would result into extra resource usage.
Changing the example to use global dedup, as it makes more sense.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-30 09:08:56 +02:00
Github Actions
3d01bc3fbe Automatic update helm docs from VictoriaMetrics/helm-charts@de4356f (#7138)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-30 13:03:15 +08:00
Github Actions
5e67e611b8 Automatic update operator docs from VictoriaMetrics/operator@0e44654 (#7136)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-29 22:30:24 +02:00
Aliaksandr Valialkin
2047ad20ef deployment: update VictoriaLogs docker image from v0.31.0-victorialogs to v0.32.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.32.0-victorialogs
2024-09-29 14:54:21 +02:00
Aliaksandr Valialkin
45cfb6b526 docs/VictoriaLogs/CHANGELOG.md: cut v0.32.0-victorialogs 2024-09-29 14:47:31 +02:00
Aliaksandr Valialkin
1da4650143 lib/logstorage: allow using ! in unescaped phrase
Previously the phrase filter with `!` was treated unexpectedly.
For example, the `foo!bar` filter was treated as `foo AND NOT bar`,
while most users expect it to match the "foo!bar" phrase.

This commit aligns with users' expectations.
2024-09-29 11:14:15 +02:00
Aliaksandr Valialkin
60183c7c79 lib/logstorage: allow using - instead of ! in front of (...) 2024-09-29 11:12:22 +02:00
Nikolay
3bbb2aed72 fscore: rollback trailing space trim (#7106)
The previous commit 201fd6de1e removed
trailing space trimming from data read from a file. But it is common practice to
remove such trailing spaces, and the change led to authorization errors
for a large group of users.

In the first place, this change should help mitigate an issue with
Kubernetes, where authorization information is read from Secret contents.
Changes to the operator were made to mitigate this problem in commit
1cf64358c8

We could later introduce an optional flag for VictoriaMetrics to disable
the trim-space behavior.

Related issues:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6986
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7089 
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6947

---------

Signed-off-by: f41gh7 <nik@victoriametrics.com>
Co-authored-by: Zhu Jiekun <jiekun@victoriametrics.com>
2024-09-29 10:59:25 +02:00
Aliaksandr Valialkin
b52862badf lib/logstorage: return the expected hits results from uniq pipe when the number of unique values reaches the specified limit
Previously the `uniq` pipe could return zero `hits` if the number of found unique values equals the specified limit.
This wasn't expected in most cases.
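
For example, the following query (a sketch; `ip` is a hypothetical log field) now returns the expected per-value `hits` even when the number of unique values reaches the limit:

    _time:5m | uniq by (ip) with hits limit 10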
2024-09-29 10:51:09 +02:00
Aliaksandr Valialkin
55eb321f77 lib/logstorage: clear hits slice obtained from encoding.GetUint64s() before updating it with hits for valueTypeDict column
encoding.GetUint64s() returns an uninitialized slice, which may contain arbitrary values.
So values in this slice must be reset to zero before using it for counting hits in `uniq` and `top` pipes.
2024-09-29 10:29:13 +02:00
Aliaksandr Valialkin
94afcbd9a9 lib/logstorage: postpone initialization of per-shard stateSizeBudget until the first call to pipeProcessor.writeBlock()
This simplifies pipeProcessor initialization logic a bit.
This also doesn't mangle the original maxStateSize value, which is used in error messages when the state size exceeds maxStateSize.
2024-09-29 10:29:13 +02:00
Andrii Chubatiuk
dfcab4a47f deployment/victorialogs: rename not executable compose files (#7124)
### Describe Your Changes

Renamed the base compose files to prevent environments from being created from them

2024-09-29 09:54:45 +02:00
Aliaksandr Valialkin
0b91452ca4 lib/logstorage: add non-empty if (...) condition to automatically generated result names in stats pipe
This allows executing queries with `stats` pipe, which calculate multiple results with the same functions,
but with different `if (...)` conditions. For example:

  _time:5m | count(), count() if (error)

Previously such queries couldn't be executed because the automatically generated name for the second result
didn't include `if (error)`, so the names for both results were identical - `count(*)`.
2024-09-29 09:51:28 +02:00
Aliaksandr Valialkin
8772aea24b lib/logstorage: support order alias for sort pipe
Now the following queries are equivalents:

    _time:5s | sort by (_time)

    _time:5s | order by (_time)

This is needed for convenience, since `order by` is commonly used in other query languages such as SQL.
2024-09-29 09:51:27 +02:00
Artem Navoiev
14a0396f53 docs: changelog fix typo in url
Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-09-28 23:30:19 +02:00
Artem Navoiev
96efe99eef docs: mention new create backup api in docs and changelog (#7104)

Signed-off-by: Artem Navoiev <tenmozes@gmail.com>
2024-09-28 14:28:58 -07:00
Aliaksandr Valialkin
806bc2ac58 app/vlinsert: support unix timestamps in seconds and milliseconds in JSON stream data ingestion API 2024-09-28 21:56:50 +02:00
Aliaksandr Valialkin
7d7d7c03bc app/vlinsert: accept unix timestamp in seconds additionally to milliseconds at ElasticSearch bulk API
Timestamps in seconds are sometimes used for data ingestion via ElasticSearch bulk API
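
For example, such a bulk payload could now look like this (a sketch; the field values are hypothetical, and the `/insert/elasticsearch/_bulk` endpoint is taken from the VictoriaLogs data-ingestion docs):

    {"create":{}}
    {"_msg":"connection established","_time":1727554800}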
2024-09-28 21:19:54 +02:00
Roman Khavronenko
fd890a2771 deployment: separate datasource between single and cluster versions (#7119)
Before, single and cluster deployments were provisioned with both
Grafana datasources: single and cluster. But this resulted in a
problem: the single DS didn't work for the cluster and vice versa. See
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7113

This PR splits the datasource file into 2 files: single and cluster. Now
these files are separately provisioned to single and cluster deployments
correspondingly.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-27 19:08:05 +02:00
Github Actions
e62b88b2e0 Automatic update operator docs from VictoriaMetrics/operator@d39fc94 (#7129)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-27 19:03:14 +02:00
Andrii Chubatiuk
00912bfa0f docs/victorialogs: change fluentd input type in examples (#7123)
### Describe Your Changes

Use fluentd logging driver in examples to have enriched data in
VictoriaLogs

2024-09-27 15:09:36 +02:00
Roman Khavronenko
59bc63ebc4 app/vmalert: mention labels conflict resolution strategy (#7085)
The change should help users to understand what happens on labels
conflict.


Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-27 14:41:33 +02:00
Andrii Chubatiuk
05a64a8c14 victorialogs: marked fluentd support in roadmap, added syslog example (#7098)
### Describe Your Changes

Marked fluentd as supported in the victorialogs roadmap
Added a fluentd syslog example setup

2024-09-27 14:38:39 +02:00
Aliaksandr Valialkin
86c0eb816c deployment: update VictoriaLogs docker image from v0.30.1-victorialogs to v0.31.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.31.0-victorialogs
2024-09-27 13:58:54 +02:00
Aliaksandr Valialkin
58c69386c7 docs/VictoriaLogs/CHANGELOG.md: cut v0.31.0-victorialogs release 2024-09-27 13:54:17 +02:00
Aliaksandr Valialkin
c8e23eefba app/{vmselect,vlselect}: run make vmui-update vmui-logs-update after 25a9802ca4 and 8657d03433
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7088
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5924

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7025
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6545#issuecomment-2336805237
2024-09-27 13:50:47 +02:00
Yury Molodov
25a9802ca4 vmui: add link to vmalert (#7088)
### Describe Your Changes

Add link to VMalert when proxy is enabled. The link is displayed when
the `-vmalert.proxyURL` flag is present.

#5924


![image](https://github.com/user-attachments/assets/c45ca884-8912-4bd9-a867-df5919f278a1)


---------

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-09-27 13:22:22 +02:00
Yury Molodov
8657d03433 vmui/logs: improve graph usability (#7025)
### Describe Your Changes

- Show the time range in the tooltip when hovering over staircase
graphs.
- Use bolder lines for staircase graphs.
- Increase the number of steps on the staircase graph to 100.
- Reduce the maximum width of the tooltip to 1/3 of the screen.
- Insert only the label name under the cursor into the query input field
when `Ctrl`-clicking the line legend.

See [this
comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6545#issuecomment-2336805237).


---------

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-09-27 13:19:46 +02:00
Aliaksandr Valialkin
09b309a82e lib/logstorage: allow using - instead of ! as a shorthand for NOT operator in LogsQL 2024-09-27 13:14:47 +02:00
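For example, the following two filters are now equivalent:

    error -debug

    error !debug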
Aliaksandr Valialkin
76c1b0b8ea lib/logstorage: support skipping _stream: prefix for stream filters
'_stream:{...}' can be written as '{...}'

This simplifies writing queries with stream filters, and makes them more familiar to Loki users.
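
For example, the following two queries are now equivalent (`app` is a hypothetical stream label):

    _stream:{app="nginx"} error

    {app="nginx"} error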
2024-09-27 13:14:46 +02:00
Hui Wang
fbde238cdc stream aggregation: support configuring multiple labels per `remoteWrite.url` using `-remoteWrite.streamAggr.dropInputLabels` (#7073)

Before, the labels were applied to all the `remoteWrite.url` entries.

address https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6780

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-09-27 12:21:09 +02:00
Yury Molodov
c896bf340d vmui: add functionality to preserve selected columns (#7037)
### Describe Your Changes

1) Changed table settings from a popup to a modal window to simplify
future functionality additions.
2) Added functionality to save selected columns when data is modified or
the page is reloaded. See #7016.

<details>
  <summary>Example screenshots</summary>
  
<img alt="demo-1" width="600"
src="https://github.com/user-attachments/assets/a5d9a910-363c-4931-8b12-18ea8b3d97d8"/>
  
</details>



---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-09-27 11:52:01 +02:00
Github Actions
2d26d3e3de Automatic update operator docs from VictoriaMetrics/operator@db0d09f (#7111)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-27 11:42:10 +02:00
Github Actions
ea3b20622a Automatic update helm docs from VictoriaMetrics/helm-charts@ed0f351 (#7115)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-09-27 11:41:24 +02:00
Aliaksandr Valialkin
b670b0e9ff deployment: update VictoriaLogs docker image tag from v0.30.0-victorialogs to v0.30.1-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.30.1-victorialogs
2024-09-27 11:26:15 +02:00
Aliaksandr Valialkin
8077585303 docs/VictoriaLogs/CHANGELOG.md: cut v0.30.1-victorialogs release 2024-09-27 11:20:58 +02:00
Aliaksandr Valialkin
9367a9a6a2 lib/logstorage: consistently sort stream contexts belonging to different streams by the minimum time seen in the matching logs
This should simplify debugging of stream_context output, since it remains stable over repeated requests.
2024-09-27 11:19:26 +02:00
Aliaksandr Valialkin
b49d1ea809 lib/logstorage: add _msg="---" delimiter between different log streams in stream_context output
This should help when investigating contexts which belong to different log streams.
2024-09-27 11:01:13 +02:00
Aliaksandr Valialkin
13cc4a2618 docs/VictoriaLogs/LogsQL.md: use proper heading for blocks_count pipe docs
All the links in the docs assume that the heading for pipe docs should end with the word `pipe`.
This fixes broken links.

This is a follow-up for e9950f6307
2024-09-27 09:45:46 +02:00
Aliaksandr Valialkin
6f1fde24dc deployment: update VictoriaLogs docker image from v0.29.0-victorialogs to v0.30.0-victorialogs
See https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.30.0-victorialogs
2024-09-27 09:34:09 +02:00
Aliaksandr Valialkin
31117c66d5 docs/VictoriaLogs/CHANGELOG.md: cut v0.30.0-victorialogs release 2024-09-27 09:18:31 +02:00
Aliaksandr Valialkin
f65b976eda vendor: run make vendor-update 2024-09-26 22:33:05 +02:00
Aliaksandr Valialkin
b82bd0c2ec lib/logstorage: improve performance for stream_context pipe over streams with big number of log entries
Do not read timestamps for blocks, which cannot contain surrounding logs.
This should improve performance for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6730 .

Also optimize min(_time) and max(_time) calculations a bit by avoiding conversion
of timestamp to string when it isn't needed.
This should improve performance for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7070 .
2024-09-26 22:22:23 +02:00
Aliaksandr Valialkin
3646724c6f lib/contextutil: make golangci-lint happy by substituting the unused function arg name with _
This is a follow-up for 4b1611267f
2024-09-26 17:06:48 +02:00
Aliaksandr Valialkin
4b1611267f lib/logstorage: properly return surrounding logs outside the selected time range by stream_context pipe
Previously only logs inside the selected time range could be returned by the stream_context pipe.
For example, the following query could return up to 10 surrounding logs only for the last 5 minutes,
while most users expect this query to return up to 10 surrounding logs without restrictions on the time range.

    _time:5m panic | stream_context before 10

This enables the ability to implement stream context feature at VictoriaLogs web UI: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7063 .

Reduce memory usage when returning stream context over big log streams with millions of entries.
The new logic scans over all the log messages for the selected log stream, while keeping in memory only
the given number of surrounding logs. Previously all the logs for the given log stream on the selected time range
were loaded in memory before selecting the needed surrounding logs.
This should help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6730 .

Improve the scan performance for big log streams by fetching only the requested fields. For example, the following
query should be executed much faster than before if logs contain many fields other than _stream, _msg and _time:

    panic | stream_context after 30 | fields _stream, _msg, _time
2024-09-26 17:03:45 +02:00
Aliaksandr Valialkin
037652d5ae app/vlinsert: support _time field without timezone information during data ingestion
Use local timezone of the host server in this case. The timezone can be overridden
with TZ environment variable if needed.

While at it, allow using whitespace instead of T as a delimiter between date and time
in the ingested _time field. For example, '2024-09-20 10:20:30' is now accepted
during data ingestion. This is valid ISO8601 format, which is used by some log shippers,
so it should be supported. This format is also known as SQL datetime format.

Also assume local time zone when time without timezone information is passed to querying APIs.
Previously such a time was parsed in UTC timezone. Add `Z` to the end of the time string
if the old behaviour is preferred.
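
For example (a sketch of the two behaviours):

    2024-09-20 10:20:30     <- parsed using the local timezone of the host
    2024-09-20 10:20:30Z    <- parsed as UTC, matching the old behaviour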

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6721
2024-09-26 12:49:35 +02:00
Aliaksandr Valialkin
6b775ca68c app/vlinsert/insertutils: add a link to docs why _msg field must be non-empty 2024-09-26 09:53:17 +02:00
Aliaksandr Valialkin
7c86835f3c docs/VictoriaLogs/CHANGELOG.md: typo fix: itentifying -> identifying 2024-09-26 09:41:30 +02:00
Zhu Jiekun
7185fe012b feature: [victorialogs] drop logs without non-empty _msg field (#7056)
### Describe Your Changes

VictoriaLogs allowed logs without a `_msg` field, or with an empty `_msg` field.
This led to incorrect search results. See:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785

This pull request checks for a non-empty `_msg` field before a log entry is
added to `LogRows`.

New counter `vl_rows_dropped_total{reason="msg_not_exist"}` is
introduced.

Example log output:
```
2024-09-23T02:33:19.719Z        warn    app/vlinsert/insertutils/common_params.go:189   dropping log line without _msg field; [{@timestamp 2024-09-18T13:42:16.600000000Z} {Attributes.array.attribute ["many","values"]} {Attributes.boolean.attribute true} {Attributes.double.attribute 637.704} {Attributes.int.attribute 10} {Attributes.map.attribute.some.map.key some value} {Attributes.string.attribute some string} {Body Example ddddddddddlog record} {Resource.service.name my.service} {Scope.my.scope.attribute some scope attribute} {Scope.name my.library} {Scope.version 1.0.0} {SeverityNumber 10} {SeverityText Information} {SpanId eee19b7ec3c1b174} {TraceFlags 0} {TraceId 5b8efff798038103d269b633813fc60c}]
```

### Checklist

The following checks are **mandatory**:

- [x] My change adheres [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/contributing/).
- [ ] Benchmark for potential performance loss.

---------

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2024-09-26 09:35:28 +02:00
Aliaksandr Valialkin
2ec0cfec62 docs/VictoriaLogs/CHANGELOG.md: document the fix for Windows build
This is a follow-up for 264c2ec6bd

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6998
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6973
2024-09-26 09:15:27 +02:00
Aliaksandr Valialkin
c6b2cac892 docs/VictoriaLogs/CHANGELOG.md: typo fix after 255d1d4e13: returns -> return 2024-09-26 09:00:55 +02:00
Aliaksandr Valialkin
255d1d4e13 app/vlselect/logsql: clone the query with the current timestamp when performing live tailing requests in the loop
Previously the original timestamp was used in the copied query, so _time:duration filters
were applied to the original time range: (timestamp-duration ... timestamp]. This resulted
in stopped live tailing, since new logs have timestamps bigger than the original time range.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7028
2024-09-26 08:57:23 +02:00
Github Actions
c89a7a0b62 Automatic update operator docs from VictoriaMetrics/operator@5271a59 (#7099)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-25 23:23:08 +02:00
Aliaksandr Valialkin
c66da8b0ba docs/LTS-releases.md: consistently use v prefix in front of VictoriaMetrics releases 2024-09-25 19:29:30 +02:00
Aliaksandr Valialkin
e9950f6307 lib/logstorage: add blocks_count pipe
This pipe is useful for debugging purposes when the number of processed blocks must be calculated for the given query:

    <query> | blocks_count

This helps detect the root cause of query performance slowdown in cases like https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7070
2024-09-25 19:17:48 +02:00
Aliaksandr Valialkin
65b93b17b1 lib/logstorage: lazily read column headers metadata during queries
This improves performance for analytical queries, which do not need column headers metadata.
For example, the following query doesn't need column headers metadata, since _stream and min(_time)
are stored in the block header, which is read separately from column headers metadata:

  _time:1w | stats by (_stream) min(_time) min_time

This commit significantly improves the performance for this query.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7070
2024-09-25 19:17:48 +02:00
Aliaksandr Valialkin
4599429f51 lib/logstorage: read timestamps column when it is really needed during query execution
Previously the timestamps column was read unconditionally on every query.
This could significantly slow down queries which do not need to read this column,
like in https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7070 .
2024-09-25 19:17:47 +02:00
Andrii Chubatiuk
f934f71708 docs/victorialogs/data-ingestion: removed FluentBit Elasticsearch from examples (#7093)
Removed the FluentBit Elasticsearch example from the docs, since custom headers are
not supported by the elasticsearch output until
https://github.com/fluent/fluent-bit/pull/9416 is merged and released

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6985

2024-09-25 18:43:26 +02:00
Andrii Chubatiuk
e75ae1b274 deployment: restructure victorialogs examples (#6971)
### Describe Your Changes

- Use common compose.yaml file for all victorialogs setups to set
version in a single place and override it on demand for each agent and
protocol
- Replaced multiple victorialogs instances in HA setup with single setup
with `deploy.replica` parameter set
- Added fluentd setup

2024-09-25 18:33:26 +02:00
Github Actions
612be0954c Automatic update operator docs from VictoriaMetrics/operator@1feab7d (#7092)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-25 15:02:24 +02:00
Roman Khavronenko
6b1b47df54 app/vmalert: bump default values for sending data to remoteWrite.url (#7084)
* `remoteWrite.maxQueueSize` from `100_000` to `1_000_000`; this should
improve resiliency of recording rules that produce many series;
* `remoteWrite.maxBatchSize` from `1_000` to `10_000`; this should be
more efficient from a networking perspective;
* `remoteWrite.concurrency` from `1` to `4`; this should improve the speed
of sending the generated series.

The new settings should improve remote write performance of vmalert with
default settings.


---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Hui Wang <haley@victoriametrics.com>
2024-09-25 15:01:39 +02:00
Aliaksandr Valialkin
7f1ba18719 lib/logstorage: improve the performance of obtaining _stream column value
Substitute global streamTagsCache with per-blockSearch cache for ((stream.id) -> (_stream value)) entries.
This improves scalability of obtaining _stream values on a machine with many CPU cores, since every CPU
has its own blockSearch instance.

This also should reduce memory usage when querying logs over big number of streams, since per-blockSearch
cache of ((stream.id) -> (_stream value)) entries is limited in size, and its lifetime is bounded by a single query.
2024-09-24 20:57:00 +02:00
Aliaksandr Valialkin
cf2e7d0d92 lib/logstorage/consts.go: document that it isn't recommended setting maxColumnsPerBlock constant to too big values
This should help avoid cases like this one - https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6425#issuecomment-2337446083
2024-09-24 18:51:46 +02:00
Aliaksandr Valialkin
f86e093b20 lib/logstorage: improve performance for streamID.marshalString() by more than 2x
The streamID.marshalString() is executed in hot path if the query selects _stream_id field.

Command to run the benchmark:

go test ./lib/logstorage/ -run=NONE -bench=BenchmarkStreamIDMarshalString -benchtime=5s

Results before the commit:

BenchmarkStreamIDMarshalString-16    	438480714	        14.04 ns/op	  71.23 MB/s	       0 B/op	       0 allocs/op

Results after the commit:

BenchmarkStreamIDMarshalString-16    	982459660	         6.049 ns/op	 165.30 MB/s	       0 B/op	       0 allocs/op
2024-09-24 18:35:04 +02:00
Aliaksandr Valialkin
919d2dc90e lib/logstorage: add benchmark for streamID.marshalString 2024-09-24 18:31:38 +02:00
Roman Khavronenko
9a0f697622 docs: update CONTRIBUTING.md with practical requirements (#7087)
The change supposed to have more practical recommendations and reflect
the real processes for maintaining the project.


Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-24 18:22:18 +02:00
Github Actions
e28265fa39 Automatic update operator docs from VictoriaMetrics/operator@27ad7e1 (#7086)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: f41gh7 <18450869+f41gh7@users.noreply.github.com>
2024-09-24 15:47:30 +02:00
hagen1778
8bb3f2fd43 lib/promscrape: make linter happy
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-24 15:12:55 +02:00
hagen1778
c7569dac50 lib/promscrape: temporary disable TestClientProxyReadOk
This test is very flaky and prevents other tests from running in CI.
Disabling this test should improve test quality, since it isn't reliable anyway.

There is a ticket to fix this test - https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7062

Once fixed, this test should be uncommented.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-24 14:59:25 +02:00
Zhu Jiekun
5319acb8ed vmagent: remote write respect Retry-After in header (#6124)
### Describe Your Changes
related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6097

#### Changed
- The remote write retry policy in `vmagent` is changed to:
  1. Respect the `Retry-After` duration if it exists.
  2. Otherwise, calculate the next retry duration by the backoff policy (x2) and
the max retry duration limit (see the sketch after this list).
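
A minimal sketch of this policy in Go (a hypothetical helper, not vmagent's actual code):

```
package main

import (
	"net/http"
	"strconv"
	"time"
)

// nextRetryDelay honors the Retry-After response header when present,
// otherwise it doubles the previous backoff delay up to maxDelay.
func nextRetryDelay(resp *http.Response, prevDelay, maxDelay time.Duration) time.Duration {
	if v := resp.Header.Get("Retry-After"); v != "" {
		// Retry-After may be a number of seconds...
		if secs, err := strconv.Atoi(v); err == nil && secs >= 0 {
			return time.Duration(secs) * time.Second
		}
		// ...or an HTTP date.
		if t, err := http.ParseTime(v); err == nil {
			return time.Until(t)
		}
	}
	d := prevDelay * 2 // plain x2 backoff otherwise
	if d > maxDelay {
		d = maxDelay
	}
	return d
}

func main() {
	resp := &http.Response{Header: http.Header{"Retry-After": []string{"30"}}}
	_ = nextRetryDelay(resp, 2*time.Second, time.Minute)
}
```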
 
#### Docs
- `CHANGELOG.md`.

---

---------

Co-authored-by: Zakhar Bessarab <me@zekker-dev.tk>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
2024-09-24 12:44:03 +02:00
Dmytro Kozlov
cbeb7d50e8 lib/promscrape: show only unhealthy targets if show_only_unhealthy filter is enabled (#6960)
### Describe Your Changes

It is better to show only unhealthy targets instead of all of them when
`show_only_unhealthy` filter is enabled.
Related issue:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3536


---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-09-24 12:18:24 +02:00
Phuong Le
df665a13c9 docs: update logos files and usage rules (#6980)
### Describe Your Changes

New logos and usage guideline

2024-09-24 11:53:58 +02:00
Zhu Jiekun
fea4433362 docs: [VictoriaLogs] OTel Collector elasticsearchexporter header note (#7074)
### Describe Your Changes

By default, the `elasticsearchexporter` in OTel Collector puts the log
message under a field other than `_msg` (e.g., `Body`). Without
specifying the message field via an HTTP header, those logs may not be queried correctly.
See also:
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785.

This pull request updates the example configuration and notes for the
`elasticsearchexporter`.

2024-09-24 11:52:09 +02:00
Dmytro Kozlov
91b28d0527 deployment/docker: update grafana datasources to the latest version (#7083)
### Describe Your Changes

Updated grafana plugins to the latest releases 

2024-09-24 11:51:18 +02:00
Github Actions
524579d9bd Automatic update operator docs from VictoriaMetrics/operator@75bc1b4 (#7080)
Automated changes by
[create-pull-request](https://github.com/peter-evans/create-pull-request)
GitHub action

Signed-off-by: Github Actions <133988544+victoriametrics-bot@users.noreply.github.com>
Co-authored-by: AndrewChubatiuk <3162380+AndrewChubatiuk@users.noreply.github.com>
2024-09-24 11:50:50 +02:00
hagen1778
a5c002edef deployment/alerts: fix copy&paste typo in TooHighGoroutineSchedulingLatency
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-24 11:48:19 +02:00
Roman Khavronenko
4d0b41e63b deployment: add panel and alerts for displying go scheduler latency (#7078)
The panel and alerting rule should help understand whether a VM
component doesn't have enough CPU resources or gets throttled. The alert
is applicable to all VM components.
The panel was added to the vmalert, vmagent, vmsingle, VM cluster and
VictoriaLogs dashboards.

-------------------

This alerting rule should have helped us identify the resource shortage for
the sandbox vmagent - see [this
link](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/?g0.range_input=23d13h25m25s424ms&g0.end_input=2024-09-23T14%3A11%3A00&g0.relative_time=none&g0.tab=0&g0.expr=histogram_quantile%280.99%2C+sum%28rate%28go_sched_latencies_seconds_bucket%7Bjob%3D%22vmagent-monitoring-vmagent%22%7D%5B5m%5D%29%29+by+%28le%2C+job%2C+instance%29%29+%3E+0.1)
for example. We weren't aware of the resource shortage, because VM metrics
assumed this vmagent had 1 vCPU while in fact its limit was 0.2 vCPU.
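
The alerting expression from the playground link above, URL-decoded for readability:

    histogram_quantile(0.99, sum(rate(go_sched_latencies_seconds_bucket{job="vmagent-monitoring-vmagent"}[5m])) by (le, job, instance)) > 0.1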

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2024-09-23 16:54:42 +02:00
Aliaksandr Valialkin
109772bdc4 lib/cgroup: round GOMAXPROCS to the lower integer value of cpuQuota
Rounding GOMAXPROCS to the upper integer value of cpuQuota increases the chances of CPU starvation,
non-optimal goroutine scheduling and additional CPU overhead related to context switching.

So it is better to round GOMAXPROCS to the lower integer value of cpuQuota.
2024-09-23 16:09:12 +02:00
Aliaksandr Valialkin
3964889705 app/vmselect/promql: consistently replace NaN data points with non-NaN values for range_first and range_last functions
It is expected that the range_first and range_last functions return a non-NaN const value across all the points
if the original series contains at least a single non-NaN value. Previously this rule was violated for NaN data points
in the original series. This could confuse users.

While at it, add tests for series with NaN values across all the range_* and running_* functions, in order to maintain
consistent handling of NaN values across these functions.
2024-09-23 14:59:29 +02:00
2181 changed files with 158689 additions and 69913 deletions

View File

@@ -60,8 +60,8 @@ body:
For VictoriaMetrics health-state issues please provide full-length screenshots
of Grafana dashboards if possible:
-* [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229/)
-* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176/)
+* [Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/grafana/dashboards/10229)
+* [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176)
See how to setup monitoring here:
* [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)

View File

@@ -85,9 +85,38 @@ jobs:
restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
- name: Run tests
-run: make ${{ matrix.scenario}}
+run: GOGC=10 make ${{ matrix.scenario}}
- name: Publish coverage
-uses: codecov/codecov-action@v4
+uses: codecov/codecov-action@v5
with:
file: ./coverage.txt
integration-test:
name: integration-test
needs: [lint, test]
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4
- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
cache: false
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
- name: Run integration tests
run: make integration-test

View File

@@ -175,7 +175,7 @@
END OF TERMS AND CONDITIONS
-Copyright 2019-2024 VictoriaMetrics, Inc.
+Copyright 2019-2025 VictoriaMetrics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -27,6 +27,7 @@ include package/release/Makefile
all: \
victoria-metrics-prod \
victoria-logs-prod \
vlogscli-prod \
vmagent-prod \
vmalert-prod \
vmalert-tool-prod \
@@ -51,6 +52,7 @@ publish: \
package: \
package-victoria-metrics \
package-victoria-logs \
package-vlogscli \
package-vmagent \
package-vmalert \
package-vmalert-tool \
@@ -263,6 +265,14 @@ release-victoria-metrics-windows-goarch: victoria-metrics-windows-$(GOARCH)-prod
cd bin && rm -rf \
victoria-metrics-windows-$(GOARCH)-prod.exe
release-victoria-logs-bundle: \
release-victoria-logs \
release-vlogscli
publish-victoria-logs-bundle: \
publish-victoria-logs \
publish-vlogscli
release-victoria-logs:
$(MAKE_PARALLEL) release-victoria-logs-linux-386 \
release-victoria-logs-linux-amd64 \
@@ -320,6 +330,63 @@ release-victoria-logs-windows-goarch: victoria-logs-windows-$(GOARCH)-prod
cd bin && rm -rf \
victoria-logs-windows-$(GOARCH)-prod.exe
release-vlogscli:
$(MAKE_PARALLEL) release-vlogscli-linux-386 \
release-vlogscli-linux-amd64 \
release-vlogscli-linux-arm \
release-vlogscli-linux-arm64 \
release-vlogscli-darwin-amd64 \
release-vlogscli-darwin-arm64 \
release-vlogscli-freebsd-amd64 \
release-vlogscli-openbsd-amd64 \
release-vlogscli-windows-amd64
release-vlogscli-linux-386:
GOOS=linux GOARCH=386 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-linux-amd64:
GOOS=linux GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-linux-arm:
GOOS=linux GOARCH=arm $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-linux-arm64:
GOOS=linux GOARCH=arm64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-darwin-amd64:
GOOS=darwin GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-darwin-arm64:
GOOS=darwin GOARCH=arm64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-freebsd-amd64:
GOOS=freebsd GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-openbsd-amd64:
GOOS=openbsd GOARCH=amd64 $(MAKE) release-vlogscli-goos-goarch
release-vlogscli-windows-amd64:
GOARCH=amd64 $(MAKE) release-vlogscli-windows-goarch
release-vlogscli-goos-goarch: vlogscli-$(GOOS)-$(GOARCH)-prod
cd bin && \
tar $(TAR_OWNERSHIP) --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf vlogscli-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
vlogscli-$(GOOS)-$(GOARCH)-prod \
&& sha256sum vlogscli-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
vlogscli-$(GOOS)-$(GOARCH)-prod \
| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > vlogscli-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf vlogscli-$(GOOS)-$(GOARCH)-prod
release-vlogscli-windows-goarch: vlogscli-windows-$(GOARCH)-prod
cd bin && \
zip vlogscli-windows-$(GOARCH)-$(PKG_TAG).zip \
vlogscli-windows-$(GOARCH)-prod.exe \
&& sha256sum vlogscli-windows-$(GOARCH)-$(PKG_TAG).zip \
vlogscli-windows-$(GOARCH)-prod.exe \
> vlogscli-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf \
vlogscli-windows-$(GOARCH)-prod.exe
release-vmutils: \
release-vmutils-linux-386 \
release-vmutils-linux-amd64 \
@@ -434,29 +501,34 @@ pprof-cpu:
fmt:
gofmt -l -w -s ./lib
gofmt -l -w -s ./app
gofmt -l -w -s ./apptest
vet:
go vet ./lib/...
go vet ./app/...
go vet ./apptest/...
check-all: fmt vet golangci-lint govulncheck
clean-checkers: remove-golangci-lint remove-govulncheck
test:
-DISABLE_FSYNC_FOR_TESTING=1 go test ./lib/... ./app/...
+go test ./lib/... ./app/...
test-race:
-DISABLE_FSYNC_FOR_TESTING=1 go test -race ./lib/... ./app/...
+go test -race ./lib/... ./app/...
test-pure:
-DISABLE_FSYNC_FOR_TESTING=1 CGO_ENABLED=0 go test ./lib/... ./app/...
+CGO_ENABLED=0 go test ./lib/... ./app/...
test-full:
-DISABLE_FSYNC_FOR_TESTING=1 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
test-full-386:
-DISABLE_FSYNC_FOR_TESTING=1 GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
integration-test: victoria-metrics vmagent vmalert vmauth
go test ./apptest/... -skip="^TestCluster.*"
benchmark:
go test -bench=. ./lib/...
@@ -495,7 +567,7 @@ golangci-lint: install-golangci-lint
golangci-lint run
install-golangci-lint:
-which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.60.3
+which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.63.4
remove-golangci-lint:
rm -rf `which golangci-lint`

View File

@@ -1,12 +1,14 @@
# VictoriaMetrics
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
[![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics.svg?maxAge=604800)](https://hub.docker.com/r/victoriametrics/victoria-metrics)
[![Slack](https://img.shields.io/badge/join%20slack-%23victoriametrics-brightgreen.svg)](https://slack.victoriametrics.com/)
[![GitHub license](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
[![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
[![codecov](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
![Latest Release](https://img.shields.io/github/v/release/VictoriaMetrics/VictoriaMetrics?sort=semver&label=&filter=!*-victorialogs&logo=github&labelColor=gray&color=gray&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Freleases%2Flatest)
![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics?label=&logo=docker&logoColor=white&labelColor=2496ED&color=2496ED&link=https%3A%2F%2Fhub.docker.com%2Fr%2Fvictoriametrics%2Fvictoria-metrics)
![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics?link=https%3A%2F%2Fgoreportcard.com%2Freport%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics)
![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/workflows/main/badge.svg?link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Factions)
![codecov](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics/branch/master/graph/badge.svg?link=https%3A%2F%2Fcodecov.io%2Fgh%2FVictoriaMetrics%2FVictoriaMetrics)
![License](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics?labelColor=green&label=&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Fblob%2Fmaster%2FLICENSE)
![Slack](https://img.shields.io/badge/Join-4A154B?logo=slack&link=https%3A%2F%2Fslack.victoriametrics.com)
![X](https://img.shields.io/twitter/follow/VictoriaMetrics?style=flat&label=Follow&color=black&logo=x&labelColor=black&link=https%3A%2F%2Fx.com%2FVictoriaMetrics)
![Reddit](https://img.shields.io/reddit/subreddit-subscribers/VictoriaMetrics?style=flat&label=Join&labelColor=red&logoColor=white&logo=reddit&link=https%3A%2F%2Fwww.reddit.com%2Fr%2FVictoriaMetrics)
<picture>
<source srcset="docs/logo_white.webp" media="(prefers-color-scheme: dark)">
@@ -22,8 +24,8 @@ Here are some resources and information about VictoriaMetrics:
- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/casestudies/).
- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/enterprise/)
- Changelog: [CHANGELOG](https://docs.victoriametrics.com/changelog/), and [How to upgrade](#how-to-upgrade-victoriametrics)
- Community: [Slack](https://slack.victoriametrics.com/), [Twitter](https://twitter.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)
- Changelog: [CHANGELOG](https://docs.victoriametrics.com/changelog/), and [How to upgrade](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics)
- Community: [Slack](https://slack.victoriametrics.com/), [X (Twitter)](https://x.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)
Yes, we open-source both the single-node VictoriaMetrics and the cluster version.
@@ -38,17 +40,17 @@ VictoriaMetrics is optimized for timeseries data, even when old time series are
* **Easy to set up**: No dependencies, single [small binary](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d), configuration through command-line flags with fine-tuned defaults; backup and restore with [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
* **Global query view**: Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics and be queried via a single query.
* **Various Protocols**: Supports metric scraping, ingestion and backfilling via various protocols:
* [Prometheus exporters](#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](#prometheus-setup), [Prometheus exposition format](#how-to-import-data-in-prometheus-exposition-format).
* [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
* [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
* [OpenTSDB put message](#sending-data-via-telnet-put-protocol).
* [HTTP OpenTSDB /api/put requests](#sending-opentsdb-data-via-http-apiput-requests).
* [JSON line format](#how-to-import-data-in-json-line-format).
* [Arbitrary CSV data](#how-to-import-csv-data).
* [Native binary format](#how-to-import-data-in-native-format).
* [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
* [NewRelic infrastructure agent](#how-to-send-data-from-newrelic-agent).
* [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
* [Prometheus exporters](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/#prometheus-setup), [Prometheus exposition format](https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format).
* [InfluxDB line protocol](https://docs.victoriametrics.com/#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
* [Graphite plaintext protocol](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
* [OpenTSDB put message](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol).
* [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/#sending-opentsdb-data-via-http-apiput-requests).
* [JSON line format](https://docs.victoriametrics.com/#how-to-import-data-in-json-line-format).
* [Arbitrary CSV data](https://docs.victoriametrics.com/#how-to-import-csv-data).
* [Native binary format](https://docs.victoriametrics.com/#how-to-import-data-in-native-format).
* [DataDog agent or DogStatsD](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent).
* [NewRelic infrastructure agent](https://docs.victoriametrics.com/#how-to-send-data-from-newrelic-agent).
* [OpenTelemetry metrics format](https://docs.victoriametrics.com/#sending-data-via-opentelemetry).
* **NFS-based storages**: Supports storing data on NFS-based storages such as Amazon EFS, Google Filestore.
* And many other features such as metrics relabeling, cardinality limiter, etc.
@@ -84,7 +86,7 @@ Some good benchmarks VictoriaMetrics achieved:
Feel free to ask any questions about VictoriaMetrics:
* [Slack Inviter](https://slack.victoriametrics.com/) and [Slack channel](https://victoriametrics.slack.com/)
* [Twitter](https://twitter.com/VictoriaMetrics/)
* [X (Twitter)](https://x.com/VictoriaMetrics/)
* [LinkedIn](https://www.linkedin.com/company/victoriametrics/)
* [Reddit](https://www.reddit.com/r/VictoriaMetrics/)
* [Telegram-en](https://t.me/VictoriaMetrics_en)
@@ -95,30 +97,31 @@ If you like VictoriaMetrics and want to contribute, then please [read these docs
## VictoriaMetrics Logo
[Zip](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different image orientations (main color and inverted version).
The provided [ZIP file](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different logo orientations. Each folder includes the following file types:
Files included in each folder:
* JPEG: Preview files
* PNG: Preview files with transparent background
* AI: Adobe Illustrator files
* 2 JPEG Preview files
* 2 PNG Preview files with transparent background
* 2 EPS Adobe Illustrator EPS10 files
### VictoriaMetrics Logo Usage Guidelines
### Logo Usage Guidelines
#### Font
#### Font used
* Lato Black
* Lato Regular
* Font Used: Lato Black
* Download here: [Lato Font](https://fonts.google.com/specimen/Lato)
#### Color Palette
* HEX [#110f0f](https://www.color-hex.com/color/110f0f)
* HEX [#ffffff](https://www.color-hex.com/color/ffffff)
* Black [#000000](https://www.color-hex.com/color/000000)
* Purple [#4d0e82](https://www.color-hex.com/color/4d0e82)
* Orange [#ff2e00](https://www.color-hex.com/color/ff2e00)
* White [#ffffff](https://www.color-hex.com/color/ffffff)
### We kindly ask
### Logo Usage Rules
* Please don't use any other font instead of suggested.
* To keep enough clear space around the logo.
* Do not change spacing, alignment, or relative locations of the design elements.
* Do not change the proportions for any of the design elements or the design itself.
You may resize as needed but must retain all proportions.
* Only use the Lato Black font as specified.
* Maintain sufficient clear space around the logo for visibility.
* Do not modify the spacing, alignment, or positioning of design elements.
* You may resize the logo as needed, but ensure all proportions remain intact.
Thank you for your cooperation!

Binary file not shown.

View File

@@ -92,6 +92,9 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
if vlselect.RequestHandler(w, r) {
return true
}
if vlstorage.RequestHandler(w, r) {
return true
}
return false
}

View File

@@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
@@ -39,9 +40,24 @@ var (
"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
"Smaller intervals increase disk IO load. Minimum supported value is 1s")
maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmsingle can receive per second. Data ingestion is paused when the limit is exceeded. "+
"By default there are no limits on samples ingestion rate.")
finalDedupScheduleInterval = flag.Duration("storage.finalDedupScheduleCheckInterval", time.Hour, "The interval for checking when the final deduplication process should be started. "+
"Storage unconditionally adds 25% jitter to the interval value on each check evaluation. "+
"Increasing the interval may delay downsampling and deduplication of historical data. "+
"See also https://docs.victoriametrics.com/#deduplication")
)
func main() {
// VictoriaMetrics is optimized for reduced memory allocations,
// so it can run with a reduced GOGC value in order to lower memory usage
// while keeping the CPU time spent in GC low.
//
// Some workloads may need increased GOGC values, which can be set via the GOGC environment variable.
// It is recommended to increase GOGC if the go_memstats_gc_cpu_fraction metric exposed at the /metrics page
// exceeds 0.05 for extended periods of time.
cgroup.SetGOGC(30)
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
@@ -74,8 +90,13 @@ func main() {
startTime := time.Now()
storage.SetDedupInterval(*minScrapeInterval)
storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
if *finalDedupScheduleInterval < time.Hour {
logger.Fatalf("-dedup.finalDedupScheduleCheckInterval cannot be smaller than 1 hour; got %s", *finalDedupScheduleInterval)
}
storage.SetFinalDedupScheduleInterval(*finalDedupScheduleInterval)
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
vmselect.Init()
vminsertcommon.StartIngestionRateLimiter(*maxIngestionRate)
vminsert.Init()
startSelfScraper()
@@ -97,6 +118,7 @@ func main() {
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
vminsert.Stop()
vminsertcommon.StopIngestionRateLimiter()
vmstorage.Stop()
vmselect.Stop()
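Note on the -storage.finalDedupScheduleCheckInterval flag above: storage unconditionally adds 25% jitter on each check evaluation, so with the default of 1h the effective check interval falls into the [1h, 1h15m] range. A minimal standalone sketch of such a jitter calculation (the rand-based formula is an assumption for illustration, not the storage package's actual code):
package main
import (
	"fmt"
	"math/rand"
	"time"
)
func main() {
	interval := time.Hour // the -storage.finalDedupScheduleCheckInterval default
	// hypothetical jitter: pick a random value in [0, 25%] of the interval
	jitter := time.Duration(rand.Float64() * 0.25 * float64(interval))
	fmt.Println("next final dedup check in", interval+jitter)
}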

View File

@@ -10,9 +10,10 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
)
var (
@@ -48,7 +49,7 @@ func selfScraper(scrapeInterval time.Duration) {
var bb bytesutil.ByteBuffer
var rows prometheus.Rows
var mrs []storage.MetricRow
var labels []prompb.Label
var labels []prompbmarshal.Label
t := time.NewTicker(scrapeInterval)
f := func(currentTime time.Time, sendStaleMarkers bool) {
currentTimestamp := currentTime.UnixNano() / 1e6
@@ -68,6 +69,10 @@ func selfScraper(scrapeInterval time.Duration) {
t := &r.Tags[j]
labels = addLabel(labels, t.Key, t.Value)
}
if timeserieslimits.IsExceeding(labels) {
// Skip the metric if its labels exceed the configured limits.
continue
}
if len(mrs) < cap(mrs) {
mrs = mrs[:len(mrs)+1]
} else {
@@ -99,11 +104,11 @@ func selfScraper(scrapeInterval time.Duration) {
}
}
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
func addLabel(dst []prompbmarshal.Label, key, value string) []prompbmarshal.Label {
if len(dst) < cap(dst) {
dst = dst[:len(dst)+1]
} else {
dst = append(dst, prompb.Label{})
dst = append(dst, prompbmarshal.Label{})
}
lb := &dst[len(dst)-1]
lb.Name = key

View File

@@ -2,10 +2,10 @@
"name": "subquery-aggregation",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
"data": [
"forms_daily_count;item=x 1 {TIME_S-1m}",
"forms_daily_count;item=x 2 {TIME_S-2m}",
"forms_daily_count;item=y 3 {TIME_S-1m}",
"forms_daily_count;item=y 4 {TIME_S-2m}"],
"forms_daily_count;item=x 1 {TIME_S-59s}",
"forms_daily_count;item=x 2 {TIME_S-1m59s}",
"forms_daily_count;item=y 3 {TIME_S-59s}",
"forms_daily_count;item=y 4 {TIME_S-1m59s}"],
"query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}&latency_offset=1ms"],
"result_query": {
"status":"success",

View File

@@ -0,0 +1,274 @@
package datadog
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)
var (
datadogStreamFields = flagutil.NewArrayString("datadog.streamFields", "Datadog tags to be used as stream fields.")
datadogIgnoreFields = flagutil.NewArrayString("datadog.ignoreFields", "Datadog tags to ignore.")
)
var parserPool fastjson.ParserPool
// RequestHandler processes Datadog insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
case "/api/v1/validate":
fmt.Fprintf(w, `{}`)
return true
case "/api/v2/logs":
return datadogLogsIngestion(w, r)
default:
return false
}
}
func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
w.Header().Add("Content-Type", "application/json")
startTime := time.Now()
v2LogsRequestsTotal.Inc()
reader := r.Body
var ts int64
if tsValue := r.Header.Get("dd-message-timestamp"); tsValue != "" && tsValue != "0" {
var err error
ts, err = strconv.ParseInt(tsValue, 10, 64)
if err != nil {
httpserver.Errorf(w, r, "could not parse dd-message-timestamp header value: %s", err)
return true
}
ts *= 1e6
} else {
ts = startTime.UnixNano()
}
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot read gzipped logs request: %s", err)
return true
}
defer common.PutGzipReader(zr)
reader = zr
}
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return true
}
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
if len(cp.StreamFields) == 0 {
cp.StreamFields = *datadogStreamFields
}
if len(cp.IgnoreFields) == 0 {
cp.IgnoreFields = *datadogIgnoreFields
}
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
lmp := cp.NewLogMessageProcessor("datadog")
err = readLogsRequest(ts, data, lmp)
lmp.MustClose()
if err != nil {
logger.Warnf("cannot decode log message in /api/v2/logs request: %s, stream fields: %s", err, cp.StreamFields)
return true
}
// Update v2LogsRequestDuration only for successfully parsed requests.
// There is no need to update v2LogsRequestDuration for request errors,
// since their timings are usually much smaller than the timing of successful request parsing.
v2LogsRequestDuration.UpdateDuration(startTime)
fmt.Fprintf(w, `{}`)
return true
}
var (
v2LogsRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/datadog/api/v2/logs"}`)
v2LogsRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
)
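For reference, a minimal client-side sketch for sending a single log record to the /api/v2/logs handler above. The host and port are assumptions (a VictoriaLogs instance is assumed to listen on localhost:9428), and the dd-message-timestamp header is interpreted by the handler as milliseconds:
package main
import (
	"fmt"
	"net/http"
	"strings"
)
func main() {
	body := `[{"message":"hello","ddtags":"env:prod","hostname":"h1","service":"test"}]`
	req, err := http.NewRequest("POST", "http://localhost:9428/insert/datadog/api/v2/logs", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	// milliseconds; the handler multiplies this value by 1e6 to get nanoseconds
	req.Header.Set("dd-message-timestamp", "1718773640123")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}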
// The Datadog message field has two formats:
// - a regular log message with string text
// - a nested JSON format for serverless plugins, which has the following structure:
// {"message": {"message": "text", "lambda": {"arn": "string", "requestID": "string"}, "timestamp": int64}}
//
// See https://github.com/DataDog/datadog-lambda-extension/blob/28b90c7e4e985b72d60b5f5a5147c69c7ac693c4/bottlecap/src/logs/lambda/mod.rs#L24
func appendMsgFields(fields []logstorage.Field, v *fastjson.Value) ([]logstorage.Field, error) {
switch v.Type() {
case fastjson.TypeString:
val := v.GetStringBytes()
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(val),
})
case fastjson.TypeObject:
var firstErr error
v.GetObject().Visit(func(k []byte, v *fastjson.Value) {
if firstErr != nil {
return
}
switch bytesutil.ToUnsafeString(k) {
case "message":
val := v.GetStringBytes()
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(val),
})
case "status":
val := v.GetStringBytes()
fields = append(fields, logstorage.Field{
Name: "status",
Value: bytesutil.ToUnsafeString(val),
})
case "lamdba":
obj, err := v.Object()
if err != nil {
firstErr = err
firstErr = fmt.Errorf("unexpected lambda value type for %q:%q; want object", k, v)
return
}
obj.Visit(func(k []byte, v *fastjson.Value) {
if firstErr != nil {
return
}
val, err := v.StringBytes()
if err != nil {
firstErr = fmt.Errorf("unexpected lambda label value type for %q:%q; want string", k, v)
return
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(val),
})
})
}
})
default:
return fields, fmt.Errorf("unsupported message type %q", v.Type().String())
}
return fields, nil
}
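A quick in-package illustration of how appendMsgFields flattens the nested serverless format (the function name exampleAppendMsgFields and all field values are made up for this sketch):
// hypothetical in-package sketch; assumes the fastjson and fmt imports shown above
func exampleAppendMsgFields() {
	var p fastjson.Parser
	v, err := p.Parse(`{"message":"text","status":"info","lambda":{"arn":"arn:aws:lambda:us-east-1:0:function:fn","requestID":"req-1"}}`)
	if err != nil {
		panic(err)
	}
	fields, err := appendMsgFields(nil, v)
	if err != nil {
		panic(err)
	}
	// fields now contains _msg="text", status="info", plus the flattened lambda labels arn and requestID
	fmt.Println(fields)
}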
// readLogsRequest parses data according to DataDog logs format
// https://docs.datadoghq.com/api/latest/logs/#send-logs
func readLogsRequest(ts int64, data []byte, lmp insertutils.LogMessageProcessor) error {
p := parserPool.Get()
defer parserPool.Put(p)
v, err := p.ParseBytes(data)
if err != nil {
return fmt.Errorf("cannot parse JSON request body: %w", err)
}
records, err := v.Array()
if err != nil {
return fmt.Errorf("cannot extract array from parsed JSON: %w", err)
}
var fields []logstorage.Field
for _, r := range records {
o, err := r.Object()
if err != nil {
return fmt.Errorf("could not extract log record: %w", err)
}
o.Visit(func(k []byte, v *fastjson.Value) {
if err != nil {
return
}
switch bytesutil.ToUnsafeString(k) {
case "message":
fields, err = appendMsgFields(fields, v)
if err != nil {
return
}
case "timestamp":
val, e := v.Int64()
if e != nil {
err = fmt.Errorf("failed to parse timestamp for %q:%q", k, v)
}
if val > 0 {
ts = val * 1e6
}
case "ddtags":
// https://docs.datadoghq.com/getting_started/tagging/
val, e := v.StringBytes()
if e != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
var pair []byte
idx := 0
for idx >= 0 {
idx = bytes.IndexByte(val, ',')
if idx < 0 {
pair = val
} else {
pair = val[:idx]
val = val[idx+1:]
}
if len(pair) > 0 {
n := bytes.IndexByte(pair, ':')
if n < 0 {
// No tag value.
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(pair),
Value: "no_label_value",
})
} else {
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(pair[:n]),
Value: bytesutil.ToUnsafeString(pair[n+1:]),
})
}
}
}
default:
val, e := v.StringBytes()
if e != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(val),
})
}
})
if err != nil {
return err
}
lmp.AddRow(ts, fields, nil)
fields = fields[:0]
}
return nil
}

View File

@@ -0,0 +1,104 @@
package datadog
import (
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestReadLogsRequestFailure(t *testing.T) {
f := func(data string) {
t.Helper()
ts := time.Now().UnixNano()
lmp := &insertutils.TestLogMessageProcessor{}
if err := readLogsRequest(ts, []byte(data), lmp); err == nil {
t.Fatalf("expecting non-empty error")
}
if err := lmp.Verify(nil, ""); err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
f("foobar")
f(`{}`)
f(`["create":{}]`)
f(`{"create":{}}
foobar`)
}
func TestReadLogsRequestSuccess(t *testing.T) {
f := func(data string, rowsExpected int, resultExpected string) {
t.Helper()
ts := time.Now().UnixNano()
var timestampsExpected []int64
for i := 0; i < rowsExpected; i++ {
timestampsExpected = append(timestampsExpected, ts)
}
lmp := &insertutils.TestLogMessageProcessor{}
if err := readLogsRequest(ts, []byte(data), lmp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := lmp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
// Verify non-empty data
data := `[
{
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"bar",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":{"message": "nested"},
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"foobar",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"baz",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":"tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}, {
"ddsource": "nginx",
"ddtags":"tag1:value1,tag2:value2,",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}, {
"ddsource":"nginx",
"ddtags":",tag1:value1,tag2:value2",
"hostname":"127.0.0.1",
"message":"xyz",
"service":"test"
}
]`
rowsExpected := 7
resultExpected := `{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"bar","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"nested","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"foobar","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"baz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}`
f(data, rowsExpected, resultExpected)
}

View File

@@ -1,14 +1,10 @@
package elasticsearch
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"math"
"net/http"
"strconv"
"strings"
"time"
@@ -103,9 +99,10 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
httpserver.Errorf(w, r, "%s", err)
return true
}
lmp := cp.NewLogMessageProcessor()
lmp := cp.NewLogMessageProcessor("elasticsearch_bulk")
isGzip := r.Header.Get("Content-Encoding") == "gzip"
n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgField, lmp)
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
n, err := readBulkRequest(streamName, r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
lmp.MustClose()
if err != nil {
logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
@@ -131,11 +128,10 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
var (
bulkRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)
bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
)
func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string, lmp insertutils.LogMessageProcessor) (int, error) {
func readBulkRequest(streamName string, r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
if isGzip {
@@ -150,48 +146,29 @@ func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string, lmp i
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
lb := lineBufferPool.Get()
defer lineBufferPool.Put(lb)
lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
sc := bufio.NewScanner(wcr)
sc.Buffer(lb.B, len(lb.B))
lr := insertutils.NewLineReader(streamName, wcr)
n := 0
nCheckpoint := 0
for {
ok, err := readBulkLine(sc, timeField, msgField, lmp)
ok, err := readBulkLine(lr, timeField, msgFields, lmp)
wcr.DecConcurrency()
if err != nil || !ok {
rowsIngestedTotal.Add(n - nCheckpoint)
return n, err
}
n++
if batchSize := n - nCheckpoint; batchSize >= 1000 {
rowsIngestedTotal.Add(batchSize)
nCheckpoint = n
}
}
}
var lineBufferPool bytesutil.ByteBufferPool
func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils.LogMessageProcessor) (bool, error) {
func readBulkLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
var line []byte
// Read the command, must be "create" or "index"
for len(line) == 0 {
if !sc.Scan() {
if err := sc.Err(); err != nil {
if errors.Is(err, bufio.ErrTooLong) {
return false, fmt.Errorf(`cannot read "create" or "index" command, since its size exceeds -insert.maxLineSizeBytes=%d`,
insertutils.MaxLineSizeBytes.IntN())
}
return false, err
}
return false, nil
if !lr.NextLine() {
err := lr.Err()
return false, err
}
line = sc.Bytes()
line = lr.Line
}
lineStr := bytesutil.ToUnsafeString(line)
if !strings.Contains(lineStr, `"create"`) && !strings.Contains(lineStr, `"index"`) {
@@ -199,16 +176,18 @@ func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils
}
// Decode log message
if !sc.Scan() {
if err := sc.Err(); err != nil {
if errors.Is(err, bufio.ErrTooLong) {
return false, fmt.Errorf("cannot read log message, since its size exceeds -insert.maxLineSizeBytes=%d", insertutils.MaxLineSizeBytes.IntN())
}
if !lr.NextLine() {
if err := lr.Err(); err != nil {
return false, err
}
return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
}
line = sc.Bytes()
line = lr.Line
if len(line) == 0 {
// Special case - the line could be too long, so it was skipped.
// Continue parsing next lines.
return true, nil
}
p := logstorage.GetJSONParser()
if err := p.ParseLogMessage(line); err != nil {
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
@@ -221,8 +200,8 @@ func readBulkLine(sc *bufio.Scanner, timeField, msgField string, lmp insertutils
if ts == 0 {
ts = time.Now().UnixNano()
}
logstorage.RenameField(p.Fields, msgField, "_msg")
lmp.AddRow(ts, p.Fields)
logstorage.RenameField(p.Fields, msgFields, "_msg")
lmp.AddRow(ts, p.Fields, nil)
logstorage.PutJSONParser(p)
return true, nil
@@ -251,19 +230,8 @@ func parseElasticsearchTimestamp(s string) (int64, error) {
return 0, nil
}
if len(s) < len("YYYY-MM-DD") || s[len("YYYY")] != '-' {
// Try parsing timestamp in milliseconds
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse timestamp in milliseconds from %q: %w", s, err)
}
if n > int64(math.MaxInt64)/1e6 {
return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
}
if n < int64(math.MinInt64)/1e6 {
return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
}
n *= 1e6
return n, nil
// Try parsing timestamp in seconds or milliseconds
return insertutils.ParseUnixTimestamp(s)
}
if len(s) == len("YYYY-MM-DD") {
t, err := time.Parse("2006-01-02", s)

View File

@@ -15,7 +15,7 @@ func TestReadBulkRequest_Failure(t *testing.T) {
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
rows, err := readBulkRequest(r, false, "_time", "_msg", tlp)
rows, err := readBulkRequest("test", r, false, "_time", []string{"_msg"}, tlp)
if err == nil {
t.Fatalf("expecting non-empty error")
}
@@ -33,21 +33,22 @@ foobar`)
}
func TestReadBulkRequest_Success(t *testing.T) {
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
t.Helper()
msgFields := []string{"non_existing_foo", msgField, "non_exiting_bar"}
tlp := &insertutils.TestLogMessageProcessor{}
// Read the request without compression
r := bytes.NewBufferString(data)
rows, err := readBulkRequest(r, false, timeField, msgField, tlp)
rows, err := readBulkRequest("test", r, false, timeField, msgFields, tlp)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if rows != rowsExpected {
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
if rows != len(timestampsExpected) {
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
}
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
@@ -55,39 +56,41 @@ func TestReadBulkRequest_Success(t *testing.T) {
tlp = &insertutils.TestLogMessageProcessor{}
compressedData := compressData(data)
r = bytes.NewBufferString(compressedData)
rows, err = readBulkRequest(r, true, timeField, msgField, tlp)
rows, err = readBulkRequest("test", r, true, timeField, msgFields, tlp)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if rows != rowsExpected {
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
if rows != len(timestampsExpected) {
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
}
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatalf("verification failure after compression: %s", err)
}
}
// Verify empty data
f("", "_time", "_msg", 0, nil, "")
f("\n", "_time", "_msg", 0, nil, "")
f("\n\n", "_time", "_msg", 0, nil, "")
f("", "_time", "_msg", nil, "")
f("\n", "_time", "_msg", nil, "")
f("\n\n", "_time", "_msg", nil, "")
// Verify non-empty data
data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"@timestamp":"2023-06-06 04:48:12.735+01:00","message":"baz"}
{"index":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
{"message":"xyz","@timestamp":"1686026893735","x":"y"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"qwe rty","@timestamp":"1686026893"}
`
timeField := "@timestamp"
msgField := "message"
rowsExpected := 3
timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"@timestamp":"","_msg":"baz"}
{"_msg":"xyz","@timestamp":"","x":"y"}`
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000}
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"_msg":"baz"}
{"_msg":"xyz","x":"y"}
{"_msg":"qwe rty"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
}
func compressData(s string) string {

View File

@@ -32,7 +32,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
dataBytes := bytesutil.ToUnsafeBytes(data)
timeField := "@timestamp"
msgField := "message"
msgFields := []string{"message"}
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
@@ -41,7 +41,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
r := &bytes.Reader{}
for pb.Next() {
r.Reset(dataBytes)
_, err := readBulkRequest(r, isGzip, timeField, msgField, blp)
_, err := readBulkRequest("test", r, isGzip, timeField, msgFields, blp)
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}

View File

@@ -1,7 +1,10 @@
package insertutils
import (
"flag"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
@@ -16,15 +19,21 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)
var (
defaultMsgValue = flag.String("defaultMsgValue", "missing _msg field; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field",
"Default value for _msg field if the ingested log entry doesn't contain it; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field")
)
// CommonParams contains common HTTP parameters used by log ingestion APIs.
//
// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
type CommonParams struct {
TenantID logstorage.TenantID
TimeField string
MsgField string
MsgFields []string
StreamFields []string
IgnoreFields []string
ExtraFields []logstorage.Field
Debug bool
DebugRequestURI string
@@ -39,44 +48,25 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
return nil, err
}
// Extract time field name from _time_field query arg or header
timeField := "_time"
if tf := r.FormValue("_time_field"); tf != "" {
timeField = tf
} else if tf = r.Header.Get("VL-Time-Field"); tf != "" {
if tf := httputils.GetRequestValue(r, "_time_field", "VL-Time-Field"); tf != "" {
timeField = tf
}
// Extract message field name from _msg_field query arg or header
msgField := ""
if msgf := r.FormValue("_msg_field"); msgf != "" {
msgField = msgf
} else if msgf = r.Header.Get("VL-Msg-Field"); msgf != "" {
msgField = msgf
msgFields := httputils.GetArray(r, "_msg_field", "VL-Msg-Field")
streamFields := httputils.GetArray(r, "_stream_fields", "VL-Stream-Fields")
ignoreFields := httputils.GetArray(r, "ignore_fields", "VL-Ignore-Fields")
extraFields, err := getExtraFields(r)
if err != nil {
return nil, err
}
streamFields := httputils.GetArray(r, "_stream_fields")
if len(streamFields) == 0 {
if sf := r.Header.Get("VL-Stream-Fields"); len(sf) > 0 {
streamFields = strings.Split(sf, ",")
}
}
ignoreFields := httputils.GetArray(r, "ignore_fields")
if len(ignoreFields) == 0 {
if f := r.Header.Get("VL-Ignore-Fields"); len(f) > 0 {
ignoreFields = strings.Split(f, ",")
}
}
debug := httputils.GetBool(r, "debug")
if !debug {
if dh := r.Header.Get("VL-Debug"); len(dh) > 0 {
hv := strings.ToLower(dh)
switch hv {
case "", "0", "f", "false", "no":
default:
debug = true
}
debug := false
if dv := httputils.GetRequestValue(r, "debug", "VL-Debug"); dv != "" {
debug, err = strconv.ParseBool(dv)
if err != nil {
return nil, fmt.Errorf("cannot parse debug=%q: %w", dv, err)
}
}
debugRequestURI := ""
@@ -89,9 +79,10 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
cp := &CommonParams{
TenantID: tenantID,
TimeField: timeField,
MsgField: msgField,
MsgFields: msgFields,
StreamFields: streamFields,
IgnoreFields: ignoreFields,
ExtraFields: extraFields,
Debug: debug,
DebugRequestURI: debugRequestURI,
DebugRemoteAddr: debugRemoteAddr,
@@ -100,18 +91,45 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
return cp, nil
}
func getExtraFields(r *http.Request) ([]logstorage.Field, error) {
efs := httputils.GetArray(r, "extra_fields", "VL-Extra-Fields")
if len(efs) == 0 {
return nil, nil
}
extraFields := make([]logstorage.Field, len(efs))
for i, ef := range efs {
n := strings.Index(ef, "=")
if n <= 0 || n == len(ef)-1 {
return nil, fmt.Errorf(`invalid extra_field format: %q; must be in the form "field=value"`, ef)
}
extraFields[i] = logstorage.Field{
Name: ef[:n],
Value: ef[n+1:],
}
}
return extraFields, nil
}
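A small in-package sketch of what getExtraFields produces. The use of net/http/httptest and the assumption that httputils.GetArray collects repeated extra_fields query args are illustrative, not confirmed by this diff:
// hypothetical in-package example
func exampleGetExtraFields() {
	// assumes repeated extra_fields args; each value must be in the "field=value" form
	r := httptest.NewRequest("GET", "/insert/jsonline?extra_fields=env=prod&extra_fields=team=infra", nil)
	fields, err := getExtraFields(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(fields) // expected: [{env prod} {team infra}]
}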
// GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
func GetCommonParamsForSyslog(tenantID logstorage.TenantID) *CommonParams {
func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignoreFields []string, extraFields []logstorage.Field) *CommonParams {
// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
cp := &CommonParams{
TenantID: tenantID,
TimeField: "timestamp",
MsgField: "message",
StreamFields: []string{
if streamFields == nil {
streamFields = []string{
"hostname",
"app_name",
"proc_id",
}
}
cp := &CommonParams{
TenantID: tenantID,
TimeField: "timestamp",
MsgFields: []string{
"message",
},
StreamFields: streamFields,
IgnoreFields: ignoreFields,
ExtraFields: extraFields,
}
return cp
@@ -119,10 +137,12 @@ func GetCommonParamsForSyslog(tenantID logstorage.TenantID) *CommonParams {
// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
// AddRow must add row to the LogMessageProcessor with the given timestamp and the given fields.
// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.
//
// If streamFields is non-nil, then the given streamFields must be used as log stream fields instead of pre-configured fields.
//
// The LogMessageProcessor implementation must not hold references to fields, since the caller may re-use them.
AddRow(timestamp int64, fields []logstorage.Field)
AddRow(timestamp int64, fields, streamFields []logstorage.Field)
// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
MustClose()
@@ -136,6 +156,9 @@ type logMessageProcessor struct {
cp *CommonParams
lr *logstorage.LogRows
rowsIngestedTotal *metrics.Counter
bytesIngestedTotal *metrics.Counter
}
func (lmp *logMessageProcessor) initPeriodicFlush() {
@@ -165,18 +188,24 @@ func (lmp *logMessageProcessor) initPeriodicFlush() {
}
// AddRow adds new log message to lmp with the given timestamp and fields.
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
//
// If streamFields is non-nil, then it is used as log stream fields instead of the pre-configured stream fields.
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
lmp.mu.Lock()
defer lmp.mu.Unlock()
lmp.rowsIngestedTotal.Inc()
n := logstorage.EstimatedJSONRowLen(fields)
lmp.bytesIngestedTotal.Add(n)
if len(fields) > *MaxFieldsPerLine {
rf := logstorage.RowFormatter(fields)
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
line := logstorage.MarshalFieldsToJSON(nil, fields)
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, line)
rowsDroppedTotalTooManyFields.Inc()
return
}
lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields)
lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields, streamFields)
if lmp.cp.Debug {
s := lmp.lr.GetRowString(0)
lmp.lr.ResetKeepSettings()
@@ -209,12 +238,17 @@ func (lmp *logMessageProcessor) MustClose() {
// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
func (cp *CommonParams) NewLogMessageProcessor() LogMessageProcessor {
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
func (cp *CommonParams) NewLogMessageProcessor(protocolName string) LogMessageProcessor {
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.ExtraFields, *defaultMsgValue)
rowsIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_rows_ingested_total{type=%q}", protocolName))
bytesIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
lmp := &logMessageProcessor{
cp: cp,
lr: lr,
rowsIngestedTotal: rowsIngestedTotal,
bytesIngestedTotal: bytesIngestedTotal,
stopCh: make(chan struct{}),
}
lmp.initPeriodicFlush()

View File

@@ -8,8 +8,10 @@ import (
var (
// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")
MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have")
// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers")
MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#how-many-fields-a-single-log-entry-may-contain")
)

View File

@@ -0,0 +1,146 @@
package insertutils
import (
"bytes"
"errors"
"fmt"
"io"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// LineReader reads newline-delimited lines from the underlying reader
type LineReader struct {
// Line contains the next line read after the call to NextLine
//
// The Line contents remain valid only until the next call to NextLine.
Line []byte
// name is the LineReader name
name string
// r is the underlying reader to read data from
r io.Reader
// buf is a buffer for reading the next line
buf []byte
// bufOffset is the offset at buf to read the next line from
bufOffset int
// err is the last error when reading data from r
err error
// eofReached is set to true when all the data is read from r
eofReached bool
}
// NewLineReader returns LineReader for r.
func NewLineReader(name string, r io.Reader) *LineReader {
return &LineReader{
name: name,
r: r,
}
}
// NextLine reads the next line from the underlying reader.
//
// It returns true if the next line is successfully read into Line.
// If the line length exceeds MaxLineSizeBytes, then this line is skipped
// and an empty line is returned instead.
//
// If false is returned, then there are no more lines left to read from r;
// check Err() in this case.
func (lr *LineReader) NextLine() bool {
for {
if lr.bufOffset >= len(lr.buf) {
if lr.err != nil || lr.eofReached {
return false
}
if !lr.readMoreData() {
return false
}
if lr.bufOffset >= len(lr.buf) && lr.eofReached {
return false
}
}
buf := lr.buf[lr.bufOffset:]
if n := bytes.IndexByte(buf, '\n'); n >= 0 {
lr.Line = buf[:n]
lr.bufOffset += n + 1
return true
}
if lr.eofReached {
lr.Line = buf
lr.bufOffset += len(buf)
return true
}
if !lr.readMoreData() {
return false
}
}
}
// Err returns the last error after NextLine call.
func (lr *LineReader) Err() error {
if lr.err == nil {
return nil
}
return fmt.Errorf("%s: %s", lr.name, lr.err)
}
func (lr *LineReader) readMoreData() bool {
if lr.bufOffset > 0 {
lr.buf = append(lr.buf[:0], lr.buf[lr.bufOffset:]...)
lr.bufOffset = 0
}
bufLen := len(lr.buf)
if bufLen >= MaxLineSizeBytes.IntN() {
logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; line contents=%q", lr.name, MaxLineSizeBytes.IntN(), lr.buf)
tooLongLinesSkipped.Inc()
return lr.skipUntilNextLine()
}
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
n, err := lr.r.Read(lr.buf[bufLen:])
lr.buf = lr.buf[:bufLen+n]
if err != nil {
if errors.Is(err, io.EOF) {
lr.eofReached = true
return true
}
lr.err = fmt.Errorf("cannot read the next line: %s", err)
}
return n > 0
}
var tooLongLinesSkipped = metrics.NewCounter("vl_too_long_lines_skipped_total")
func (lr *LineReader) skipUntilNextLine() bool {
for {
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
n, err := lr.r.Read(lr.buf)
lr.buf = lr.buf[:n]
if err != nil {
if errors.Is(err, io.EOF) {
lr.eofReached = true
lr.buf = lr.buf[:0]
return true
}
lr.err = fmt.Errorf("cannot skip the current line: %s", err)
return false
}
if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
// Include \n in the buf, so the too-long line is replaced with an empty line.
// This is needed for maintaining synchronization between lines
// in protocols such as the Elasticsearch bulk import.
lr.buf = append(lr.buf[:0], lr.buf[n:]...)
return true
}
}
}
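A minimal usage sketch for LineReader (the import path follows this repository's layout; the input string is arbitrary):
package main
import (
	"fmt"
	"strings"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func main() {
	lr := insertutils.NewLineReader("example", strings.NewReader("first\nsecond\nthird"))
	for lr.NextLine() {
		// lr.Line is valid only until the next NextLine call, so copy it if it must be retained
		fmt.Printf("line: %q\n", lr.Line)
	}
	if err := lr.Err(); err != nil {
		fmt.Println("read error:", err)
	}
}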

View File

@@ -0,0 +1,161 @@
package insertutils
import (
"bytes"
"fmt"
"io"
"reflect"
"testing"
)
func TestLineReader_Success(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
r := bytes.NewBufferString(data)
lr := NewLineReader("foo", r)
var lines []string
for lr.NextLine() {
lines = append(lines, string(lr.Line))
}
if err := lr.Err(); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if lr.NextLine() {
t.Fatalf("expecting error on the second call to NextLine()")
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
}
}
f("", nil)
f("\n", []string{""})
f("\n\n", []string{"", ""})
f("foo", []string{"foo"})
f("foo\n", []string{"foo"})
f("\nfoo", []string{"", "foo"})
f("foo\n\n", []string{"foo", ""})
f("foo\nbar", []string{"foo", "bar"})
f("foo\nbar\n", []string{"foo", "bar"})
f("\nfoo\n\nbar\n\n", []string{"", "foo", "", "bar", ""})
}
func TestLineReader_SkipUntilNextLine(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
r := bytes.NewBufferString(data)
lr := NewLineReader("foo", r)
var lines []string
for lr.NextLine() {
lines = append(lines, string(lr.Line))
}
if err := lr.Err(); err != nil {
t.Fatalf("unexpected error for data=%q: %s", data, err)
}
if lr.NextLine() {
t.Fatalf("expecting error on the second call to NextLine()")
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines for data=%q\ngot\n%q\nwant\n%q", data, lines, linesExpected)
}
}
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
longLineLen := MaxLineSizeBytes.IntN() + overflow
longLine := string(make([]byte, longLineLen))
// Single long line
data := longLine
f(data, nil)
// Multiple long lines
data = longLine + "\n" + longLine
f(data, []string{""})
data = longLine + "\n" + longLine + "\n"
f(data, []string{"", ""})
// Long line in the middle
data = "foo\n" + longLine + "\nbar"
f(data, []string{"foo", "", "bar"})
// Multiple long lines in the middle
data = "foo\n" + longLine + "\n" + longLine + "\nbar"
f(data, []string{"foo", "", "", "bar"})
// Long line in the end
data = "foo\n" + longLine
f(data, []string{"foo"})
// Long line in the end
data = "foo\n" + longLine + "\n"
f(data, []string{"foo", ""})
}
}
func TestLineReader_Failure(t *testing.T) {
f := func(data string, linesExpected []string) {
t.Helper()
fr := &failureReader{
r: bytes.NewBufferString(data),
}
lr := NewLineReader("foo", fr)
var lines []string
for lr.NextLine() {
lines = append(lines, string(lr.Line))
}
if err := lr.Err(); err == nil {
t.Fatalf("expecting non-nil error")
}
if lr.NextLine() {
t.Fatalf("expecting error on the second call to NextLine()")
}
if err := lr.Err(); err == nil {
t.Fatalf("expecting non-nil error on the second call")
}
if !reflect.DeepEqual(lines, linesExpected) {
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
}
}
f("", nil)
f("foo", nil)
f("foo\n", []string{"foo"})
f("\n", []string{""})
f("foo\nbar", []string{"foo"})
f("foo\nbar\n", []string{"foo", "bar"})
f("\nfoo\nbar\n\n", []string{"", "foo", "bar", ""})
// long line
longLineLen := MaxLineSizeBytes.IntN()
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
longLine := string(make([]byte, longLineLen+overflow))
data := longLine
f(data, nil)
data = "foo\n" + longLine
f(data, []string{"foo"})
data = longLine + "\nfoo"
f(data, []string{""})
data = longLine + "\nfoo\n"
f(data, []string{"", "foo"})
}
}
type failureReader struct {
r io.Reader
}
func (r *failureReader) Read(p []byte) (int, error) {
n, _ := r.r.Read(p)
if n > 0 {
return n, nil
}
return 0, fmt.Errorf("some error")
}

View File

@@ -15,7 +15,10 @@ type TestLogMessageProcessor struct {
}
// AddRow adds row with the given timestamp and fields to tlp
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
if streamFields != nil {
panic(fmt.Errorf("BUG: streamFields must be nil; got %v", streamFields))
}
tlp.timestamps = append(tlp.timestamps, timestamp)
tlp.rows = append(tlp.rows, string(logstorage.MarshalFieldsToJSON(nil, fields)))
}
@@ -25,10 +28,10 @@ func (tlp *TestLogMessageProcessor) MustClose() {
}
// Verify verifies the number of rows, timestamps and results after AddRow calls.
func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected []int64, resultExpected string) error {
func (tlp *TestLogMessageProcessor) Verify(timestampsExpected []int64, resultExpected string) error {
result := strings.Join(tlp.rows, "\n")
if len(tlp.rows) != rowsExpected {
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), rowsExpected, result, resultExpected)
if len(tlp.rows) != len(timestampsExpected) {
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), len(timestampsExpected), result, resultExpected)
}
if !reflect.DeepEqual(tlp.timestamps, timestampsExpected) {
@@ -45,7 +48,7 @@ func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected
type BenchmarkLogMessageProcessor struct{}
// AddRow implements LogMessageProcessor interface.
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _ []logstorage.Field) {
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _, _ []logstorage.Field) {
}
// MustClose implements LogMessageProcessor interface.

View File

@@ -2,32 +2,69 @@ package insertutils
import (
"fmt"
"strconv"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
// ExtractTimestampRFC3339NanoFromFields extracts RFC3339 timestamp in nanoseconds from the field with the name timeField at fields.
// ExtractTimestampFromFields extracts timestamp in nanoseconds from the field with the name timeField at fields.
//
// The value for the timeField is set to an empty string after returning from the function,
// so it can be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with the timeField name or if the timeField value is empty.
func ExtractTimestampRFC3339NanoFromFields(timeField string, fields []logstorage.Field) (int64, error) {
func ExtractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
for i := range fields {
f := &fields[i]
if f.Name != timeField {
continue
}
if f.Value == "" || f.Value == "0" {
return time.Now().UnixNano(), nil
}
nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(f.Value)
if !ok {
return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp from %s=%q", timeField, f.Value)
nsecs, err := parseTimestamp(f.Value)
if err != nil {
return 0, fmt.Errorf("cannot parse timestamp from field %q: %s", timeField, err)
}
f.Value = ""
if nsecs == 0 {
nsecs = time.Now().UnixNano()
}
return nsecs, nil
}
return time.Now().UnixNano(), nil
}
func parseTimestamp(s string) (int64, error) {
if s == "" || s == "0" {
return time.Now().UnixNano(), nil
}
if len(s) <= len("YYYY") || s[len("YYYY")] != '-' {
return ParseUnixTimestamp(s)
}
nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
if !ok {
return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp %q", s)
}
return nsecs, nil
}
// ParseUnixTimestamp parses s as unix timestamp in seconds, milliseconds, microseconds or nanoseconds and returns the parsed timestamp in nanoseconds.
func ParseUnixTimestamp(s string) (int64, error) {
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
}
if n < (1<<31) && n >= (-1<<31) {
// The timestamp is in seconds.
return n * 1e9, nil
}
if n < 1e3*(1<<31) && n >= 1e3*(-1<<31) {
// The timestamp is in milliseconds.
return n * 1e6, nil
}
if n < 1e6*(1<<31) && n >= 1e6*(-1<<31) {
// The timestamp is in microseconds.
return n * 1e3, nil
}
// The timestamp is in nanoseconds
return n, nil
}
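The unit-inference boundaries in ParseUnixTimestamp can be read off directly: values below 2^31 are treated as seconds, below 1e3*2^31 as milliseconds, below 1e6*2^31 as microseconds, and anything larger as nanoseconds. A standalone sketch showing a few concrete inputs (the values match the tests later in this diff):
package main
import (
	"fmt"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func main() {
	for _, s := range []string{
		"1718773640",          // ~1.7e9, parsed as seconds       -> 1718773640000000000
		"1718773640123",       // ~1.7e12, parsed as milliseconds -> 1718773640123000000
		"1718773640123456",    // ~1.7e15, parsed as microseconds -> 1718773640123456000
		"1718773640123456789", // parsed as nanoseconds           -> 1718773640123456789
	} {
		nsecs, err := insertutils.ParseUnixTimestamp(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, "->", nsecs)
	}
}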

View File

@@ -6,11 +6,11 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
func TestExtractTimestampFromFields_Success(t *testing.T) {
f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
t.Helper()
nsecs, err := ExtractTimestampRFC3339NanoFromFields(timeField, fields)
nsecs, err := ExtractTimestampFromFields(timeField, fields)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
@@ -27,35 +27,63 @@ func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
}
}
// UTC time
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "2024-06-18T23:37:20Z"},
}, 1718753840000000000)
// Time with timezone
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
}, 1718725040000000000)
// SQL datetime format
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "2024-06-18T23:37:20.123-05:30"},
{Name: "time", Value: "2024-06-18 23:37:20.123-05:30"},
}, 1718773640123000000)
// Time with nanosecond precision
f("time", []logstorage.Field{
{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
{Name: "foo", Value: "bar"},
}, 1718773640123456789)
// Unix timestamp in nanoseconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640123456789"},
}, 1718773640123456789)
// Unix timestamp in microseconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640123456"},
}, 1718773640123456000)
// Unix timestamp in milliseconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640123"},
}, 1718773640123000000)
// Unix timestamp in seconds
f("time", []logstorage.Field{
{Name: "foo", Value: "bar"},
{Name: "time", Value: "1718773640"},
}, 1718773640000000000)
}
func TestExtractTimestampFromFields_Error(t *testing.T) {
f := func(s string) {
t.Helper()
fields := []logstorage.Field{
{Name: "time", Value: s},
}
nsecs, err := ExtractTimestampRFC3339NanoFromFields("time", fields)
nsecs, err := ExtractTimestampFromFields("time", fields)
if err == nil {
t.Fatalf("expecting non-nil error")
}
@@ -64,11 +92,9 @@ func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
}
}
// invalid time
f("foobar")
// incomplete time
f("2024-06-18")
f("2024-06-18T23:37")

View File

@@ -0,0 +1,250 @@
package journald
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"net/http"
"regexp"
"slices"
"strconv"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)
const (
journaldEntryMaxNameLen = 64
)
var (
bodyBufferPool bytesutil.ByteBufferPool
allowedJournaldEntryNameChars = regexp.MustCompile(`^[A-Z_][A-Z0-9_]+`)
)
var (
journaldStreamFields = flagutil.NewArrayString("journald.streamFields", "Journal fields to be used as stream fields. "+
"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
journaldIgnoreFields = flagutil.NewArrayString("journald.ignoreFields", "Journal fields to ignore. "+
"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
journaldTimeField = flag.String("journald.timeField", "__REALTIME_TIMESTAMP", "Journal field to be used as time field. "+
"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
journaldTenantID = flag.String("journald.tenantID", "0:0", "TenantID for logs ingested via the Journald endpoint.")
journaldIncludeEntryMetadata = flag.Bool("journald.includeEntryMetadata", false, "Include journal entry fields whose names start with double underscores.")
)
func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
cp, err := insertutils.GetCommonParams(r)
if err != nil {
return nil, err
}
if cp.TenantID.AccountID == 0 && cp.TenantID.ProjectID == 0 {
tenantID, err := logstorage.ParseTenantID(*journaldTenantID)
if err != nil {
return nil, fmt.Errorf("cannot parse -journald.tenantID=%q for journald: %w", *journaldTenantID, err)
}
cp.TenantID = tenantID
}
if cp.TimeField != "" {
cp.TimeField = *journaldTimeField
}
if len(cp.StreamFields) == 0 {
cp.StreamFields = *journaldStreamFields
}
if len(cp.IgnoreFields) == 0 {
cp.IgnoreFields = *journaldIgnoreFields
}
cp.MsgFields = []string{"MESSAGE"}
return cp, nil
}
// RequestHandler processes Journald Export insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
case "/upload":
if r.Header.Get("Content-Type") != "application/vnd.fdo.journal" {
httpserver.Errorf(w, r, "only application/vnd.fdo.journal encoding is supported for Journald")
return true
}
handleJournald(r, w)
return true
default:
return false
}
}
// handleJournald parses Journal binary entries
func handleJournald(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsJournaldTotal.Inc()
if err := vlstorage.CanWriteData(); err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
reader := r.Body
var err error
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}
writeconcurrencylimiter.PutReader(wcr)
bb := bodyBufferPool.Get()
defer bodyBufferPool.Put(bb)
if r.Header.Get("Content-Encoding") == "zstd" {
bb.B, err = zstd.Decompress(bb.B[:0], data)
if err != nil {
httpserver.Errorf(w, r, "cannot decompress zstd-encoded request with length %d: %s", len(data), err)
return
}
data = bb.B
}
cp, err := getCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
return
}
lmp := cp.NewLogMessageProcessor("journald")
err = parseJournaldRequest(data, lmp, cp)
lmp.MustClose()
if err != nil {
errorsTotal.Inc()
httpserver.Errorf(w, r, "cannot parse Journald protobuf request: %s", err)
return
}
// update requestJournaldDuration only for successfully parsed requests
// There is no need to update requestJournaldDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
requestJournaldDuration.UpdateDuration(startTime)
}
var (
requestsJournaldTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload"}`)
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload"}`)
requestJournaldDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/journald/upload"}`)
)
// See https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) error {
var fields []logstorage.Field
var ts int64
var size uint64
var name, value string
var line []byte
currentTimestamp := time.Now().UnixNano()
for len(data) > 0 {
idx := bytes.IndexByte(data, '\n')
switch {
case idx > 0:
// process fields
line = data[:idx]
data = data[idx+1:]
case idx == 0:
// end of the current message or end of the stream:
// a double newline separates consecutive messages
if len(fields) > 0 {
if ts == 0 {
ts = currentTimestamp
}
lmp.AddRow(ts, fields, nil)
fields = fields[:0]
}
// skip newline separator
data = data[1:]
continue
case idx < 0:
return fmt.Errorf("missing new line separator, unread data left=%d", len(data))
}
idx = bytes.IndexByte(line, '=')
// the line is either a key=value pair
// or just a key name followed by
// binary data in the buffer
if idx > 0 {
name = bytesutil.ToUnsafeString(line[:idx])
value = bytesutil.ToUnsafeString(line[idx+1:])
} else {
name = bytesutil.ToUnsafeString(line)
if len(data) == 0 {
return fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
}
// the size of the binary data is encoded as a little-endian uint64 at the beginning
idx, err := binary.Decode(data, binary.LittleEndian, &size)
if err != nil {
return fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
}
// skip the binary data size
data = data[idx:]
if size == 0 {
return fmt.Errorf("unexpected zero binary data size decoded %d", size)
}
if int(size) > len(data) {
return fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
}
value = bytesutil.ToUnsafeString(data[:size])
data = data[int(size):]
// binary data must be followed by a newline separator before the next field
if len(data) == 0 {
return fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
}
lastB := data[0]
if lastB != '\n' {
return fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
}
data = data[1:]
}
// https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
if len(name) > journaldEntryMaxNameLen {
return fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
}
if !allowedJournaldEntryNameChars.MatchString(name) {
return fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
}
if name == cp.TimeField {
n, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse Journald timestamp, %w", err)
}
ts = n * 1e3
continue
}
if slices.Contains(cp.MsgFields, name) {
name = "_msg"
}
if *journaldIncludeEntryMetadata || !strings.HasPrefix(name, "__") {
fields = append(fields, logstorage.Field{
Name: name,
Value: value,
})
}
}
if len(fields) > 0 {
if ts == 0 {
ts = currentTimestamp
}
lmp.AddRow(ts, fields, nil)
}
return nil
}
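A note on the binary branch above: in the journal export format a field line without '=' announces a binary value, encoded as a 64-bit little-endian payload size, the raw payload (newlines inside it are allowed) and a trailing newline. A hedged sketch of producing such an entry for tests (buildBinaryField is a hypothetical helper, not part of the diff; bytes and encoding/binary are already imported by this file):

// buildBinaryField lays out a journal-export binary field in the shape
// parseJournaldRequest expects: name, '\n', little-endian uint64 size, payload, '\n'.
func buildBinaryField(name string, payload []byte) []byte {
	var buf bytes.Buffer
	buf.WriteString(name) // bare field name, without '='
	buf.WriteByte('\n')
	var sizeBuf [8]byte
	binary.LittleEndian.PutUint64(sizeBuf[:], uint64(len(payload)))
	buf.Write(sizeBuf[:]) // payload size as little-endian uint64
	buf.Write(payload)    // raw payload; may contain newlines
	buf.WriteByte('\n')   // separator required by the parser
	return buf.Bytes()
}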

View File

@@ -0,0 +1,68 @@
package journald
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)
func TestPushJournaldOk(t *testing.T) {
f := func(src string, timestampsExpected []int64, resultExpected string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
cp := &insertutils.CommonParams{
TimeField: "__REALTIME_TIMESTAMP",
MsgFields: []string{"MESSAGE"},
}
if err := parseJournaldRequest([]byte(src), tlp, cp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
// Single event
f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n",
[]int64{91723819283000},
"{\"_msg\":\"Test message\"}",
)
// Multiple events
f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n\n__REALTIME_TIMESTAMP=91723819284\nMESSAGE=Test message2\n",
[]int64{91723819283000, 91723819284000},
"{\"_msg\":\"Test message\"}\n{\"_msg\":\"Test message2\"}",
)
// Parse binary data
f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\n__MONOTONIC_TIMESTAMP=206357648416\n__SEQNUM=7942\n__SEQNUM_ID=e0afe8412a6a49d2bfcf66aa7927b588\n_BOOT_ID=f778b6e2f7584a77b991a2366612a7b5\n_UID=0\n_GID=0\n_MACHINE_ID=a4a970370c30a925df02a13c67167847\n_HOSTNAME=ecd5e4555787\n_RUNTIME_SCOPE=system\n_TRANSPORT=journal\n_CAP_EFFECTIVE=1ffffffffff\n_SYSTEMD_CGROUP=/init.scope\n_SYSTEMD_UNIT=init.scope\n_SYSTEMD_SLICE=-.slice\nCODE_FILE=<stdin>\nCODE_LINE=1\nCODE_FUNC=<module>\nSYSLOG_IDENTIFIER=python3\n_COMM=python3\n_EXE=/usr/bin/python3.12\n_CMDLINE=python3\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasda\nasda\n_PID=2763\n_SOURCE_REALTIME_TIMESTAMP=1729698775704375\n\n",
[]int64{1729698775704404000},
"{\"_BOOT_ID\":\"f778b6e2f7584a77b991a2366612a7b5\",\"_UID\":\"0\",\"_GID\":\"0\",\"_MACHINE_ID\":\"a4a970370c30a925df02a13c67167847\",\"_HOSTNAME\":\"ecd5e4555787\",\"_RUNTIME_SCOPE\":\"system\",\"_TRANSPORT\":\"journal\",\"_CAP_EFFECTIVE\":\"1ffffffffff\",\"_SYSTEMD_CGROUP\":\"/init.scope\",\"_SYSTEMD_UNIT\":\"init.scope\",\"_SYSTEMD_SLICE\":\"-.slice\",\"CODE_FILE\":\"\\u003cstdin>\",\"CODE_LINE\":\"1\",\"CODE_FUNC\":\"\\u003cmodule>\",\"SYSLOG_IDENTIFIER\":\"python3\",\"_COMM\":\"python3\",\"_EXE\":\"/usr/bin/python3.12\",\"_CMDLINE\":\"python3\",\"_msg\":\"foo\\nbar\\n\\n\\nasda\\nasda\",\"_PID\":\"2763\",\"_SOURCE_REALTIME_TIMESTAMP\":\"1729698775704375\"}",
)
}
func TestPushJournald_Failure(t *testing.T) {
f := func(data string) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
cp := &insertutils.CommonParams{
TimeField: "__REALTIME_TIMESTAMP",
MsgFields: []string{"MESSAGE"},
}
if err := parseJournaldRequest([]byte(data), tlp, cp); err == nil {
t.Fatalf("expected non nil error")
}
}
// missing new line terminator for binary encoded message
f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasdaasda2")
// missing new line terminator
f("__REALTIME_TIMESTAMP=91723819283\n=Test message")
// empty field name
f("__REALTIME_TIMESTAMP=91723819283\n=Test message\n")
// field name starting with number
f("__REALTIME_TIMESTAMP=91723819283\n1incorrect=Test message\n")
// field name exceeds 64 limit
f("__REALTIME_TIMESTAMP=91723819283\ntoolooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongcorrecooooooooooooong=Test message\n")
// Only allow A-Z0-9 and '_'
f("__REALTIME_TIMESTAMP=91723819283\nbadC!@$!@$as=Test message\n")
}

View File

@@ -1,8 +1,6 @@
package jsonline
import (
"bufio"
"errors"
"fmt"
"io"
"net/http"
@@ -10,7 +8,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
@@ -52,8 +49,9 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
reader = zr
}
lmp := cp.NewLogMessageProcessor("jsonline")
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
err = processStreamInternal(streamName, reader, cp.TimeField, cp.MsgFields, lmp)
lmp.MustClose()
if err != nil {
@@ -66,20 +64,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
}
}
func processStreamInternal(streamName string, r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
lr := insertutils.NewLineReader(streamName, wcr)
n := 0
for {
ok, err := readLine(lr, timeField, msgFields, lmp)
wcr.DecConcurrency()
if err != nil {
errorsTotal.Inc()
@@ -89,45 +82,35 @@ func processStreamInternal(r io.Reader, timeField, msgField string, lmp insertut
return nil
}
n++
}
}
func readLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
var line []byte
for len(line) == 0 {
if !lr.NextLine() {
err := lr.Err()
return false, err
}
line = lr.Line
}
p := logstorage.GetJSONParser()
if err := p.ParseLogMessage(line); err != nil {
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
}
ts, err := insertutils.ExtractTimestampFromFields(timeField, p.Fields)
if err != nil {
return false, fmt.Errorf("cannot get timestamp: %w", err)
}
logstorage.RenameField(p.Fields, msgField, "_msg")
lmp.AddRow(ts, p.Fields)
logstorage.RenameField(p.Fields, msgFields, "_msg")
lmp.AddRow(ts, p.Fields, nil)
logstorage.PutJSONParser(p)
return true, nil
}
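Since readLine now obtains timestamps via ExtractTimestampFromFields, a single jsonline stream may mix RFC3339 and Unix timestamps in the time field. For example, all of the following lines (illustrative input, assuming the default _time field) resolve to the same ingestion timestamp:

{"_time":"2024-06-18T23:37:20.123Z","_msg":"hello"}
{"_time":"1718753840123","_msg":"hello"}
{"_time":"1718753840123000000","_msg":"hello"}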
var (
requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)

View File

@@ -8,32 +8,43 @@ import (
)
func TestProcessStreamInternal_Success(t *testing.T) {
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
t.Helper()
msgFields := []string{msgField}
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal("test", r, timeField, msgFields, tlp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
data := `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
{"@timestamp":"2023-06-06T04:48:12.735+01:00","message":"baz"}
{"message":"xyz","@timestamp":"2023-06-06 04:48:13.735Z","x":"y"}
`
timeField := "@timestamp"
msgField := "message"
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000}
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"_msg":"baz"}
{"_msg":"xyz","x":"y"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
// Non-existing msgField
data = `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735+01:00","message":"baz"}
`
timeField = "@timestamp"
msgField = "foobar"
timestampsExpected = []int64{1686026891735000000, 1686023292735000000}
resultExpected = `{"log.offset":"71770","log.file.path":"/var/log/auth.log","message":"foobar"}
{"message":"baz"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
}
func TestProcessStreamInternal_Failure(t *testing.T) {
@@ -42,7 +53,7 @@ func TestProcessStreamInternal_Failure(t *testing.T) {
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal(r, "time", "", tlp); err == nil {
if err := processStreamInternal("test", r, "time", nil, tlp); err == nil {
t.Fatalf("expecting non-nil error")
}
}

View File

@@ -8,6 +8,9 @@ import (
"strconv"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -15,8 +18,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
)
var parserPool fastjson.ParserPool
@@ -52,16 +53,15 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
httpserver.Errorf(w, r, "%s", err)
return
}
lmp := cp.NewLogMessageProcessor("loki_json")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = parseJSONRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot parse Loki json request: %s", err)
httpserver.Errorf(w, r, "cannot parse Loki json request: %s; data=%s", err, data)
return
}
// update requestJSONDuration only for successfully parsed requests
// There is no need to update requestJSONDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
@@ -69,31 +69,29 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
}
var (
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
)
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
p := parserPool.Get()
defer parserPool.Put(p)
v, err := p.ParseBytes(data)
if err != nil {
return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
return fmt.Errorf("cannot parse JSON request body: %w", err)
}
streamsV := v.Get("streams")
if streamsV == nil {
return 0, fmt.Errorf("missing `streams` item in the parsed JSON: %q", v)
return fmt.Errorf("missing `streams` item in the parsed JSON")
}
streams, err := streamsV.Array()
if err != nil {
return 0, fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
return fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
}
currentTimestamp := time.Now().UnixNano()
var commonFields []logstorage.Field
for _, stream := range streams {
// populate common labels from `stream` dict
commonFields = commonFields[:0]
@@ -102,14 +100,11 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
if labelsV != nil {
o, err := labelsV.Object()
if err != nil {
return rowsIngested, fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
return fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
}
labels = o
}
labels.Visit(func(k []byte, v *fastjson.Value) {
vStr, errLocal := v.StringBytes()
if errLocal != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
@@ -121,37 +116,37 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
})
})
if err != nil {
return rowsIngested, fmt.Errorf("error when parsing `stream` object: %w", err)
return fmt.Errorf("error when parsing `stream` object: %w", err)
}
// populate messages from `values` array
linesV := stream.Get("values")
if linesV == nil {
return rowsIngested, fmt.Errorf("missing `values` item in the parsed JSON %q", stream)
return fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
}
lines, err := linesV.Array()
if err != nil {
return rowsIngested, fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
return fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
}
fields := commonFields
for _, line := range lines {
lineA, err := line.Array()
if err != nil {
return rowsIngested, fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
return fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
}
if len(lineA) < 2 || len(lineA) > 3 {
return fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
}
// parse timestamp
timestamp, err := lineA[0].StringBytes()
if err != nil {
return rowsIngested, fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
return fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
}
ts, err := parseLokiTimestamp(bytesutil.ToUnsafeString(timestamp))
if err != nil {
return rowsIngested, fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
return fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
}
if ts == 0 {
ts = currentTimestamp
@@ -160,19 +155,46 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
// parse log message
msg, err := lineA[1].StringBytes()
if err != nil {
return rowsIngested, fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
return fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
}
fields = append(fields[:len(commonFields)], logstorage.Field{
Name: "_msg",
Value: bytesutil.ToUnsafeString(msg),
})
// parse structured metadata - see https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs
if len(lineA) > 2 {
structuredMetadata, err := lineA[2].Object()
if err != nil {
return fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
}
structuredMetadata.Visit(func(k []byte, v *fastjson.Value) {
vStr, errLocal := v.StringBytes()
if errLocal != nil {
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
return
}
fields = append(fields, logstorage.Field{
Name: bytesutil.ToUnsafeString(k),
Value: bytesutil.ToUnsafeString(vStr),
})
})
if err != nil {
return fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
}
}
var streamFields []logstorage.Field
if useDefaultStreamFields {
streamFields = commonFields
}
lmp.AddRow(ts, fields, streamFields)
}
}
return nil
}
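The optional third element of each `values` entry handled above is Loki's structured metadata. An illustrative push body (not from the diff) carrying it:

{"streams":[{"stream":{"app":"nginx"},"values":[["1718753840000000000","GET / HTTP/1.1 200",{"trace_id":"abc123"}]]}]}

The metadata keys land as ordinary log fields next to _msg, while the `stream` labels double as the default stream fields when the request doesn't specify its own.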
func parseLokiTimestamp(s string) (int64, error) {

View File

@@ -11,12 +11,11 @@ func TestParseJSONRequest_Failure(t *testing.T) {
t.Helper()
tlp := &insertutils.TestLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err == nil {
t.Fatalf("expecting non-nil error")
}
if err := tlp.Verify(nil, ""); err != nil {
t.Fatalf("unexpected error: %s", err)
}
}
f(``)
@@ -45,13 +44,19 @@ func TestParseJSONRequest_Failure(t *testing.T) {
// Invalid length of `values` individual item
f(`{"streams":[{"values":[[]]}]}`)
f(`{"streams":[{"values":[["123"]]}]}`)
f(`{"streams":[{"values":[["123","456","789"]]}]}`)
f(`{"streams":[{"values":[["123","456","789","8123"]]}]}`)
// Invalid type for timestamp inside `values` individual item
f(`{"streams":[{"values":[[123,"456"]}]}`)
// Invalid type for log message
f(`{"streams":[{"values":[["123",1234]]}]}`)
// invalid structured metadata type
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", ["metadata_1", "md_value"]]]}]}`)
// structured metadata with unexpected value type
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": 1}]] }]}`)
}
func TestParseJSONRequest_Success(t *testing.T) {
@@ -60,11 +65,10 @@ func TestParseJSONRequest_Success(t *testing.T) {
tlp := &insertutils.TestLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
@@ -116,4 +120,8 @@ func TestParseJSONRequest_Success(t *testing.T) {
}`, []int64{1577836800000000001, 1577836900005000002, 1877836900005000002}, `{"foo":"bar","a":"b","_msg":"foo bar"}
{"foo":"bar","a":"b","_msg":"abc"}
{"x":"y","_msg":"yx"}`)
// values with metadata
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": "md_value"}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar","metadata_1":"md_value"}`)
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
}

View File

@@ -28,8 +28,7 @@ func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
b.RunParallel(func(pb *testing.PB) {
data := getJSONBody(streams, rows, labels)
for pb.Next() {
if err := parseJSONRequest(data, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}

View File

@@ -44,16 +44,15 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
httpserver.Errorf(w, r, "%s", err)
return
}
lmp := cp.NewLogMessageProcessor("loki_protobuf")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = parseProtobufRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
return
}
// update requestProtobufDuration only for successfully parsed requests
// There is no need to update requestProtobufDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
@@ -61,18 +60,17 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
}
var (
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
)
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
bb := bytesBufPool.Get()
defer bytesBufPool.Put(bb)
buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
if err != nil {
return 0, fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
return fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
}
bb.B = buf
@@ -81,13 +79,12 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
err = req.UnmarshalProtobuf(bb.B)
if err != nil {
return 0, fmt.Errorf("cannot parse request body: %w", err)
return fmt.Errorf("cannot parse request body: %w", err)
}
fields := getFields()
defer putFields(fields)
streams := req.Streams
currentTimestamp := time.Now().UnixNano()
for i := range streams {
@@ -96,7 +93,7 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
// Labels are same for all entries in the stream.
fields.fields, err = parsePromLabels(fields.fields[:0], stream.Labels)
if err != nil {
return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
return fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
}
commonFieldsLen := len(fields.fields)
@@ -122,11 +119,14 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
ts = currentTimestamp
}
var streamFields []logstorage.Field
if useDefaultStreamFields {
streamFields = fields.fields[:commonFieldsLen]
}
lmp.AddRow(ts, fields.fields, streamFields)
}
}
return nil
}
func getFields() *fields {

View File

@@ -15,7 +15,10 @@ type testLogMessageProcessor struct {
pr PushRequest
}
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
if streamFields != nil {
panic(fmt.Errorf("unexpected non-nil streamFields: %v", streamFields))
}
msg := ""
for _, f := range fields {
if f.Name == "_msg" {
@@ -50,23 +53,21 @@ func TestParseProtobufRequest_Success(t *testing.T) {
t.Helper()
tlp := &testLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if len(tlp.pr.Streams) != len(timestampsExpected) {
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), len(timestampsExpected))
}
data := tlp.pr.MarshalProtobuf(nil)
encodedData := snappy.Encode(nil, data)
tlp2 := &insertutils.TestLogMessageProcessor{}
if err := parseProtobufRequest(encodedData, tlp2, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp2.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}

View File

@@ -31,8 +31,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
b.RunParallel(func(pb *testing.PB) {
body := getProtobufBody(streams, rows, labels)
for pb.Next() {
if err := parseProtobufRequest(body, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}

View File

@@ -17,7 +17,7 @@ var mp easyproto.MarshalerPool
// PushRequest represents Loki PushRequest
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L14
type PushRequest struct {
Streams []Stream
@@ -87,7 +87,7 @@ func (pr *PushRequest) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []Labe
// Stream represents Loki stream.
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L23
type Stream struct {
Labels string
Entries []Entry
@@ -139,7 +139,7 @@ func (s *Stream) unmarshalProtobuf(entriesBuf []Entry, labelPairBuf []LabelPair,
// Entry represents Loki entry.
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L38
type Entry struct {
Timestamp time.Time
Line string
@@ -203,7 +203,7 @@ func (e *Entry) unmarshalProtobuf(labelPairBuf []LabelPair, src []byte) ([]Label
// LabelPair represents Loki label pair.
//
// See https://github.com/grafana/loki/blob/ada4b7b8713385fbe9f5984a5a0aaaddf1a7b851/pkg/push/push.proto#L33
type LabelPair struct {
Name string
Value string

View File

@@ -1,10 +1,13 @@
package vlinsert
import (
"fmt"
"net/http"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/datadog"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/journald"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/opentelemetry"
@@ -24,6 +27,7 @@ func Stop() {
// RequestHandler handles insert requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
path := r.URL.Path
if !strings.HasPrefix(path, "/insert/") {
// Skip requests that do not start with /insert/, since these aren't our requests.
return false
@@ -31,9 +35,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
path = strings.TrimPrefix(path, "/insert")
path = strings.ReplaceAll(path, "//", "/")
if path == "/jsonline" {
switch path {
case "/jsonline":
jsonline.RequestHandler(w, r)
return true
case "/ready":
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
}
switch {
case strings.HasPrefix(path, "/elasticsearch/"):
@@ -45,6 +55,12 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
case strings.HasPrefix(path, "/opentelemetry/"):
path = strings.TrimPrefix(path, "/opentelemetry")
return opentelemetry.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/journald/"):
path = strings.TrimPrefix(path, "/journald")
return journald.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/datadog/"):
path = strings.TrimPrefix(path, "/datadog")
return datadog.RequestHandler(path, w, r)
default:
return false
}
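The new /ready branch gives orchestrators a cheap readiness probe on the insert port: for example, a GET request to http://localhost:9428/insert/ready (illustrative address) returns {"status":"ok"} with a 200 status code.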

View File

@@ -66,16 +66,15 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
return
}
lmp := cp.NewLogMessageProcessor("opentelelemtry_protobuf")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = pushProtobufRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot parse OpenTelemetry protobuf request: %s", err)
return
}
// update requestProtobufDuration only for successfully parsed requests
// There is no need to update requestProtobufDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
@@ -83,22 +82,19 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
}
var (
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
)
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
var req pb.ExportLogsServiceRequest
if err := req.UnmarshalProtobuf(data); err != nil {
errorsTotal.Inc()
return 0, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
return fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
}
var commonFields []logstorage.Field
for _, rl := range req.ResourceLogs {
attributes := rl.Resource.Attributes
@@ -109,16 +105,14 @@ func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int,
}
commonFieldsLen := len(commonFields)
for _, sc := range rl.ScopeLogs {
commonFields = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp, useDefaultStreamFields)
}
}
return nil
}
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) []logstorage.Field {
fields := commonFields
for _, lr := range sc.LogRecords {
fields = fields[:len(commonFields)]
@@ -137,7 +131,11 @@ func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field,
Value: lr.FormatSeverity(),
})
var streamFields []logstorage.Field
if useDefaultStreamFields {
streamFields = commonFields
}
lmp.AddRow(lr.ExtractTimestampNano(), fields, streamFields)
}
return fields
}

View File

@@ -16,12 +16,11 @@ func TestPushProtoOk(t *testing.T) {
pData := lr.MarshalProtobuf(nil)
tlp := &insertutils.TestLogMessageProcessor{}
if err := pushProtobufRequest(pData, tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}

View File

@@ -27,8 +27,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
b.RunParallel(func(pb *testing.PB) {
body := getProtobufBody(streams, rows, labels)
for pb.Next() {
if err := pushProtobufRequest(body, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}

View File

@@ -3,11 +3,13 @@ package syslog
import (
"bufio"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"net"
"sort"
"strconv"
"strings"
"sync"
@@ -35,10 +37,25 @@ var (
syslogTimezone = flag.String("syslog.timezone", "Local", "Timezone to use when parsing timestamps in RFC3164 syslog messages. Timezone must be a valid IANA Time Zone. "+
"For example: America/New_York, Europe/Berlin, Etc/GMT+3 . See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
streamFieldsTCP = flagutil.NewArrayString("syslog.streamFields.tcp", "Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields`)
streamFieldsUDP = flagutil.NewArrayString("syslog.streamFields.udp", "Fields to use as log stream labels for logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#stream-fields`)
ignoreFieldsTCP = flagutil.NewArrayString("syslog.ignoreFields.tcp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)
ignoreFieldsUDP = flagutil.NewArrayString("syslog.ignoreFields.udp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)
extraFieldsTCP = flagutil.NewArrayString("syslog.extraFields.tcp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
extraFieldsUDP = flagutil.NewArrayString("syslog.extraFields.udp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
tenantIDTCP = flagutil.NewArrayString("syslog.tenantID.tcp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.tcp. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy")
tenantIDUDP = flagutil.NewArrayString("syslog.tenantID.udp", "TenantID for logs ingested via the corresponding -syslog.listenAddr.udp. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#multitenancy")
listenAddrTCP = flagutil.NewArrayString("syslog.listenAddr.tcp", "Comma-separated list of TCP addresses to listen to for Syslog messages. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/")
@@ -150,7 +167,7 @@ func runUDPListener(addr string, argIdx int) {
logger.Fatalf("cannot start UDP syslog server at %q: %s", addr, err)
}
tenantIDStr := tenantIDUDP.GetOptionalArg(argIdx)
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.tenantID.udp=%q for -syslog.listenAddr.udp=%q: %s", tenantIDStr, addr, err)
@@ -161,9 +178,27 @@ func runUDPListener(addr string, argIdx int) {
useLocalTimestamp := useLocalTimestampUDP.GetOptionalArg(argIdx)
streamFieldsStr := streamFieldsUDP.GetOptionalArg(argIdx)
streamFields, err := parseFieldsList(streamFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.streamFields.udp=%q for -syslog.listenAddr.udp=%q: %s", streamFieldsStr, addr, err)
}
ignoreFieldsStr := ignoreFieldsUDP.GetOptionalArg(argIdx)
ignoreFields, err := parseFieldsList(ignoreFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.ignoreFields.udp=%q for -syslog.listenAddr.udp=%q: %s", ignoreFieldsStr, addr, err)
}
extraFieldsStr := extraFieldsUDP.GetOptionalArg(argIdx)
extraFields, err := parseExtraFields(extraFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.extraFields.udp=%q for -syslog.listenAddr.udp=%q: %s", extraFieldsStr, addr, err)
}
doneCh := make(chan struct{})
go func() {
serveUDP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
close(doneCh)
}()
@@ -193,7 +228,7 @@ func runTCPListener(addr string, argIdx int) {
logger.Fatalf("syslog: cannot start TCP listener at %s: %s", addr, err)
}
tenantIDStr := tenantIDTCP.GetOptionalArg(argIdx)
tenantID, err := logstorage.ParseTenantID(tenantIDStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.tenantID.tcp=%q for -syslog.listenAddr.tcp=%q: %s", tenantIDStr, addr, err)
@@ -204,9 +239,27 @@ func runTCPListener(addr string, argIdx int) {
useLocalTimestamp := useLocalTimestampTCP.GetOptionalArg(argIdx)
streamFieldsStr := streamFieldsTCP.GetOptionalArg(argIdx)
streamFields, err := parseFieldsList(streamFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.streamFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", streamFieldsStr, addr, err)
}
ignoreFieldsStr := ignoreFieldsTCP.GetOptionalArg(argIdx)
ignoreFields, err := parseFieldsList(ignoreFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.ignoreFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", ignoreFieldsStr, addr, err)
}
extraFieldsStr := extraFieldsTCP.GetOptionalArg(argIdx)
extraFields, err := parseExtraFields(extraFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.extraFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", extraFieldsStr, addr, err)
}
doneCh := make(chan struct{})
go func() {
serveTCP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
close(doneCh)
}()
@@ -228,7 +281,7 @@ func checkCompressMethod(compressMethod, addr, protocol string) {
}
}
func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
gomaxprocs := cgroup.AvailableCPUs()
var wg sync.WaitGroup
localAddr := ln.LocalAddr()
@@ -236,7 +289,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod st
wg.Add(1)
go func() {
defer wg.Done()
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
var bb bytesutil.ByteBuffer
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
for {
@@ -261,7 +314,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod st
}
bb.B = bb.B[:n]
udpRequestsTotal.Inc()
if err := processStream("udp", bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
logger.Errorf("syslog: cannot process UDP data from %s at %s: %s", remoteAddr, localAddr, err)
}
}
@@ -270,7 +323,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod st
wg.Wait()
}
func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
var cm ingestserver.ConnsMap
cm.Init("syslog")
@@ -300,8 +353,8 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod stri
wg.Add(1)
go func() {
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
if err := processStream("tcp", c, compressMethod, useLocalTimestamp, cp); err != nil {
logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
}
@@ -316,12 +369,12 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod stri
}
// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
func processStream(protocol string, r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
if err := vlstorage.CanWriteData(); err != nil {
return err
}
lmp := cp.NewLogMessageProcessor("syslog_" + protocol)
err := processStreamInternal(r, compressMethod, useLocalTimestamp, lmp)
lmp.MustClose()
@@ -383,7 +436,6 @@ func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertut
return fmt.Errorf("cannot read line #%d: %s", n, err)
}
n++
}
return slr.Error()
}
@@ -508,24 +560,56 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
if useLocalTimestamp {
ts = time.Now().UnixNano()
} else {
nsecs, err := insertutils.ExtractTimestampRFC3339NanoFromFields("timestamp", p.Fields)
nsecs, err := insertutils.ExtractTimestampFromFields("timestamp", p.Fields)
if err != nil {
return fmt.Errorf("cannot get timestamp from syslog line %q: %w", line, err)
}
ts = nsecs
}
logstorage.RenameField(p.Fields, "message", "_msg")
lmp.AddRow(ts, p.Fields)
logstorage.RenameField(p.Fields, msgFields, "_msg")
lmp.AddRow(ts, p.Fields, nil)
logstorage.PutSyslogParser(p)
return nil
}
var msgFields = []string{"message"}
var (
errorsTotal = metrics.NewCounter(`vl_errors_total{type="syslog"}`)
udpRequestsTotal = metrics.NewCounter(`vl_udp_reqests_total{type="syslog"}`)
udpErrorsTotal = metrics.NewCounter(`vl_udp_errors_total{type="syslog"}`)
)
func parseFieldsList(s string) ([]string, error) {
if s == "" {
return nil, nil
}
var a []string
err := json.Unmarshal([]byte(s), &a)
return a, err
}
func parseExtraFields(s string) ([]logstorage.Field, error) {
if s == "" {
return nil, nil
}
var m map[string]string
if err := json.Unmarshal([]byte(s), &m); err != nil {
return nil, err
}
fields := make([]logstorage.Field, 0, len(m))
for k, v := range m {
fields = append(fields, logstorage.Field{
Name: k,
Value: v,
})
}
sort.Slice(fields, func(i, j int) bool {
return fields[i].Name < fields[j].Name
})
return fields, nil
}
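parseFieldsList expects a JSON array and parseExtraFields a flat JSON object with string values, so the corresponding per-listener flags take values like these (illustrative):

-syslog.streamFields.tcp='["hostname","app_name"]'
-syslog.ignoreFields.tcp='["proc_id"]'
-syslog.extraFields.tcp='{"datacenter":"dc1","env":"prod"}'

Note that parseExtraFields sorts the resulting fields by name, so the ingested field order stays deterministic despite Go's randomized map iteration.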

View File

@@ -75,7 +75,7 @@ func TestSyslogLineReader_Failure(t *testing.T) {
}
func TestProcessStreamInternal_Success(t *testing.T) {
f := func(data string, currentYear int, timestampsExpected []int64, resultExpected string) {
t.Helper()
MustInit()
@@ -89,7 +89,7 @@ func TestProcessStreamInternal_Success(t *testing.T) {
if err := processStreamInternal(r, "", false, tlp); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}
@@ -99,12 +99,11 @@ func TestProcessStreamInternal_Success(t *testing.T) {
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
`
currentYear := 2023
timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
resultExpected := `{"format":"rfc3164","timestamp":"","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","timestamp":"","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","timestamp":"","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
f(data, currentYear, rowsExpected, timestampsExpected, resultExpected)
resultExpected := `{"format":"rfc3164","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
f(data, currentYear, timestampsExpected, resultExpected)
}
func TestProcessStreamInternal_Failure(t *testing.T) {

app/vlogscli/Makefile Normal file
View File

@@ -0,0 +1,109 @@
# All these commands must run from repository root.
vlogscli:
	APP_NAME=vlogscli $(MAKE) app-local
vlogscli-race:
	APP_NAME=vlogscli RACE=-race $(MAKE) app-local
vlogscli-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker
vlogscli-pure-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-pure
vlogscli-linux-amd64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-linux-amd64
vlogscli-linux-arm-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-linux-arm
vlogscli-linux-arm64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-linux-arm64
vlogscli-linux-ppc64le-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-linux-ppc64le
vlogscli-linux-386-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-linux-386
vlogscli-darwin-amd64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-darwin-amd64
vlogscli-darwin-arm64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-darwin-arm64
vlogscli-freebsd-amd64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-freebsd-amd64
vlogscli-openbsd-amd64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-openbsd-amd64
vlogscli-windows-amd64-prod:
	APP_NAME=vlogscli $(MAKE) app-via-docker-windows-amd64
package-vlogscli:
	APP_NAME=vlogscli $(MAKE) package-via-docker
package-vlogscli-pure:
	APP_NAME=vlogscli $(MAKE) package-via-docker-pure
package-vlogscli-amd64:
	APP_NAME=vlogscli $(MAKE) package-via-docker-amd64
package-vlogscli-arm:
	APP_NAME=vlogscli $(MAKE) package-via-docker-arm
package-vlogscli-arm64:
	APP_NAME=vlogscli $(MAKE) package-via-docker-arm64
package-vlogscli-ppc64le:
	APP_NAME=vlogscli $(MAKE) package-via-docker-ppc64le
package-vlogscli-386:
	APP_NAME=vlogscli $(MAKE) package-via-docker-386
publish-vlogscli:
	APP_NAME=vlogscli $(MAKE) publish-via-docker
vlogscli-linux-amd64:
	APP_NAME=vlogscli CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-linux-arm:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
vlogscli-linux-arm64:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
vlogscli-linux-ppc64le:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
vlogscli-linux-s390x:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
vlogscli-linux-loong64:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
vlogscli-linux-386:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
vlogscli-darwin-amd64:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-darwin-arm64:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
vlogscli-freebsd-amd64:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-openbsd-amd64:
	APP_NAME=vlogscli CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
vlogscli-windows-amd64:
	GOARCH=amd64 APP_NAME=vlogscli $(MAKE) app-local-windows-goarch
vlogscli-pure:
	APP_NAME=vlogscli $(MAKE) app-local-pure
run-vlogscli:
	APP_NAME=vlogscli $(MAKE) run-via-docker

app/vlogscli/README.md Normal file

@@ -0,0 +1,5 @@
# vlogscli
Command-line utility for querying [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/).
See [these docs](https://docs.victoriametrics.com/victorialogs/querying/vlogscli/).


@@ -0,0 +1,6 @@
ARG base_image=non-existing
FROM $base_image
ENTRYPOINT ["/vlogscli-prod"]
ARG src_binary=non-existing
COPY $src_binary ./vlogscli-prod


@@ -0,0 +1,245 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"io"
"sort"
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
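// outputMode selects the format used for printing query results.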
type outputMode int
const (
outputModeJSONMultiline = outputMode(0)
outputModeJSONSingleline = outputMode(1)
outputModeLogfmt = outputMode(2)
outputModeCompact = outputMode(3)
)
func getOutputFormatter(outputMode outputMode) func(w io.Writer, fields []logstorage.Field) error {
switch outputMode {
case outputModeJSONMultiline:
return func(w io.Writer, fields []logstorage.Field) error {
return writeJSONObject(w, fields, true)
}
case outputModeJSONSingleline:
return func(w io.Writer, fields []logstorage.Field) error {
return writeJSONObject(w, fields, false)
}
case outputModeLogfmt:
return writeLogfmtObject
case outputModeCompact:
return writeCompactObject
default:
panic(fmt.Errorf("BUG: unexpected outputMode=%d", outputMode))
}
}
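// jsonPrettifier reads a stream of JSON objects from r, re-formats every object
// with the formatter for the given outputMode and exposes the result via Read.
// The formatting runs in a background goroutine connected through an io.Pipe.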
type jsonPrettifier struct {
r io.ReadCloser
formatter func(w io.Writer, fields []logstorage.Field) error
d *json.Decoder
pr *io.PipeReader
pw *io.PipeWriter
bw *bufio.Writer
wg sync.WaitGroup
}
func newJSONPrettifier(r io.ReadCloser, outputMode outputMode) *jsonPrettifier {
d := json.NewDecoder(r)
pr, pw := io.Pipe()
bw := bufio.NewWriter(pw)
formatter := getOutputFormatter(outputMode)
jp := &jsonPrettifier{
r: r,
formatter: formatter,
d: d,
pr: pr,
pw: pw,
bw: bw,
}
jp.wg.Add(1)
go func() {
defer jp.wg.Done()
err := jp.prettifyJSONLines()
jp.closePipesWithError(err)
}()
return jp
}
func (jp *jsonPrettifier) closePipesWithError(err error) {
_ = jp.pr.CloseWithError(err)
_ = jp.pw.CloseWithError(err)
}
func (jp *jsonPrettifier) prettifyJSONLines() error {
for jp.d.More() {
fields, err := readNextJSONObject(jp.d)
if err != nil {
return err
}
sort.Slice(fields, func(i, j int) bool {
return fields[i].Name < fields[j].Name
})
if err := jp.formatter(jp.bw, fields); err != nil {
return err
}
// Flush bw after every output line in order to show results as soon as they appear.
if err := jp.bw.Flush(); err != nil {
return err
}
}
return nil
}
func (jp *jsonPrettifier) Close() error {
jp.closePipesWithError(io.ErrUnexpectedEOF)
err := jp.r.Close()
jp.wg.Wait()
return err
}
func (jp *jsonPrettifier) Read(p []byte) (int, error) {
return jp.pr.Read(p)
}
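// readNextJSONObject reads a single flat JSON object with string values from d
// and returns its entries as logstorage fields.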
func readNextJSONObject(d *json.Decoder) ([]logstorage.Field, error) {
t, err := d.Token()
if err != nil {
return nil, fmt.Errorf("cannot read '{': %w", err)
}
delim, ok := t.(json.Delim)
if !ok || delim.String() != "{" {
return nil, fmt.Errorf("unexpected token read; got %q; want '{'", delim)
}
var fields []logstorage.Field
for {
// Read object key
t, err := d.Token()
if err != nil {
return nil, fmt.Errorf("cannot read JSON object key or closing brace: %w", err)
}
delim, ok := t.(json.Delim)
if ok {
if delim.String() == "}" {
return fields, nil
}
return nil, fmt.Errorf("unexpected delimiter read; got %q; want '}'", delim)
}
key, ok := t.(string)
if !ok {
return nil, fmt.Errorf("unexpected token read for object key: %v; want string or '}'", t)
}
// read object value
t, err = d.Token()
if err != nil {
return nil, fmt.Errorf("cannot read JSON object value: %w", err)
}
value, ok := t.(string)
if !ok {
return nil, fmt.Errorf("unexpected token read for oject value: %v; want string", t)
}
fields = append(fields, logstorage.Field{
Name: key,
Value: value,
})
}
}
func writeLogfmtObject(w io.Writer, fields []logstorage.Field) error {
data := logstorage.MarshalFieldsToLogfmt(nil, fields)
_, err := fmt.Fprintf(w, "%s\n", data)
return err
}
func writeCompactObject(w io.Writer, fields []logstorage.Field) error {
if len(fields) == 1 {
// Just write field value as is without name
_, err := fmt.Fprintf(w, "%s\n", fields[0].Value)
return err
}
if len(fields) == 2 && (fields[0].Name == "_time" || fields[1].Name == "_time") {
// Write _time\tfieldValue as is
if fields[0].Name == "_time" {
_, err := fmt.Fprintf(w, "%s\t%s\n", fields[0].Value, fields[1].Value)
return err
}
_, err := fmt.Fprintf(w, "%s\t%s\n", fields[1].Value, fields[0].Value)
return err
}
// Fall back to logfmt
return writeLogfmtObject(w, fields)
}
func writeJSONObject(w io.Writer, fields []logstorage.Field, isMultiline bool) error {
if len(fields) == 0 {
fmt.Fprintf(w, "{}\n")
return nil
}
fmt.Fprintf(w, "{")
writeNewlineIfNeeded(w, isMultiline)
if err := writeJSONObjectKeyValue(w, fields[0], isMultiline); err != nil {
return err
}
for _, f := range fields[1:] {
fmt.Fprintf(w, ",")
writeNewlineIfNeeded(w, isMultiline)
if err := writeJSONObjectKeyValue(w, f, isMultiline); err != nil {
return err
}
}
writeNewlineIfNeeded(w, isMultiline)
fmt.Fprintf(w, "}\n")
return nil
}
func writeNewlineIfNeeded(w io.Writer, isMultiline bool) {
if isMultiline {
fmt.Fprintf(w, "\n")
}
}
func writeJSONObjectKeyValue(w io.Writer, f logstorage.Field, isMultiline bool) error {
key := getJSONString(f.Name)
value := getJSONString(f.Value)
if isMultiline {
_, err := fmt.Fprintf(w, " %s: %s", key, value)
return err
}
_, err := fmt.Fprintf(w, "%s:%s", key, value)
return err
}
func getJSONString(s string) string {
data, err := json.Marshal(s)
if err != nil {
panic(fmt.Errorf("unexpected error when marshaling string to JSON: %w", err))
}
return jsonHTMLReplacer.Replace(string(data))
}
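// jsonHTMLReplacer reverts the HTML-escaping performed by json.Marshal,
// which escapes '<', '>' and '&' by default, so the output stays human-readable.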
var jsonHTMLReplacer = strings.NewReplacer(
`\u003c`, "\u003c",
`\u003e`, "\u003e",
`\u0026`, "\u0026",
)


@@ -0,0 +1,120 @@
package main
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
"sync"
"syscall"
"github.com/mattn/go-isatty"
)
func isTerminal() bool {
return isatty.IsTerminal(os.Stdout.Fd()) && isatty.IsTerminal(os.Stderr.Fd())
}
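// readWithLess forwards r to the 'less' pager if stdout is a terminal;
// otherwise it copies r directly to stdout.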
func readWithLess(r io.Reader, wrapLongLines bool) error {
if !isTerminal() {
// Just write everything to stdout if no terminal is available.
_, err := io.Copy(os.Stdout, r)
if err != nil && !isErrPipe(err) {
return fmt.Errorf("error when forwarding data to stdout: %w", err)
}
if err := os.Stdout.Sync(); err != nil {
return fmt.Errorf("cannot sync data to stdout: %w", err)
}
return nil
}
pr, pw, err := os.Pipe()
if err != nil {
return fmt.Errorf("cannot create pipe: %w", err)
}
defer func() {
_ = pr.Close()
_ = pw.Close()
}()
// Ignore Ctrl+C in the current process, so 'less' can handle it properly
cancel := ignoreSignals(os.Interrupt)
defer cancel()
// Start 'less' process
path, err := exec.LookPath("less")
if err != nil {
return fmt.Errorf("cannot find 'less' command: %w", err)
}
opts := []string{"less", "-F", "-X"}
if !wrapLongLines {
opts = append(opts, "-S")
}
p, err := os.StartProcess(path, opts, &os.ProcAttr{
Env: append(os.Environ(), "LESSCHARSET=utf-8"),
Files: []*os.File{pr, os.Stdout, os.Stderr},
})
if err != nil {
return fmt.Errorf("cannot start 'less' process: %w", err)
}
// Close pr in a separate goroutine after 'less' finishes
// in order to unblock forwarding data to a stopped 'less' below.
waitch := make(chan *os.ProcessState)
go func() {
// Wait for 'less' process to finish.
ps, err := p.Wait()
if err != nil {
fatalf("unexpected error when waiting for 'less' process: %w", err)
}
_ = pr.Close()
waitch <- ps
}()
// Forward data from r to 'less'
_, err = io.Copy(pw, r)
_ = pw.Sync()
_ = pw.Close()
// Wait until 'less' finishes
ps := <-waitch
// Verify 'less' status.
if !ps.Success() {
return fmt.Errorf("'less' finished with unexpected code %d", ps.ExitCode())
}
if err != nil && !isErrPipe(err) {
return fmt.Errorf("error when forwarding data to 'less': %w", err)
}
return nil
}
func isErrPipe(err error) bool {
return errors.Is(err, syscall.EPIPE) || errors.Is(err, io.ErrClosedPipe)
}
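// ignoreSignals suppresses the given signals and returns a function
// for restoring the default signal handling.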
func ignoreSignals(sigs ...os.Signal) func() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, sigs...)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for {
_, ok := <-ch
if !ok {
return
}
}
}()
return func() {
signal.Stop(ch)
close(ch)
wg.Wait()
}
}

app/vlogscli/main.go Normal file

@@ -0,0 +1,436 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
"io"
"io/fs"
"net/http"
"net/url"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/ergochat/readline"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)
var (
datasourceURL = flag.String("datasource.url", "http://localhost:9428/select/logsql/query", "URL for querying VictoriaLogs; "+
"see https://docs.victoriametrics.com/victorialogs/querying/#querying-logs . See also -tail.url")
tailURL = flag.String("tail.url", "", "URL for live tailing queries to VictoriaLogs; see https://docs.victoriametrics.com/victorialogs/querying/#live-tailing . "+
"The URL is automatically detected from -datasource.url by replacing /query with /tail at the end if -tail.url is empty")
historyFile = flag.String("historyFile", "vlogscli-history", "Path to file with command history")
header = flagutil.NewArrayString("header", "Optional header to pass in request -datasource.url in the form 'HeaderName: value'")
accountID = flag.Int("accountID", 0, "Account ID to query; see https://docs.victoriametrics.com/victorialogs/#multitenancy")
projectID = flag.Int("projectID", 0, "Project ID to query; see https://docs.victoriametrics.com/victorialogs/#multitenancy")
)
const (
firstLinePrompt = ";> "
nextLinePrompt = ""
)
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
envflag.Parse()
buildinfo.Init()
logger.InitNoLogFlags()
hes, err := parseHeaders(*header)
if err != nil {
fatalf("cannot parse -header command-line flag: %s", err)
}
headers = hes
incompleteLine := ""
cfg := &readline.Config{
Prompt: firstLinePrompt,
DisableAutoSaveHistory: true,
Listener: func(line []rune, pos int, _ rune) ([]rune, int, bool) {
incompleteLine = string(line)
return line, pos, false
},
}
rl, err := readline.NewFromConfig(cfg)
if err != nil {
fatalf("cannot initialize readline: %s", err)
}
fmt.Fprintf(rl, "sending queries to -datasource.url=%s\n", *datasourceURL)
fmt.Fprintf(rl, `type ? and press enter to see available commands`+"\n")
runReadlineLoop(rl, &incompleteLine)
if err := rl.Close(); err != nil {
fatalf("cannot close readline: %s", err)
}
}
func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
historyLines, err := loadFromHistory(*historyFile)
if err != nil {
fatalf("cannot load query history: %s", err)
}
for _, line := range historyLines {
if err := rl.SaveToHistory(line); err != nil {
fatalf("cannot initialize query history: %s", err)
}
}
outputMode := outputModeJSONMultiline
wrapLongLines := false
s := ""
for {
line, err := rl.ReadLine()
if err != nil {
switch err {
case io.EOF:
if s != "" {
// This is non-interactive query execution.
executeQuery(context.Background(), rl, s, outputMode, wrapLongLines)
}
return
case readline.ErrInterrupt:
if s == "" && *incompleteLine == "" {
fmt.Fprintf(rl, "interrupted\n")
os.Exit(128 + int(syscall.SIGINT))
}
// Default action for Ctrl+C: clear the prompt and store the incompletely entered line in the history
s += *incompleteLine
historyLines = pushToHistory(rl, historyLines, s)
s = ""
rl.SetPrompt(firstLinePrompt)
continue
default:
fatalf("unexpected error in readline: %s", err)
}
}
s += line
if s == "" {
// Skip empty lines
continue
}
if isQuitCommand(s) {
fmt.Fprintf(rl, "bye!\n")
_ = pushToHistory(rl, historyLines, s)
return
}
if isHelpCommand(s) {
printCommandsHelp(rl)
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if s == `\s` {
fmt.Fprintf(rl, "singleline json output mode\n")
outputMode = outputModeJSONSingleline
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if s == `\m` {
fmt.Fprintf(rl, "multiline json output mode\n")
outputMode = outputModeJSONMultiline
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if s == `\c` {
fmt.Fprintf(rl, "compact output mode\n")
outputMode = outputModeCompact
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if s == `\logfmt` {
fmt.Fprintf(rl, "logfmt output mode\n")
outputMode = outputModeLogfmt
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if s == `\wrap_long_lines` {
if wrapLongLines {
wrapLongLines = false
fmt.Fprintf(rl, "wrapping of long lines is disabled\n")
} else {
wrapLongLines = true
fmt.Fprintf(rl, "wrapping of long lines is enabled\n")
}
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if line != "" && !strings.HasSuffix(line, ";") {
// Assume the query is incomplete and let the user finish it on the next line
s += "\n"
rl.SetPrompt(nextLinePrompt)
continue
}
// Execute the query
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
executeQuery(ctx, rl, s, outputMode, wrapLongLines)
cancel()
historyLines = pushToHistory(rl, historyLines, s)
s = ""
rl.SetPrompt(firstLinePrompt)
}
}
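// pushToHistory appends s to the in-memory and on-disk query history,
// skipping consecutive duplicates and keeping at most the last 500 entries.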
func pushToHistory(rl *readline.Instance, historyLines []string, s string) []string {
s = strings.TrimSpace(s)
if len(historyLines) == 0 || historyLines[len(historyLines)-1] != s {
historyLines = append(historyLines, s)
if len(historyLines) > 500 {
historyLines = historyLines[len(historyLines)-500:]
}
if err := saveToHistory(*historyFile, historyLines); err != nil {
fatalf("cannot save query history: %s", err)
}
}
if err := rl.SaveToHistory(s); err != nil {
fatalf("cannot update query history: %s", err)
}
return historyLines
}
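// loadFromHistory reads previously saved queries from filePath;
// a missing file isn't an error. Every history line is stored in quoted form.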
func loadFromHistory(filePath string) ([]string, error) {
data, err := os.ReadFile(filePath)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, nil
}
return nil, err
}
linesQuoted := strings.Split(string(data), "\n")
lines := make([]string, 0, len(linesQuoted))
i := 0
for _, lineQuoted := range linesQuoted {
i++
if lineQuoted == "" {
continue
}
line, err := strconv.Unquote(lineQuoted)
if err != nil {
return nil, fmt.Errorf("cannot parse line #%d at %s: %w; line: [%s]", i, filePath, err, line)
}
lines = append(lines, line)
}
return lines, nil
}
func saveToHistory(filePath string, lines []string) error {
linesQuoted := make([]string, len(lines))
for i, line := range lines {
lineQuoted := strconv.Quote(line)
linesQuoted[i] = lineQuoted
}
data := strings.Join(linesQuoted, "\n")
return os.WriteFile(filePath, []byte(data), 0600)
}
func isQuitCommand(s string) bool {
switch s {
case `\q`, "q", "quit", "exit":
return true
default:
return false
}
}
func isHelpCommand(s string) bool {
switch s {
case `\h`, "h", "help", "?":
return true
default:
return false
}
}
func printCommandsHelp(w io.Writer) {
fmt.Fprintf(w, "%s", `Available commands:
\q - quit
\h - show this help
\s - singleline json output mode
\m - multiline json output mode
\c - compact output mode
\logfmt - logfmt output mode
\wrap_long_lines - toggles wrapping long lines
\tail <query> - live tail <query> results
See https://docs.victoriametrics.com/victorialogs/querying/vlogscli/ for more details
`)
}
func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode outputMode, wrapLongLines bool) {
if strings.HasPrefix(qStr, `\tail `) {
tailQuery(ctx, output, qStr, outputMode)
return
}
respBody := getQueryResponse(ctx, output, qStr, outputMode, *datasourceURL)
if respBody == nil {
return
}
defer func() {
_ = respBody.Close()
}()
if err := readWithLess(respBody, wrapLongLines); err != nil {
fmt.Fprintf(output, "error when reading query response: %s\n", err)
return
}
}
func tailQuery(ctx context.Context, output io.Writer, qStr string, outputMode outputMode) {
qStr = strings.TrimPrefix(qStr, `\tail `)
qURL, err := getTailURL()
if err != nil {
fmt.Fprintf(output, "%s\n", err)
return
}
respBody := getQueryResponse(ctx, output, qStr, outputMode, qURL)
if respBody == nil {
return
}
defer func() {
_ = respBody.Close()
}()
if _, err := io.Copy(output, respBody); err != nil {
if !errors.Is(err, context.Canceled) && !isErrPipe(err) {
fmt.Fprintf(output, "error when live tailing query response: %s\n", err)
}
fmt.Fprintf(output, "\n")
return
}
}
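// getTailURL returns -tail.url if it is set; otherwise it derives the live tailing URL
// from -datasource.url by replacing the trailing /query with /tail.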
func getTailURL() (string, error) {
if *tailURL != "" {
return *tailURL, nil
}
u, err := url.Parse(*datasourceURL)
if err != nil {
return "", fmt.Errorf("cannot parse -datasource.url=%q: %w", *datasourceURL, err)
}
if !strings.HasSuffix(u.Path, "/query") {
return "", fmt.Errorf("cannot find /query suffix in -datasource.url=%q", *datasourceURL)
}
u.Path = u.Path[:len(u.Path)-len("/query")] + "/tail"
return u.String(), nil
}
func getQueryResponse(ctx context.Context, output io.Writer, qStr string, outputMode outputMode, qURL string) io.ReadCloser {
// Parse the query and convert it to canonical view.
qStr = strings.TrimSuffix(qStr, ";")
q, err := logstorage.ParseQuery(qStr)
if err != nil {
fmt.Fprintf(output, "cannot parse query: %s\n", err)
return nil
}
qStr = q.String()
fmt.Fprintf(output, "executing [%s]...", qStr)
// Prepare HTTP request for qURL
args := make(url.Values)
args.Set("query", qStr)
data := strings.NewReader(args.Encode())
req, err := http.NewRequestWithContext(ctx, "POST", qURL, data)
if err != nil {
panic(fmt.Errorf("BUG: cannot prepare request to server: %w", err))
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
for _, h := range headers {
req.Header.Set(h.Name, h.Value)
}
req.Header.Set("AccountID", strconv.Itoa(*accountID))
req.Header.Set("ProjectID", strconv.Itoa(*projectID))
// Execute HTTP request at qURL
startTime := time.Now()
resp, err := httpClient.Do(req)
queryDuration := time.Since(startTime)
fmt.Fprintf(output, "; duration: %.3fs\n", queryDuration.Seconds())
if err != nil {
if errors.Is(err, context.Canceled) {
fmt.Fprintf(output, "\n")
} else {
fmt.Fprintf(output, "cannot execute query: %s\n", err)
}
return nil
}
// Verify response code
if resp.StatusCode != http.StatusOK {
body, err := io.ReadAll(resp.Body)
if err != nil {
body = []byte(fmt.Sprintf("cannot read response body: %s", err))
}
fmt.Fprintf(output, "unexpected status code: %d; response body:\n%s\n", resp.StatusCode, body)
return nil
}
// Prettify the response body
jp := newJSONPrettifier(resp.Body, outputMode)
return jp
}
var httpClient = &http.Client{}
var headers []headerEntry
type headerEntry struct {
Name string
Value string
}
func parseHeaders(a []string) ([]headerEntry, error) {
hes := make([]headerEntry, len(a))
for i, s := range a {
a := strings.SplitN(s, ":", 2)
if len(a) != 2 {
return nil, fmt.Errorf("cannot parse header=%q; it must contain at least one ':'; for example, 'Cookie: foo'", s)
}
hes[i] = headerEntry{
Name: strings.TrimSpace(a[0]),
Value: strings.TrimSpace(a[1]),
}
}
return hes, nil
}
func fatalf(format string, args ...any) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
os.Exit(1)
}
func usage() {
const s = `
vlogscli is a command-line tool for querying VictoriaLogs.
See the docs at https://docs.victoriametrics.com/victorialogs/querying/vlogscli/
`
flagutil.Usage(s)
}


@@ -0,0 +1,11 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image=non-existing
ARG root_image=non-existing
FROM $certs_image AS certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates
FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ENTRYPOINT ["/vlogscli-prod"]
ARG TARGETARCH
COPY vlogscli-linux-${TARGETARCH}-prod ./vlogscli-prod


@@ -45,6 +45,8 @@ var (
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
u64FieldsPerLog = flag.Int("u64FieldsPerLog", 1, "The number of fields with uint64 values to generate per each log entry; "+
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
i64FieldsPerLog = flag.Int("i64FieldsPerLog", 1, "The number of fields with int64 values to generate per each log entry; "+
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
floatFieldsPerLog = flag.Int("floatFieldsPerLog", 1, "The number of fields with float64 values to generate per each log entry; "+
"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
ipFieldsPerLog = flag.Int("ipFieldsPerLog", 1, "The number of fields with IPv4 values to generate per each log entry; "+
@@ -254,6 +256,9 @@ func generateLogsAtTimestamp(bw *bufio.Writer, workerID int, ts int64, firstStre
for j := 0; j < *u64FieldsPerLog; j++ {
fmt.Fprintf(bw, `,"u64_%d":"%d"`, j, rand.Uint64())
}
for j := 0; j < *i64FieldsPerLog; j++ {
fmt.Fprintf(bw, `,"i64_%d":"%d"`, j, int64(rand.Uint64()))
}
for j := 0; j < *floatFieldsPerLog; j++ {
fmt.Fprintf(bw, `,"float_%d":"%v"`, j, math.Round(10_000*rand.Float64())/1000)
}


@@ -0,0 +1,49 @@
{% import (
"slices"
) %}
{% stripspace %}
{% func FacetsResponse(m map[string][]facetEntry) %}
{
{% code
sortedKeys := make([]string, 0, len(m))
for k := range m {
sortedKeys = append(sortedKeys, k)
}
slices.Sort(sortedKeys)
%}
"facets":[
{% if len(sortedKeys) > 0 %}
{%= facetsLine(m, sortedKeys[0]) %}
{% for _, k := range sortedKeys[1:] %}
,{%= facetsLine(m, k) %}
{% endfor %}
{% endif %}
]
}
{% endfunc %}
{% func facetsLine(m map[string][]facetEntry, k string) %}
{
"field_name":{%q= k %},
"values":[
{% code fes := m[k] %}
{% if len(fes) > 0 %}
{%= facetLine(fes[0]) %}
{% for _, fe := range fes[1:] %}
,{%= facetLine(fe) %}
{% endfor %}
{% endif %}
]
}
{% endfunc %}
{% func facetLine(fe facetEntry) %}
{
"field_value":{%q= fe.value %},
"hits":{%s= fe.hits %}
}
{% endfunc %}
{% endstripspace %}


@@ -0,0 +1,178 @@
// Code generated by qtc from "facets_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.
//line app/vlselect/logsql/facets_response.qtpl:1
package logsql
//line app/vlselect/logsql/facets_response.qtpl:1
import (
"slices"
)
//line app/vlselect/logsql/facets_response.qtpl:7
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vlselect/logsql/facets_response.qtpl:7
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vlselect/logsql/facets_response.qtpl:7
func StreamFacetsResponse(qw422016 *qt422016.Writer, m map[string][]facetEntry) {
//line app/vlselect/logsql/facets_response.qtpl:7
qw422016.N().S(`{`)
//line app/vlselect/logsql/facets_response.qtpl:10
sortedKeys := make([]string, 0, len(m))
for k := range m {
sortedKeys = append(sortedKeys, k)
}
slices.Sort(sortedKeys)
//line app/vlselect/logsql/facets_response.qtpl:15
qw422016.N().S(`"facets":[`)
//line app/vlselect/logsql/facets_response.qtpl:17
if len(sortedKeys) > 0 {
//line app/vlselect/logsql/facets_response.qtpl:18
streamfacetsLine(qw422016, m, sortedKeys[0])
//line app/vlselect/logsql/facets_response.qtpl:19
for _, k := range sortedKeys[1:] {
//line app/vlselect/logsql/facets_response.qtpl:19
qw422016.N().S(`,`)
//line app/vlselect/logsql/facets_response.qtpl:20
streamfacetsLine(qw422016, m, k)
//line app/vlselect/logsql/facets_response.qtpl:21
}
//line app/vlselect/logsql/facets_response.qtpl:22
}
//line app/vlselect/logsql/facets_response.qtpl:22
qw422016.N().S(`]}`)
//line app/vlselect/logsql/facets_response.qtpl:25
}
//line app/vlselect/logsql/facets_response.qtpl:25
func WriteFacetsResponse(qq422016 qtio422016.Writer, m map[string][]facetEntry) {
//line app/vlselect/logsql/facets_response.qtpl:25
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/facets_response.qtpl:25
StreamFacetsResponse(qw422016, m)
//line app/vlselect/logsql/facets_response.qtpl:25
qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/facets_response.qtpl:25
}
//line app/vlselect/logsql/facets_response.qtpl:25
func FacetsResponse(m map[string][]facetEntry) string {
//line app/vlselect/logsql/facets_response.qtpl:25
qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/facets_response.qtpl:25
WriteFacetsResponse(qb422016, m)
//line app/vlselect/logsql/facets_response.qtpl:25
qs422016 := string(qb422016.B)
//line app/vlselect/logsql/facets_response.qtpl:25
qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/facets_response.qtpl:25
return qs422016
//line app/vlselect/logsql/facets_response.qtpl:25
}
//line app/vlselect/logsql/facets_response.qtpl:27
func streamfacetsLine(qw422016 *qt422016.Writer, m map[string][]facetEntry, k string) {
//line app/vlselect/logsql/facets_response.qtpl:27
qw422016.N().S(`{"field_name":`)
//line app/vlselect/logsql/facets_response.qtpl:29
qw422016.N().Q(k)
//line app/vlselect/logsql/facets_response.qtpl:29
qw422016.N().S(`,"values":[`)
//line app/vlselect/logsql/facets_response.qtpl:31
fes := m[k]
//line app/vlselect/logsql/facets_response.qtpl:32
if len(fes) > 0 {
//line app/vlselect/logsql/facets_response.qtpl:33
streamfacetLine(qw422016, fes[0])
//line app/vlselect/logsql/facets_response.qtpl:34
for _, fe := range fes[1:] {
//line app/vlselect/logsql/facets_response.qtpl:34
qw422016.N().S(`,`)
//line app/vlselect/logsql/facets_response.qtpl:35
streamfacetLine(qw422016, fe)
//line app/vlselect/logsql/facets_response.qtpl:36
}
//line app/vlselect/logsql/facets_response.qtpl:37
}
//line app/vlselect/logsql/facets_response.qtpl:37
qw422016.N().S(`]}`)
//line app/vlselect/logsql/facets_response.qtpl:40
}
//line app/vlselect/logsql/facets_response.qtpl:40
func writefacetsLine(qq422016 qtio422016.Writer, m map[string][]facetEntry, k string) {
//line app/vlselect/logsql/facets_response.qtpl:40
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/facets_response.qtpl:40
streamfacetsLine(qw422016, m, k)
//line app/vlselect/logsql/facets_response.qtpl:40
qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/facets_response.qtpl:40
}
//line app/vlselect/logsql/facets_response.qtpl:40
func facetsLine(m map[string][]facetEntry, k string) string {
//line app/vlselect/logsql/facets_response.qtpl:40
qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/facets_response.qtpl:40
writefacetsLine(qb422016, m, k)
//line app/vlselect/logsql/facets_response.qtpl:40
qs422016 := string(qb422016.B)
//line app/vlselect/logsql/facets_response.qtpl:40
qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/facets_response.qtpl:40
return qs422016
//line app/vlselect/logsql/facets_response.qtpl:40
}
//line app/vlselect/logsql/facets_response.qtpl:42
func streamfacetLine(qw422016 *qt422016.Writer, fe facetEntry) {
//line app/vlselect/logsql/facets_response.qtpl:42
qw422016.N().S(`{"field_value":`)
//line app/vlselect/logsql/facets_response.qtpl:44
qw422016.N().Q(fe.value)
//line app/vlselect/logsql/facets_response.qtpl:44
qw422016.N().S(`,"hits":`)
//line app/vlselect/logsql/facets_response.qtpl:45
qw422016.N().S(fe.hits)
//line app/vlselect/logsql/facets_response.qtpl:45
qw422016.N().S(`}`)
//line app/vlselect/logsql/facets_response.qtpl:47
}
//line app/vlselect/logsql/facets_response.qtpl:47
func writefacetLine(qq422016 qtio422016.Writer, fe facetEntry) {
//line app/vlselect/logsql/facets_response.qtpl:47
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/facets_response.qtpl:47
streamfacetLine(qw422016, fe)
//line app/vlselect/logsql/facets_response.qtpl:47
qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/facets_response.qtpl:47
}
//line app/vlselect/logsql/facets_response.qtpl:47
func facetLine(fe facetEntry) string {
//line app/vlselect/logsql/facets_response.qtpl:47
qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/facets_response.qtpl:47
writefacetLine(qb422016, fe)
//line app/vlselect/logsql/facets_response.qtpl:47
qs422016 := string(qb422016.B)
//line app/vlselect/logsql/facets_response.qtpl:47
qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/facets_response.qtpl:47
return qs422016
//line app/vlselect/logsql/facets_response.qtpl:47
}


@@ -5,6 +5,7 @@ import (
"fmt"
"math"
"net/http"
"regexp"
"slices"
"sort"
"strconv"
@@ -13,6 +14,7 @@ import (
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -23,6 +25,82 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
// ProcessFacetsRequest handles /select/logsql/facets request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-facets
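//
// The response is a JSON object of the following form (see facets_response.qtpl):
//
//	{"facets":[{"field_name":"...","values":[{"field_value":"...","hits":"..."}]}]}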
func ProcessFacetsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
q, tenantIDs, err := parseCommonArgs(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
limit, err := httputils.GetInt(r, "limit")
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
maxValuesPerField, err := httputils.GetInt(r, "max_values_per_field")
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
maxValueLen, err := httputils.GetInt(r, "max_value_len")
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
keepConstFields := httputils.GetBool(r, "keep_const_fields")
q.DropAllPipes()
q.AddFacetsPipe(limit, maxValuesPerField, maxValueLen, keepConstFields)
var mLock sync.Mutex
m := make(map[string][]facetEntry)
writeBlock := func(_ uint, _ []int64, columns []logstorage.BlockColumn) {
if len(columns) == 0 || len(columns[0].Values) == 0 {
return
}
if len(columns) != 3 {
logger.Panicf("BUG: expecting 3 columns; got %d columns", len(columns))
}
fieldNames := columns[0].Values
fieldValues := columns[1].Values
hits := columns[2].Values
bb := blockResultPool.Get()
for i := range fieldNames {
fieldName := strings.Clone(fieldNames[i])
fieldValue := strings.Clone(fieldValues[i])
hitsStr := strings.Clone(hits[i])
mLock.Lock()
m[fieldName] = append(m[fieldName], facetEntry{
value: fieldValue,
hits: hitsStr,
})
mLock.Unlock()
}
blockResultPool.Put(bb)
}
// Execute the query
if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
return
}
// Write response
w.Header().Set("Content-Type", "application/json")
WriteFacetsResponse(w, m)
}
type facetEntry struct {
value string
hits string
}
// ProcessHitsRequest handles /select/logsql/hits request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
@@ -73,7 +151,6 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
}
// Prepare the query for hits count.
q.Optimize()
q.DropAllPipes()
q.AddCountByTimePipe(int64(step), int64(offset), fields)
@@ -204,7 +281,6 @@ func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *htt
}
// Obtain field names for the given query
q.Optimize()
fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
if err != nil {
httpserver.Errorf(w, r, "cannot obtain field names: %s", err)
@@ -244,7 +320,6 @@ func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *ht
}
// Obtain unique values for the given field
q.Optimize()
values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
if err != nil {
httpserver.Errorf(w, r, "cannot obtain values for field %q: %s", fieldName, err)
@@ -267,7 +342,6 @@ func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter,
}
// Obtain stream field names for the given query
q.Optimize()
names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
if err != nil {
httpserver.Errorf(w, r, "cannot obtain stream field names: %s", err)
@@ -306,7 +380,6 @@ func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter,
}
// Obtain stream field values for the given query and the given fieldName
q.Optimize()
values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
if err != nil {
httpserver.Errorf(w, r, "cannot obtain stream field values: %s", err)
@@ -338,7 +411,6 @@ func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http
}
// Obtain streamIDs for the given query
q.Optimize()
streamIDs, err := vlstorage.GetStreamIDs(ctx, tenantIDs, q, uint64(limit))
if err != nil {
httpserver.Errorf(w, r, "cannot obtain stream_ids: %s", err)
@@ -370,7 +442,6 @@ func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.R
}
// Obtain streams for the given query
q.Optimize()
streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
if err != nil {
httpserver.Errorf(w, r, "cannot obtain streams: %s", err)
@@ -394,9 +465,10 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
return
}
if !q.CanLiveTail() {
httpserver.Errorf(w, r, "the query [%s] cannot be used in live tailing; see https://docs.victoriametrics.com/victorialogs/querying/#live-tailing for details", q)
httpserver.Errorf(w, r, "the query [%s] cannot be used in live tailing; "+
"see https://docs.victoriametrics.com/victorialogs/querying/#live-tailing for details", q)
return
}
q.Optimize()
refreshIntervalMsecs, err := httputils.GetDuration(r, "refresh_interval", 1000)
if err != nil {
@@ -405,25 +477,37 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
}
refreshInterval := time.Millisecond * time.Duration(refreshIntervalMsecs)
startOffsetMsecs, err := httputils.GetDuration(r, "start_offset", 5*1000)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
startOffset := startOffsetMsecs * 1e6
offsetMsecs, err := httputils.GetDuration(r, "offset", 1000)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
offset := offsetMsecs * 1e6
ctxWithCancel, cancel := context.WithCancel(ctx)
tp := newTailProcessor(cancel)
ticker := time.NewTicker(refreshInterval)
defer ticker.Stop()
end := time.Now().UnixNano()
end := time.Now().UnixNano() - offset
start := end - startOffset
doneCh := ctxWithCancel.Done()
flusher, ok := w.(http.Flusher)
if !ok {
logger.Panicf("BUG: it is expected that http.ResponseWriter (%T) supports http.Flusher interface", w)
}
qOrig := q
for {
start := end - tailOffsetNsecs
end = time.Now().UnixNano()
qCopy := q.Clone()
qCopy.AddTimeFilter(start, end)
if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, qCopy, tp.writeBlock); err != nil {
q = qOrig.CloneWithTimeFilter(end, start, end)
if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, tp.writeBlock); err != nil {
httpserver.Errorf(w, r, "cannot execute tail query [%s]: %s", q, err)
return
}
@@ -441,6 +525,8 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
case <-doneCh:
return
case <-ticker.C:
start = end - tailOffsetNsecs
end = time.Now().UnixNano() - offset
}
}
}
@@ -599,18 +685,16 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
return
}
q.Optimize()
m := make(map[string]*statsSeries)
var mLock sync.Mutex
timestamp := q.GetTimestamp()
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
clonedColumnNames := make([]string, len(columns))
for i, c := range columns {
clonedColumnNames[i] = strings.Clone(c.Name)
}
for i := range timestamps {
timestamp := q.GetTimestamp()
labels := make([]logstorage.Field, 0, len(byFields))
for j, c := range columns {
if c.Name == "_time" {
@@ -711,8 +795,6 @@ func ProcessStatsQueryRequest(ctx context.Context, w http.ResponseWriter, r *htt
return
}
q.Optimize()
var rows []statsRow
var rowsLock sync.Mutex
@@ -812,7 +894,6 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
q.AddPipeLimit(uint64(limit))
}
q.Optimize()
writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
if len(columns) == 0 || len(columns[0].Values) == 0 {
@@ -843,7 +924,6 @@ type row struct {
func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
limitUpper := 2 * limit
q.AddPipeLimit(uint64(limitUpper))
q.Optimize()
rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
if err != nil {
@@ -862,11 +942,8 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
qOrig := q
for {
q = qOrig.Clone()
q.AddTimeFilter(start, end)
// q.Optimize() call is needed for converting '*' into filterNoop.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785#issuecomment-2358547733
q.Optimize()
timestamp := qOrig.GetTimestamp()
q = qOrig.CloneWithTimeFilter(timestamp, start, end)
rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limitUpper)
if err != nil {
return nil, err
@@ -970,14 +1047,29 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
}
tenantIDs := []logstorage.TenantID{tenantID}
// Parse optional start and end args
start, okStart, err := getTimeNsec(r, "start")
if err != nil {
return nil, nil, err
}
end, okEnd, err := getTimeNsec(r, "end")
if err != nil {
return nil, nil, err
}
// Parse optional time arg
timestamp, okTime, err := getTimeNsec(r, "time")
if err != nil {
return nil, nil, err
}
if !okTime {
// If time arg is missing, then evaluate query at the current timestamp
timestamp = time.Now().UnixNano()
// If time arg is missing, then evaluate query either at the end timestamp (if it is set)
// or at the current timestamp (if end query arg isn't set)
if okEnd {
timestamp = end
} else {
timestamp = time.Now().UnixNano()
}
}
// decrease timestamp by one nanosecond in order to avoid capturing logs belonging
@@ -991,16 +1083,8 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
return nil, nil, fmt.Errorf("cannot parse query [%s]: %s", qStr, err)
}
// Parse optional start and end args
start, okStart, err := getTimeNsec(r, "start")
if err != nil {
return nil, nil, err
}
end, okEnd, err := getTimeNsec(r, "end")
if err != nil {
return nil, nil, err
}
if okStart || okEnd {
// Add _time:[start, end] filter if start or end args were set.
if !okStart {
start = math.MinInt64
}
@@ -1010,6 +1094,22 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
q.AddTimeFilter(start, end)
}
// Parse optional extra_filters
extraFiltersStr := r.FormValue("extra_filters")
extraFilters, err := parseExtraFilters(extraFiltersStr)
if err != nil {
return nil, nil, err
}
q.AddExtraFilters(extraFilters)
// Parse optional extra_stream_filters
extraStreamFiltersStr := r.FormValue("extra_stream_filters")
extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
if err != nil {
return nil, nil, err
}
q.AddExtraFilters(extraStreamFilters)
return q, tenantIDs, nil
}
@@ -1025,3 +1125,115 @@ func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
}
return nsecs, true, nil
}
func parseExtraFilters(s string) (*logstorage.Filter, error) {
if s == "" {
return nil, nil
}
if !strings.HasPrefix(s, `{"`) {
return logstorage.ParseFilter(s)
}
// Extra filters in the form {"field":"value",...}.
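// For example, {"level":["error","warn"]} is converted to the LogsQL filter "level":in("error","warn").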
kvs, err := parseExtraFiltersJSON(s)
if err != nil {
return nil, err
}
filters := make([]string, len(kvs))
for i, kv := range kvs {
if len(kv.values) == 1 {
filters[i] = fmt.Sprintf("%q:=%q", kv.key, kv.values[0])
} else {
orValues := make([]string, len(kv.values))
for j, v := range kv.values {
orValues[j] = fmt.Sprintf("%q", v)
}
filters[i] = fmt.Sprintf("%q:in(%s)", kv.key, strings.Join(orValues, ","))
}
}
s = strings.Join(filters, " ")
return logstorage.ParseFilter(s)
}
func parseExtraStreamFilters(s string) (*logstorage.Filter, error) {
if s == "" {
return nil, nil
}
if !strings.HasPrefix(s, `{"`) {
return logstorage.ParseFilter(s)
}
// Extra stream filters in the form {"field":"value",...}.
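// For example, {"app":["foo","bar"]} is converted to the stream filter {"app"=~"foo|bar"},
// with every value escaped via regexp.QuoteMeta.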
kvs, err := parseExtraFiltersJSON(s)
if err != nil {
return nil, err
}
filters := make([]string, len(kvs))
for i, kv := range kvs {
if len(kv.values) == 1 {
filters[i] = fmt.Sprintf("%q=%q", kv.key, kv.values[0])
} else {
orValues := make([]string, len(kv.values))
for j, v := range kv.values {
orValues[j] = regexp.QuoteMeta(v)
}
filters[i] = fmt.Sprintf("%q=~%q", kv.key, strings.Join(orValues, "|"))
}
}
s = "{" + strings.Join(filters, ",") + "}"
return logstorage.ParseFilter(s)
}
type extraFilter struct {
key string
values []string
}
func parseExtraFiltersJSON(s string) ([]extraFilter, error) {
v, err := fastjson.Parse(s)
if err != nil {
return nil, err
}
o := v.GetObject()
var errOuter error
var filters []extraFilter
o.Visit(func(k []byte, v *fastjson.Value) {
if errOuter != nil {
return
}
switch v.Type() {
case fastjson.TypeString:
filters = append(filters, extraFilter{
key: string(k),
values: []string{string(v.GetStringBytes())},
})
case fastjson.TypeArray:
a := v.GetArray()
if len(a) == 0 {
return
}
orValues := make([]string, len(a))
for i, av := range a {
ov, err := av.StringBytes()
if err != nil {
errOuter = fmt.Errorf("cannot obtain string item at the array for key %q; item: %s", k, av)
return
}
orValues[i] = string(ov)
}
filters = append(filters, extraFilter{
key: string(k),
values: orValues,
})
default:
errOuter = fmt.Errorf("unexpected type of value for key %q: %s; value: %s", k, v.Type(), v)
}
})
if errOuter != nil {
return nil, errOuter
}
return filters, nil
}


@@ -0,0 +1,103 @@
package logsql
import (
"testing"
)
func TestParseExtraFilters_Success(t *testing.T) {
f := func(s, resultExpected string) {
t.Helper()
f, err := parseExtraFilters(s)
if err != nil {
t.Fatalf("unexpected error in parseExtraFilters: %s", err)
}
result := f.String()
if result != resultExpected {
t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected)
}
}
f("", "")
// JSON string
f(`{"foo":"bar"}`, `foo:=bar`)
f(`{"foo":["bar","baz"]}`, `foo:in(bar,baz)`)
f(`{"z":"=b ","c":["d","e,"],"a":[],"_msg":"x"}`, `z:="=b " c:in(d,"e,") =x`)
// LogsQL filter
f(`foobar`, `foobar`)
f(`foo:bar`, `foo:bar`)
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
}
func TestParseExtraFilters_Failure(t *testing.T) {
f := func(s string) {
t.Helper()
_, err := parseExtraFilters(s)
if err == nil {
t.Fatalf("expecting non-nil error")
}
}
// Invalid JSON
f(`{"foo"}`)
f(`[1,2]`)
f(`{"foo":[1]}`)
// Invalid LogsQL filter
f(`foo:(bar`)
// excess pipe
f(`foo | count()`)
}
func TestParseExtraStreamFilters_Success(t *testing.T) {
f := func(s, resultExpected string) {
t.Helper()
f, err := parseExtraStreamFilters(s)
if err != nil {
t.Fatalf("unexpected error in parseExtraStreamFilters: %s", err)
}
result := f.String()
if result != resultExpected {
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
}
}
f("", "")
// JSON string
f(`{"foo":"bar"}`, `{foo="bar"}`)
f(`{"foo":["bar","baz"]}`, `{foo=~"bar|baz"}`)
f(`{"z":"b","c":["d","e|\""],"a":[],"_msg":"x"}`, `{z="b",c=~"d|e\\|\"",_msg="x"}`)
// LogsQL filter
f(`foobar`, `foobar`)
f(`foo:bar`, `foo:bar`)
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
}
func TestParseExtraStreamFilters_Failure(t *testing.T) {
f := func(s string) {
t.Helper()
_, err := parseExtraStreamFilters(s)
if err == nil {
t.Fatalf("expecting non-nil error")
}
}
// Invalid JSON
f(`{"foo"}`)
f(`[1,2]`)
f(`{"foo":[1]}`)
// Invalid LogsQL filter
f(`foo:(bar`)
// excess pipe
f(`foo | count()`)
}


@@ -6,15 +6,31 @@
// JSONRow creates JSON row from the given fields.
{% func JSONRow(columns []logstorage.BlockColumn, rowIdx int) %}
{
{% code c := &columns[0] %}
{% code
i := 0
for i < len(columns) && columns[i].Values[rowIdx] == "" {
i++
}
columns = columns[i:]
%}
{% if len(columns) == 0 %}
{% return %}
{% endif %}
{
{% code c := &columns[0] %}
{%q= c.Name %}:{%q= c.Values[rowIdx] %}
{% code columns = columns[1:] %}
{% for colIdx := range columns %}
{% code c := &columns[colIdx] %}
{% code
c := &columns[colIdx]
v := c.Values[rowIdx]
%}
{% if v == "" %}
{% continue %}
{% endif %}
,{%q= c.Name %}:{%q= c.Values[rowIdx] %}
{% endfor %}
}{% newline %}
}{% newline %}
{% endfunc %}
// JSONRows prints formatted rows
@@ -23,7 +39,11 @@
{% return %}
{% endif %}
{% for _, fields := range rows %}
{
{% code fields = logstorage.SkipLeadingFieldsWithoutValues(fields) %}
{% if len(fields) == 0 %}
{% continue %}
{% endif %}
{
{% if len(fields) > 0 %}
{% code
f := fields[0]
@@ -31,10 +51,13 @@
%}
{%q= f.Name %}:{%q= f.Value %}
{% for _, f := range fields %}
{% if f.Value == "" %}
{% continue %}
{% endif %}
,{%q= f.Name %}:{%q= f.Value %}
{% endfor %}
{% endif %}
}{% newline %}
}{% newline %}
{% endfor %}
{% endfunc %}


@@ -26,141 +26,176 @@ var (
//line app/vlselect/logsql/query_response.qtpl:8
func StreamJSONRow(qw422016 *qt422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/query_response.qtpl:8
qw422016.N().S(`{`)
//line app/vlselect/logsql/query_response.qtpl:10
i := 0
for i < len(columns) && columns[i].Values[rowIdx] == "" {
i++
}
columns = columns[i:]
//line app/vlselect/logsql/query_response.qtpl:16
if len(columns) == 0 {
//line app/vlselect/logsql/query_response.qtpl:17
return
//line app/vlselect/logsql/query_response.qtpl:18
}
//line app/vlselect/logsql/query_response.qtpl:18
qw422016.N().S(`{`)
//line app/vlselect/logsql/query_response.qtpl:20
c := &columns[0]
//line app/vlselect/logsql/query_response.qtpl:11
//line app/vlselect/logsql/query_response.qtpl:21
qw422016.N().Q(c.Name)
//line app/vlselect/logsql/query_response.qtpl:11
//line app/vlselect/logsql/query_response.qtpl:21
qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:11
//line app/vlselect/logsql/query_response.qtpl:21
qw422016.N().Q(c.Values[rowIdx])
//line app/vlselect/logsql/query_response.qtpl:12
//line app/vlselect/logsql/query_response.qtpl:22
columns = columns[1:]
//line app/vlselect/logsql/query_response.qtpl:13
//line app/vlselect/logsql/query_response.qtpl:23
for colIdx := range columns {
//line app/vlselect/logsql/query_response.qtpl:14
//line app/vlselect/logsql/query_response.qtpl:25
c := &columns[colIdx]
v := c.Values[rowIdx]
//line app/vlselect/logsql/query_response.qtpl:14
//line app/vlselect/logsql/query_response.qtpl:28
if v == "" {
//line app/vlselect/logsql/query_response.qtpl:29
continue
//line app/vlselect/logsql/query_response.qtpl:30
}
//line app/vlselect/logsql/query_response.qtpl:30
qw422016.N().S(`,`)
//line app/vlselect/logsql/query_response.qtpl:15
//line app/vlselect/logsql/query_response.qtpl:31
qw422016.N().Q(c.Name)
//line app/vlselect/logsql/query_response.qtpl:15
//line app/vlselect/logsql/query_response.qtpl:31
qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:15
//line app/vlselect/logsql/query_response.qtpl:31
qw422016.N().Q(c.Values[rowIdx])
//line app/vlselect/logsql/query_response.qtpl:16
//line app/vlselect/logsql/query_response.qtpl:32
}
//line app/vlselect/logsql/query_response.qtpl:16
//line app/vlselect/logsql/query_response.qtpl:32
qw422016.N().S(`}`)
//line app/vlselect/logsql/query_response.qtpl:17
//line app/vlselect/logsql/query_response.qtpl:33
qw422016.N().S(`
`)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
}
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
func WriteJSONRow(qq422016 qtio422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
StreamJSONRow(qw422016, columns, rowIdx)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
}
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
func JSONRow(columns []logstorage.BlockColumn, rowIdx int) string {
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
WriteJSONRow(qb422016, columns, rowIdx)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
qs422016 := string(qb422016.B)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
return qs422016
//line app/vlselect/logsql/query_response.qtpl:18
//line app/vlselect/logsql/query_response.qtpl:34
}
// JSONRows prints formatted rows
//line app/vlselect/logsql/query_response.qtpl:21
//line app/vlselect/logsql/query_response.qtpl:37
func StreamJSONRows(qw422016 *qt422016.Writer, rows [][]logstorage.Field) {
//line app/vlselect/logsql/query_response.qtpl:22
//line app/vlselect/logsql/query_response.qtpl:38
if len(rows) == 0 {
//line app/vlselect/logsql/query_response.qtpl:23
//line app/vlselect/logsql/query_response.qtpl:39
return
//line app/vlselect/logsql/query_response.qtpl:24
//line app/vlselect/logsql/query_response.qtpl:40
}
//line app/vlselect/logsql/query_response.qtpl:25
//line app/vlselect/logsql/query_response.qtpl:41
for _, fields := range rows {
//line app/vlselect/logsql/query_response.qtpl:25
//line app/vlselect/logsql/query_response.qtpl:42
fields = logstorage.SkipLeadingFieldsWithoutValues(fields)
//line app/vlselect/logsql/query_response.qtpl:43
if len(fields) == 0 {
//line app/vlselect/logsql/query_response.qtpl:44
continue
//line app/vlselect/logsql/query_response.qtpl:45
}
//line app/vlselect/logsql/query_response.qtpl:45
qw422016.N().S(`{`)
//line app/vlselect/logsql/query_response.qtpl:27
//line app/vlselect/logsql/query_response.qtpl:47
if len(fields) > 0 {
//line app/vlselect/logsql/query_response.qtpl:29
//line app/vlselect/logsql/query_response.qtpl:49
f := fields[0]
fields = fields[1:]
//line app/vlselect/logsql/query_response.qtpl:32
//line app/vlselect/logsql/query_response.qtpl:52
qw422016.N().Q(f.Name)
//line app/vlselect/logsql/query_response.qtpl:32
//line app/vlselect/logsql/query_response.qtpl:52
qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:32
//line app/vlselect/logsql/query_response.qtpl:52
qw422016.N().Q(f.Value)
//line app/vlselect/logsql/query_response.qtpl:33
//line app/vlselect/logsql/query_response.qtpl:53
for _, f := range fields {
//line app/vlselect/logsql/query_response.qtpl:33
//line app/vlselect/logsql/query_response.qtpl:54
if f.Value == "" {
//line app/vlselect/logsql/query_response.qtpl:55
continue
//line app/vlselect/logsql/query_response.qtpl:56
}
//line app/vlselect/logsql/query_response.qtpl:56
qw422016.N().S(`,`)
//line app/vlselect/logsql/query_response.qtpl:34
//line app/vlselect/logsql/query_response.qtpl:57
qw422016.N().Q(f.Name)
//line app/vlselect/logsql/query_response.qtpl:34
//line app/vlselect/logsql/query_response.qtpl:57
qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:34
//line app/vlselect/logsql/query_response.qtpl:57
qw422016.N().Q(f.Value)
//line app/vlselect/logsql/query_response.qtpl:35
//line app/vlselect/logsql/query_response.qtpl:58
}
//line app/vlselect/logsql/query_response.qtpl:36
//line app/vlselect/logsql/query_response.qtpl:59
}
//line app/vlselect/logsql/query_response.qtpl:36
//line app/vlselect/logsql/query_response.qtpl:59
qw422016.N().S(`}`)
//line app/vlselect/logsql/query_response.qtpl:37
//line app/vlselect/logsql/query_response.qtpl:60
qw422016.N().S(`
`)
//line app/vlselect/logsql/query_response.qtpl:38
//line app/vlselect/logsql/query_response.qtpl:61
}
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
}
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
func WriteJSONRows(qq422016 qtio422016.Writer, rows [][]logstorage.Field) {
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
StreamJSONRows(qw422016, rows)
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
}
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
func JSONRows(rows [][]logstorage.Field) string {
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
WriteJSONRows(qb422016, rows)
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
qs422016 := string(qb422016.B)
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
return qs422016
//line app/vlselect/logsql/query_response.qtpl:39
//line app/vlselect/logsql/query_response.qtpl:62
}


@@ -23,7 +23,7 @@ var (
"See also -search.maxQueueDuration")
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the search request waits for execution when -search.maxConcurrentRequests "+
"limit is reached; see also -search.maxQueryDuration")
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution. It can be overridden on a per-query basis via 'timeout' query arg")
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution. It can be overridden to a smaller value on a per-query basis via 'timeout' query arg")
)
func getDefaultMaxConcurrentRequests() int {
@@ -177,6 +177,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
func processSelectRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, path string) bool {
httpserver.EnableCORS(w, r)
switch path {
case "/select/logsql/facets":
logsqlFacetsRequests.Inc()
logsql.ProcessFacetsRequest(ctx, w, r)
return true
case "/select/logsql/field_names":
logsqlFieldNamesRequests.Inc()
logsql.ProcessFieldNamesRequest(ctx, w, r)
@@ -236,6 +240,7 @@ func getMaxQueryDuration(r *http.Request) time.Duration {
}
var (
logsqlFacetsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/facets"}`)
logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)


@@ -1,13 +1,12 @@
{
"files": {
"main.css": "./static/css/main.c9cc37dd.css",
"main.js": "./static/js/main.867f457f.js",
"main.css": "./static/css/main.02a1c6cb.css",
"main.js": "./static/js/main.55c8060b.js",
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.c9cc37dd.css",
"static/js/main.867f457f.js"
"static/css/main.02a1c6cb.css",
"static/js/main.55c8060b.js"
]
}


@@ -0,0 +1,5 @@
{
"license": {
"type": "opensource"
}
}


@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.867f457f.js"></script><link href="./static/css/main.c9cc37dd.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.55c8060b.js"></script><link href="./static/css/main.02a1c6cb.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diffs suppressed for four files because one or more lines are too long

View File

@@ -18,12 +18,12 @@ import (
)
var (
retentionPeriod = flagutil.NewDuration("retentionPeriod", "7d", "Log entries with timestamps older than now-retentionPeriod are automatically deleted; "+
retentionPeriod = flagutil.NewRetentionDuration("retentionPeriod", "7d", "Log entries with timestamps older than now-retentionPeriod are automatically deleted; "+
"log entries with timestamps outside the retention are also rejected during data ingestion; the minimum supported retention is 1d (one day); "+
"see https://docs.victoriametrics.com/victorialogs/#retention ; see also -retention.maxDiskSpaceUsageBytes")
maxDiskSpaceUsageBytes = flagutil.NewBytes("retention.maxDiskSpaceUsageBytes", 0, "The maximum disk space usage at -storageDataPath before older per-day "+
"partitions are automatically dropped; see https://docs.victoriametrics.com/victorialogs/#retention-by-disk-space-usage ; see also -retentionPeriod")
futureRetention = flagutil.NewDuration("futureRetention", "2d", "Log entries with timestamps bigger than now+futureRetention are rejected during data ingestion; "+
futureRetention = flagutil.NewRetentionDuration("futureRetention", "2d", "Log entries with timestamps bigger than now+futureRetention are rejected during data ingestion; "+
"see https://docs.victoriametrics.com/victorialogs/#retention")
storageDataPath = flag.String("storageDataPath", "victoria-logs-data", "Path to directory where to store VictoriaLogs data; "+
"see https://docs.victoriametrics.com/victorialogs/#storage")
@@ -37,6 +37,8 @@ var (
"see https://docs.victoriametrics.com/victorialogs/data-ingestion/ ; see also -logNewStreams")
minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which "+
"the storage stops accepting new data")
forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages. It overrides -httpAuth.*")
)
// Init initializes vlstorage.
@@ -87,6 +89,28 @@ func Stop() {
strg = nil
}
// RequestHandler is a storage request handler.
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
path := r.URL.Path
if path == "/internal/force_merge" {
if !httpserver.CheckAuthFlag(w, r, forceMergeAuthKey) {
return true
}
// Run force merge in background
partitionNamePrefix := r.FormValue("partition_prefix")
go func() {
activeForceMerges.Inc()
defer activeForceMerges.Dec()
logger.Infof("forced merge for partition_prefix=%q has been started", partitionNamePrefix)
startTime := time.Now()
strg.MustForceMerge(partitionNamePrefix)
logger.Infof("forced merge for partition_prefix=%q has been successfully finished in %.3f seconds", partitionNamePrefix, time.Since(startTime).Seconds())
}()
return true
}
return false
}
var strg *logstorage.Storage
var storageMetrics *metrics.Set
@@ -205,3 +229,5 @@ func writeStorageMetrics(w io.Writer, strg *logstorage.Storage) {
metrics.WriteCounterUint64(w, `vl_rows_dropped_total{reason="too_big_timestamp"}`, ss.RowsDroppedTooBigTimestamp)
metrics.WriteCounterUint64(w, `vl_rows_dropped_total{reason="too_small_timestamp"}`, ss.RowsDroppedTooSmallTimestamp)
}
var activeForceMerges = metrics.NewCounter("vl_active_force_merges")
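
A minimal client-side sketch of triggering the new force-merge endpoint; the listen address, the partition prefix, and the authKey value are illustrative.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// authKey must match the -forceMergeAuthKey flag value;
	// partition_prefix limits the merge to matching per-day partitions.
	params := url.Values{
		"partition_prefix": {"2025-01"}, // illustrative prefix
		"authKey":          {"my-secret"}, // assumed flag value
	}
	resp, err := http.Get("http://localhost:9428/internal/force_merge?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(io.Discard, resp.Body)
	fmt.Println("force merge requested:", resp.Status)
}

Note that the handler returns immediately: the merge itself runs in the background goroutine shown above, tracked by the vl_active_force_merges counter.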

View File

@@ -36,7 +36,7 @@ var (
//
// See https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener/
func InsertHandlerForReader(at *auth.Token, r io.Reader, isGzipped bool) error {
return stream.Parse(r, isGzipped, "", "", func(db string, rows []parser.Row) error {
return stream.Parse(r, true, isGzipped, "", "", func(db string, rows []parser.Row) error {
return insertRows(at, db, rows, nil)
})
}
@@ -50,11 +50,12 @@ func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
return err
}
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
isStreamMode := req.Header.Get("Stream-Mode") == "1"
q := req.URL.Query()
precision := q.Get("precision")
// Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint
db := q.Get("db")
return stream.Parse(req.Body, isGzipped, precision, db, func(db string, rows []parser.Row) error {
return stream.Parse(req.Body, isStreamMode, isGzipped, precision, db, func(db string, rows []parser.Row) error {
return insertRows(at, db, rows, extraLabels)
})
}
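
The new second argument to stream.Parse is driven by the Stream-Mode: 1 request header shown above. A minimal sketch of an InfluxDB line-protocol write that opts in; the URL path, port, and payload are illustrative.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// One line of InfluxDB line protocol; payload and endpoint are illustrative.
	body := strings.NewReader("cpu,host=web1 usage=42 1700000000000000000\n")
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8429/influx/write?db=telegraf", body)
	if err != nil {
		panic(err)
	}
	// Opt in to stream-mode parsing, per the diff above.
	req.Header.Set("Stream-Mode", "1")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}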

View File

@@ -30,6 +30,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
@@ -45,6 +46,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
)
var (
@@ -77,6 +79,9 @@ var (
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running vmagent. The following files are checked: "+
"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig, -remoteWrite.streamAggr.config . "+
"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag")
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 0, "The maximum number of labels per time series to be accepted. Series with superfluous labels are ignored. In this case the vm_rows_ignored_total{reason=\"too_many_labels\"} metric at /metrics page is incremented")
maxLabelNameLen = flag.Int("maxLabelNameLen", 0, "The maximum length of label names in the accepted time series. Series with longer label name are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_name\"} metric at /metrics page is incremented")
maxLabelValueLen = flag.Int("maxLabelValueLen", 0, "The maximum length of label values in the accepted time series. Series with longer label value are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_value\"} metric at /metrics page is incremented")
)
var (
@@ -93,6 +98,15 @@ var (
)
func main() {
// vmagent is optimized for reduced memory allocations,
// so it can run with a reduced GOGC in order to lower memory usage,
// while keeping the CPU time spent in GC low.
//
// Some workloads may need bigger GOGC values, which can be set via the GOGC environment variable.
// It is recommended to increase GOGC if the go_memstats_gc_cpu_fraction metric exposed at the /metrics page
// exceeds 0.05 for extended periods of time.
cgroup.SetGOGC(30)
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
@@ -100,6 +114,7 @@ func main() {
remotewrite.InitSecretFlags()
buildinfo.Init()
logger.Init()
timeserieslimits.Init(*maxLabelsPerTimeseries, *maxLabelNameLen, *maxLabelValueLen)
if promscrape.IsDryRun() {
if err := promscrape.CheckConfig(); err != nil {
@@ -498,7 +513,7 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
httpserver.Errorf(w, r, `unsupported multitenant prefix: %q; expected "insert"`, p.Prefix)
return true
}
at, err := auth.NewToken(p.AuthToken)
at, err := auth.NewTokenPossibleMultitenant(p.AuthToken)
if err != nil {
httpserver.Errorf(w, r, "cannot obtain auth token: %s", err)
return true
@@ -510,7 +525,13 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
httpserver.Errorf(w, r, "%s", err)
return true
}
w.WriteHeader(http.StatusNoContent)
statusCode := http.StatusNoContent
if strings.HasPrefix(p.Suffix, "prometheus/api/v1/import/prometheus/metrics/job/") {
// Return 200 status code for pushgateway requests.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3636
statusCode = http.StatusOK
}
w.WriteHeader(statusCode)
return true
}
if strings.HasPrefix(p.Suffix, "datadog/") {

View File

@@ -7,12 +7,11 @@ import (
"io"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@@ -23,6 +22,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
"github.com/VictoriaMetrics/metrics"
)
var (
@@ -35,7 +35,7 @@ var (
"By default, the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
"is sent after temporary unavailability of the remote storage. See also -maxIngestionRate")
sendTimeout = flagutil.NewArrayDuration("remoteWrite.sendTimeout", time.Minute, "Timeout for sending a single block of data to the corresponding -remoteWrite.url")
retryMinInterval = flagutil.NewArrayDuration("remoteWrite.retryMinInterval", time.Second, "The minimum delay between retry attempts to send a block of data to the corresponding -remoteWrite.url. Every next retry attempt will double the delay to prevent hammering of remote database. See also -remoteWrite.retryMaxInterval")
retryMinInterval = flagutil.NewArrayDuration("remoteWrite.retryMinInterval", time.Second, "The minimum delay between retry attempts to send a block of data to the corresponding -remoteWrite.url. Every next retry attempt will double the delay to prevent hammering of remote database. See also -remoteWrite.retryMaxTime")
retryMaxTime = flagutil.NewArrayDuration("remoteWrite.retryMaxTime", time.Minute, "The max time spent on retry attempts to send a block of data to the corresponding -remoteWrite.url. Change this value if it is expected for -remoteWrite.url to be unreachable for more than -remoteWrite.retryMaxTime. See also -remoteWrite.retryMinInterval")
proxyURL = flagutil.NewArrayString("remoteWrite.proxyURL", "Optional proxy URL for writing data to the corresponding -remoteWrite.url. "+
"Supported proxies: http, https, socks5. Example: -remoteWrite.proxyURL=socks5://proxy:1234")
@@ -463,10 +463,10 @@ again:
// Unexpected status code returned
retriesCount++
retryDuration *= 2
if retryDuration > maxRetryDuration {
retryDuration = maxRetryDuration
}
retryAfterHeader := parseRetryAfterHeader(resp.Header.Get("Retry-After"))
retryDuration = getRetryDuration(retryAfterHeader, retryDuration, maxRetryDuration)
// Handle response
body, err := io.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
@@ -488,3 +488,49 @@ again:
}
var remoteWriteRejectedLogger = logger.WithThrottler("remoteWriteRejected", 5*time.Second)
// getRetryDuration returns retry duration.
// retryAfterDuration has the highest priority.
// If retryAfterDuration is not specified, retryDuration gets doubled.
// retryDuration can't exceed maxRetryDuration.
//
// Also see: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6097
func getRetryDuration(retryAfterDuration, retryDuration, maxRetryDuration time.Duration) time.Duration {
// retryAfterDuration has the highest priority
if retryAfterDuration > 0 {
return timeutil.AddJitterToDuration(retryAfterDuration)
}
// default backoff retry policy
retryDuration *= 2
if retryDuration > maxRetryDuration {
retryDuration = maxRetryDuration
}
return retryDuration
}
// parseRetryAfterHeader parses `Retry-After` value retrieved from HTTP response header.
// retryAfterString should be either an HTTP-date or a number of seconds.
// It returns time.Duration(0) if retryAfterString does not follow RFC 7231.
func parseRetryAfterHeader(retryAfterString string) (retryAfterDuration time.Duration) {
if retryAfterString == "" {
return retryAfterDuration
}
defer func() {
v := retryAfterDuration.Seconds()
logger.Infof("'Retry-After: %s' parsed into %.2f second(s)", retryAfterString, v)
}()
// Retry-After could be in "Mon, 02 Jan 2006 15:04:05 GMT" format.
if parsedTime, err := time.Parse(http.TimeFormat, retryAfterString); err == nil {
return time.Duration(time.Until(parsedTime).Seconds()) * time.Second
}
// Retry-After could be in seconds.
if seconds, err := strconv.Atoi(retryAfterString); err == nil {
return time.Duration(seconds) * time.Second
}
return 0
}
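
To make the retry policy concrete, here is a small standalone sketch that restates the logic of getRetryDuration with jitter omitted; it is not the package's code, just the same doubling-with-cap policy in isolation.

package main

import (
	"fmt"
	"time"
)

// nextRetry mirrors getRetryDuration without jitter: an explicit
// Retry-After wins (and is not capped); otherwise the delay doubles
// up to the configured maximum.
func nextRetry(retryAfter, current, max time.Duration) time.Duration {
	if retryAfter > 0 {
		return retryAfter
	}
	current *= 2
	if current > max {
		current = max
	}
	return current
}

func main() {
	d := time.Second
	for i := 0; i < 8; i++ {
		d = nextRetry(0, d, time.Minute)
		fmt.Println(d) // 2s, 4s, 8s, 16s, 32s, 1m0s, 1m0s, 1m0s
	}
	fmt.Println(nextRetry(90*time.Second, d, time.Minute)) // 1m30s: the header wins
}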

View File

@@ -0,0 +1,99 @@
package remotewrite
import (
"math"
"net/http"
"testing"
"time"
)
func TestCalculateRetryDuration(t *testing.T) {
// `f` calls `getRetryDuration` `n` times
// and checks that the result is
// 1. >= expectMinDuration
// 2. <= expectMinDuration + 10% (see timeutil.AddJitterToDuration)
f := func(retryAfterDuration, retryDuration time.Duration, n int, expectMinDuration time.Duration) {
t.Helper()
for i := 0; i < n; i++ {
retryDuration = getRetryDuration(retryAfterDuration, retryDuration, time.Minute)
}
expectMaxDuration := helper(expectMinDuration)
expectMinDuration = expectMinDuration - (1000 * time.Millisecond) // Avoid edge case when calculating time.Until(now)
if !(retryDuration >= expectMinDuration && retryDuration <= expectMaxDuration) {
t.Fatalf(
"incorrect retry duration, want (ms): [%d, %d], got (ms): %d",
expectMinDuration.Milliseconds(), expectMaxDuration.Milliseconds(),
retryDuration.Milliseconds(),
)
}
}
// Call getRetryDuration once.
{
// default backoff policy
f(0, time.Second, 1, 2*time.Second)
// default backoff policy exceeds max limit
f(0, 10*time.Minute, 1, time.Minute)
// retry after > default backoff policy
f(10*time.Second, 1*time.Second, 1, 10*time.Second)
// retry after < default backoff policy
f(1*time.Second, 10*time.Second, 1, 1*time.Second)
// retry after invalid and < default backoff policy
f(0, time.Second, 1, 2*time.Second)
}
// Call getRetryDuration multiple times.
{
// default backoff policy 2 times
f(0, time.Second, 2, 4*time.Second)
// default backoff policy 3 times
f(0, time.Second, 3, 8*time.Second)
// default backoff policy N times, exceeding the max limit
f(0, time.Second, 10, time.Minute)
// retry after 120s 1 time
f(120*time.Second, time.Second, 1, 120*time.Second)
// retry after 120s 2 times
f(120*time.Second, time.Second, 2, 120*time.Second)
}
}
func TestParseRetryAfterHeader(t *testing.T) {
f := func(retryAfterString string, expectResult time.Duration) {
t.Helper()
result := parseRetryAfterHeader(retryAfterString)
// expect `expectResult == result` when retryAfterString is in seconds or invalid
// expect the difference between result and expectResult to be lower than 10%
if !(expectResult == result || math.Abs(float64(expectResult-result))/float64(expectResult) < 0.10) {
t.Fatalf(
"incorrect retry after duration, want (ms): %d, got (ms): %d",
expectResult.Milliseconds(), result.Milliseconds(),
)
}
}
// retry after header in seconds
f("10", 10*time.Second)
// retry after header in date time
f(time.Now().Add(30*time.Second).UTC().Format(http.TimeFormat), 30*time.Second)
// retry after header invalid
f("invalid-retry-after", 0)
// retry after header not in GMT
f(time.Now().Add(10*time.Second).Format("Mon, 02 Jan 2006 15:04:05 FAKETZ"), 0)
}
// helper calculates the max possible duration produced by timeutil.AddJitterToDuration.
func helper(d time.Duration) time.Duration {
dv := d / 10
if dv > 10*time.Second {
dv = 10 * time.Second
}
return d + dv
}
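
For reference, helper() bounds the jitter added by timeutil.AddJitterToDuration at 10% of the duration, capped at 10 seconds: an expected minimum of 2s gives an accepted window of [2s, 2.2s], while 1m gives [1m, 1m6s], since 10% of 60s is 6s and still under the 10s cap.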

View File

@@ -7,13 +7,10 @@ import (
"net/url"
"path/filepath"
"slices"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -21,6 +18,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
@@ -30,6 +28,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
"github.com/VictoriaMetrics/metrics"
"github.com/cespare/xxhash/v2"
)
@@ -99,9 +98,6 @@ var (
// rwctxsGlobal contains statically populated entries when -remoteWrite.url is specified.
rwctxsGlobal []*remoteWriteCtx
// Data without tenant id is written to defaultAuthToken if -enableMultitenantHandlers is specified.
defaultAuthToken = &auth.Token{}
// ErrQueueFullHTTPRetry must be returned when TryPush() returns false.
ErrQueueFullHTTPRetry = &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("remote storage systems cannot keep up with the data ingestion rate; retry the request later " +
@@ -209,7 +205,7 @@ func Init() {
initStreamAggrConfigGlobal()
rwctxsGlobal = newRemoteWriteCtxs(nil, *remoteWriteURLs)
rwctxsGlobal = newRemoteWriteCtxs(*remoteWriteURLs)
disableOnDiskQueues := []bool(*disableOnDiskQueue)
disableOnDiskQueueAny = slices.Contains(disableOnDiskQueues, true)
@@ -294,7 +290,7 @@ var (
relabelConfigTimestamp = metrics.NewCounter(`vmagent_relabel_config_last_reload_success_timestamp_seconds`)
)
func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
func newRemoteWriteCtxs(urls []string) []*remoteWriteCtx {
if len(urls) == 0 {
logger.Panicf("BUG: urls must be non-empty")
}
@@ -316,11 +312,6 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
logger.Fatalf("invalid -remoteWrite.url=%q: %s", remoteWriteURL, err)
}
sanitizedURL := fmt.Sprintf("%d:secret-url", i+1)
if at != nil {
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
remoteWriteURL.Path = fmt.Sprintf("%s/insert/%d:%d/prometheus/api/v1/write", remoteWriteURL.Path, at.AccountID, at.ProjectID)
sanitizedURL = fmt.Sprintf("%s:%d:%d", sanitizedURL, at.AccountID, at.ProjectID)
}
if *showRemoteWriteURL {
sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
}
@@ -411,11 +402,6 @@ func TryPush(at *auth.Token, wr *prompbmarshal.WriteRequest) bool {
func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnFailure bool) bool {
tss := wr.Timeseries
if at == nil && MultitenancyEnabled() {
// Write data to default tenant if at isn't set when multitenancy is enabled.
at = defaultAuthToken
}
var tenantRctx *relabelCtx
if at != nil {
// Convert at to (vm_account_id, vm_project_id) labels.
@@ -485,6 +471,15 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnF
rowsCountAfterRelabel := getRowsCount(tssBlock)
rowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
}
if timeserieslimits.Enabled() {
tmpBlock := tssBlock[:0]
for _, ts := range tssBlock {
if !timeserieslimits.IsExceeding(ts.Labels) {
tmpBlock = append(tmpBlock, ts)
}
}
tssBlock = tmpBlock
}
sortLabelsIfNeeded(tssBlock)
tssBlock = limitSeriesCardinality(tssBlock)
if sas.IsEnabled() {
@@ -729,29 +724,14 @@ func logSkippedSeries(labels []prompbmarshal.Label, flagName string, flagValue i
select {
case <-logSkippedSeriesTicker.C:
// Do not use logger.WithThrottler() here, since this will increase CPU usage
// because every call to logSkippedSeries will result to a call to labelsToString.
logger.Warnf("skip series %s because %s=%d reached", labelsToString(labels), flagName, flagValue)
// because every call to logSkippedSeries will result in a call to prompbmarshal.LabelsToString.
logger.Warnf("skip series %s because %s=%d reached", prompbmarshal.LabelsToString(labels), flagName, flagValue)
default:
}
}
var logSkippedSeriesTicker = time.NewTicker(5 * time.Second)
func labelsToString(labels []prompbmarshal.Label) string {
var b []byte
b = append(b, '{')
for i, label := range labels {
b = append(b, label.Name...)
b = append(b, '=')
b = strconv.AppendQuote(b, label.Value)
if i+1 < len(labels) {
b = append(b, ',')
}
}
b = append(b, '}')
return string(b)
}
var (
globalRowsPushedBeforeRelabel = metrics.NewCounter("vmagent_remotewrite_global_rows_pushed_before_relabel_total")
rowsDroppedByGlobalRelabel = metrics.NewCounter("vmagent_remotewrite_global_relabel_metrics_dropped_total")
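
The timeserieslimits block above filters the slice in place with the common zero-allocation Go idiom of reusing the backing array via s[:0]; a minimal standalone sketch of the same pattern:

package main

import "fmt"

func main() {
	xs := []int{1, -2, 3, -4, 5}
	// Reuse xs's backing array: tmp shares storage with xs,
	// so dropping elements allocates nothing.
	tmp := xs[:0]
	for _, x := range xs {
		if x > 0 { // keep only values passing the check
			tmp = append(tmp, x)
		}
	}
	xs = tmp
	fmt.Println(xs) // [1 3 5]
}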

View File

@@ -3,6 +3,7 @@ package remotewrite
import (
"flag"
"fmt"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
@@ -23,7 +24,7 @@ var (
streamAggrGlobalDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation "+
"with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
"are written to remote storages write. See also -streamAggr.keepInput and https://docs.victoriametrics.com/stream-aggregation/")
streamAggrGlobalDedupInterval = flagutil.NewDuration("streamAggr.dedupInterval", "0s", "Input samples are de-duplicated with this interval on "+
streamAggrGlobalDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval on "+
"aggregator before optional aggregation with -streamAggr.config . "+
"See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/stream-aggregation/#deduplication")
streamAggrGlobalIgnoreOldSamples = flag.Bool("streamAggr.ignoreOldSamples", false, "Whether to ignore input samples with old timestamps outside the "+
@@ -56,6 +57,7 @@ var (
"See https://docs.victoriametrics.com/stream-aggregation/#ignore-aggregation-intervals-on-start")
streamAggrDropInputLabels = flagutil.NewArrayString("remoteWrite.streamAggr.dropInputLabels", "An optional list of labels to drop from samples "+
"before stream de-duplication and aggregation with -remoteWrite.streamAggr.config and -remoteWrite.streamAggr.dedupInterval at the corresponding -remoteWrite.url. "+
"Multiple labels per remoteWrite.url must be delimited by '^^': -remoteWrite.streamAggr.dropInputLabels='replica^^az,replica'. "+
"See https://docs.victoriametrics.com/stream-aggregation/#dropping-unneeded-labels")
)
@@ -131,7 +133,7 @@ func initStreamAggrConfigGlobal() {
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_successful{path=%q}`, filePath)).Set(1)
metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_streamaggr_config_reload_success_timestamp_seconds{path=%q}`, filePath)).Set(fasttime.UnixTimestamp())
}
dedupInterval := streamAggrGlobalDedupInterval.Duration()
dedupInterval := *streamAggrGlobalDedupInterval
if dedupInterval > 0 {
deduplicatorGlobal = streamaggr.NewDeduplicator(pushToRemoteStoragesTrackDropped, dedupInterval, *streamAggrGlobalDropInputLabels, "dedup-global")
}
@@ -155,7 +157,11 @@ func (rwctx *remoteWriteCtx) initStreamAggrConfig() {
dedupInterval := streamAggrDedupInterval.GetOptionalArg(idx)
if dedupInterval > 0 {
alias := fmt.Sprintf("dedup-%d", idx+1)
rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, *streamAggrDropInputLabels, alias)
var dropLabels []string
if streamAggrDropInputLabels.GetOptionalArg(idx) != "" {
dropLabels = strings.Split(streamAggrDropInputLabels.GetOptionalArg(idx), "^^")
}
rwctx.deduplicator = streamaggr.NewDeduplicator(rwctx.pushInternalTrackDropped, dedupInterval, dropLabels, alias)
}
}
@@ -196,7 +202,7 @@ func newStreamAggrConfigGlobal() (*streamaggr.Aggregators, error) {
}
opts := &streamaggr.Options{
DedupInterval: streamAggrGlobalDedupInterval.Duration(),
DedupInterval: *streamAggrGlobalDedupInterval,
DropInputLabels: *streamAggrGlobalDropInputLabels,
IgnoreOldSamples: *streamAggrGlobalIgnoreOldSamples,
IgnoreFirstIntervals: *streamAggrGlobalIgnoreFirstIntervals,
@@ -224,9 +230,13 @@ func newStreamAggrConfigPerURL(idx int, pushFunc streamaggr.PushFunc) (*streamag
if *showRemoteWriteURL {
alias = fmt.Sprintf("%d:%s", idx+1, remoteWriteURLs.GetOptionalArg(idx))
}
var dropLabels []string
if streamAggrDropInputLabels.GetOptionalArg(idx) != "" {
dropLabels = strings.Split(streamAggrDropInputLabels.GetOptionalArg(idx), "^^")
}
opts := &streamaggr.Options{
DedupInterval: streamAggrDedupInterval.GetOptionalArg(idx),
DropInputLabels: *streamAggrDropInputLabels,
DropInputLabels: dropLabels,
IgnoreOldSamples: streamAggrIgnoreOldSamples.GetOptionalArg(idx),
IgnoreFirstIntervals: streamAggrIgnoreFirstIntervals.GetOptionalArg(idx),
KeepInput: streamAggrKeepInput.GetOptionalArg(idx),
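
Because each -remoteWrite.streamAggr.dropInputLabels array entry may now carry several labels, entries are split on the '^^' delimiter; a small sketch of the resulting behavior for the example from the flag's help text:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// From -remoteWrite.streamAggr.dropInputLabels='replica^^az,replica':
	// the first -remoteWrite.url drops both "replica" and "az",
	// the second drops only "replica".
	args := []string{"replica^^az", "replica"}
	for i, a := range args {
		fmt.Printf("url %d drops labels %q\n", i+1, strings.Split(a, "^^"))
	}
}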

View File

@@ -51,9 +51,14 @@ Examples:
Usage: `Optional external URL to template in rule's labels or annotations.`,
Required: false,
},
&cli.StringFlag{
Name: "loggerLevel",
Usage: `Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "ERROR").`,
Required: false,
},
},
Action: func(c *cli.Context) error {
if failed := unittest.UnitTest(c.StringSlice("files"), c.Bool("disableAlertgroupLabel"), c.StringSlice("external.label"), c.String("external.url")); failed {
if failed := unittest.UnitTest(c.StringSlice("files"), c.Bool("disableAlertgroupLabel"), c.StringSlice("external.label"), c.String("external.url"), c.String("loggerLevel")); failed {
return fmt.Errorf("unittest failed")
}
return nil

View File

@@ -43,18 +43,33 @@ func httpWrite(address string, r io.Reader) {
// writeInputSeries sends input series to vmstorage and flushes them
func writeInputSeries(input []series, interval *promutils.Duration, startStamp time.Time, dst string) error {
r := testutil.WriteRequest{}
var err error
r.Timeseries, err = parseInputSeries(input, interval, startStamp)
if err != nil {
return err
}
data := testutil.Compress(r)
// write input series to vm
httpWrite(dst, bytes.NewBuffer(data))
vmstorage.Storage.DebugFlush()
return nil
}
func parseInputSeries(input []series, interval *promutils.Duration, startStamp time.Time) ([]testutil.TimeSeries, error) {
var res []testutil.TimeSeries
for _, data := range input {
expr, err := metricsql.Parse(data.Series)
if err != nil {
return fmt.Errorf("failed to parse series %s: %v", data.Series, err)
return res, fmt.Errorf("failed to parse series %s: %v", data.Series, err)
}
promvals, err := parseInputValue(data.Values, true)
if err != nil {
return fmt.Errorf("failed to parse input series value %s: %v", data.Values, err)
return res, fmt.Errorf("failed to parse input series value %s: %v", data.Values, err)
}
metricExpr, ok := expr.(*metricsql.MetricExpr)
if !ok {
return fmt.Errorf("failed to parse series %s to metric expr: %v", data.Series, err)
if !ok || len(metricExpr.LabelFilterss) != 1 {
return res, fmt.Errorf("got invalid input series %s: %v", data.Series, err)
}
samples := make([]testutil.Sample, 0, len(promvals))
ts := startStamp
@@ -71,14 +86,9 @@ func writeInputSeries(input []series, interval *promutils.Duration, startStamp t
for _, filter := range metricExpr.LabelFilterss[0] {
ls = append(ls, testutil.Label{Name: filter.Label, Value: filter.Value})
}
r.Timeseries = append(r.Timeseries, testutil.TimeSeries{Labels: ls, Samples: samples})
res = append(res, testutil.TimeSeries{Labels: ls, Samples: samples})
}
data := testutil.Compress(r)
// write input series to vm
httpWrite(dst, bytes.NewBuffer(data))
vmstorage.Storage.DebugFlush()
return nil
return res, nil
}
// parseInputValue supports input like "1", "1+1x1 _ -4 3+20x1"; see more examples in the tests.
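
As the tests below illustrate, each `a+bxn` term in this notation expands arithmetically into the n+1 samples a, a+b, ..., a+n*b, `_` marks an omitted sample, and `stale` a staleness marker, so "1+1x1 _ -4 stale 3+20x1" yields 1, 2, an omitted point, -4, a stale NaN, 3, and 23.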

View File

@@ -2,8 +2,10 @@ package unittest
import (
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
func TestParseInputValue_Failure(t *testing.T) {
@@ -43,7 +45,7 @@ func TestParseInputValue_Success(t *testing.T) {
if decimal.IsStaleNaN(outputExpected[i].Value) && decimal.IsStaleNaN(output[i].Value) {
continue
}
t.Fatalf("unexpeccted Value field in the output\ngot\n%v\nwant\n%v", output, outputExpected)
t.Fatalf("unexpected Value field in the output\ngot\n%v\nwant\n%v", output, outputExpected)
}
}
}
@@ -64,3 +66,34 @@ func TestParseInputValue_Success(t *testing.T) {
f("1+1x1 _ -4 stale 3+20x1", []sequenceValue{{Value: 1}, {Value: 2}, {Omitted: true}, {Value: -4}, {Value: decimal.StaleNaN}, {Value: 3}, {Value: 23}})
}
func TestParseInputSeries_Success(t *testing.T) {
f := func(input []series) {
t.Helper()
var interval promutils.Duration
_, err := parseInputSeries(input, &interval, time.Now())
if err != nil {
t.Fatalf("expect to see no error: %v", err)
}
}
f([]series{{Series: "test", Values: "1"}})
f([]series{{Series: "test{}", Values: "1"}})
f([]series{{Series: "test{env=\"prod\",job=\"a\" }", Values: "1"}})
f([]series{{Series: "{__name__=\"test\",env=\"prod\",job=\"a\" }", Values: "1"}})
}
func TestParseInputSeries_Fail(t *testing.T) {
f := func(input []series) {
t.Helper()
var interval promutils.Duration
_, err := parseInputSeries(input, &interval, time.Now())
if err == nil {
t.Fatalf("expect to see error: %v", err)
}
}
f([]series{{Series: "", Values: "1"}})
f([]series{{Series: "{}", Values: "1"}})
f([]series{{Series: "{env=\"prod\",job=\"a\" or env=\"dev\",job=\"b\"}", Values: "1"}})
}

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/metricsql"
)
@@ -48,7 +49,7 @@ Outer:
}
var expSamples []parsedSample
for _, s := range mt.ExpSamples {
expLb := datasource.Labels{}
expLb := []prompbmarshal.Label{}
if s.Labels != "" {
metricsqlExpr, err := metricsql.Parse(s.Labels)
if err != nil {
@@ -57,16 +58,18 @@ Outer:
continue Outer
}
metricsqlMetricExpr, ok := metricsqlExpr.(*metricsql.MetricExpr)
if !ok {
if !ok || len(metricsqlMetricExpr.LabelFilterss) > 1 {
checkErrs = append(checkErrs, fmt.Errorf("\n expr: %q, time: %s, err: %v", mt.Expr,
mt.EvalTime.Duration().String(), fmt.Errorf("got unsupported metricsql type")))
mt.EvalTime.Duration().String(), fmt.Errorf("got invalid exp_samples: %q", s.Labels)))
continue Outer
}
for _, l := range metricsqlMetricExpr.LabelFilterss[0] {
expLb = append(expLb, datasource.Label{
Name: l.Label,
Value: l.Value,
})
if len(metricsqlMetricExpr.LabelFilterss) > 0 {
for _, l := range metricsqlMetricExpr.LabelFilterss[0] {
expLb = append(expLb, prompbmarshal.Label{
Name: l.Label,
Value: l.Value,
})
}
}
}
sort.Slice(expLb, func(i, j int) bool {

View File

@@ -0,0 +1,4 @@
rule_files:
- non-existing-file.yaml
tests: []

View File

@@ -5,6 +5,7 @@ import (
"flag"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
@@ -46,17 +47,24 @@ var (
testRemoteWritePath = "http://127.0.0.1" + httpListenAddr
testHealthHTTPPath = "http://127.0.0.1" + httpListenAddr + "/health"
testLogLevel = "ERROR"
disableAlertgroupLabel bool
)
const (
testStoragePath = "vmalert-unittest"
testLogLevel = "ERROR"
)
// UnitTest runs unittest for files
func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, externalURL string) bool {
if err := templates.Load([]string{}, true); err != nil {
func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, externalURL, logLevel string) bool {
if logLevel != "" {
testLogLevel = logLevel
}
eu, err := url.Parse(externalURL)
if err != nil {
logger.Fatalf("failed to parse external URL: %w", err)
}
if err := templates.Load([]string{}, *eu); err != nil {
logger.Fatalf("failed to load template: %v", err)
}
storagePath = filepath.Join(os.TempDir(), testStoragePath)
@@ -74,8 +82,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
logger.Fatalf("failed to load test files %q: %v", files, err)
}
if len(testfiles) == 0 {
fmt.Println("no test file found")
return false
logger.Fatalf("no test file found")
}
labels := make(map[string]string)
@@ -97,8 +104,8 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
var failed bool
for fileName, file := range testfiles {
if err := ruleUnitTest(fileName, file, labels); err != nil {
fmt.Println(" FAILED")
fmt.Printf("\nfailed to run unit test for file %q: \n%v", fileName, err)
fmt.Println("FAILED")
fmt.Printf("failed to run unit test for file %q: \n%v", fileName, err)
failed = true
} else {
fmt.Println(" SUCCESS")
@@ -109,7 +116,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
}
func ruleUnitTest(filename string, content []byte, externalLabels map[string]string) []error {
fmt.Println("\nUnit Testing: ", filename)
fmt.Println("\n\nUnit Testing: ", filename)
var unitTestInp unitTestFile
if err := yaml.UnmarshalStrict(content, &unitTestInp); err != nil {
return []error{fmt.Errorf("failed to unmarshal file: %w", err)}
@@ -139,6 +146,9 @@ func ruleUnitTest(filename string, content []byte, externalLabels map[string]str
if err != nil {
return []error{fmt.Errorf("failed to parse `rule_files`: %w", err)}
}
if len(testGroups) == 0 {
return []error{fmt.Errorf("found no rule group in %v", unitTestInp.RuleFiles)}
}
var errs []error
for _, t := range unitTestInp.Tests {
@@ -250,7 +260,7 @@ checkCheck:
if readyCheckFunc() {
break checkCheck
}
time.Sleep(3 * time.Second)
time.Sleep(100 * time.Millisecond)
}
}
}
@@ -270,6 +280,9 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
// tear down vmstorage and clean the data dir
defer tearDown()
if tg.Interval == nil {
tg.Interval = promutils.NewDuration(evalInterval)
}
err := writeInputSeries(tg.InputSeries, tg.Interval, testStartTime, testPromWriteHTTPPath)
if err != nil {
return []error{err}

View File

@@ -1,30 +1,21 @@
package unittest
import (
"os"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
)
func TestMain(m *testing.M) {
if err := templates.Load([]string{}, true); err != nil {
os.Exit(1)
}
os.Exit(m.Run())
}
func TestUnitTest_Failure(t *testing.T) {
f := func(files []string) {
t.Helper()
failed := UnitTest(files, false, nil, "")
failed := UnitTest(files, false, nil, "", "")
if !failed {
t.Fatalf("expecting failed test")
}
}
// failing test
f([]string{"./testdata/failed-test-with-missing-rulefile.yaml"})
f([]string{"./testdata/failed-test.yaml"})
}
@@ -32,7 +23,7 @@ func TestUnitTest_Success(t *testing.T) {
f := func(disableGroupLabel bool, files []string, externalLabels []string, externalURL string) {
t.Helper()
failed := UnitTest(files, disableGroupLabel, externalLabels, externalURL)
failed := UnitTest(files, disableGroupLabel, externalLabels, externalURL, "")
if failed {
t.Fatalf("unexpected failed test")
}

View File

@@ -1,19 +1,25 @@
package config
import (
"bytes"
"crypto/md5"
"flag"
"fmt"
"hash/fnv"
"io"
"net/url"
"sort"
"strings"
"gopkg.in/yaml.v2"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/log"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"gopkg.in/yaml.v2"
)
var (
defaultRuleType = flag.String("rule.defaultRuleType", "prometheus", `Default type for rule expressions, can be overridden via "type" parameter on the group level, see https://docs.victoriametrics.com/vmalert/#groups. Supported values: "graphite", "prometheus" and "vlogs".`)
)
// Group contains list of Rules grouped into
@@ -58,11 +64,9 @@ func (g *Group) UnmarshalYAML(unmarshal func(any) error) error {
if err != nil {
return fmt.Errorf("failed to marshal group configuration for checksum: %w", err)
}
// change default value to prometheus datasource.
if g.Type.Get() == "" {
g.Type.Set(NewPrometheusType())
g.Type = NewRawType(*defaultRuleType)
}
h := md5.New()
h.Write(b)
g.Checksum = fmt.Sprintf("%x", h.Sum(nil))
@@ -298,16 +302,30 @@ func parseConfig(data []byte) ([]Group, error) {
if err != nil {
return nil, fmt.Errorf("cannot expand environment vars: %w", err)
}
g := struct {
var result []Group
type cfgFile struct {
Groups []Group `yaml:"groups"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]any `yaml:",inline"`
}{}
err = yaml.Unmarshal(data, &g)
if err != nil {
return nil, err
}
return g.Groups, checkOverflow(g.XXX, "config")
decoder := yaml.NewDecoder(bytes.NewReader(data))
for {
var cf cfgFile
if err = decoder.Decode(&cf); err != nil {
if err == io.EOF { // EOF indicates no more documents to read
break
}
return nil, err
}
if err = checkOverflow(cf.XXX, "config"); err != nil {
return nil, err
}
result = append(result, cf.Groups...)
}
return result, nil
}
func checkOverflow(m map[string]any, ctx string) error {

View File

@@ -9,15 +9,14 @@ import (
"testing"
"time"
"gopkg.in/yaml.v2"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"gopkg.in/yaml.v2"
)
func TestMain(m *testing.M) {
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
os.Exit(1)
}
os.Exit(m.Run())
@@ -40,6 +39,34 @@ groups:
w.Write([]byte(`
groups:
- name: TestGroup
rules:
- record: conns
expr: max(vm_tcplistener_conns)`))
})
mux.HandleFunc("/good-multi-doc", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`
groups:
- name: foo
rules:
- record: conns
expr: max(vm_tcplistener_conns)
---
groups:
- name: bar
rules:
- record: conns
expr: max(vm_tcplistener_conns)`))
})
mux.HandleFunc("/bad-multi-doc", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`
bad_field:
- name: foo
rules:
- record: conns
expr: max(vm_tcplistener_conns)
---
groups:
- name: bar
rules:
- record: conns
expr: max(vm_tcplistener_conns)`))
@@ -48,13 +75,23 @@ groups:
srv := httptest.NewServer(mux)
defer srv.Close()
if _, err := Parse([]string{srv.URL + "/good-alert", srv.URL + "/good-rr"}, notifier.ValidateTemplates, true); err != nil {
t.Fatalf("error parsing URLs %s", err)
f := func(urls []string, expErr bool) {
for i, u := range urls {
urls[i] = srv.URL + u
}
_, err := Parse(urls, notifier.ValidateTemplates, true)
if err != nil && !expErr {
t.Fatalf("error parsing URLs %s", err)
}
if err == nil && expErr {
t.Fatalf("expecting error parsing URLs but got none")
}
}
if _, err := Parse([]string{srv.URL + "/bad"}, notifier.ValidateTemplates, true); err == nil {
t.Fatalf("expected parsing error: %s", err)
}
f([]string{"/good-alert", "/good-rr", "/good-multi-doc"}, false)
f([]string{"/bad"}, true)
f([]string{"/bad-multi-doc"}, true)
f([]string{"/good-alert", "/bad"}, true)
}
func TestParse_Success(t *testing.T) {
@@ -85,7 +122,10 @@ func TestParse_Failure(t *testing.T) {
f([]string{"testdata/dir/rules3-bad.rules"}, "either `record` or `alert` must be set")
f([]string{"testdata/dir/rules4-bad.rules"}, "either `record` or `alert` must be set")
f([]string{"testdata/rules/rules1-bad.rules"}, "bad graphite expr")
f([]string{"testdata/rules/vlog-rules0-bad.rules"}, "bad LogsQL expr")
f([]string{"testdata/dir/rules6-bad.rules"}, "missing ':' in header")
f([]string{"testdata/rules/rules-multi-doc-bad.rules"}, "unknown fields")
f([]string{"testdata/rules/rules-multi-doc-duplicates-bad.rules"}, "duplicate")
f([]string{"http://unreachable-url"}, "failed to")
}
@@ -201,7 +241,7 @@ func TestGroupValidate_Failure(t *testing.T) {
}, false, "duplicate")
f(&Group{
Name: "test graphite prometheus bad expr",
Name: "test graphite with prometheus expr",
Type: NewGraphiteType(),
Rules: []Rule{
{
@@ -228,6 +268,20 @@ func TestGroupValidate_Failure(t *testing.T) {
},
}, false, "either `record` or `alert` must be set")
f(&Group{
Name: "test vlogs with prometheus expr",
Type: NewVLogsType(),
Rules: []Rule{
{
Expr: "sum(up == 0 ) by (host)",
For: promutils.NewDuration(10 * time.Millisecond),
},
{
Expr: "sumSeries(time('foo.bar',10))",
},
},
}, false, "invalid rule")
// validate expressions
f(&Group{
Name: "test",
@@ -258,6 +312,16 @@ func TestGroupValidate_Failure(t *testing.T) {
}},
},
}, true, "bad graphite expr")
f(&Group{
Name: "test vlogs",
Type: NewVLogsType(),
Rules: []Rule{
{Alert: "alert", Expr: "stats count(*) as requests", Labels: map[string]string{
"description": "some-description",
}},
},
}, true, "bad LogsQL expr")
}
func TestGroupValidate_Success(t *testing.T) {
@@ -297,7 +361,7 @@ func TestGroupValidate_Success(t *testing.T) {
},
}, false, false)
// validate annotiations
// validate annotations
f(&Group{
Name: "test",
Rules: []Rule{
@@ -324,6 +388,15 @@ func TestGroupValidate_Success(t *testing.T) {
}},
},
}, false, true)
f(&Group{
Name: "test victorialogs",
Type: NewVLogsType(),
Rules: []Rule{
{Alert: "alert", Expr: " _time: 1m | stats count(*) as requests", Labels: map[string]string{
"description": "{{ value|query }}",
}},
},
}, false, true)
}
func TestHashRule_NotEqual(t *testing.T) {

View File

@@ -7,7 +7,7 @@ groups:
labels:
label: bar
annotations:
summary: "{{ $value }"
summary: "{{ }}"
description: "{{$labels}}"
- alert: UnkownAnnotationsFunction
for: 5m

View File

@@ -0,0 +1,29 @@
groups:
- name: groupTest
rules:
- alert: VMRows
for: 1ms
expr: vm_rows > 0
labels:
label: bar
host: "{{ $labels.instance }}"
annotations:
summary: "{{ $value }}"
invalid-field-1: invalid-value-1
invalid-field-2: invalid-value-2
---
groups:
- name: TestGroup
interval: 2s
concurrency: 2
type: graphite
rules:
- alert: Conns
expr: filterSeries(sumSeries(host.receiver.interface.cons),'last','>', 500)
for: 3m
annotations:
summary: Too high connection number for {{$labels.instance}}
description: "It is {{ $value }} connections for {{$labels.instance}}"
invalid-field-2: invalid-value-2
invalid-field-3: invalid-value-3

View File

@@ -0,0 +1,11 @@
groups:
- name: foo
rules:
- alert: VMRows
expr: vm_rows > 0
---
groups:
- name: foo
rules:
- alert: VMRows
expr: vm_rows > 0

View File

@@ -0,0 +1,15 @@
---
groups:
- name: groupTest
rules:
- alert: VMRows
for: 1ms
expr: vm_rows > 0
labels:
label: bar
host: "{{ $labels.instance }}"
annotations:
summary: "{{ $value }}"
---
groups:

View File

@@ -0,0 +1,46 @@
---
groups:
- name: groupTest
rules:
- alert: VMRows
for: 1ms
expr: vm_rows > 0
labels:
label: bar
host: "{{ $labels.instance }}"
annotations:
summary: "{{ $value }}"
- name: groupTest-2
rules:
- alert: VMRows-2
for: 1ms
expr: vm_rows_2 > 0
labels:
label: bar2
host: "{{ $labels.instance }}"
annotations:
summary: "\n markdown result is : \n---\n # header\n body: \n text \n----\n"
---
groups:
- name: groupTest-3
rules:
- alert: VMRows-3
for: 1ms
expr: vm_rows_3 > 0
labels:
label: bar_3
host: "{{ $labels.instance }}"
annotations:
summary: "{{ $value }}"
- name: groupTest-4
rules:
- alert: VMRows-4
for: 1ms
expr: vm_rows_4 > 0
labels:
label: bar4
host: "{{ $labels.instance }}"
annotations:
summary: "{{ $value }}"
---
groups:

View File

@@ -0,0 +1,10 @@
groups:
- name: InvalidStatsLogsql
type: vlogs
interval: 5m
rules:
- record: MissingFilter
expr: 'stats count(*) as requests'
- record: MissingStatsPipe
expr: 'service: "nginx"'

View File

@@ -0,0 +1,29 @@
groups:
- name: RequestCount
type: vlogs
interval: 5m
rules:
- record: nginxRequestCount
expr: 'env: "test" AND service: "nginx" | stats count(*) as requests'
annotations:
description: "Service nginx on env test accepted {{$labels.requests}} requests in the last 5 minutes"
- record: prodRequestCount
expr: 'env: "prod" | stats by (service) count(*) as requests'
annotations:
description: "Service {{$labels.service}} on env prod accepted {{$labels.requests}} requests in the last 5 minutes"
- name: ServiceLog
type: vlogs
interval: 5m
rules:
- alert: HasErrorLog
expr: 'env: "prod" AND status:~"error|warn" | stats by (service) count(*) as errorLog | filter errorLog:>0'
annotations:
description: "Service {{$labels.service}} generated {{$labels.errorLog}} error logs in the last 5 minutes"
- name: ServiceRequest
type: vlogs
interval: 10m
rules:
- alert: TooManyFailedRequest
expr: '* | extract "ip=<ip> " | extract "status_code=<code>;" | stats by (ip) count() if (code:!~200) as failed, count() as total| math failed / total as failed_percentage| filter failed_percentage :> 0.01 | fields ip,failed_percentage'
annotations:
description: "Connection from address {{$labels.ip}} has {{$value}} failed requests ratio in last 10 minutes"

View File

@@ -5,6 +5,7 @@ import (
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/graphiteql"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/metricsql"
)
@@ -27,6 +28,13 @@ func NewGraphiteType() Type {
}
}
// NewVLogsType returns victorialogs datasource type
func NewVLogsType() Type {
return Type{
Name: "vlogs",
}
}
// NewRawType returns datasource type from raw string
// without validation.
func NewRawType(d string) Type {
@@ -62,6 +70,10 @@ func (t *Type) ValidateExpr(expr string) error {
if _, err := metricsql.Parse(expr); err != nil {
return fmt.Errorf("bad prometheus expr: %q, err: %w", expr, err)
}
case "vlogs":
if _, err := logstorage.ParseStatsQuery(expr, 0); err != nil {
return fmt.Errorf("bad LogsQL expr: %q, err: %w", expr, err)
}
default:
return fmt.Errorf("unknown datasource type=%q", t.Name)
}
@@ -74,13 +86,10 @@ func (t *Type) UnmarshalYAML(unmarshal func(any) error) error {
if err := unmarshal(&s); err != nil {
return err
}
if s == "" {
s = "prometheus"
}
switch s {
case "graphite", "prometheus":
case "graphite", "prometheus", "vlogs":
default:
return fmt.Errorf("unknown datasource type=%q, want %q or %q", s, "prometheus", "graphite")
return fmt.Errorf("unknown datasource type=%q, want prometheus, graphite or vlogs", s)
}
t.Name = s
return nil
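
A minimal sketch of the new vlogs validation path, reusing expressions from the test rules elsewhere in this diff. The import path and the ParseStatsQuery call shape are taken from the diff above; the meaning of the second argument (passed as 0) is not shown here.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func main() {
	// A filter followed by a stats pipe: accepted for vlogs rules.
	if _, err := logstorage.ParseStatsQuery(`env: "prod" | stats by (service) count(*) as requests`, 0); err != nil {
		fmt.Println("unexpected error:", err)
	}
	// A bare filter without a stats pipe: rejected, mirroring the
	// "bad LogsQL expr" failure case in the tests above.
	if _, err := logstorage.ParseStatsQuery(`service: "nginx"`, 0); err != nil {
		fmt.Println("rejected as expected:", err)
	}
}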

View File

@@ -0,0 +1,333 @@
package datasource
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)
type datasourceType string
const (
datasourcePrometheus datasourceType = "prometheus"
datasourceGraphite datasourceType = "graphite"
datasourceVLogs datasourceType = "vlogs"
)
func toDatasourceType(s string) datasourceType {
switch s {
case string(datasourcePrometheus):
return datasourcePrometheus
case string(datasourceGraphite):
return datasourceGraphite
case string(datasourceVLogs):
return datasourceVLogs
default:
logger.Panicf("BUG: unknown datasource type %q", s)
}
return ""
}
// Client is a datasource entity for reading data,
// supported clients are enumerated in datasourceType.
// WARN: when adding a new field, remember to check if Clone() method needs to be updated.
type Client struct {
c *http.Client
authCfg *promauth.Config
datasourceURL string
appendTypePrefix bool
queryStep time.Duration
dataSourceType datasourceType
// ApplyIntervalAsTimeFilter is only valid for vlogs datasource.
// Set to true if there is no [timeFilter](https://docs.victoriametrics.com/victorialogs/logsql/#time-filter) in the rule expression,
// and the evaluation interval should be added as an additional timeFilter when querying.
applyIntervalAsTimeFilter bool
// evaluationInterval helps set the request's `step` param,
// or add a time filter for LogsQL expressions.
evaluationInterval time.Duration
// extraParams contains params to be attached to each HTTP request
extraParams url.Values
// extraHeaders are headers to be attached to each HTTP request
extraHeaders []keyValue
// whether to print additional log messages
// for each sent request
debug bool
}
type keyValue struct {
key string
value string
}
// Clone clones shared http client and other configuration to the new client.
func (c *Client) Clone() *Client {
ns := &Client{
c: c.c,
authCfg: c.authCfg,
datasourceURL: c.datasourceURL,
appendTypePrefix: c.appendTypePrefix,
queryStep: c.queryStep,
dataSourceType: c.dataSourceType,
evaluationInterval: c.evaluationInterval,
// init map so it can be populated below
extraParams: url.Values{},
debug: c.debug,
}
if len(c.extraHeaders) > 0 {
ns.extraHeaders = make([]keyValue, len(c.extraHeaders))
copy(ns.extraHeaders, c.extraHeaders)
}
for k, v := range c.extraParams {
ns.extraParams[k] = v
}
return ns
}
// ApplyParams - changes given querier params.
func (c *Client) ApplyParams(params QuerierParams) *Client {
if params.DataSourceType != "" {
c.dataSourceType = toDatasourceType(params.DataSourceType)
}
c.evaluationInterval = params.EvaluationInterval
c.applyIntervalAsTimeFilter = params.ApplyIntervalAsTimeFilter
if params.QueryParams != nil {
if c.extraParams == nil {
c.extraParams = url.Values{}
}
for k, vl := range params.QueryParams {
// custom query params take priority over default ones
if c.extraParams.Has(k) {
c.extraParams.Del(k)
}
for _, v := range vl {
// use Del/Add instead of .Set(), since GET params
// are allowed to be duplicated
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4908
c.extraParams.Add(k, v)
}
}
}
if params.Headers != nil {
for key, value := range params.Headers {
kv := keyValue{key: key, value: value}
c.extraHeaders = append(c.extraHeaders, kv)
}
}
c.debug = params.Debug
return c
}
// BuildWithParams - implements interface.
func (c *Client) BuildWithParams(params QuerierParams) Querier {
return c.Clone().ApplyParams(params)
}
// NewPrometheusClient returns a new prometheus datasource client.
func NewPrometheusClient(baseURL string, authCfg *promauth.Config, appendTypePrefix bool, c *http.Client) *Client {
return &Client{
c: c,
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(baseURL, "/"),
appendTypePrefix: appendTypePrefix,
queryStep: *queryStep,
dataSourceType: datasourcePrometheus,
extraParams: url.Values{},
}
}
// Query executes the given query and returns parsed response
func (c *Client) Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error) {
req, err := c.newQueryRequest(ctx, query, ts)
if err != nil {
return Result{}, nil, err
}
resp, err := c.do(req)
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) && !netutil.IsTrivialNetworkError(err) {
// Return unexpected error to the caller.
return Result{}, nil, err
}
// Something between the client and the datasource might be closing
// the connection, so we make one more attempt in the hope that the request will succeed.
req, err = c.newQueryRequest(ctx, query, ts)
if err != nil {
return Result{}, nil, fmt.Errorf("second attempt: %w", err)
}
resp, err = c.do(req)
if err != nil {
return Result{}, nil, fmt.Errorf("second attempt: %w", err)
}
}
// Process the received response.
var parseFn func(req *http.Request, resp *http.Response) (Result, error)
switch c.dataSourceType {
case datasourcePrometheus:
parseFn = parsePrometheusResponse
case datasourceGraphite:
parseFn = parseGraphiteResponse
case datasourceVLogs:
parseFn = parseVLogsResponse
default:
logger.Panicf("BUG: unsupported datasource type %q to parse query response", c.dataSourceType)
}
result, err := parseFn(req, resp)
_ = resp.Body.Close()
return result, req, err
}
// QueryRange executes the given query on the given time range.
// For Prometheus type see https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
// Graphite type isn't supported.
func (c *Client) QueryRange(ctx context.Context, query string, start, end time.Time) (res Result, err error) {
if c.dataSourceType == datasourceGraphite {
return res, fmt.Errorf("%q is not supported for QueryRange", c.dataSourceType)
}
// TODO: range queries are currently disabled for LogsQL expressions that contain their own time filter.
if c.dataSourceType == datasourceVLogs && !c.applyIntervalAsTimeFilter {
return res, fmt.Errorf("range query is not supported for LogsQL expression %q because it contains a time filter; remove the time filter from the expression and try again", query)
}
if start.IsZero() {
return res, fmt.Errorf("start param is missing")
}
if end.IsZero() {
return res, fmt.Errorf("end param is missing")
}
req, err := c.newQueryRangeRequest(ctx, query, start, end)
if err != nil {
return res, err
}
resp, err := c.do(req)
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) && !netutil.IsTrivialNetworkError(err) {
// Return unexpected error to the caller.
return res, err
}
// Something between the client and the datasource might be closing
// the connection, so we make one more attempt in the hope that the request will succeed.
req, err = c.newQueryRangeRequest(ctx, query, start, end)
if err != nil {
return res, fmt.Errorf("second attempt: %w", err)
}
resp, err = c.do(req)
if err != nil {
return res, fmt.Errorf("second attempt: %w", err)
}
}
// Process the received response.
var parseFn func(req *http.Request, resp *http.Response) (Result, error)
switch c.dataSourceType {
case datasourcePrometheus:
parseFn = parsePrometheusResponse
case datasourceVLogs:
parseFn = parseVLogsResponse
default:
logger.Panicf("BUG: unsupported datasource type %q to parse query range response", c.dataSourceType)
}
res, err = parseFn(req, resp)
_ = resp.Body.Close()
return res, err
}
func (c *Client) do(req *http.Request) (*http.Response, error) {
ru := req.URL.Redacted()
if *showDatasourceURL {
ru = req.URL.String()
}
if c.debug {
logger.Infof("DEBUG datasource request: executing %s request with params %q", req.Method, ru)
}
resp, err := c.c.Do(req)
if err != nil {
return nil, fmt.Errorf("error getting response from %s: %w", ru, err)
}
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
_ = resp.Body.Close()
return nil, fmt.Errorf("unexpected response code %d for %s. Response body %s", resp.StatusCode, ru, body)
}
return resp, nil
}
func (c *Client) newQueryRangeRequest(ctx context.Context, query string, start, end time.Time) (*http.Request, error) {
req, err := c.newRequest(ctx)
if err != nil {
return nil, fmt.Errorf("cannot create query_range request to datasource %q: %w", c.datasourceURL, err)
}
switch c.dataSourceType {
case datasourcePrometheus:
c.setPrometheusRangeReqParams(req, query, start, end)
case datasourceVLogs:
c.setVLogsRangeReqParams(req, query, start, end)
default:
logger.Panicf("BUG: unsupported datasource type %q to create range query request", c.dataSourceType)
}
return req, nil
}
func (c *Client) newQueryRequest(ctx context.Context, query string, ts time.Time) (*http.Request, error) {
req, err := c.newRequest(ctx)
if err != nil {
return nil, fmt.Errorf("cannot create query request to datasource %q: %w", c.datasourceURL, err)
}
switch c.dataSourceType {
case datasourcePrometheus:
c.setPrometheusInstantReqParams(req, query, ts)
case datasourceGraphite:
c.setGraphiteReqParams(req, query)
case datasourceVLogs:
c.setVLogsInstantReqParams(req, query, ts)
default:
logger.Panicf("BUG: unsupported datasource type %q to create query request", c.dataSourceType)
}
return req, nil
}
func (c *Client) newRequest(ctx context.Context) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.datasourceURL, nil)
if err != nil {
logger.Panicf("BUG: unexpected error from http.NewRequest(%q): %s", c.datasourceURL, err)
}
req.Header.Set("Content-Type", "application/json")
if c.authCfg != nil {
err = c.authCfg.SetHeaders(req, true)
if err != nil {
return nil, err
}
}
for _, h := range c.extraHeaders {
req.Header.Set(h.key, h.value)
}
return req, nil
}
// setReqParams adds query and other extra params for the request.
func (c *Client) setReqParams(r *http.Request, query string) {
q := r.URL.Query()
for k, vs := range c.extraParams {
if q.Has(k) { // extraParams take priority over params already present in the URL
q.Del(k)
}
for _, v := range vs {
q.Add(k, v)
}
}
q.Set("query", query)
r.URL.RawQuery = q.Encode()
}
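
To illustrate the precedence rule above, a hedged, package-internal sketch with hypothetical values: an extra param replaces a same-named param already present in the URL, while multi-value extra params are kept as duplicates.

u, _ := url.Parse("http://vm:8428/api/v1/query?round_digits=2")
r := &http.Request{URL: u}
c := &Client{extraParams: url.Values{"round_digits": {"10"}}}
c.setReqParams(r, "up")
// r.URL.RawQuery is now "query=up&round_digits=10": the extra param
// replaced the one that was already in the URL.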

View File

@@ -46,8 +46,8 @@ const (
graphitePrefix = "/graphite"
)
func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string) {
if s.appendTypePrefix {
func (c *Client) setGraphiteReqParams(r *http.Request, query string) {
if c.appendTypePrefix {
r.URL.Path += graphitePrefix
}
r.URL.Path += graphitePath
@@ -58,7 +58,7 @@ func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string) {
q.Set("target", query)
q.Set("until", "now")
for k, vs := range s.extraParams {
for k, vs := range c.extraParams {
if q.Has(k) { // extraParams take priority over params already present in the URL
q.Del(k)
}

View File

@@ -9,14 +9,16 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/valyala/fastjson"
)
var (
disablePathAppend = flag.Bool("remoteRead.disablePathAppend", false, "Whether to disable automatic appending of '/api/v1/query' path "+
disablePathAppend = flag.Bool("remoteRead.disablePathAppend", false, "Whether to disable automatic appending of '/api/v1/query' or '/select/logsql/stats_query' path "+
"to the configured -datasource.url and -remoteRead.url")
disableStepParam = flag.Bool("datasource.disableStepParam", false, "Whether to disable adding 'step' param to the issued instant queries. "+
disableStepParam = flag.Bool("datasource.disableStepParam", false, "Whether to disable adding the 'step' param to instant queries issued to the configured -datasource.url and -remoteRead.url. "+
"Only valid for prometheus datasource. "+
"This might be useful when using vmalert with datasources that do not support 'step' param for instant queries, like Google Managed Prometheus. "+
"It is not recommended to enable this flag if you use vmalert with VictoriaMetrics.")
)
@@ -81,14 +83,14 @@ func (pi *promInstant) Unmarshal(b []byte) error {
labels := metric.GetObject()
r := &pi.ms[i]
r.Labels = make([]Label, 0, labels.Len())
r.Labels = make([]prompbmarshal.Label, 0, labels.Len())
labels.Visit(func(key []byte, v *fastjson.Value) {
lv, errLocal := v.StringBytes()
if errLocal != nil {
err = fmt.Errorf("error when parsing label value %q: %s", v, errLocal)
return
}
r.Labels = append(r.Labels, Label{
r.Labels = append(r.Labels, prompbmarshal.Label{
Name: string(key),
Value: string(lv),
})
@@ -171,7 +173,7 @@ const (
func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result, err error) {
r := &promResponse{}
if err = json.NewDecoder(resp.Body).Decode(r); err != nil {
return res, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL.Redacted(), err)
return res, fmt.Errorf("error parsing response from %s: %w", req.URL.Redacted(), err)
}
if r.Status == statusError {
return res, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL.Redacted(), r.ErrorType, r.Error)
@@ -218,8 +220,8 @@ func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result
return res, nil
}
func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string, timestamp time.Time) {
if s.appendTypePrefix {
func (c *Client) setPrometheusInstantReqParams(r *http.Request, query string, timestamp time.Time) {
if c.appendTypePrefix {
r.URL.Path += "/prometheus"
}
if !*disablePathAppend {
@@ -227,22 +229,22 @@ func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string,
}
q := r.URL.Query()
q.Set("time", timestamp.Format(time.RFC3339))
if !*disableStepParam && s.evaluationInterval > 0 { // set step as evaluationInterval by default
if !*disableStepParam && c.evaluationInterval > 0 { // set step as evaluationInterval by default
// always convert to seconds to keep compatibility with older
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
q.Set("step", fmt.Sprintf("%ds", int(c.evaluationInterval.Seconds())))
}
if !*disableStepParam && s.queryStep > 0 { // override step with user-specified value
if !*disableStepParam && c.queryStep > 0 { // override step with user-specified value
// always convert to seconds to keep compatibility with older
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
q.Set("step", fmt.Sprintf("%ds", int(s.queryStep.Seconds())))
q.Set("step", fmt.Sprintf("%ds", int(c.queryStep.Seconds())))
}
r.URL.RawQuery = q.Encode()
s.setPrometheusReqParams(r, query)
c.setReqParams(r, query)
}
func (s *VMStorage) setPrometheusRangeReqParams(r *http.Request, query string, start, end time.Time) {
if s.appendTypePrefix {
func (c *Client) setPrometheusRangeReqParams(r *http.Request, query string, start, end time.Time) {
if c.appendTypePrefix {
r.URL.Path += "/prometheus"
}
if !*disablePathAppend {
@@ -251,25 +253,11 @@ func (s *VMStorage) setPrometheusRangeReqParams(r *http.Request, query string, s
q := r.URL.Query()
q.Add("start", start.Format(time.RFC3339))
q.Add("end", end.Format(time.RFC3339))
if s.evaluationInterval > 0 { // set step as evaluationInterval by default
if c.evaluationInterval > 0 { // set step as evaluationInterval by default
// always convert to seconds to keep compatibility with older
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
q.Set("step", fmt.Sprintf("%ds", int(c.evaluationInterval.Seconds())))
}
r.URL.RawQuery = q.Encode()
s.setPrometheusReqParams(r, query)
}
func (s *VMStorage) setPrometheusReqParams(r *http.Request, query string) {
q := r.URL.Query()
for k, vs := range s.extraParams {
if q.Has(k) { // extraParams are prior to params in URL
q.Del(k)
}
for _, v := range vs {
q.Add(k, v)
}
}
q.Set("query", query)
r.URL.RawQuery = q.Encode()
c.setReqParams(r, query)
}

View File

@@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
var (
@@ -24,8 +25,10 @@ var (
Username: basicAuthName,
Password: promauth.NewSecret(basicAuthPass),
}
query = "vm_rows"
queryRender = "constantLine(10)"
vmQuery = "vm_rows"
queryRender = "constantLine(10)"
vlogsQuery = "_time: 5m | stats by (foo) count() total"
vlogsRangeQuery = "* | stats by (foo) count() total"
)
func TestVMInstantQuery(t *testing.T) {
@@ -42,8 +45,8 @@ func TestVMInstantQuery(t *testing.T) {
if name, pass, _ := r.BasicAuth(); name != basicAuthName || pass != basicAuthPass {
t.Fatalf("expected %s:%s as basic auth got %s:%s", basicAuthName, basicAuthPass, name, pass)
}
if r.URL.Query().Get("query") != query {
t.Fatalf("expected %s in query param, got %s", query, r.URL.Query().Get("query"))
if r.URL.Query().Get("query") != vmQuery {
t.Fatalf("expected %s in query param, got %s", vmQuery, r.URL.Query().Get("query"))
}
timeParam := r.URL.Query().Get("time")
if timeParam == "" {
@@ -78,6 +81,31 @@ func TestVMInstantQuery(t *testing.T) {
w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`))
}
})
mux.HandleFunc("/select/logsql/stats_query", func(w http.ResponseWriter, r *http.Request) {
c++
if r.Method != http.MethodPost {
t.Fatalf("expected POST method got %s", r.Method)
}
if name, pass, _ := r.BasicAuth(); name != basicAuthName || pass != basicAuthPass {
t.Fatalf("expected %s:%s as basic auth got %s:%s", basicAuthName, basicAuthPass, name, pass)
}
if r.URL.Query().Get("query") != vlogsQuery {
t.Fatalf("expected %s in query param, got %s", vlogsQuery, r.URL.Query().Get("query"))
}
timeParam := r.URL.Query().Get("time")
if timeParam == "" {
t.Fatalf("expected 'time' in query param, got nil instead")
}
if _, err := time.Parse(time.RFC3339, timeParam); err != nil {
t.Fatalf("failed to parse 'time' query param %q: %s", timeParam, err)
}
switch c {
case 9:
w.Write([]byte("[]"))
case 10:
w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"total","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"total","foo":"baz"},"value":[1583786140,"2000"]}]}}`))
}
})
srv := httptest.NewServer(mux)
defer srv.Close()
@@ -86,13 +114,13 @@ func TestVMInstantQuery(t *testing.T) {
if err != nil {
t.Fatalf("unexpected: %s", err)
}
s := NewVMStorage(srv.URL, authCfg, 0, false, srv.Client())
s := NewPrometheusClient(srv.URL, authCfg, false, srv.Client())
p := datasourcePrometheus
pq := s.BuildWithParams(QuerierParams{DataSourceType: string(p), EvaluationInterval: 15 * time.Second})
ts := time.Now()
expErr := func(err string) {
expErr := func(query, err string) {
_, _, gotErr := pq.Query(ctx, query, ts)
if gotErr == nil {
t.Fatalf("expected %q got nil", err)
@@ -102,13 +130,13 @@ func TestVMInstantQuery(t *testing.T) {
}
}
expErr("500") // 0
expErr("error parsing prometheus metrics") // 1
expErr("response error") // 2
expErr("unknown status") // 3
expErr("unexpected end of JSON input") // 4
expErr(vmQuery, "500") // 0
expErr(vmQuery, "error parsing response") // 1
expErr(vmQuery, "response error") // 2
expErr(vmQuery, "unknown status") // 3
expErr(vmQuery, "unexpected end of JSON input") // 4
res, _, err := pq.Query(ctx, query, ts) // 5 - vector
res, _, err := pq.Query(ctx, vmQuery, ts) // 5 - vector
if err != nil {
t.Fatalf("unexpected %s", err)
}
@@ -117,19 +145,19 @@ func TestVMInstantQuery(t *testing.T) {
}
expected := []Metric{
{
Labels: []Label{{Value: "vm_rows", Name: "__name__"}, {Value: "bar", Name: "foo"}},
Labels: []prompbmarshal.Label{{Value: "vm_rows", Name: "__name__"}, {Value: "bar", Name: "foo"}},
Timestamps: []int64{1583786142},
Values: []float64{13763},
},
{
Labels: []Label{{Value: "vm_requests", Name: "__name__"}, {Value: "baz", Name: "foo"}},
Labels: []prompbmarshal.Label{{Value: "vm_requests", Name: "__name__"}, {Value: "baz", Name: "foo"}},
Timestamps: []int64{1583786140},
Values: []float64{2000},
},
}
metricsEqual(t, res.Data, expected)
res, req, err := pq.Query(ctx, query, ts) // 6 - scalar
res, req, err := pq.Query(ctx, vmQuery, ts) // 6 - scalar
if err != nil {
t.Fatalf("unexpected %s", err)
}
@@ -154,7 +182,7 @@ func TestVMInstantQuery(t *testing.T) {
res.SeriesFetched)
}
res, _, err = pq.Query(ctx, query, ts) // 7 - scalar with stats
res, _, err = pq.Query(ctx, vmQuery, ts) // 7 - scalar with stats
if err != nil {
t.Fatalf("unexpected %s", err)
}
@@ -175,6 +203,7 @@ func TestVMInstantQuery(t *testing.T) {
*res.SeriesFetched)
}
// test graphite
gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})
res, _, err = gq.Query(ctx, queryRender, ts) // 8 - graphite
@@ -186,12 +215,39 @@ func TestVMInstantQuery(t *testing.T) {
}
exp := []Metric{
{
Labels: []Label{{Value: "constantLine(10)", Name: "name"}},
Labels: []prompbmarshal.Label{{Value: "constantLine(10)", Name: "name"}},
Timestamps: []int64{1611758403},
Values: []float64{10},
},
}
metricsEqual(t, res.Data, exp)
// test victorialogs
vlogs := datasourceVLogs
pq = s.BuildWithParams(QuerierParams{DataSourceType: string(vlogs), EvaluationInterval: 15 * time.Second})
expErr(vlogsQuery, "error parsing response") // 9
res, _, err = pq.Query(ctx, vlogsQuery, ts) // 10
if err != nil {
t.Fatalf("unexpected %s", err)
}
if len(res.Data) != 2 {
t.Fatalf("expected 2 metrics got %d in %+v", len(res.Data), res.Data)
}
expected = []Metric{
{
Labels: []prompbmarshal.Label{{Value: "total", Name: "stats_result"}, {Value: "bar", Name: "foo"}},
Timestamps: []int64{1583786142},
Values: []float64{13763},
},
{
Labels: []prompbmarshal.Label{{Value: "total", Name: "stats_result"}, {Value: "baz", Name: "foo"}},
Timestamps: []int64{1583786140},
Values: []float64{2000},
},
}
metricsEqual(t, res.Data, expected)
}
func TestVMInstantQueryWithRetry(t *testing.T) {
@@ -202,8 +258,8 @@ func TestVMInstantQueryWithRetry(t *testing.T) {
c := -1
mux.HandleFunc("/api/v1/query", func(w http.ResponseWriter, r *http.Request) {
c++
if r.URL.Query().Get("query") != query {
t.Fatalf("expected %s in query param, got %s", query, r.URL.Query().Get("query"))
if r.URL.Query().Get("query") != vmQuery {
t.Fatalf("expected %s in query param, got %s", vmQuery, r.URL.Query().Get("query"))
}
switch c {
case 0:
@@ -225,11 +281,11 @@ func TestVMInstantQueryWithRetry(t *testing.T) {
srv := httptest.NewServer(mux)
defer srv.Close()
s := NewVMStorage(srv.URL, nil, 0, false, srv.Client())
s := NewPrometheusClient(srv.URL, nil, false, srv.Client())
pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus)})
expErr := func(err string) {
_, _, gotErr := pq.Query(ctx, query, time.Now())
_, _, gotErr := pq.Query(ctx, vmQuery, time.Now())
if gotErr == nil {
t.Fatalf("expected %q got nil", err)
}
@@ -239,7 +295,7 @@ func TestVMInstantQueryWithRetry(t *testing.T) {
}
expValue := func(v float64) {
res, _, err := pq.Query(ctx, query, time.Now())
res, _, err := pq.Query(ctx, vmQuery, time.Now())
if err != nil {
t.Fatalf("unexpected %s", err)
}
@@ -300,8 +356,8 @@ func TestVMRangeQuery(t *testing.T) {
if name, pass, _ := r.BasicAuth(); name != basicAuthName || pass != basicAuthPass {
t.Fatalf("expected %s:%s as basic auth got %s:%s", basicAuthName, basicAuthPass, name, pass)
}
if r.URL.Query().Get("query") != query {
t.Fatalf("expected %s in query param, got %s", query, r.URL.Query().Get("query"))
if r.URL.Query().Get("query") != vmQuery {
t.Fatalf("expected %s in query param, got %s", vmQuery, r.URL.Query().Get("query"))
}
startTS := r.URL.Query().Get("start")
if startTS == "" {
@@ -326,6 +382,40 @@ func TestVMRangeQuery(t *testing.T) {
w.Write([]byte(`{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"vm_rows"},"values":[[1583786142,"13763"]]}]}}`))
}
})
mux.HandleFunc("/select/logsql/stats_query_range", func(w http.ResponseWriter, r *http.Request) {
c++
if r.Method != http.MethodPost {
t.Fatalf("expected POST method got %s", r.Method)
}
if name, pass, _ := r.BasicAuth(); name != basicAuthName || pass != basicAuthPass {
t.Fatalf("expected %s:%s as basic auth got %s:%s", basicAuthName, basicAuthPass, name, pass)
}
if r.URL.Query().Get("query") != vlogsRangeQuery {
t.Fatalf("expected %s in query param, got %s", vmQuery, r.URL.Query().Get("query"))
}
startTS := r.URL.Query().Get("start")
if startTS == "" {
t.Fatalf("expected 'start' in query param, got nil instead")
}
if _, err := time.Parse(time.RFC3339, startTS); err != nil {
t.Fatalf("failed to parse 'start' query param: %s", err)
}
endTS := r.URL.Query().Get("end")
if endTS == "" {
t.Fatalf("expected 'end' in query param, got nil instead")
}
if _, err := time.Parse(time.RFC3339, endTS); err != nil {
t.Fatalf("failed to parse 'end' query param: %s", err)
}
step := r.URL.Query().Get("step")
if step != "60s" {
t.Fatalf("expected 'step' query param to be 60s; got %q instead", step)
}
switch c {
case 1:
w.Write([]byte(`{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"total"},"values":[[1583786142,"10"]]}]}}`))
}
})
srv := httptest.NewServer(mux)
defer srv.Close()
@@ -334,19 +424,19 @@ func TestVMRangeQuery(t *testing.T) {
if err != nil {
t.Fatalf("unexpected: %s", err)
}
s := NewVMStorage(srv.URL, authCfg, *queryStep, false, srv.Client())
s := NewPrometheusClient(srv.URL, authCfg, false, srv.Client())
pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus), EvaluationInterval: 15 * time.Second})
_, err = pq.QueryRange(ctx, query, time.Now(), time.Time{})
_, err = pq.QueryRange(ctx, vmQuery, time.Now(), time.Time{})
expectError(t, err, "is missing")
_, err = pq.QueryRange(ctx, query, time.Time{}, time.Now())
_, err = pq.QueryRange(ctx, vmQuery, time.Time{}, time.Now())
expectError(t, err, "is missing")
start, end := time.Now().Add(-time.Minute), time.Now()
res, err := pq.QueryRange(ctx, query, start, end)
res, err := pq.QueryRange(ctx, vmQuery, start, end)
if err != nil {
t.Fatalf("unexpected %s", err)
}
@@ -355,7 +445,7 @@ func TestVMRangeQuery(t *testing.T) {
t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
}
expected := Metric{
Labels: []Label{{Value: "vm_rows", Name: "__name__"}},
Labels: []prompbmarshal.Label{{Value: "vm_rows", Name: "__name__"}},
Timestamps: []int64{1583786142},
Values: []float64{13763},
}
@@ -363,33 +453,66 @@ func TestVMRangeQuery(t *testing.T) {
t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
}
// test unsupported graphite
gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})
_, err = gq.QueryRange(ctx, queryRender, start, end)
expectError(t, err, "is not supported")
// unsupported logsql
gq = s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceVLogs), EvaluationInterval: 60 * time.Second})
res, err = gq.QueryRange(ctx, vlogsRangeQuery, start, end)
expectError(t, err, "is not supported")
// supported logsql
gq = s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceVLogs), EvaluationInterval: 60 * time.Second, ApplyIntervalAsTimeFilter: true})
res, err = gq.QueryRange(ctx, vlogsRangeQuery, start, end)
if err != nil {
t.Fatalf("unexpected %s", err)
}
m = res.Data
if len(m) != 1 {
t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
}
expected = Metric{
Labels: []prompbmarshal.Label{{Value: "total", Name: "stats_result"}},
Timestamps: []int64{1583786142},
Values: []float64{10},
}
if !reflect.DeepEqual(m[0], expected) {
t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
}
}
func TestRequestParams(t *testing.T) {
query := "up"
vlogsQuery := "_time: 5m | stats count() total"
timestamp := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
f := func(isQueryRange bool, vm *VMStorage, checkFn func(t *testing.T, r *http.Request)) {
f := func(isQueryRange bool, c *Client, checkFn func(t *testing.T, r *http.Request)) {
t.Helper()
req, err := vm.newRequest(ctx)
req, err := c.newRequest(ctx)
if err != nil {
t.Fatalf("error in newRequest: %s", err)
}
switch vm.dataSourceType {
case "", datasourcePrometheus:
switch c.dataSourceType {
case datasourcePrometheus:
if isQueryRange {
vm.setPrometheusRangeReqParams(req, query, timestamp, timestamp)
c.setPrometheusRangeReqParams(req, query, timestamp, timestamp)
} else {
vm.setPrometheusInstantReqParams(req, query, timestamp)
c.setPrometheusInstantReqParams(req, query, timestamp)
}
case datasourceGraphite:
vm.setGraphiteReqParams(req, query)
c.setGraphiteReqParams(req, query)
case datasourceVLogs:
if isQueryRange {
c.setVLogsRangeReqParams(req, vlogsQuery, timestamp, timestamp)
} else {
c.setVLogsInstantReqParams(req, vlogsQuery, timestamp)
}
}
checkFn(t, req)
@@ -399,19 +522,19 @@ func TestRequestParams(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
storage := VMStorage{
storage := Client{
extraParams: url.Values{"round_digits": {"10"}},
}
// prometheus path
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourcePrometheus,
}, func(t *testing.T, r *http.Request) {
checkEqualString(t, "/api/v1/query", r.URL.Path)
})
// prometheus prefix
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourcePrometheus,
appendTypePrefix: true,
}, func(t *testing.T, r *http.Request) {
@@ -419,14 +542,14 @@ func TestRequestParams(t *testing.T) {
})
// prometheus range path
f(true, &VMStorage{
f(true, &Client{
dataSourceType: datasourcePrometheus,
}, func(t *testing.T, r *http.Request) {
checkEqualString(t, "/api/v1/query_range", r.URL.Path)
})
// prometheus range prefix
f(true, &VMStorage{
f(true, &Client{
dataSourceType: datasourcePrometheus,
appendTypePrefix: true,
}, func(t *testing.T, r *http.Request) {
@@ -434,14 +557,14 @@ func TestRequestParams(t *testing.T) {
})
// graphite path
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourceGraphite,
}, func(t *testing.T, r *http.Request) {
checkEqualString(t, graphitePath, r.URL.Path)
})
// graphite prefix
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourceGraphite,
appendTypePrefix: true,
}, func(t *testing.T, r *http.Request) {
@@ -449,21 +572,27 @@ func TestRequestParams(t *testing.T) {
})
// default params
f(false, &VMStorage{}, func(t *testing.T, r *http.Request) {
f(false, &Client{dataSourceType: datasourcePrometheus}, func(t *testing.T, r *http.Request) {
exp := url.Values{"query": {query}, "time": {timestamp.Format(time.RFC3339)}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
f(false, &Client{dataSourceType: datasourcePrometheus, applyIntervalAsTimeFilter: true}, func(t *testing.T, r *http.Request) {
exp := url.Values{"query": {query}, "time": {timestamp.Format(time.RFC3339)}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
// default range params
f(true, &VMStorage{}, func(t *testing.T, r *http.Request) {
f(true, &Client{dataSourceType: datasourcePrometheus}, func(t *testing.T, r *http.Request) {
ts := timestamp.Format(time.RFC3339)
exp := url.Values{"query": {query}, "start": {ts}, "end": {ts}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
// basic auth
f(false, &VMStorage{
authCfg: authCfg,
f(false, &Client{
dataSourceType: datasourcePrometheus,
authCfg: authCfg,
}, func(t *testing.T, r *http.Request) {
u, p, _ := r.BasicAuth()
checkEqualString(t, "foo", u)
@@ -471,8 +600,9 @@ func TestRequestParams(t *testing.T) {
})
// basic auth range
f(true, &VMStorage{
authCfg: authCfg,
f(true, &Client{
dataSourceType: datasourcePrometheus,
authCfg: authCfg,
}, func(t *testing.T, r *http.Request) {
u, p, _ := r.BasicAuth()
checkEqualString(t, "foo", u)
@@ -480,7 +610,8 @@ func TestRequestParams(t *testing.T) {
})
// evaluation interval
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourcePrometheus,
evaluationInterval: 15 * time.Second,
}, func(t *testing.T, r *http.Request) {
evalInterval := 15 * time.Second
@@ -489,8 +620,9 @@ func TestRequestParams(t *testing.T) {
})
// step override
f(false, &VMStorage{
queryStep: time.Minute,
f(false, &Client{
dataSourceType: datasourcePrometheus,
queryStep: time.Minute,
}, func(t *testing.T, r *http.Request) {
exp := url.Values{
"query": {query},
@@ -501,7 +633,8 @@ func TestRequestParams(t *testing.T) {
})
// step to seconds
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourcePrometheus,
evaluationInterval: 3 * time.Hour,
}, func(t *testing.T, r *http.Request) {
evalInterval := 3 * time.Hour
@@ -510,15 +643,17 @@ func TestRequestParams(t *testing.T) {
})
// prometheus extra params
f(false, &VMStorage{
extraParams: url.Values{"round_digits": {"10"}},
f(false, &Client{
dataSourceType: datasourcePrometheus,
extraParams: url.Values{"round_digits": {"10"}},
}, func(t *testing.T, r *http.Request) {
exp := url.Values{"query": {query}, "round_digits": {"10"}, "time": {timestamp.Format(time.RFC3339)}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
// prometheus extra params range
f(true, &VMStorage{
f(true, &Client{
dataSourceType: datasourcePrometheus,
extraParams: url.Values{
"nocache": {"1"},
"max_lookback": {"1h"},
@@ -536,7 +671,8 @@ func TestRequestParams(t *testing.T) {
// custom params overrides the original params
f(false, storage.Clone().ApplyParams(QuerierParams{
QueryParams: url.Values{"round_digits": {"2"}},
DataSourceType: string(datasourcePrometheus),
QueryParams: url.Values{"round_digits": {"2"}},
}), func(t *testing.T, r *http.Request) {
exp := url.Values{"query": {query}, "round_digits": {"2"}, "time": {timestamp.Format(time.RFC3339)}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
@@ -544,14 +680,15 @@ func TestRequestParams(t *testing.T) {
// allow duplicates in query params
f(false, storage.Clone().ApplyParams(QuerierParams{
QueryParams: url.Values{"extra_labels": {"env=dev", "foo=bar"}},
DataSourceType: string(datasourcePrometheus),
QueryParams: url.Values{"extra_labels": {"env=dev", "foo=bar"}},
}), func(t *testing.T, r *http.Request) {
exp := url.Values{"query": {query}, "round_digits": {"10"}, "extra_labels": {"env=dev", "foo=bar"}, "time": {timestamp.Format(time.RFC3339)}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
// graphite extra params
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourceGraphite,
extraParams: url.Values{
"nocache": {"1"},
@@ -563,7 +700,7 @@ func TestRequestParams(t *testing.T) {
})
// graphite extra params allows to override from
f(false, &VMStorage{
f(false, &Client{
dataSourceType: datasourceGraphite,
extraParams: url.Values{
"from": {"-10m"},
@@ -572,10 +709,38 @@ func TestRequestParams(t *testing.T) {
exp := fmt.Sprintf("format=json&from=-10m&target=%s&until=now", query)
checkEqualString(t, exp, r.URL.RawQuery)
})
// test vlogs
f(false, &Client{
dataSourceType: datasourceVLogs,
evaluationInterval: time.Minute,
}, func(t *testing.T, r *http.Request) {
exp := url.Values{"query": {vlogsQuery}, "time": {timestamp.Format(time.RFC3339)}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
f(false, &Client{
dataSourceType: datasourceVLogs,
evaluationInterval: time.Minute,
applyIntervalAsTimeFilter: true,
}, func(t *testing.T, r *http.Request) {
ts := timestamp.Format(time.RFC3339)
exp := url.Values{"query": {vlogsQuery}, "time": {ts}, "start": {timestamp.Add(-time.Minute).Format(time.RFC3339)}, "end": {ts}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
f(true, &Client{
dataSourceType: datasourceVLogs,
evaluationInterval: time.Minute,
}, func(t *testing.T, r *http.Request) {
ts := timestamp.Format(time.RFC3339)
exp := url.Values{"query": {vlogsQuery}, "start": {ts}, "end": {ts}, "step": {"60s"}}
checkEqualString(t, exp.Encode(), r.URL.RawQuery)
})
}
func TestHeaders(t *testing.T) {
f := func(vmFn func() *VMStorage, checkFn func(t *testing.T, r *http.Request)) {
f := func(vmFn func() *Client, checkFn func(t *testing.T, r *http.Request)) {
t.Helper()
vm := vmFn()
@@ -587,12 +752,12 @@ func TestHeaders(t *testing.T) {
}
// basic auth
f(func() *VMStorage {
f(func() *Client {
cfg, err := utils.AuthConfig(utils.WithBasicAuth("foo", "bar", ""))
if err != nil {
t.Fatalf("Error get auth config: %s", err)
}
return &VMStorage{authCfg: cfg}
return NewPrometheusClient("", cfg, false, nil)
}, func(t *testing.T, r *http.Request) {
u, p, _ := r.BasicAuth()
checkEqualString(t, "foo", u)
@@ -600,12 +765,12 @@ func TestHeaders(t *testing.T) {
})
// bearer auth
f(func() *VMStorage {
f(func() *Client {
cfg, err := utils.AuthConfig(utils.WithBearer("foo", ""))
if err != nil {
t.Fatalf("Error get auth config: %s", err)
}
return &VMStorage{authCfg: cfg}
return NewPrometheusClient("", cfg, false, nil)
}, func(t *testing.T, r *http.Request) {
reqToken := r.Header.Get("Authorization")
splitToken := strings.Split(reqToken, "Bearer ")
@@ -617,11 +782,13 @@ func TestHeaders(t *testing.T) {
})
// custom extraHeaders
f(func() *VMStorage {
return &VMStorage{extraHeaders: []keyValue{
f(func() *Client {
c := NewPrometheusClient("", nil, false, nil)
c.extraHeaders = []keyValue{
{key: "Foo", value: "bar"},
{key: "Baz", value: "qux"},
}}
}
return c
}, func(t *testing.T, r *http.Request) {
h1 := r.Header.Get("Foo")
checkEqualString(t, "bar", h1)
@@ -630,17 +797,16 @@ func TestHeaders(t *testing.T) {
})
// custom header overrides basic auth
f(func() *VMStorage {
f(func() *Client {
cfg, err := utils.AuthConfig(utils.WithBasicAuth("foo", "bar", ""))
if err != nil {
t.Fatalf("Error get auth config: %s", err)
}
return &VMStorage{
authCfg: cfg,
extraHeaders: []keyValue{
{key: "Authorization", value: "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="},
},
c := NewPrometheusClient("", cfg, false, nil)
c.extraHeaders = []keyValue{
{key: "Authorization", value: "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="},
}
return c
}, func(t *testing.T, r *http.Request) {
u, p, _ := r.BasicAuth()
checkEqualString(t, "Aladdin", u)

View File

@@ -0,0 +1,61 @@
package datasource
import (
"fmt"
"net/http"
"time"
)
func (c *Client) setVLogsInstantReqParams(r *http.Request, query string, timestamp time.Time) {
// there is no type path prefix in VictoriaLogs APIs right now, so appendTypePrefix is ignored.
if !*disablePathAppend {
r.URL.Path += "/select/logsql/stats_query"
}
q := r.URL.Query()
// set the `time` param explicitly; it will be used as the timestamp of the query results.
q.Set("time", timestamp.Format(time.RFC3339))
// set the `start` and `end` params if applyIntervalAsTimeFilter is enabled (i.e. the time filter is missing in the rule expr),
// so the query will be executed over the time range [timestamp - evaluationInterval, timestamp].
if c.applyIntervalAsTimeFilter && c.evaluationInterval > 0 {
q.Set("start", timestamp.Add(-c.evaluationInterval).Format(time.RFC3339))
q.Set("end", timestamp.Format(time.RFC3339))
}
r.URL.RawQuery = q.Encode()
c.setReqParams(r, query)
}
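
A hedged sketch (hypothetical URL and query, package-internal) of the request this produces for a rule expression without a time filter, given a one-minute evaluation interval:

c := &Client{
	datasourceURL:             "http://vlogs:9428",
	dataSourceType:            datasourceVLogs,
	evaluationInterval:        time.Minute,
	applyIntervalAsTimeFilter: true,
}
req, _ := c.newRequest(context.Background())
c.setVLogsInstantReqParams(req, "* | stats count() total", time.Now())
// req.URL.Path ends with /select/logsql/stats_query, and the query string
// carries time, start=time-1m and end=time, as TestRequestParams above expects.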
func (c *Client) setVLogsRangeReqParams(r *http.Request, query string, start, end time.Time) {
// there is no type path prefix in VictoriaLogs APIs right now, so appendTypePrefix is ignored.
if !*disablePathAppend {
r.URL.Path += "/select/logsql/stats_query_range"
}
q := r.URL.Query()
q.Add("start", start.Format(time.RFC3339))
q.Add("end", end.Format(time.RFC3339))
// set step as evaluationInterval by default
if c.evaluationInterval > 0 {
q.Set("step", fmt.Sprintf("%ds", int(c.evaluationInterval.Seconds())))
}
r.URL.RawQuery = q.Encode()
c.setReqParams(r, query)
}
func parseVLogsResponse(req *http.Request, resp *http.Response) (res Result, err error) {
res, err = parsePrometheusResponse(req, resp)
if err != nil {
return Result{}, err
}
for i := range res.Data {
m := &res.Data[i]
for j := range m.Labels {
// preserve the stats func result name under a new label `stats_result` instead of dropping it,
// since there could be multiple stats results in a single query, for instance:
// _time:5m | stats quantile(0.5, request_duration_seconds) p50, quantile(0.9, request_duration_seconds) p90
if m.Labels[j].Name == "__name__" {
m.Labels[j].Name = "stats_result"
break
}
}
}
return
}
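
For illustration, a hedged sketch that feeds parseVLogsResponse a canned stats_query response (the values and URL are hypothetical):

body := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"p50","path":"/api"},"value":[1700000000,"0.25"]}]}}`
req, _ := http.NewRequest(http.MethodPost, "http://vlogs:9428/select/logsql/stats_query", nil)
res, _ := parseVLogsResponse(req, &http.Response{Body: io.NopCloser(strings.NewReader(body))})
// res.Data[0].Labels now holds {stats_result="p50", path="/api"}: the stats
// result name survives as `stats_result` instead of vanishing with __name__.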

View File

@@ -8,6 +8,8 @@ import (
"sort"
"strconv"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
// Querier interface wraps Query and QueryRange methods
@@ -42,16 +44,20 @@ type QuerierBuilder interface {
// QuerierParams params for Querier.
type QuerierParams struct {
DataSourceType string
EvaluationInterval time.Duration
QueryParams url.Values
Headers map[string]string
Debug bool
DataSourceType string
// ApplyIntervalAsTimeFilter is only valid for vlogs datasource.
// Set to true if there is no [time filter](https://docs.victoriametrics.com/victorialogs/logsql/#time-filter) in the rule expression;
// the evaluation interval will then be added as an additional time filter when querying.
ApplyIntervalAsTimeFilter bool
EvaluationInterval time.Duration
QueryParams url.Values
Headers map[string]string
Debug bool
}
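
For example (a hedged sketch, with `c` a *Client from this package), a VictoriaLogs querier for a rule whose expression carries no time filter could be built as:

q := c.BuildWithParams(QuerierParams{
	DataSourceType:            string(datasourceVLogs),
	EvaluationInterval:        time.Minute,
	ApplyIntervalAsTimeFilter: true,
})
// the 1m interval then becomes the start/end time filter on every query.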
// Metric is the basic entity which should be return by datasource
type Metric struct {
Labels []Label
Labels []prompbmarshal.Label
Timestamps []int64
Values []float64
}
@@ -68,22 +74,9 @@ func (m *Metric) SetLabel(key, value string) {
m.AddLabel(key, value)
}
// SetLabels sets the given map as Metric labels
func (m *Metric) SetLabels(ls map[string]string) {
var i int
m.Labels = make([]Label, len(ls))
for k, v := range ls {
m.Labels[i] = Label{
Name: k,
Value: v,
}
i++
}
}
// AddLabel appends the given label to the label set
func (m *Metric) AddLabel(key, value string) {
m.Labels = append(m.Labels, Label{Name: key, Value: value})
m.Labels = append(m.Labels, prompbmarshal.Label{Name: key, Value: value})
}
// DelLabel deletes the given label from the label set
@@ -106,14 +99,8 @@ func (m *Metric) Label(key string) string {
return ""
}
// Label represents metric's label
type Label struct {
Name string
Value string
}
// Labels is collection of Label
type Labels []Label
type Labels []prompbmarshal.Label
func (ls Labels) Len() int { return len(ls) }
func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
@@ -168,7 +155,7 @@ func LabelCompare(a, b Labels) int {
// ConvertToLabels convert map to Labels
func ConvertToLabels(m map[string]string) (labelset Labels) {
for k, v := range m {
labelset = append(labelset, Label{
labelset = append(labelset, prompbmarshal.Label{
Name: k,
Value: v,
})

View File

@@ -11,7 +11,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)
@@ -48,22 +47,15 @@ var (
oauth2TokenURL = flag.String("datasource.oauth2.tokenUrl", "", "Optional OAuth2 tokenURL to use for -datasource.url")
oauth2Scopes = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")
lookBack = flag.Duration("datasource.lookback", 0, `Deprecated: please adjust "-search.latencyOffset" at datasource side `+
`or specify "latency_offset" in rule group's params. Lookback defines how far into the past to look when evaluating queries. `+
`For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries. "+
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fall back to when evaluating queries to the configured -datasource.url and -remoteRead.url. Only valid for prometheus datasource. "+
"For example, if -datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query. "+
"If set to 0, rule's evaluation interval will be used instead.")
queryTimeAlignment = flag.Bool("datasource.queryTimeAlignment", true, `Deprecated: please use "eval_alignment" in rule group instead. `+
`Whether to align "time" parameter with evaluation interval. `+
"Alignment supposed to produce deterministic results despite number of vmalert replicas or time they were started. "+
"See more details at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
idleConnectionTimeout = flag.Duration("datasource.idleConnTimeout", 50*time.Second, `Defines a duration for idle (keep-alive connections) to exist. Consider setting this value less than "-http.idleConnTimeout". This helps prevent possible "write: broken pipe" and "read: connection reset by peer" errors.`)
disableKeepAlive = flag.Bool("datasource.disableKeepAlive", false, `Whether to disable long-lived connections to the datasource. `+
`If true, disables HTTP keep-alive and will only use the connection to the server for a single HTTP request.`)
roundDigits = flag.Int("datasource.roundDigits", 0, `Adds "round_digits" GET param to datasource requests. `+
`In VM "round_digits" limits the number of digits after the decimal point in response values.`)
roundDigits = flag.Int("datasource.roundDigits", 0, `Adds "round_digits" GET param to datasource requests which limits the number of digits after the decimal point in response values. `+
`Only valid for VictoriaMetrics as the datasource.`)
)
// InitSecretFlags must be called after flag.Parse and before any logging
@@ -90,12 +82,6 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
if *addr == "" {
return nil, fmt.Errorf("datasource.url is empty")
}
if !*queryTimeAlignment {
logger.Warnf("flag `-datasource.queryTimeAlignment` is deprecated and will be removed in next releases. Please use `eval_alignment` in rule group instead.")
}
if *lookBack != 0 {
logger.Warnf("flag `-datasource.lookback` is deprecated and will be removed in next releases. Please adjust `-search.latencyOffset` at datasource side or specify `latency_offset` in rule group's params. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
}
tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
@@ -133,13 +119,12 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
return nil, fmt.Errorf("failed to set request auth header to datasource %q: %w", *addr, err)
}
return &VMStorage{
return &Client{
c: &http.Client{Transport: tr},
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(*addr, "/"),
appendTypePrefix: *appendTypePrefix,
queryStep: *queryStep,
dataSourceType: datasourcePrometheus,
extraParams: extraParams,
}, nil
}

View File

@@ -1,272 +0,0 @@
package datasource
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)
type datasourceType string
const (
datasourcePrometheus datasourceType = "prometheus"
datasourceGraphite datasourceType = "graphite"
)
func toDatasourceType(s string) datasourceType {
if s == string(datasourceGraphite) {
return datasourceGraphite
}
return datasourcePrometheus
}
// VMStorage represents vmstorage entity with ability to read and write metrics
// WARN: when adding a new field, remember to update Clone() method.
type VMStorage struct {
c *http.Client
authCfg *promauth.Config
datasourceURL string
appendTypePrefix bool
queryStep time.Duration
dataSourceType datasourceType
// evaluationInterval will help setting request's `step` param.
evaluationInterval time.Duration
// extraParams contains params to be attached to each HTTP request
extraParams url.Values
// extraHeaders are headers to be attached to each HTTP request
extraHeaders []keyValue
// whether to print additional log messages
// for each sent request
debug bool
}
type keyValue struct {
key string
value string
}
// Clone makes clone of VMStorage, shares http client.
func (s *VMStorage) Clone() *VMStorage {
ns := &VMStorage{
c: s.c,
authCfg: s.authCfg,
datasourceURL: s.datasourceURL,
appendTypePrefix: s.appendTypePrefix,
queryStep: s.queryStep,
dataSourceType: s.dataSourceType,
evaluationInterval: s.evaluationInterval,
// init map so it can be populated below
extraParams: url.Values{},
debug: s.debug,
}
if len(s.extraHeaders) > 0 {
ns.extraHeaders = make([]keyValue, len(s.extraHeaders))
copy(ns.extraHeaders, s.extraHeaders)
}
for k, v := range s.extraParams {
ns.extraParams[k] = v
}
return ns
}
// ApplyParams - changes given querier params.
func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
s.dataSourceType = toDatasourceType(params.DataSourceType)
s.evaluationInterval = params.EvaluationInterval
if params.QueryParams != nil {
if s.extraParams == nil {
s.extraParams = url.Values{}
}
for k, vl := range params.QueryParams {
// custom query params are prior to default ones
if s.extraParams.Has(k) {
s.extraParams.Del(k)
}
for _, v := range vl {
// don't use .Set() instead of Del/Add since it is allowed
// for GET params to be duplicated
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4908
s.extraParams.Add(k, v)
}
}
}
if params.Headers != nil {
for key, value := range params.Headers {
kv := keyValue{key: key, value: value}
s.extraHeaders = append(s.extraHeaders, kv)
}
}
s.debug = params.Debug
return s
}
// BuildWithParams - implements interface.
func (s *VMStorage) BuildWithParams(params QuerierParams) Querier {
return s.Clone().ApplyParams(params)
}
// NewVMStorage is a constructor for VMStorage
func NewVMStorage(baseURL string, authCfg *promauth.Config, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
return &VMStorage{
c: c,
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(baseURL, "/"),
appendTypePrefix: appendTypePrefix,
queryStep: queryStep,
dataSourceType: datasourcePrometheus,
extraParams: url.Values{},
}
}
// Query executes the given query and returns parsed response
func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error) {
req, err := s.newQueryRequest(ctx, query, ts)
if err != nil {
return Result{}, nil, err
}
resp, err := s.do(req)
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) && !netutil.IsTrivialNetworkError(err) {
// Return unexpected error to the caller.
return Result{}, nil, err
}
// Something in the middle between client and datasource might be closing
// the connection. So we do a one more attempt in hope request will succeed.
req, err = s.newQueryRequest(ctx, query, ts)
if err != nil {
return Result{}, nil, fmt.Errorf("second attempt: %w", err)
}
resp, err = s.do(req)
if err != nil {
return Result{}, nil, fmt.Errorf("second attempt: %w", err)
}
}
// Process the received response.
parseFn := parsePrometheusResponse
if s.dataSourceType != datasourcePrometheus {
parseFn = parseGraphiteResponse
}
result, err := parseFn(req, resp)
_ = resp.Body.Close()
return result, req, err
}
// QueryRange executes the given query on the given time range.
// For Prometheus type see https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
// Graphite type isn't supported.
func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end time.Time) (res Result, err error) {
if s.dataSourceType != datasourcePrometheus {
return res, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType)
}
if start.IsZero() {
return res, fmt.Errorf("start param is missing")
}
if end.IsZero() {
return res, fmt.Errorf("end param is missing")
}
req, err := s.newQueryRangeRequest(ctx, query, start, end)
if err != nil {
return res, err
}
resp, err := s.do(req)
if err != nil {
if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) && !netutil.IsTrivialNetworkError(err) {
// Return unexpected error to the caller.
return res, err
}
// Something in the middle between client and datasource might be closing
// the connection. So we do a one more attempt in hope request will succeed.
req, err = s.newQueryRangeRequest(ctx, query, start, end)
if err != nil {
return res, fmt.Errorf("second attempt: %w", err)
}
resp, err = s.do(req)
if err != nil {
return res, fmt.Errorf("second attempt: %w", err)
}
}
// Process the received response.
res, err = parsePrometheusResponse(req, resp)
_ = resp.Body.Close()
return res, err
}
func (s *VMStorage) do(req *http.Request) (*http.Response, error) {
ru := req.URL.Redacted()
if *showDatasourceURL {
ru = req.URL.String()
}
if s.debug {
logger.Infof("DEBUG datasource request: executing %s request with params %q", req.Method, ru)
}
resp, err := s.c.Do(req)
if err != nil {
return nil, fmt.Errorf("error getting response from %s: %w", ru, err)
}
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
_ = resp.Body.Close()
return nil, fmt.Errorf("unexpected response code %d for %s. Response body %s", resp.StatusCode, ru, body)
}
return resp, nil
}
func (s *VMStorage) newQueryRangeRequest(ctx context.Context, query string, start, end time.Time) (*http.Request, error) {
req, err := s.newRequest(ctx)
if err != nil {
return nil, fmt.Errorf("cannot create query_range request to datasource %q: %w", s.datasourceURL, err)
}
s.setPrometheusRangeReqParams(req, query, start, end)
return req, nil
}
func (s *VMStorage) newQueryRequest(ctx context.Context, query string, ts time.Time) (*http.Request, error) {
req, err := s.newRequest(ctx)
if err != nil {
return nil, fmt.Errorf("cannot create query request to datasource %q: %w", s.datasourceURL, err)
}
switch s.dataSourceType {
case "", datasourcePrometheus:
s.setPrometheusInstantReqParams(req, query, ts)
case datasourceGraphite:
s.setGraphiteReqParams(req, query)
default:
logger.Panicf("BUG: engine not found: %q", s.dataSourceType)
}
return req, nil
}
func (s *VMStorage) newRequest(ctx context.Context) (*http.Request, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.datasourceURL, nil)
if err != nil {
logger.Panicf("BUG: unexpected error from http.NewRequest(%q): %s", s.datasourceURL, err)
}
req.Header.Set("Content-Type", "application/json")
if s.authCfg != nil {
err = s.authCfg.SetHeaders(req, true)
if err != nil {
return nil, err
}
}
for _, h := range s.extraHeaders {
req.Header.Set(h.key, h.value)
}
return req, nil
}

View File

@@ -3,6 +3,8 @@ package datasource
import (
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
func TestPromInstant_UnmarshalPositive(t *testing.T) {
@@ -21,7 +23,7 @@ func TestPromInstant_UnmarshalPositive(t *testing.T) {
f(`[{"metric":{"__name__":"up"},"value":[1583780000,"42"]}]`, []Metric{
{
Labels: []Label{{Name: "__name__", Value: "up"}},
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "up"}},
Timestamps: []int64{1583780000},
Values: []float64{42},
},
@@ -31,17 +33,17 @@ func TestPromInstant_UnmarshalPositive(t *testing.T) {
{"metric":{"__name__":"foo"},"value":[1583780001,"7"]},
{"metric":{"__name__":"baz", "instance":"bar"},"value":[1583780002,"8"]}]`, []Metric{
{
Labels: []Label{{Name: "__name__", Value: "up"}},
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "up"}},
Timestamps: []int64{1583780000},
Values: []float64{42},
},
{
Labels: []Label{{Name: "__name__", Value: "foo"}},
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "foo"}},
Timestamps: []int64{1583780001},
Values: []float64{7},
},
{
Labels: []Label{{Name: "__name__", Value: "baz"}, {Name: "instance", Value: "bar"}},
Labels: []prompbmarshal.Label{{Name: "__name__", Value: "baz"}, {Name: "instance", Value: "bar"}},
Timestamps: []int64{1583780002},
Values: []float64{8},
},

View File

@@ -31,14 +31,14 @@ import (
)
var (
rulePath = flagutil.NewArrayString("rule", `Path to the files or http url with alerting and/or recording rules.
rulePath = flagutil.NewArrayString("rule", `Path to the files or http url with alerting and/or recording rules in YAML format.
Supports hierarchical patterns and regexes.
Examples:
-rule="/path/to/file". Path to a single file with alerting rules.
-rule="http://<some-server-addr>/path/to/rules". HTTP URL to a page with alerting rules.
-rule="dir/*.yaml" -rule="/*.yaml" -rule="gcs://vmalert-rules/tenant_%{TENANT_ID}/prod".
-rule="dir/**/*.yaml". Includes all the .yaml files in "dir" subfolders recursively.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
Rule files support the YAML multi-document format. Files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
Enterprise version of vmalert supports S3 and GCS paths to rules.
For example: gs://bucket/path/to/rules, s3://bucket/path/to/rules
@@ -66,7 +66,7 @@ absolute path to all .tpl files in root.
evaluationInterval = flag.Duration("evaluationInterval", time.Minute, "How often to evaluate the rules")
validateTemplates = flag.Bool("rule.validateTemplates", true, "Whether to validate annotation and label templates")
validateExpressions = flag.Bool("rule.validateExpressions", true, "Whether to validate rules expressions via MetricsQL engine")
validateExpressions = flag.Bool("rule.validateExpressions", true, "Whether to validate rule expressions for different types.")
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.")
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
@@ -76,14 +76,15 @@ absolute path to all .tpl files in root.
`Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. `+
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
"Pass multiple -label flags in order to add multiple label sets.")
remoteReadIgnoreRestoreErrors = flag.Bool("remoteRead.ignoreRestoreErrors", true, "Whether to ignore errors from remote storage when restoring alerts state on startup. DEPRECATED - this flag has no effect and will be removed in the next releases.")
"In case of conflicts, original labels are kept with prefix `exported_`.")
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rule files are validated. The -rule flag must be specified.")
)
var alertURLGeneratorFn notifier.AlertURLGenerator
var (
alertURLGeneratorFn notifier.AlertURLGenerator
extURL *url.URL
)
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
@@ -97,13 +98,15 @@ func main() {
buildinfo.Init()
logger.Init()
if !*remoteReadIgnoreRestoreErrors {
logger.Warnf("flag `remoteRead.ignoreRestoreErrors` is deprecated and will be removed in next releases.")
var err error
extURL, err = getExternalURL(*externalURL)
if err != nil {
logger.Fatalf("failed to init external.url %q: %s", *externalURL, err)
}
err := templates.Load(*ruleTemplatesPath, true)
err = templates.Load(*ruleTemplatesPath, *extURL)
if err != nil {
logger.Fatalf("failed to parse %q: %s", *ruleTemplatesPath, err)
logger.Fatalf("failed to load template %q: %s", *ruleTemplatesPath, err)
}
if *dryRun {
@@ -117,12 +120,7 @@ func main() {
return
}
eu, err := getExternalURL(*externalURL)
if err != nil {
logger.Fatalf("failed to init `-external.url`: %s", err)
}
alertURLGeneratorFn, err = getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
alertURLGeneratorFn, err = getAlertURLGenerator(extURL, *externalAlertSource, *validateTemplates)
if err != nil {
logger.Fatalf("failed to init `external.alert.source`: %s", err)
}
@@ -310,7 +308,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
}
templated, err := alert.ExecTemplate(qFn, alert.Labels, m)
if err != nil {
logger.Errorf("can not exec source template %s", err)
logger.Errorf("cannot template alert source: %s", err)
}
return fmt.Sprintf("%s/%s", externalURL, templated["tpl"])
}, nil
@@ -365,7 +363,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
logger.Errorf("failed to reload notifier config: %s", err)
continue
}
err := templates.Load(*ruleTemplatesPath, false)
err := templates.Load(*ruleTemplatesPath, *extURL)
if err != nil {
setConfigError(err)
logger.Errorf("failed to load new templates: %s", err)

View File

@@ -74,7 +74,10 @@ func TestGetAlertURLGenerator(t *testing.T) {
func TestConfigReload(t *testing.T) {
originalRulePath := *rulePath
originalExternalURL := extURL
extURL = &url.URL{}
defer func() {
extURL = originalExternalURL
*rulePath = originalRulePath
}()

View File

@@ -160,8 +160,8 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
// it is important to call InterruptEval before the update, because cancel fn
// can be re-assigned during the update.
item.old.InterruptEval()
go func(old *rule.Group, new *rule.Group) {
old.UpdateWith(new)
go func(oldGroup *rule.Group, newGroup *rule.Group) {
oldGroup.UpdateWith(newGroup)
wg.Done()
}(item.old, item.new)
}

View File

@@ -3,6 +3,7 @@ package main
import (
"context"
"math/rand"
"net/url"
"os"
"strings"
"sync"
@@ -18,7 +19,7 @@ import (
)
func TestMain(m *testing.M) {
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
os.Exit(1)
}
os.Exit(m.Run())

Some files were not shown because too many files have changed in this diff.