Compare commits

...

107 Commits

Author SHA1 Message Date
Max Kotliar
d4208f2cde fix 2026-01-29 16:44:44 +02:00
Max Kotliar
cce1b50df5 app/vmselect/promql: Add test that demonstrates unstable sort behavior
Related to
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10189

Debug notes
https://github.com/VictoriaMetrics/debug-notes/tree/main/gh10189
2026-01-29 16:39:54 +02:00
Hui Wang
1db7597e45 vmalert: disallow setting the -notifier.url command-line flag to a null value
Previously, running vmalert with an empty notifier.url did not produce an error and resulted in a vmalert instance that could never send a notification successfully.

 This commit properly validates an empty notifier.url value.

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10355
2026-01-28 14:09:18 +01:00
Artem Fetishev
23fe7db35c lib/storage: follow-up for making searchAndMerge profile-friendly
Follow-up for c705da74f6
2026-01-28 14:07:26 +01:00
Hui Wang
817f2dc9e7 app/vmselect/promql: fix gaps in the changes() function
After changing the scrape interval from a smaller value (e.g., 30s) to a larger value (e.g., 60s), the changes() function starts to yield non-zero values even when the underlying values have not changed.

 This commit keeps unchanged series values when a large gap occurs between samples or when the scrape interval decreases.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10280
2026-01-28 14:06:51 +01:00
Max Kotliar
731ba17962 docs: Update vmctl flags in docs with a command (#10357)
### Describe Your Changes

The commit extends the `make docs-update-flags` command so it updates vmctl
flags as well. It creates one md file with the global flags and separate
files, one per supported mode.


### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-28 14:12:45 +02:00
Max Kotliar
bb163692ba docs: add available_from to request body buffering vmauth doc
Follow-up for
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10310 and
e31abfc25c
2026-01-28 12:50:25 +02:00
Nikolay
952ef51cd1 lib/fs: properly check for partially deleted directories (#10342)
Commit 83da33d8cf introduced a check to
detect directories partially removed via IsPartiallyRemovedDir.

However, the check was performed using the full path, while de.Name()
returns only the current entry name (without the path). As a result, the
check always succeeded and the function did not behave as intended.
2026-01-28 10:30:35 +01:00
Nikolay
1fc548b63a lib/fs: add fs.disableMincore flag
This flag allows disabling the mincore() syscall introduced in
50fc48ac47. On older ZFS filesystems,
mincore() may trigger a bug related to ZFS's own in-memory cache. Mixing
reads from mmap()ed files and direct disk reads can corrupt the ZFS ARC
cache and lead to data read corruption.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10327
2026-01-27 20:29:01 +01:00
Nikolay
aa5236877c lib/storage: properly aggregate per IndexedDB cache stats
Commit f62893c151 added an attempt to fix
stats for `tagFiltersCache`, `metricIDCache`, and `dateMetricIDCache`.
Instead of aggregated stats, it returned the largest cache stats by
cache size.

This resulted in possible counter decreases for counter metric types. It
made aggregated metrics less usable.

This commit changes cache stats aggregation by metric type:
* size-related gauge metrics are returned based on max cache size usage
* metric counters are reported as a sum of all counters

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10275
2026-01-27 20:27:41 +01:00
Artem Fetishev
c705da74f6 lib/storage: make pt and legacy idbs visible in golang profiles
Rewrite searchAndMerge so that Go profiles can show exactly
how many resources are consumed by each idb type.
2026-01-27 20:26:39 +01:00
Aliaksandr Valialkin
2e9bda2bff lib/{mergeset,storage}: add a comment explaining why the strange construct with anonymous function is needed
This is a follow-up for the commit 2a0e382a99

Updates https://github.com/VictoriaMetrics/VictoriaLogs/issues/1020
2026-01-27 19:44:49 +01:00
Jiekun
e1413536fc chore: add build version information to the home page for consistency with other projects
The build version is added to:
- victoria-metrics
- vmagent
- vmalert

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10249

Co-authored-by: Hui Wang <haley@victoriametrics.com>
Signed-off-by: Zhu Jiekun <jiekun@victoriametrics.com>
2026-01-27 18:28:15 +02:00
Jayice
1a438a04ba introduce new alert for vmagent persistentqueue capacity 2026-01-27 18:14:02 +02:00
Aliaksandr Valialkin
4ad47d6fe3 docs/victoriametrics/README.md: remove obsolete docs about staleness markers during deduplication after the commit 7bd5d19f62
Staleness markers are ignored on the deduplication interval if other numeric samples exist on that interval.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10196
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5587
2026-01-27 16:08:45 +01:00
Aliaksandr Valialkin
879443f915 lib/storage/dedup.go: remove obsolete comment from DeduplicateSamples - it doesn't keep stale NaNs on purpose after the commit 7bd5d19f62
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5587
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10196
2026-01-27 16:08:44 +01:00
Max Kotliar
bd6788cb8f docs/changelog: fix ordering after merging pr.
related pr https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10320
2026-01-27 16:37:49 +02:00
Jayice
22696f378c lib/promscrape: apply promscrape.maxScrapeSize to decompressed data
Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9481
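A minimal sketch of the idea, assuming gzip-compressed scrape responses; the function name and error text are illustrative, not the actual lib/promscrape code:

```go
package scrapelimit

import (
	"compress/gzip"
	"fmt"
	"io"
)

// readScrapeBody applies the maxScrapeSize limit to the decompressed stream,
// so a small compressed payload cannot expand past the limit.
func readScrapeBody(r io.Reader, maxScrapeSize int64) ([]byte, error) {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	// Read at most maxScrapeSize+1 decompressed bytes to detect overflow.
	data, err := io.ReadAll(io.LimitReader(zr, maxScrapeSize+1))
	if err != nil {
		return nil, err
	}
	if int64(len(data)) > maxScrapeSize {
		return nil, fmt.Errorf("scrape response exceeds -promscrape.maxScrapeSize=%d after decompression", maxScrapeSize)
	}
	return data, nil
}
```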
2026-01-27 16:30:38 +02:00
Artur Minchukou
7205f479aa app/vmui: fix build of vmui by handling playground env variable correctly (#10354)
### Describe Your Changes

Fixed build of vmui by handling playground env variable correctly.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-27 16:24:18 +02:00
Yury Moladau
5c7031c000 vmui: fix "Percentage from total" for multiple metrics in Cardinality Explorer (#10323)
### Describe Your Changes

In the Cardinality Explorer, when filtering, a "Percentage from total"
stat appears. This stat is documented as "the share of these series in
the total number of time series".

This works for pages for individual metrics. However, if using a filter
that returns *multiple* metrics, the value of "Percentage from total"
will only account for the size of the *first* metric. One can have a
filter that returns, say, 10k time series (out of, say, 100k in the VM
cluster), and if the first metric returned has 1k time series, then
"Percentage from total" will show 1%, not 10%.

This PR fixes that calculation.

Credits to @PleasingFungus for the original fix (PR #10288).

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

Signed-off-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Max Kotliar <mkotlyar@victoriametrics.com>
Co-authored-by: PleasingFungus <PleasingFungus@users.noreply.github.com>
2026-01-27 15:37:34 +02:00
Artur Minchukou
a227128467 app/vmui: move node from ci to docker and update build steps (#10299)
### Describe Your Changes

Moved node from CI to make command and update build steps.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-27 15:23:35 +02:00
Nikolay
777c8913b3 follow-up after e35a9a366c
Commit e35a9a366c changed the order of wg.Add calls in the Graphite transform package. Previously, all wg.Add calls were made upfront, but after that change it became possible for wg.Wait to exit earlier than expected.

This commit fixes the issue by spawning all background goroutines first and starting the goroutine that calls wg.Wait afterward.
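A minimal sketch of the corrected ordering (names are made up; this is not the actual Graphite transform code):

```go
package wgsketch

import "sync"

// runWorkers spawns every worker before starting the goroutine that waits,
// so wg.Wait() cannot return while workers are still being registered.
func runWorkers(work []func(), done chan<- struct{}) {
	var wg sync.WaitGroup

	for _, w := range work {
		wg.Go(w) // sync.WaitGroup.Go (Go 1.25+) registers the worker before it runs
	}

	go func() {
		wg.Wait()
		close(done)
	}()
}
```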
2026-01-27 13:50:07 +01:00
Aliaksandr Valialkin
e35a9a366c all: consistently use sync.WaitGroup.Go() instead of sync.WaitGroup.Add(1) + sync.WaitGroup.Done()
This improves code readability a bit.
2026-01-27 00:29:47 +01:00
JAYICE
6bbc03ecf8 app/vmagent: support configuring different -remoteWrite.queues per URL
Previously vmagent had remoteWrite.queues as a global setting that was applied to every persistentqueue. However, it could be useful to specify remoteWrite.queues per remoteWrite.url.

Since each remote write target might have a different workload (latency, throughput, and availability), it is more flexible for tuning to be able to set remoteWrite.queues separately for a specific target.

This commit makes `-remoteWrite.queues` configurable per -remoteWrite.url; see the usage sketch below.

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10270
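A hypothetical usage sketch, assuming the array flag is matched positionally to -remoteWrite.url (URLs and values below are made up):

```
./vmagent \
  -remoteWrite.url=http://vm-a:8428/api/v1/write -remoteWrite.queues=8 \
  -remoteWrite.url=http://vm-b:8428/api/v1/write -remoteWrite.queues=32
```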
2026-01-26 20:09:35 +01:00
Max Kotliar
ca34ae48b4 docs/changelog: chore changelog
- rename the `these docs` link to a more explicit link
- add a thank-you for the contribution
2026-01-26 18:45:04 +02:00
Max Kotliar
f18fd37433 docs: run make docs-update-flags 2026-01-26 18:43:35 +02:00
Zhu Jiekun
f191a052dc lib/promscrape: ceiling the last scrape size
Round the last scrape size up to an integer number of bytes or kilobytes to
avoid misleading decimal points.

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10307
2026-01-26 12:46:24 +01:00
Max Kotliar
0fdd5cb435 app/vmauth: fix backend healthcheck for url prefixes defined inside url_map
Previously health checks for url prefixes defined inside `url_map` were
not properly stopped. See STR in
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10334#issuecomment-3791401822

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10334
2026-01-26 11:47:43 +01:00
f41gh7
76dd8f4adb lib/storage: properly search searchTenantsOnDate
The initial implementation of searchTenantsOnDate used an index scan for the given prefix (index prefix + tenant + date).
It did not check whether the date prefix was actually outside the current date.

This commit adds the missing date check and makes the tenant search results accurate.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10295
2026-01-26 11:35:27 +01:00
Aliaksandr Valialkin
e31abfc25c app/vmauth: allow buffering request body before proxying it to the backend
This should help reduce load on backends when many concurrent clients
send requests over slow networks (for example, when many IoT devices send metrics
to vmauth over slow connections).

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10309

This commit is based on top of https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10310
Thanks to @makasim for the initial idea.
2026-01-26 03:02:32 +01:00
Aliaksandr Valialkin
ac6d9d632f app/vmauth: properly increment vmauth_user_concurrent_requests_limit_reached_total and vmauth_unauthorized_user_concurrent_requests_limit_reached_total metrics when the request is rejected because of the concurrency limit
These metrics must be incremented when the request couldn't be processed because of the configured per-user concurrency limit.
The commit 76176ac1d3 moved the counter increase to the place where the current request
is put in the wait queue because the concurrency limit is reached. This is incorrect, since such requests
can still be successfully processed within -maxQueueDuration . This also contradicts the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting

There is little practical sense in counting the number of times the concurrency limit is reached
when the request is still successfully processed within -maxQueueDuration after that.

Add missing alerting rule for rejected unauthorized requests because of the concurrency limit.

Add missing grouping by instance for per-user counter of rejected queries because of the concurrency limit.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
2026-01-25 21:43:44 +01:00
Aliaksandr Valialkin
e43de2a2b3 app/vmauth: put comments into the correct places after the commit 5f67f04f6b 2026-01-25 21:19:01 +01:00
Aliaksandr Valialkin
efe4a3b2dd vendor: update github.com/VictoriaMetrics/VictoriaLogs from v1.36.2-0.20251008164716-21c0fb3de84d to v0.0.0-20260125191521-bc89d84cd61d 2026-01-25 20:24:04 +01:00
Aliaksandr Valialkin
3bf5f0297b LICENSE: update the end copyright year from 2025 to 2026 2026-01-25 20:14:07 +01:00
Aliaksandr Valialkin
5632ccc64a lib/logger: count both printed and suppressed logs at vm_log_messages_total metric
This simplifies troubleshooting via the vm_log_messages_total metric
when logs are unavailable. The logs may be unavailable when the -loggerLevel command-line
flag is set to a value other than INFO. The logs may also be unavailable when clients
use the Monitoring of Monitoring service ( https://victoriametrics.com/products/mom/ ),
which provides metrics, but doesn't provide logs from VictoriaMetrics components
running on the client side.

Add `is_printed` label to the `vm_log_messages_total` metric in order to detect whether
the given log has been suppressed or printed.
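A minimal sketch of the counting scheme, using the github.com/VictoriaMetrics/metrics package; the label set besides `is_printed` is an assumption, not the exact one used by lib/logger:

```go
package loggersketch

import (
	"fmt"
	"strconv"

	"github.com/VictoriaMetrics/metrics"
)

// countLogMessage increments vm_log_messages_total for both printed and
// suppressed messages, so suppressed logs stay visible via is_printed="false".
func countLogMessage(level string, printed bool) {
	name := fmt.Sprintf(`vm_log_messages_total{level=%q, is_printed=%q}`,
		level, strconv.FormatBool(printed))
	metrics.GetOrCreateCounter(name).Inc()
}
```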

See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10304

While at it, make the description for the TooManyLogs alert
(which is based on the vm_log_messages_total metric) more readable.
Also restore the `level!="info"` filter instead of `level="error"`
in the query for this alerting rule, in order to be consistent with the queries
in the official dashboards for VictoriaMetrics components.
TODO: investigate too high warnings rate at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2760
and fix it at the source of these warnings instead of modifying the query
for the TooManyLogs alert.
2026-01-25 17:43:20 +01:00
Nikolay
446452857c lib/storage: tsdb stats fallback to legacy idb
Add fallback to legacy indexDB for stats search

After introducing the new partition index
(f97f627f79), storage stopped returning
stats for date ranges outside the partition index. This made the
migration backward incompatible, as there was no way to retrieve stats
for dates prior to the migration.

This change adds a fallback to the legacy indexDB search when the stats
search on the current partition index returns zero series.

This is an imperfect solution: due to tag filters, the TSDB status
search may legitimately return empty results. However, the additional
overhead is small and acceptable.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10315
2026-01-23 16:43:13 +01:00
Max Kotliar
4ff409eb27 docs/changelog: mention an already fixed bug in vmauth
The bug was fixed in
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10233 before we
realized it was a bug; at that time we considered it an improvement - do
fewer retries. But later, in
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10318, we
realized that we had actually fixed a bug.

Add a record about the bugfix to the changelog post factum.
2026-01-22 16:57:58 +02:00
Phuong Le
1b7f0172d2 fsutil: fix a typo related to default concurrent goroutines working with files
s/265/256
2026-01-20 21:46:38 +01:00
Andrii Chubatiuk
1c77ee9527 app/vmui: removed anomaly ui (#10316)
vmanomaly has been moved to a separate repository. This means that the functionality related to vmanomaly is no longer needed in app/vmui located in the VictoriaMetrics repository.

This commit removes all the functionality and unnecessary abstractions related to vmanomaly from app/vmui. This should help improve long-term maintenance of the code.

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9755
2026-01-20 21:46:09 +01:00
Phuong Le
2a0e382a99 lib/storage, lib/mergeset: avoid deadlock on panic while merging
Related to
https://github.com/VictoriaMetrics/VictoriaLogs/issues/1020#issuecomment-3763912067
2026-01-20 21:43:12 +01:00
Max Kotliar
02c8ea5a48 docs/changelog: fix typo in security upgrade 2026-01-20 21:53:52 +02:00
Aliaksandr Valialkin
34f242a6b8 vendor: run make vendor-update 2026-01-19 15:29:25 +01:00
f41gh7
bc8f6c5688 docs: point examples to the v1.134.0 release 2026-01-19 14:28:00 +01:00
f41gh7
c0fe67c2db docs: cut LTS releases v1.110.28 and v1.122.13
Signed-off-by: f41gh7 <nik@victoriametrics.com>
2026-01-19 14:26:36 +01:00
Fred Navruzov
ede1c2cde9 docs/vmanomaly: release v1.28.5 (#10311)
### Describe Your Changes

- Adjusted vmanomaly docs for v1.28.5
- Added missing `server` page at /anomaly-detection/components/server

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-17 21:52:48 +02:00
Aliaksandr Valialkin
ad34a5eb53 lib/protoparser/protoparserutil: reduce memory usage in ReadUncompressedData() when processing big number of incoming connections
Wait for the first byte from the reader passed to ReadUncompressedData()
before obtaining concurrency token from -maxConcurrentInserts and before allocating
buffers needed for reading the request body in memory.
This should limit the amounts of memory needed for processing a big number of concurrent
HTTP requests via Prometheus remote_write protocol and via other HTTP-based data ingestion
protocols where every request contains a single block of data to process.
Now the maximum memory usage is limited by -maxConcurrentInserts, while the server
can process much more than -maxConcurrentInserts concurrent HTTP requests by pausing the excess requests.

Previously the memory usage wasn't limited by -maxConcurrentInserts, since buffers for reading the data
from concurrent connections were allocated before obtaining the concurrency token from -maxConcurrentInserts.

While at it, use protoparserutil.ReadUncompressedData() in lib/protoparser/promremotewrite/stream.Parse()
for the sake of consistency across parsers for protocols, which send the full block of data per every incoming HTTP request.

This is a follow-up for the commit d107dee9c7
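A rough sketch of the described ordering (illustrative only, not the actual protoparserutil code):

```go
package ingestsketch

import (
	"bufio"
	"io"
)

// readUncompressedData blocks on the first byte of the request body before
// taking a -maxConcurrentInserts slot and allocating read buffers, so slow or
// idle connections don't hold memory or a concurrency token.
func readUncompressedData(r io.Reader, concurrencyCh chan struct{}, maxBodySize int64) ([]byte, error) {
	br := bufio.NewReader(r)
	if _, err := br.Peek(1); err != nil {
		return nil, err
	}

	// Only now acquire the concurrency slot and allocate the buffer.
	concurrencyCh <- struct{}{}
	defer func() { <-concurrencyCh }()

	return io.ReadAll(io.LimitReader(br, maxBodySize))
}
```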
2026-01-17 15:49:53 +01:00
f41gh7
eaf7a68c92 CHANGELOG.md: cut v1.134.0 release 2026-01-16 20:49:31 +01:00
Max Kotliar
c5e43e1c91 docs: use canonical link 2026-01-16 19:09:49 +02:00
f41gh7
b343f541f0 make vmui-update 2026-01-16 16:46:26 +01:00
f41gh7
a23a902953 deployment: update Go builder from v1.25.5 to v1.25.6
See https://github.com/golang/go/issues?q=milestone%3AGo1.25.6%20label%3ACherryPickApproved
2026-01-16 16:26:27 +01:00
Hui Wang
54c60706ca lib/streamaggr: prefer numerical values over stale markers when sample share the same timestamp during deduplication (#10300)
follow up
7bd5d19f62,
apply the same logic in stream aggregation.
2026-01-16 16:14:09 +01:00
Nikolay
cd2e11b7cf lib/storage: increase rotation time for daily metricID cache
This is follow-up for c5713a09d3

Originally, the dateMetricID cache was fully rotated every 20 minutes. This
made daily-index pre-creation less efficient and caused CPU usage spikes
for index record lookups at midnight.

The storage pre-fills index records for the next day during the hour before midnight.
But this rotation made only the last 20 minutes before midnight visible in
the cache.

This commit changes the rotation period from 20 minutes to 2 hours (with a 1-hour
tick interval). While it could slightly increase cache memory usage (in practice it
shouldn't be noticeable), it prevents CPU usage spikes.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10064
2026-01-16 16:12:59 +01:00
Aliaksandr Valialkin
5423d5e93a docs/victoriametrics/relabeling.md: add an alias seen in wild - https://docs.victoriametrics.com/victoria-metrics/relabeling/
Google sends users to this alias according to the report on 404 pages.
2026-01-16 15:46:55 +01:00
Aliaksandr Valialkin
48819b6781 docs/victoriametrics/CaseStudies.md: added alias for this page seen on the Internet - https://docs.victoriametrics.com/casestudies.html
Google sends users to this url according to the report on 404 pages.
2026-01-16 15:32:40 +01:00
JAYICE
c4bff27f46 lib/storage: properly search for LabelNames and LabelValues
The issue was introduced in commit d6ef8a807b.

Due to variable shadowing, if a filter matched more than 100_000 metricIDs, the code fell back to the indexDB scan.
But because of the typo, the `filter` value was not properly updated, which produced incorrect results.

This commit fixes this typo and adds a test to verify this case.


fixes  https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10294
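A generic illustration of this bug class (names are made up; this is not the actual lib/storage code):

```go
package shadowsketch

func fallbackScan() []uint64 { return nil }

func pickFilter(filter []uint64, tooManyMetricIDs bool) []uint64 {
	if tooManyMetricIDs {
		// BUG: ":=" declares a new local `filter` that shadows the parameter,
		// so the value returned below never sees the fallback result.
		filter := fallbackScan()
		_ = filter
		// Fix: use plain assignment instead: filter = fallbackScan()
	}
	return filter
}
```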
2026-01-16 13:53:29 +01:00
Max Kotliar
432b313a48 docs: cleanup changelog a bit before release 2026-01-16 12:51:08 +02:00
Haley Wang
7bd5d19f62 lib/storage: prefer numerical values over stale markers when samples share the same timestamp during deduplication
Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10196

Prefer the non-StaleNaN value when both StaleNaN and non-StaleNaN samples share the same timestamp during deduplication (downsampling). The scenario can occur when:
1. Multiple vmagent instances scrape the same target (without the -promscrape.cluster.name flag), and one instance fails to scrape due to issues such as network problems while others succeed.
2. Multiple vmalert instances evaluate the same recording rule, with one instance receiving a partial response while others receive a complete response.

In both cases, since the samples share the same timestamp and represent the metric state at that moment,
the non-StaleNaN value is entirely valid, whereas the StaleNaN could be caused by other unknown issues.
Therefore, it is reasonable to prioritize the non-StaleNaN value.
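A simplified sketch of this deduplication rule; the stale-marker check uses the Prometheus staleness NaN bit pattern, and the function names are illustrative:

```go
package dedupsketch

import "math"

// isStaleNaN reports whether f is the Prometheus staleness marker.
func isStaleNaN(f float64) bool {
	return math.Float64bits(f) == 0x7ff0000000000002
}

// pickSample chooses which of two samples sharing the same timestamp to keep:
// a numeric value wins over a stale marker.
func pickSample(a, b float64) float64 {
	if isStaleNaN(a) && !isStaleNaN(b) {
		return b
	}
	return a
}
```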
2026-01-16 09:25:11 +02:00
Haley Wang
8d18bc288f vmselect: use the last 20 raw samples to auto-calculate the lookbehind during range query
Previously, the first 20 raw samples were used for the calculation.
But compared to the first 20 samples, the last 20 samples represent the latest state of the metrics,
so the lookbehind window calculated from them should be more accurate when applied to the most recent samples,
resulting in better query results for recent time ranges.

For example, if the scrape interval changes on day 4 and the query range is set to the last 7 days,
applying the window derived from the first 20 samples (the old scrape interval) to new samples could result in consistently incorrect results from day 4 through day 11.
Conversely, applying the window derived from the last 20 samples (the new scrape interval) could lead to incorrect results for [day0, day4),
which are old states and generally less important.

This pull request does not address any specific bug, but changes the general behavior, so there is no changelog entry.

Inspired by https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10280, but not the fix for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10280.
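A back-of-the-envelope sketch of the idea (not the actual vmselect code; the exact formula is an assumption):

```go
package lookbehindsketch

// estimateLookbehind derives a lookbehind window from the gaps between the
// last n raw sample timestamps (in milliseconds) instead of the first n,
// so it tracks the most recent scrape interval.
func estimateLookbehind(timestamps []int64, n int) int64 {
	if len(timestamps) < 2 {
		return 0
	}
	if len(timestamps) > n {
		timestamps = timestamps[len(timestamps)-n:]
	}
	var maxGap int64
	for i := 1; i < len(timestamps); i++ {
		if gap := timestamps[i] - timestamps[i-1]; gap > maxGap {
			maxGap = gap
		}
	}
	// Use a small multiple of the largest recent gap to cover scrape jitter.
	return 2 * maxGap
}
```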
2026-01-16 09:00:51 +02:00
Nikolay
ff6e5c2983 app/vmstorage: reduce default value for storage.vminsertConnsShutdownDuration
This commit reduces the default value for the
`storage.vminsertConnsShutdownDuration` flag from `25s` to `10s`.
This change should help reduce the probability of an ungraceful storage
shutdown in Kubernetes-based environments, which have a 30-second default
graceful termination period (terminationGracePeriodSeconds).

Related issue
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10063
2026-01-15 17:26:12 +01:00
Yury Moladau
23af0086d8 app/vmui: fix heatmap rendering for uniform or sparse histogram buckets (#10292)
* Fixed a heatmap crash that happened when all visible cells had the
same value (division by zero produced invalid color indices).
* Improved how histogram buckets are chosen for display when the data is
very sparse, so the heatmap doesn’t look empty or drop the only
meaningful bucket.

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10240
2026-01-15 16:55:24 +01:00
Yury Moladau
8657470068 app/vmui: bump package versions (#10291)
### Describe Your Changes

Updated project dependencies to the latest versions.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

Signed-off-by: Yury Molodov <yurymolodov@gmail.com>
2026-01-15 16:53:23 +02:00
Aliaksandr Valialkin
3f16bc7cb2 docs/victoriametrics/Articles.md: add https://www.keyvalue.systems/blog/kubernetes-observability-with-victoriametrics-loki-grafana/ 2026-01-15 13:53:26 +01:00
Aliaksandr Valialkin
655a0eb0c3 app/vmstorage/main.go: typo fix after the commit 7cbd2a8600: partition -> snapshot 2026-01-15 12:50:24 +01:00
Aliaksandr Valialkin
7cbd2a8600 app/vmstorage: delete just created snapshot if the client canceled the request for creating the snapshot
It is better to delete the snapshot, since the client is no longer interested in it.
This should prevent creating many unused snapshots when clients cancel snapshot creation
because of timeouts. This is a real production case from one of the VictoriaMetrics users:
the disk IO subsystem became very slow, so creating a snapshot took a lot of time, and vmbackup
kept canceling snapshot creation because of the timeout. But vmstorage still continued
creating the snapshot. This resulted in a growing number of created but unused snapshots.
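An illustrative sketch of this behavior (interface and function names are assumptions):

```go
package snapshotsketch

import "context"

type snapshotter interface {
	CreateSnapshot() (string, error)
	DeleteSnapshot(name string) error
}

// createSnapshotHandler deletes the freshly created snapshot if the client
// (e.g. vmbackup) canceled the request while the snapshot was being created.
func createSnapshotHandler(ctx context.Context, s snapshotter) (string, error) {
	name, err := s.CreateSnapshot()
	if err != nil {
		return "", err
	}
	if ctx.Err() != nil {
		// Nobody will use this snapshot - remove it right away.
		_ = s.DeleteSnapshot(name)
		return "", ctx.Err()
	}
	return name, nil
}
```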
2026-01-15 12:36:48 +01:00
Max Kotliar
5f67f04f6b app/vmauth: measure client cancelled requests
Without measuring this, we have a blind spot. Exposing it as a metric
improves visibility and should save time during future debugging
sessions.

Inspired by review commit
c9596a0364 (r173621968)
2026-01-15 12:13:35 +01:00
Nikolay
2056e5b46d lib/mergeset: do not cache inmemoryBlock with a single item
The indexDB mergeset has an edge case for a single-item inmemoryBlock. It stores
such item blocks in memory at the blockHeader firstItem, so there is no
need to perform on-disk read operations and store a copy of it in the cache.

Caching it may also result in incorrect search results: an inmemoryBlock with a
single item always has a zero index block offset, which causes collisions
if it's cached together with the next index block of the part.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10239
Probably fixes
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10063
2026-01-15 12:12:08 +01:00
Hui Wang
4d1f262ec4 vmalert: add support for $isPartial variable in alerting rule annotation templating
fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4531
2026-01-15 12:10:07 +01:00
Vadim Rutkovsky
afca599a46 app/vmauth: fix typo in auth config warning message 2026-01-15 12:09:36 +01:00
Yury Moladau
d667f694bc app/vmui: fix tenant ID handling via URL path (#10287)
**Problem**

* VMUI had two tenant ID sources:

  * URL path: `/select/<accountID>/vmui/`
  * Query param: `tenantID`
* These could differ, causing confusion and inconsistent behavior.

**Solution**

* Removed the legacy `tenantID` query parameter.
* Use the URL path as the single source of truth for tenant ID.
* Changing the tenant in the UI now updates the URL path.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10232
2026-01-15 12:05:12 +01:00
Aliaksandr Valialkin
fe2c60c79b dashboards: follow-up for the commit 36460f6297
Use the $__range duration instead of a 1h duration for the 'Retention errors' stats panel,
in a similar way to what was done in the commit 36460f6297
for the 'Backup errors' stats panel.

While at it, run `make dashboards-sync` in order to sync the dashboards
in the dashboards/vm/ folder. See https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards/README.md
for details.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10279
2026-01-14 23:30:08 +01:00
Stephan Burns
36460f6297 Make stats panel use the range specified in grafana (#10279)
### Describe Your Changes

The Backup errors panel uses a hard-coded rate. When looking over a
large period of time this number would likely stay low due to the hard-coded
rate, when in reality the number of errors is much larger.

This change addresses this by using the $__range variable in Grafana so
the rate aligns with the date/time range selected in Grafana.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-14 23:21:16 +01:00
Aliaksandr Valialkin
d107dee9c7 lib/writeconcurrencylimiter: remove Reader.DecConcurrency() method
Call decConcurrency() inside Reader.Read() before calling the Read() at the underlying reader.
This reduces chances of improper use of the writeconcurrencylimiter.Reader by callers.

While at it, move the creation of writeconcurrencylimiter.GetReader() to the top of stream parser functions
at lib/protoparser/* packages, and call incConcurrency() inside GetReader() call.
This reduces the frequency of decConcurrency() / incConcurrency() calls
for typical buffered reads when parsing the incoming data. This, in turn,
reduces the contention on the concurrencyLimitCh.
2026-01-14 22:55:17 +01:00
Max Kotliar
b33d7c3ef9 dashboards: remove timezone from vmagent dashboard
The bug was introduced in
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10267 and breaks
Helm chart customization; see the discussion at
415ff27c74 (r174600675)
2026-01-14 13:28:35 +02:00
JAYICE
d3848f6802 vmagent: fix calculation of vm_persistentqueue_free_disk_space_bytes (#10271)
### Describe Your Changes

follow up https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10242,
see discussion in
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10267#issuecomment-3729577415
for more context

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-13 20:12:31 +02:00
Jayice
415ff27c74 dashboards: add Persistent queue Full ETA panel to the Drilldown section in vmagent dashboard 2026-01-13 20:03:43 +02:00
Max Kotliar
90f59383b2 docs: Add docs-update-flags step to release. (#10284)
### Describe Your Changes

Previously, we had to manually update flags in the documentation whenever we
made flag-related changes in the source code. Some did it by hand, others
compiled enterprise binaries, executed them with the `-help` flag, and
copied and pasted the output into the documentation.

In https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9632 a
`make docs-update-flags` command was introduced. It automates the whole
process: it compiles the binaries, runs `-help`, and syncs the output to the docs
automatically.

Now, we can **omit updating doc flags in the PR** and do it once before
releasing a new version.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-13 17:09:49 +02:00
Fred Navruzov
8fec7005d0 docs/vmanomaly-release-v1.28.4 (#10283)
### Describe Your Changes

Docs upgrades, including v1.28.4 adjustments, some diagrams refinement
and deprecations

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-13 15:32:03 +02:00
Max Kotliar
4d42b291e5 docs: run make docs-update-flags 2026-01-13 10:56:14 +02:00
Max Kotliar
50f4fbf28e lib/flagutil: Add explicit month duration unit (M) for -retentionPeriod.
Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10181
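For example, assuming months remain the default unit for plain numbers, `-retentionPeriod=6M` now states six months explicitly instead of relying on the bare `-retentionPeriod=6`.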
2026-01-13 10:37:02 +02:00
Max Kotliar
a5da6afb88 docs: run make docs-update-flags 2026-01-13 10:28:16 +02:00
Max Kotliar
71f9e7f2c4 app/vmctl: fix link to documentation
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10268

Co-authored-by: Danijel Tasov <data@consol.de>
Signed-off-by: Danijel Tasov <data@consol.de>
2026-01-12 20:53:35 +02:00
Max Kotliar
eb7c5df65e dashboards: run make dashboards-sync 2026-01-09 19:07:03 +02:00
Max Kotliar
5af493297a docs/changelog: move 2025 changes to CHANGELOG_2025.md, create CHANGELOG_2026.md 2026-01-09 13:24:47 +02:00
JAYICE
2f61fa867e Makefile: Move enterprise-only flags to a separate block in document (#10241)
### Describe Your Changes

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10218.

Improve `docs-update-flags` command to move enterprise-only flags to a
separate block.

<img width="936" height="964" alt="image"
src="https://github.com/user-attachments/assets/f96a3515-4acc-4a65-94b1-55e01fab6e25"
/>


### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-08 19:42:33 +02:00
Max Kotliar
729b1099d8 dashboards: Enhance VictoriaMetrics - single-node dashboard stats row. (#10260)
### Describe Your Changes

Currently, the stats are small and hard to read (see screenshot in the
PR). In addition, the version and uptime panels work well for a single
vmsingle, but become inconvenient when multiple instances are present,
since only one is visible.

This PR changes the version and uptime panels from single stat to time
series, aligning them with the VictoriaMetrics – cluster dashboard. It
also enlarges the remaining stats so the values are easier to read,
consistent with the cluster dashboard (see screenshot in the PR).

Follow up on
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10187 and
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10132

Before:
<img width="1512" height="364" alt="Screenshot 2026-01-07 at 21 38 17"
src="https://github.com/user-attachments/assets/8d8baa86-b31b-4c58-ae22-cef94a1607e6"
/>

After:
<img width="1512" height="670" alt="Screenshot 2026-01-07 at 22 07 10"
src="https://github.com/user-attachments/assets/9e60596d-72ec-4060-af11-a69ce554d3b1"
/>

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

Co-authored-by: Hui Wang <haley@victoriametrics.com>
2026-01-08 13:49:46 +02:00
Yury Moladau
945ca569b9 app/vmui: add localStorage availability checks
* Added browser `localStorage` availability checks with user-facing
error reporting.
* Introduced `VMUI:`-prefixed `localStorage` keys to avoid key
collisions.
* Added migration logic for existing unprefixed `localStorage` keys.

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10085
2026-01-08 11:13:38 +01:00
Hui Wang
7fb8a8a0b2 vmalert: skip alert annotation templating in replay mode
In alerting rules, annotations are only attached to alert messages that
are sent to the notifier (such as Alertmanager). These annotations
typically contain human-readable information, such as instructions for
resolving the alert.

In [replay
mode](https://docs.victoriametrics.com/victoriametrics/vmalert/#rules-backfilling),
vmalert does not send alert messages to the notifier at all(no notifier
is configured), as these alerts are outdated. Therefore, it does not
need to template the annotations in this mode.

Related PR https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10262
2026-01-08 11:09:21 +01:00
JAYICE
89f95f74ed vmagent: add metric for persistentqueue capacity
This commit adds new metric `vm_persistentqueue_free_disk_space_bytes`, which helps
to track free space for persistent queue.

part of implementation for
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10193
2026-01-08 11:07:28 +01:00
Hui Wang
46e13fe0ca vmselect: expose vm_rollup_result_cache_requests_total metric
which tracks the number of requests to the query rollup cache


As described in
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10117, when
retrieving cached data from the rollup result cache, there can be mixed
`get()` and `getBig()` calls to the underlaying fastcache. And it's
unpredictable how many times `getBig()` will call `get()`, so the
metrics from fastcache cannot be used to indicate query cache miss
ratio.
Exposing a new counter `vm_rollup_result_cache_requests_total` to track
the number of requests to the query rollup cache, together with the
existing `vm_rollup_result_cache_miss_total`, allows for monitoring the
rollup cache miss rate per query (or subquery), which is more
user-facing.
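For example, the cache miss ratio can then be estimated with a query along the lines of `rate(vm_rollup_result_cache_miss_total[5m]) / rate(vm_rollup_result_cache_requests_total[5m])`; the exact recording or alerting rule is left to the operator.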

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10117
related to https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5056
2026-01-08 11:05:25 +01:00
Fred Navruzov
50d8ad6733 docs/vmanomaly-release-v1.28.3 (#10258)
### Describe Your Changes

Docs update for vmanomaly v1.28.3 release + `retention` doc section for
model artifacts

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-07 20:40:32 +02:00
JAYICE
3b8550adb1 dashboard: refine vmsingle dashboard and align it to vmcluster dashboard (#10187)
### Describe Your Changes

Fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10132

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

---------

Co-authored-by: Max Kotliar <mkotlyar@victoriametrics.com>
2026-01-07 12:58:49 +02:00
Max Kotliar
1708b73312 lib/promscrape: show (N/A) instead of hiding target response link when original labels are dropped (#10244)
Related to
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10237,
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9901

When `-promscrape.dropOriginalLabels=true` is enabled, original target
labels are unavailable. These labels are required to compute the target
ID used by the /target_response endpoint, so the response link cannot be
generated. See

7a5003212e/lib/promscrape/targetstatus.qtpl (L236)

Previously, the link silently disappeared from the UI. Now the UI shows
(N/A) Not available, explicitly indicating that required data is
missing.
2026-01-06 12:31:18 +01:00
f41gh7
57defe7ab4 apptest: add zabbixconnector integration test
follow-up for 859435a8df
2026-01-06 12:27:01 +01:00
Sinotov Vladimir
d58cfb7f36 app/vminsert: properly route zabbixconnector requests
Previously, in the VictoriaMetrics single-node version, using

`http://<victoriametrics-addr>:8428/zabbixconnector/api/v1/history`
resulted in a missing path error. The issue was introduced while back-porting changes from vmagent.


Additionally, the HTTP response was fixed: Zabbix expects a 200 status
code during normal operation.

 Related PR https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10214
2026-01-06 11:17:09 +01:00
Cancai Cai
a244750bc6 doc/table: fix typo (#10243)
### Describe Your Changes

Please provide a brief description of the changes you made. Be as
specific as possible to help others understand the purpose and impact of
your modifications.

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

Signed-off-by: cancaicai <2356672992@qq.com>
2026-01-05 21:42:01 +02:00
Max Kotliar
f06e7f9a6e app/vmagent: replace go.yaml.in/yaml/v3 package with gopkg.in.yaml.v2
It addresses the comment:
https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10213/files#r2662305818

The reasons:
- It was decided to use v2 for now and not upgrade to v3.
- The latter package is used in more places, so it is better to use it
here too.
2026-01-05 21:34:08 +02:00
Artem Fetishev
7a5003212e docs: bump VictoriaMetrics components version
Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2026-01-05 19:22:53 +01:00
Artem Fetishev
846392405e deployment/docker: bump VictoriaMetrics component version
Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2026-01-05 19:18:49 +01:00
Artem Fetishev
37c3d8c26b CHANGELOG.md: fix issue links
Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2026-01-05 19:15:50 +01:00
Artem Fetishev
8bc0475ee7 docs: update LTS releases
Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2026-01-05 18:42:21 +01:00
Zhu Jiekun
89414062bf bugfix: allow reloading when init with empty remote write relabeling flags (#10213)
### Describe Your Changes

fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10211

This pull request adds a `flagSet bool` field to the `relabelConfigs` struct
and uses this flagSet value as the result of the `isSet()` function.

The reloading should be available when at least one of the command-line
flags `-remoteWrite.relabelConfig` / `-remoteWrite.urlRelabelConfig` is
set.

### Checklist

The following checks are **mandatory**:

- [x] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [x] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

---------

Co-authored-by: Hui Wang <haley@victoriametrics.com>
Co-authored-by: Max Kotliar <mkotlyar@victoriametrics.com>
2026-01-05 12:53:52 +02:00
JAYICE
67c51b009d document: guide users to use --data-binary in curl when importing multi-line influx data (#10198)
### Describe Your Changes

fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10165.

Refer to [curl docs](https://curl.se/docs/manpage.html#--data).

> When --data is told to read from a file like that, carriage returns,
newlines and null bytes are stripped out

If users import multiple lines of data from a file via `/api/v2/write`, they
may follow the example we gave and use `-d` with curl; newlines will then
be stripped out, hence the parse error in VictoriaMetrics.

It's not a VictoriaMetrics bug, but it is better to guide users to
use `--data-binary`, just like
[/api/v1/import](https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1import)
does; see the example below.
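A hypothetical example (host and file name are made up); `--data-binary` keeps newlines intact, while `-d` strips them and breaks line-protocol parsing:

```
curl -X POST 'http://localhost:8428/api/v2/write' --data-binary @points.txt
```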

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

---------

Co-authored-by: Zhu Jiekun <jiekun@victoriametrics.com>
2026-01-05 10:54:47 +02:00
Artem Fetishev
e8160fc8fb CHANGELOG.md: cut v1.133.0 release
Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2026-01-02 12:17:38 +00:00
Artem Fetishev
e3a4ceaef3 deployment/docker: upgrade base docker image (Alpine) from 3.22.2 to 3.23.2
Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2026-01-02 10:22:41 +00:00
Andrii Chubatiuk
e9cedca8c8 docs: replace old grafana datasource page with links to a new one (#10231)
### Describe Your Changes

fixes https://github.com/VictoriaMetrics/vmdocs/issues/192

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-01-01 18:54:52 +02:00
Andrii Chubatiuk
b720e55c13 vmsingle: properly proxy requests to all supported vmalert paths (#10179)
### Describe Your Changes

Modify the initial request path with a proper value before sending the request to vmalert.
Sync the vmalert proxy implementation with the one in the cluster branch.
fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10178

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).

---------

Co-authored-by: Hui Wang <haley@victoriametrics.com>
2026-01-01 16:28:55 +02:00
Artem Fetishev
ab1429c896 lib/storage: fix tagFiltersCache stats collection (#10230)
Since the cache may be reset too often, using sizeBytes as an
indicator that this is the first indexDB met while collecting tfssCache stats
is unreliable, because it can often be zero for all indexDB instances. Use
the Requests metric instead, because it is never reset.

Follow-up for #10204.

---------

Signed-off-by: Artem Fetishev <rtm@victoriametrics.com>
2025-12-31 13:35:54 +01:00
1802 changed files with 94090 additions and 87437 deletions

View File

@@ -34,33 +34,39 @@ jobs:
- name: Code checkout
uses: actions/checkout@v6
- name: Setup Node
uses: actions/setup-node@v6
- name: Cache node_modules
id: cache
uses: actions/cache@v5
with:
node-version: '24.x'
path: app/vmui/packages/vmui/node_modules
key: vmui-deps-${{ runner.os }}-${{ hashFiles('app/vmui/packages/vmui/package-lock.json', 'app/vmui/Dockerfile-build') }}
restore-keys: |
vmui-deps-${{ runner.os }}-
- name: Cache node-modules
uses: actions/cache@v4
with:
path: |
app/vmui/packages/vmui/node_modules
key: vmui-artifacts-${{ runner.os }}-${{ hashFiles('package-lock.json') }}
restore-keys: vmui-artifacts-${{ runner.os }}-
- name: Install dependencies
if: steps.cache.outputs.cache-hit != 'true'
run: make vmui-install
- name: Run lint
id: lint
run: make vmui-lint
continue-on-error: true
env:
VMUI_SKIP_INSTALL: true
- name: Run tests
id: test
run: make vmui-test
continue-on-error: true
env:
VMUI_SKIP_INSTALL: true
- name: Run typecheck
id: typecheck
run: make vmui-typecheck
continue-on-error: true
env:
VMUI_SKIP_INSTALL: true
- name: Annotate Code Linting Results
uses: ataylorme/eslint-annotate-action@v3

View File

@@ -175,7 +175,7 @@
END OF TERMS AND CONDITIONS
Copyright 2019-2025 VictoriaMetrics, Inc.
Copyright 2019-2026 VictoriaMetrics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -134,6 +134,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
w.Header().Add("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<h2>Single-node VictoriaMetrics</h2></br>")
fmt.Fprintf(w, "Version %s<br>", buildinfo.Version)
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/'>https://docs.victoriametrics.com/</a></br>")
fmt.Fprintf(w, "Useful endpoints:</br>")
httpserver.WriteAPIHelp(w, [][2]string{

View File

@@ -29,11 +29,9 @@ var selfScraperWG sync.WaitGroup
func startSelfScraper() {
selfScraperStopCh = make(chan struct{})
selfScraperWG.Add(1)
go func() {
defer selfScraperWG.Done()
selfScraperWG.Go(func() {
selfScraper(*selfScrapeInterval)
}()
})
}
func stopSelfScraper() {

View File

@@ -245,6 +245,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
w.Header().Add("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<h2>vmagent</h2>")
fmt.Fprintf(w, "Version %s<br>", buildinfo.Version)
fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/victoriametrics/vmagent/'>https://docs.victoriametrics.com/victoriametrics/vmagent/</a></br>")
fmt.Fprintf(w, "Useful endpoints:</br>")
httpserver.WriteAPIHelp(w, [][2]string{

View File

@@ -202,14 +202,10 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
c.retriesCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.sanitizedURL))
c.sendDuration = metrics.GetOrCreateFloatCounter(fmt.Sprintf(`vmagent_remotewrite_send_duration_seconds_total{url=%q}`, c.sanitizedURL))
metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_queues{url=%q}`, c.sanitizedURL), func() float64 {
return float64(*queues)
return float64(concurrency)
})
for i := 0; i < concurrency; i++ {
c.wg.Add(1)
go func() {
defer c.wg.Done()
c.runWorker()
}()
for range concurrency {
c.wg.Go(c.runWorker)
}
logger.Infof("initialized client for -remoteWrite.url=%q", c.sanitizedURL)
}

View File

@@ -48,11 +48,7 @@ func newPendingSeries(fq *persistentqueue.FastQueue, isVMRemoteWrite *atomic.Boo
ps.wr.significantFigures = significantFigures
ps.wr.roundDigits = roundDigits
ps.stopCh = make(chan struct{})
ps.periodicFlusherWG.Add(1)
go func() {
defer ps.periodicFlusherWG.Done()
ps.periodicFlusher()
}()
ps.periodicFlusherWG.Go(ps.periodicFlusher)
return &ps
}

View File

@@ -9,14 +9,14 @@ import (
"sync"
"sync/atomic"
"github.com/VictoriaMetrics/metrics"
"gopkg.in/yaml.v2"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"go.yaml.in/yaml/v3"
"github.com/VictoriaMetrics/metrics"
)
var (
@@ -139,6 +139,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
remoteWriteRelabelConfigData.Store(&rawCfg)
rcs.global = global
}
if len(*relabelConfigPaths) > len(*remoteWriteURLs) {
return nil, fmt.Errorf("too many -remoteWrite.urlRelabelConfig args: %d; it mustn't exceed the number of -remoteWrite.url args: %d",
len(*relabelConfigPaths), (len(*remoteWriteURLs)))
@@ -176,19 +177,9 @@ type relabelConfigs struct {
perURL []*promrelabel.ParsedConfigs
}
// isSet indicates whether (global or per-URL) command-line flags is set
func (rcs *relabelConfigs) isSet() bool {
if rcs == nil {
return false
}
if rcs.global.Len() > 0 {
return true
}
for _, pc := range rcs.perURL {
if pc.Len() > 0 {
return true
}
}
return false
return *relabelConfigPathGlobal != "" || len(*relabelConfigPaths) > 0
}
// initLabelsGlobal must be called after parsing command-line flags.

View File

@@ -59,7 +59,7 @@ var (
"See also -remoteWrite.maxDiskUsagePerURL and -remoteWrite.disableOnDiskQueue")
keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
"Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.")
queues = flag.Int("remoteWrite.queues", cgroup.AvailableCPUs()*2, "The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues "+
queues = flagutil.NewArrayInt("remoteWrite.queues", cgroup.AvailableCPUs()*2, "The number of concurrent queues to each -remoteWrite.url. Set more queues if default number of queues "+
"isn't enough for sending high volume of collected data to remote storage. "+
"Default value depends on the number of available CPU cores. It should work fine in most cases since it minimizes resource usage")
showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
@@ -176,13 +176,6 @@ func Init() {
})
}
if *queues > maxQueues {
*queues = maxQueues
}
if *queues <= 0 {
*queues = 1
}
if len(*shardByURLLabels) > 0 && len(*shardByURLIgnoreLabels) > 0 {
logger.Fatalf("-remoteWrite.shardByURL.labels and -remoteWrite.shardByURL.ignoreLabels cannot be set simultaneously; " +
"see https://docs.victoriametrics.com/victoriametrics/vmagent/#sharding-among-remote-storages")
@@ -215,9 +208,7 @@ func Init() {
dropDanglingQueues()
// Start config reloader.
configReloaderWG.Add(1)
go func() {
defer configReloaderWG.Done()
configReloaderWG.Go(func() {
for {
select {
case <-configReloaderStopCh:
@@ -227,7 +218,7 @@ func Init() {
reloadRelabelConfigs()
reloadStreamAggrConfigs()
}
}()
})
}
func dropDanglingQueues() {
@@ -267,17 +258,6 @@ func initRemoteWriteCtxs(urls []string) {
if len(urls) == 0 {
logger.Panicf("BUG: urls must be non-empty")
}
maxInmemoryBlocks := memory.Allowed() / len(urls) / *maxRowsPerBlock / 100
if maxInmemoryBlocks / *queues > 100 {
// There is no much sense in keeping higher number of blocks in memory,
// since this means that the producer outperforms consumer and the queue
// will continue growing. It is better storing the queue to file.
maxInmemoryBlocks = 100 * *queues
}
if maxInmemoryBlocks < 2 {
maxInmemoryBlocks = 2
}
rwctxs := make([]*remoteWriteCtx, len(urls))
rwctxIdx := make([]int, len(urls))
if retryMaxTime.String() != "" {
@@ -292,7 +272,7 @@ func initRemoteWriteCtxs(urls []string) {
if *showRemoteWriteURL {
sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
}
rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, maxInmemoryBlocks, sanitizedURL)
rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, sanitizedURL)
rwctxIdx[i] = i
}
@@ -558,11 +538,9 @@ func tryPushMetadataToRemoteStorages(rwctxs []*remoteWriteCtx, mms []prompb.Metr
// Push metadata to remote storage systems in parallel to reduce
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
wg.Add(len(rwctxs))
var anyPushFailed atomic.Bool
for _, rwctx := range rwctxs {
go func(rwctx *remoteWriteCtx) {
defer wg.Done()
wg.Go(func() {
if !rwctx.tryPushMetadataInternal(mms) {
rwctx.pushFailures.Inc()
if forceDropSamplesOnFailure {
@@ -571,7 +549,7 @@ func tryPushMetadataToRemoteStorages(rwctxs []*remoteWriteCtx, mms []prompb.Metr
}
anyPushFailed.Store(true)
}
}(rwctx)
})
}
wg.Wait()
return !anyPushFailed.Load()
@@ -603,15 +581,13 @@ func tryPushTimeSeriesToRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []prom
// Push tssBlock to remote storage systems in parallel to reduce
// the time needed for sending the data to multiple remote storage systems.
var wg sync.WaitGroup
wg.Add(len(rwctxs))
var anyPushFailed atomic.Bool
for _, rwctx := range rwctxs {
go func(rwctx *remoteWriteCtx) {
defer wg.Done()
wg.Go(func() {
if !rwctx.TryPushTimeSeries(tssBlock, forceDropSamplesOnFailure) {
anyPushFailed.Store(true)
}
}(rwctx)
})
}
wg.Wait()
return !anyPushFailed.Load()
@@ -633,13 +609,11 @@ func tryShardingTimeSeriesAmongRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock
if len(shard) == 0 {
continue
}
wg.Add(1)
go func(rwctx *remoteWriteCtx, tss []prompb.TimeSeries) {
defer wg.Done()
if !rwctx.TryPushTimeSeries(tss, forceDropSamplesOnFailure) {
wg.Go(func() {
if !rwctx.TryPushTimeSeries(shard, forceDropSamplesOnFailure) {
anyPushFailed.Store(true)
}
}(rwctx, shard)
})
}
wg.Wait()
return !anyPushFailed.Load()
@@ -848,7 +822,7 @@ type remoteWriteCtx struct {
rowsDroppedOnPushFailure *metrics.Counter
}
func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks int, sanitizedURL string) *remoteWriteCtx {
func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, sanitizedURL string) *remoteWriteCtx {
// strip query params, otherwise changing params resets pq
pqURL := *remoteWriteURL
pqURL.RawQuery = ""
@@ -863,6 +837,23 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
}
isPQDisabled := disableOnDiskQueue.GetOptionalArg(argIdx)
queuesSize := queues.GetOptionalArg(argIdx)
if queuesSize > maxQueues {
queuesSize = maxQueues
} else if queuesSize <= 0 {
queuesSize = 1
}
maxInmemoryBlocks := memory.Allowed() / len(*remoteWriteURLs) / *maxRowsPerBlock / 100
if maxInmemoryBlocks/queuesSize > 100 {
// There is not much sense in keeping a higher number of blocks in memory,
// since this means that the producer outperforms the consumer and the queue
// will continue growing. It is better to store the queue in a file.
maxInmemoryBlocks = 100 * queuesSize
}
if maxInmemoryBlocks < 2 {
maxInmemoryBlocks = 2
}
fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes, isPQDisabled)
_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{path=%q, url=%q}`, queuePath, sanitizedURL), func() float64 {
return float64(fq.GetPendingBytes())
@@ -880,16 +871,16 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
var c *client
switch remoteWriteURL.Scheme {
case "http", "https":
c = newHTTPClient(argIdx, remoteWriteURL.String(), sanitizedURL, fq, *queues)
c = newHTTPClient(argIdx, remoteWriteURL.String(), sanitizedURL, fq, queuesSize)
default:
logger.Fatalf("unsupported scheme: %s for remoteWriteURL: %s, want `http`, `https`", remoteWriteURL.Scheme, sanitizedURL)
}
c.init(argIdx, *queues, sanitizedURL)
c.init(argIdx, queuesSize, sanitizedURL)
// Initialize pss
sf := significantFigures.GetOptionalArg(argIdx)
rd := roundDigits.GetOptionalArg(argIdx)
pssLen := *queues
pssLen := queuesSize
if n := cgroup.AvailableCPUs(); pssLen > n {
// There is no sense in running more than availableCPUs concurrent pendingSeries,
// since every pendingSeries can saturate up to a single CPU.
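The maxInmemoryBlocks calculation moved into newRemoteWriteCtx above splits the allowed memory budget evenly across the configured remote-write URLs, then caps the result at 100*queuesSize and floors it at 2. A standalone sketch of that clamping with hypothetical inputs (the numbers below are illustrative, not vmagent defaults):

package main

import "fmt"

// clampInmemoryBlocks mirrors the clamping performed in newRemoteWriteCtx above.
func clampInmemoryBlocks(allowedMemory, numURLs, maxRowsPerBlock, queuesSize int) int {
	n := allowedMemory / numURLs / maxRowsPerBlock / 100
	if n/queuesSize > 100 {
		// More than ~100 in-memory blocks per queue only means the producer outpaces
		// the consumer; the extra backlog is better spilled to the on-disk queue.
		n = 100 * queuesSize
	}
	if n < 2 {
		n = 2
	}
	return n
}

func main() {
	// Hypothetical: 4GiB memory budget, 2 remote-write URLs, 10k rows per block, 4 queues.
	fmt.Println(clampInmemoryBlocks(4<<30, 2, 10000, 4)) // prints 400 (clamped to 100*queuesSize)
}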

View File

@@ -76,11 +76,14 @@ func (t *Type) ValidateExpr(expr string) error {
if err != nil {
return fmt.Errorf("bad LogsQL expr: %q, err: %w", expr, err)
}
fields, _ := q.GetStatsByFields()
for i := range fields {
labels, err := q.GetStatsLabels()
if err != nil {
return fmt.Errorf("cannot obtain labels from LogsQL expr: %q, err: %w", expr, err)
}
for i := range labels {
// VictoriaLogs inserts the `_time` field as a result label when the query contains `stats by (_time:step)`,
// making the result meaningless and potentially leading to cardinality issues.
if fields[i] == "_time" {
if labels[i] == "_time" {
return fmt.Errorf("bad LogsQL expr: %q, err: cannot contain time buckets stats pipe `stats by (_time:step)`", expr)
}
}
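For illustration, a hypothetical LogsQL expression of the kind this check rejects: a time-bucketed stats pipe surfaces `_time` as a result label, which ValidateExpr above now detects via GetStatsLabels. The expressions below are assumptions for illustration, not taken from this changeset:

package main

import "fmt"

func main() {
	// Illustrative LogsQL expressions (assumed syntax).
	// The first uses a time-bucketed stats pipe, so `_time` shows up as a result label
	// and ValidateExpr above is expected to reject it; the second groups by a regular field.
	rejected := `error | stats by (_time:1h) count() as errors`
	accepted := `error | stats by (instance) count() as errors`
	fmt.Println(rejected, accepted)
}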

View File

@@ -81,9 +81,7 @@ absolute path to all .tpl files in root.
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rule files are validated. The -rule flag must be specified.")
)
var (
extURL *url.URL
)
var extURL *url.URL
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
@@ -161,7 +159,7 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
manager, err := newManager(ctx)
if err != nil {
logger.Fatalf("failed to init: %s", err)
logger.Fatalf("failed to create manager: %s", err)
}
logger.Infof("reading rules configuration file from %q", strings.Join(*rulePath, ";"))
groupsCfg, err := config.Parse(*rulePath, validateTplFn, *validateExpressions)

View File

@@ -65,11 +65,9 @@ func TestManagerUpdateConcurrent(t *testing.T) {
const workers = 500
const iterations = 10
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func(n int) {
defer wg.Done()
var wg sync.WaitGroup
for n := range workers {
wg.Go(func() {
r := rand.New(rand.NewSource(int64(n)))
for i := 0; i < iterations; i++ {
rnd := r.Intn(len(paths))
@@ -79,7 +77,7 @@ func TestManagerUpdateConcurrent(t *testing.T) {
}
_ = m.update(context.Background(), cfg, false)
}
}(i)
})
}
wg.Wait()
}

View File

@@ -80,14 +80,15 @@ func (as AlertState) String() string {
// AlertTplData is used to execute templating
type AlertTplData struct {
Type string
Labels map[string]string
Value float64
Expr string
AlertID uint64
GroupID uint64
ActiveAt time.Time
For time.Duration
Type string
Labels map[string]string
Value float64
Expr string
AlertID uint64
GroupID uint64
ActiveAt time.Time
For time.Duration
IsPartial bool
}
var tplHeaders = []string{
@@ -101,6 +102,7 @@ var tplHeaders = []string{
"{{ $groupID := .GroupID }}",
"{{ $activeAt := .ActiveAt }}",
"{{ $for := .For }}",
"{{ $isPartial := .IsPartial }}",
}
// ExecTemplate executes the Alert template for given
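The new IsPartial field is exposed to templates as $isPartial via the tplHeaders entry above. A hedged sketch of how an annotation template could consume it through notifier.ExecTemplate; the labels, annotation text and nil query function are illustrative assumptions, not part of this changeset:

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
)

func main() {
	data := notifier.AlertTplData{
		Labels:    map[string]string{"instance": "foo"}, // hypothetical labels
		Value:     42,
		IsPartial: true, // in AlertingRule.exec this comes from isPartialResponse(res)
	}
	annotations := map[string]string{
		"summary": `High value on {{ $labels.instance }}{{ if $isPartial }} (partial response - verify manually){{ end }}`,
	}
	// Passing a nil query function is assumed to be fine here since {{ query }} is not used.
	as, err := notifier.ExecTemplate(nil, annotations, data)
	if err != nil {
		panic(err)
	}
	fmt.Println(as["summary"]) // High value on foo (partial response - verify manually)
}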

View File

@@ -14,7 +14,6 @@ import (
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
@@ -172,11 +171,6 @@ const alertManagerPath = "/api/v2/alerts"
func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg promauth.HTTPClientConfig,
relabelCfg *promrelabel.ParsedConfigs, timeout time.Duration,
) (*AlertManager, error) {
if err := httputil.CheckURL(alertManagerURL); err != nil {
return nil, fmt.Errorf("invalid alertmanager URL: %w", err)
}
tls := &promauth.TLSConfig{}
if authCfg.TLSConfig != nil {
tls = authCfg.TLSConfig

View File

@@ -212,18 +212,16 @@ consul_sd_configs:
const workers = 500
const iterations = 10
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func(n int) {
defer wg.Done()
var wg sync.WaitGroup
for n := range workers {
wg.Go(func() {
r := rand.New(rand.NewSource(int64(n)))
for i := 0; i < iterations; i++ {
rnd := r.Intn(len(paths))
_ = cw.reload(paths[rnd]) // update can fail and this is expected
_ = cw.notifiers()
}
}(i)
})
}
wg.Wait()
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
@@ -229,6 +230,9 @@ func notifiersFromFlags(gen AlertURLGenerator) ([]Notifier, error) {
Headers: []string{headers.GetOptionalArg(i)},
}
if err := httputil.CheckURL(addr); err != nil {
return nil, fmt.Errorf("invalid notifier.url %q: %w", addr, err)
}
addr = strings.TrimSuffix(addr, "/")
am, err := NewAlertManager(addr+alertManagerPath, gen, authCfg, nil, sendTimeout.GetOptionalArg(i))
if err != nil {
@@ -266,7 +270,7 @@ func GetTargets() map[TargetType][]Target {
if getActiveNotifiers == nil {
return nil
}
var targets = make(map[TargetType][]Target)
targets := make(map[TargetType][]Target)
// use cached targets from configWatcher instead of getActiveNotifiers for the extra target labels
if cw != nil {
cw.targetsMu.RLock()

View File

@@ -55,9 +55,9 @@ func TestInitNegative(t *testing.T) {
*blackHole = oldBlackHole
}()
f := func(path, addr string, bh bool) {
f := func(path string, addr []string, bh bool) {
*configPath = path
*addrs = flagutil.ArrayString{addr}
*addrs = flagutil.ArrayString(addr)
*blackHole = bh
if err := Init(nil, ""); err == nil {
t.Fatalf("expected to get error; got nil instead")
@@ -65,9 +65,12 @@ func TestInitNegative(t *testing.T) {
}
// *configPath, *addrs and *blackhole are mutually exclusive
f("/dummy/path", "127.0.0.1", false)
f("/dummy/path", "", true)
f("", "127.0.0.1", true)
f("/dummy/path", []string{"127.0.0.1"}, false)
f("/dummy/path", []string{}, true)
f("", []string{"127.0.0.1"}, true)
// addr cannot be ""
f("", []string{""}, false)
f("", []string{"127.0.0.1", ""}, false)
}
func TestBlackHole(t *testing.T) {

View File

@@ -346,6 +346,8 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
ls.processed[l.Name] = l.Value
}
// labels only support a limited set of templating variables,
// including `labels`, `value` and `expr`, to avoid breaking alert states or causing cardinality issues with results
extraLabels, err := notifier.ExecTemplate(qFn, ar.Labels, notifier.AlertTplData{
Labels: ls.origin,
Value: m.Values[0],
@@ -387,11 +389,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
return nil, err
}
alertID := hash(ls.processed)
as, err := ar.expandAnnotationTemplates(s, qFn, time.Time{}, ls)
if err != nil {
return nil, err
}
a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert
a := ar.newAlert(s, time.Time{}, ls.processed, nil) // initial alert
prevT := time.Time{}
for i := range s.Values {
@@ -407,8 +405,6 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
// reset to Pending if there are gaps > EvalInterval between DPs
a.State = notifier.StatePending
a.ActiveAt = at
// re-template the annotations as active timestamp is changed
a.Annotations, _ = ar.expandAnnotationTemplates(s, qFn, at, ls)
a.Start = time.Time{}
} else if at.Sub(a.ActiveAt) >= ar.For && a.State != notifier.StateFiring {
a.State = notifier.StateFiring
@@ -463,7 +459,8 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
}
ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))
isPartial := isPartialResponse(res)
ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartial)
qFn := func(query string) ([]datasource.Metric, error) {
res, _, err := ar.q.Query(ctx, query, ts)
return res.Data, err
@@ -489,7 +486,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
at = a.ActiveAt
}
}
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls)
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls, isPartial)
if err != nil {
// only set error in current state, but do not break alert processing
curState.Err = err
@@ -607,16 +604,17 @@ func (ar *AlertingRule) expandLabelTemplates(m datasource.Metric, qFn templates.
return ls, nil
}
func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet) (map[string]string, error) {
func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet, isPartial bool) (map[string]string, error) {
tplData := notifier.AlertTplData{
Value: m.Values[0],
Type: ar.Type.String(),
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: activeAt,
For: ar.For,
Value: m.Values[0],
Type: ar.Type.String(),
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: activeAt,
For: ar.For,
IsPartial: isPartial,
}
as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
if err != nil {

View File

@@ -664,7 +664,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-pending",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-pending"},
Annotations: map[string]string{"activeAt": "5000"},
Annotations: map[string]string{},
State: notifier.StatePending,
ActiveAt: time.Unix(5, 0),
Value: 1,
@@ -684,7 +684,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-firing",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-firing"},
Annotations: map[string]string{"activeAt": "1000"},
Annotations: map[string]string{},
State: notifier.StateFiring,
ActiveAt: time.Unix(1, 0),
Start: time.Unix(5, 0),
@@ -705,7 +705,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-hold-pending",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-hold-pending"},
Annotations: map[string]string{"activeAt": "5000"},
Annotations: map[string]string{},
State: notifier.StatePending,
ActiveAt: time.Unix(5, 0),
Value: 1,
@@ -1120,7 +1120,7 @@ func TestAlertingRuleLimit_Success(t *testing.T) {
}
func TestAlertingRule_Template(t *testing.T) {
f := func(rule *AlertingRule, metrics []datasource.Metric, alertsExpected map[uint64]*notifier.Alert) {
f := func(rule *AlertingRule, metrics []datasource.Metric, isResponsePartial bool, alertsExpected map[uint64]*notifier.Alert) {
t.Helper()
fakeGroup := Group{
@@ -1133,6 +1133,7 @@ func TestAlertingRule_Template(t *testing.T) {
entries: make([]StateEntry, 10),
}
fq.Add(metrics...)
fq.SetPartialResponse(isResponsePartial)
if _, err := rule.exec(context.TODO(), time.Now(), 0); err != nil {
t.Fatalf("unexpected error: %s", err)
@@ -1163,7 +1164,7 @@ func TestAlertingRule_Template(t *testing.T) {
}, []datasource.Metric{
metricWithValueAndLabels(t, 1, "instance", "foo"),
metricWithValueAndLabels(t, 1, "instance", "bar"),
}, map[uint64]*notifier.Alert{
}, false, map[uint64]*notifier.Alert{
hash(map[string]string{alertNameLabel: "common", "region": "east", "instance": "foo"}): {
Annotations: map[string]string{
"summary": `common: Too high connection number for "foo"`,
@@ -1192,14 +1193,14 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "{{ $labels.instance }}",
},
Annotations: map[string]string{
"summary": `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}"`,
"summary": `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}".{{ if $isPartial }} WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.{{ end }}`,
"description": `{{ $labels.alertname}}: It is {{ $value }} connections for "{{ $labels.instance }}"`,
},
alerts: make(map[uint64]*notifier.Alert),
}, []datasource.Metric{
metricWithValueAndLabels(t, 2, "__name__", "first", "instance", "foo", alertNameLabel, "override"),
metricWithValueAndLabels(t, 10, "__name__", "second", "instance", "bar", alertNameLabel, "override"),
}, map[uint64]*notifier.Alert{
}, false, map[uint64]*notifier.Alert{
hash(map[string]string{alertNameLabel: "override label", "exported_alertname": "override", "instance": "foo"}): {
Labels: map[string]string{
alertNameLabel: "override label",
@@ -1207,7 +1208,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "foo",
},
Annotations: map[string]string{
"summary": `first: Too high connection number for "foo"`,
"summary": `first: Too high connection number for "foo".`,
"description": `override: It is 2 connections for "foo"`,
},
},
@@ -1218,7 +1219,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "bar",
},
Annotations: map[string]string{
"summary": `second: Too high connection number for "bar"`,
"summary": `second: Too high connection number for "bar".`,
"description": `override: It is 10 connections for "bar"`,
},
},
@@ -1231,7 +1232,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "{{ $labels.instance }}",
},
Annotations: map[string]string{
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}`,
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}.{{ if $isPartial }} WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.{{ end }}`,
},
alerts: make(map[uint64]*notifier.Alert),
}, []datasource.Metric{
@@ -1239,7 +1240,7 @@ func TestAlertingRule_Template(t *testing.T) {
alertNameLabel, "originAlertname",
alertGroupNameLabel, "originGroupname",
"instance", "foo"),
}, map[uint64]*notifier.Alert{
}, true, map[uint64]*notifier.Alert{
hash(map[string]string{
alertNameLabel: "OriginLabels",
"exported_alertname": "originAlertname",
@@ -1255,7 +1256,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "foo",
},
Annotations: map[string]string{
"summary": `Alert "originAlertname(originGroupname)" for instance foo`,
"summary": `Alert "originAlertname(originGroupname)" for instance foo. WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.`,
},
},
})
@@ -1385,7 +1386,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert",
"alertname": "ConfigurationReloadFailure",
"alertgroup": "vmalert",
"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}
expectedProcessedLabels := map[string]string{
@@ -1395,7 +1396,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"exported_alertname": "ConfigurationReloadFailure",
"group": "vmalert",
"alertgroup": "vmalert",
"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}
ls, err := ar.toLabels(metric, nil)

View File

@@ -65,17 +65,15 @@ func TestRule_stateConcurrent(_ *testing.T) {
r := &AlertingRule{state: &ruleState{entries: make([]StateEntry, 20)}}
const workers = 50
const iterations = 100
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer wg.Done()
var wg sync.WaitGroup
for range workers {
wg.Go(func() {
for i := 0; i < iterations; i++ {
r.state.add(StateEntry{At: time.Now()})
r.state.getAll()
r.state.getLast()
}
}()
})
}
wg.Wait()
}

View File

@@ -9,6 +9,7 @@
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/rule"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
) %}
{% func Controls(prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) %}
@@ -78,6 +79,8 @@
{% func Welcome(r *http.Request) %}
{%= tpl.Header(r, navItems, "vmalert", getLastConfigError()) %}
<p>
Version {%s buildinfo.Version %} <br>
API:<br>
{% for _, p := range apiLinks %}
{%code p, doc := p[0], p[1] %}

File diff suppressed because it is too large

View File

@@ -113,10 +113,8 @@ func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
case ui.concurrencyLimitCh <- struct{}{}:
return nil
default:
ui.concurrencyLimitReached.Inc()
// The per-user limit for the number of concurrent requests is reached.
// Wait until the currently executed requests are finished, so the current request could be executed.
// The number of concurrently executed requests for the given user equals the limit.
// Wait until some of the currently executed requests are finished, so that the current request can be executed.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
select {
case ui.concurrencyLimitCh <- struct{}{}:
@@ -124,6 +122,8 @@ func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
case <-ctx.Done():
err := ctx.Err()
if errors.Is(err, context.DeadlineExceeded) {
// The current request couldn't be executed before the request timeout expired.
ui.concurrencyLimitReached.Inc()
return fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because %d concurrent requests from the user %s are executed",
*maxQueueDuration, ui.getMaxConcurrentRequests(), ui.name())
}
@@ -150,12 +150,22 @@ func (ui *UserInfo) stopHealthChecks() {
if ui == nil {
return
}
if ui.URLPrefix == nil {
return
}
bus := ui.URLPrefix.bus.Load()
bus.stopHealthChecks()
if ui.URLPrefix != nil {
bus := ui.URLPrefix.bus.Load()
bus.stopHealthChecks()
}
if ui.DefaultURL != nil {
bus := ui.DefaultURL.bus.Load()
bus.stopHealthChecks()
}
for i := range ui.URLMaps {
um := &ui.URLMaps[i]
if um.URLPrefix != nil {
bus := um.URLPrefix.bus.Load()
bus.stopHealthChecks()
}
}
}
// Header is `Name: Value` http header, which must be added to the proxied request.
@@ -363,12 +373,10 @@ func (bu *backendURL) isBroken() bool {
func (bu *backendURL) setBroken() {
if bu.broken.CompareAndSwap(false, true) {
bu.healthCheckWG.Add(1)
go func() {
defer bu.healthCheckWG.Done()
bu.healthCheckWG.Go(func() {
bu.runHealthCheck()
bu.broken.Store(false)
}()
})
}
}
@@ -394,7 +402,7 @@ func (bu *backendURL) runHealthCheck() {
if errors.Is(bu.healthCheckContext.Err(), context.Canceled) {
return
}
logger.Warnf("ignoring the backend at %s for %s becasue of dial error: %s", addr, *failTimeout, err)
logger.Warnf("ignoring the backend at %s for %s because of dial error: %s", addr, *failTimeout, err)
continue
}
@@ -733,11 +741,9 @@ func initAuthConfig() {
configTimestamp.Set(fasttime.UnixTimestamp())
stopCh = make(chan struct{})
authConfigWG.Add(1)
go func() {
defer authConfigWG.Done()
authConfigWG.Go(func() {
authConfigReloader(sighupCh)
}()
})
}
func stopAuthConfig() {
@@ -809,7 +815,7 @@ func reloadAuthConfig() (bool, error) {
ok, err := reloadAuthConfigData(data)
if err != nil {
return false, fmt.Errorf("failed to pars -auth.config=%q: %w", *authConfigPath, err)
return false, fmt.Errorf("failed to parse -auth.config=%q: %w", *authConfigPath, err)
}
if !ok {
return false, nil

View File

@@ -24,6 +24,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ioutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
@@ -40,27 +41,38 @@ var (
useProxyProtocol = flagutil.NewArrayBool("httpListenAddr.useProxyProtocol", "Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . "+
"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host. "+
"See also -maxConcurrentRequests")
idleConnTimeout = flag.Duration("idleConnTimeout", 50*time.Second, "The timeout for HTTP keep-alive connections to backend services. "+
maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host")
idleConnTimeout = flag.Duration("idleConnTimeout", 50*time.Second, "The timeout for HTTP keep-alive connections to backend services. "+
"It is recommended setting this value to values smaller than -http.idleConnTimeout set at backend services")
responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")
maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process. Other requests are rejected with "+
"'429 Too Many Requests' http status code. See also -maxQueueDuration, -maxConcurrentPerUserRequests and -maxIdleConnsPerBackend command-line options")
maxConcurrentPerUserRequests = flag.Int("maxConcurrentPerUserRequests", 300, "The maximum number of concurrent requests vmauth can process per each configured user. "+
"Other requests are rejected with '429 Too Many Requests' http status code. See also -maxQueueDuration and -maxConcurrentRequests command-line options "+
"and max_concurrent_requests option in per-user config")
maxQueueDuration = flag.Duration("maxQueueDuration", 10*time.Second, "The maximum duration the request waits for execution when the number of concurrently executed "+
"requests reach -maxConcurrentRequests or -maxConcurrentPerUserRequests before returning '429 Too Many Requests' error. "+
"This allows graceful handling of short spikes in the number of concurrent requests")
requestBufferSize = flagutil.NewBytes("requestBufferSize", 32*1024, "The size of the buffer for reading the request body before proxying the request to backends. "+
"This allows reducing the comsumption of backend resources when processing requests from clients connected via slow networks. "+
"Set to 0 to disable request buffering. See https://docs.victoriametrics.com/victoriametrics/vmauth/#request-body-buffering")
maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size to buffer in memory for potential retries at other backends. "+
"Request bodies larger than this size cannot be retried if the backend fails. Zero or negative value disables request body buffering and retries. "+
"See also -requestBufferSize")
maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process simultaneously. "+
"Requests exceeding this limit are queued for up to -maxQueueDuration and then rejected with '429 Too Many Requests' http status code if the limit is still reached. "+
"This protects vmauth itself from overloading and out-of-memory (OOM) failures. See also -maxConcurrentPerUserRequests "+
"and https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting")
maxConcurrentPerUserRequests = flag.Int("maxConcurrentPerUserRequests", 100, "The maximum number of concurrent requests vmauth can process per each configured user. "+
"Requests exceeding this limit are queued for up to -maxQueueDuration and then rejected with '429 Too Many Requests' http status code if the limit is still reached. "+
"This provides fairness and isolation between users, preventing a single user from consuming all the available resources. "+
"It works in conjunction with -maxConcurrentRequests, which sets the global limit across all users. "+
"This default can be overridden for individual users via max_concurrent_requests option in per-user config. "+
"See https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting")
maxQueueDuration = flag.Duration("maxQueueDuration", 10*time.Second, "The maximum duration to wait before rejecting incoming requests if concurrency limit "+
"specified via -maxConcurrentRequests or -maxConcurrentPerUserRequests command-line flags is reached. "+
"Requests are rejected with '429 Too Many Requests' http status code if the limit is still reached after the -maxQueueDuration duration. "+
"This allows graceful handling of short spikes in concurrent requests. See https://docs.victoriametrics.com/victoriametrics/vmauth/#concurrency-limiting")
reloadAuthKey = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*")
logInvalidAuthTokens = flag.Bool("logInvalidAuthTokens", false, "Whether to log requests with invalid auth tokens. "+
`Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
failTimeout = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend")
maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size, which can be cached and re-tried at other backends. "+
"Bigger values may require more memory. Zero or negative value disables caching of request body. This may be useful when proxying data ingestion requests")
failTimeout = flag.Duration("failTimeout", 3*time.Second, "Sets a delay period for load balancing to skip a malfunctioning backend")
backendTLSInsecureSkipVerify = flag.Bool("backend.tlsInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to backends over HTTPS. "+
"See https://docs.victoriametrics.com/victoriametrics/vmauth/#backend-tls-setup")
backendTLSCAFile = flag.String("backend.TLSCAFile", "", "Optional path to TLS root CA file, which is used for TLS verification when connecting to backends over HTTPS. "+
@@ -215,48 +227,121 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
ctx, cancel := context.WithTimeout(r.Context(), *maxQueueDuration)
defer cancel()
// Limit the concurrency of requests to backends
// Acquire global concurrency limit.
if err := beginConcurrencyLimit(ctx); err != nil {
handleConcurrencyLimitError(w, r, err)
return
}
defer endConcurrencyLimit()
// Set the read deadline for reading the initial chunk of the request body.
rc := http.NewResponseController(w)
deadline, ok := ctx.Deadline()
if !ok {
logger.Panicf("BUG: expecting valid deadline for the context")
}
if err := rc.SetReadDeadline(deadline); err != nil {
logger.Panicf("BUG: cannot set read deadline: %s", err)
}
// Read the initial chunk of the request body.
userName := ui.name()
if userName == "" {
userName = "unauthorized"
}
bb, err := bufferRequestBody(ctx, r.Body, userName)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return
}
r.Body = bb
// Disable the read deadline for the rest of the request body.
if err := rc.SetReadDeadline(time.Time{}); err != nil {
logger.Panicf("BUG: cannot reset read deadline: %s", err)
}
// Acquire concurrency limit for the given user.
if err := ui.beginConcurrencyLimit(ctx); err != nil {
handleConcurrencyLimitError(w, r, err)
return
}
defer ui.endConcurrencyLimit()
// Process the request.
processRequest(w, r, ui)
}
func beginConcurrencyLimit(ctx context.Context) error {
concurrencyLimitOnce.Do(concurrencyLimitInit)
select {
case concurrencyLimitCh <- struct{}{}:
if err := ui.beginConcurrencyLimit(ctx); err != nil {
handleConcurrencyLimitError(w, r, err)
<-concurrencyLimitCh
return
}
return nil
default:
// -maxConcurrentRequests concurrent requests are already being executed. Wait until some of them are finished,
// so that the current request can be executed.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
select {
case concurrencyLimitCh <- struct{}{}:
if err := ui.beginConcurrencyLimit(ctx); err != nil {
handleConcurrencyLimitError(w, r, err)
<-concurrencyLimitCh
return
}
return nil
case <-ctx.Done():
err := ctx.Err()
concurrentRequestsLimitReached.Inc()
if errors.Is(err, context.DeadlineExceeded) {
err = fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because -maxConcurrentRequests=%d concurrent requests are executed",
// The current request couldn't be executed before the request timeout expired.
concurrentRequestsLimitReached.Inc()
return fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because -maxConcurrentRequests=%d concurrent requests are executed",
*maxQueueDuration, cap(concurrencyLimitCh))
handleConcurrencyLimitError(w, r, err)
return
}
err = fmt.Errorf("cannot start executing the request because -maxConcurrentRequests=%d concurrent requests are executed: %w", cap(concurrencyLimitCh), err)
handleConcurrencyLimitError(w, r, err)
return
return fmt.Errorf("cannot start executing the request because -maxConcurrentRequests=%d concurrent requests are executed: %w", cap(concurrencyLimitCh), err)
}
}
processRequest(w, r, ui)
ui.endConcurrencyLimit()
}
func endConcurrencyLimit() {
<-concurrencyLimitCh
}
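The rewritten processUserRequest above acquires the global -maxConcurrentRequests slot first, buffers the request body, and only then takes the per-user -maxConcurrentPerUserRequests slot, so slow clients no longer hold a per-user slot while their body trickles in. A simplified, self-contained sketch of the two-level semaphore (names and limits are hypothetical, not vmauth internals):

package main

import (
	"context"
	"fmt"
	"time"
)

// acquire tries to take a slot immediately and otherwise queues until ctx expires,
// mirroring the try-then-wait shape of beginConcurrencyLimit above.
func acquire(ctx context.Context, sem chan struct{}) error {
	select {
	case sem <- struct{}{}:
		return nil
	default:
		select {
		case sem <- struct{}{}:
			return nil
		case <-ctx.Done():
			return fmt.Errorf("concurrency limit still reached: %w", ctx.Err())
		}
	}
}

func main() {
	global := make(chan struct{}, 1000) // stands in for -maxConcurrentRequests
	perUser := make(chan struct{}, 100) // stands in for -maxConcurrentPerUserRequests
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) // -maxQueueDuration
	defer cancel()
	if err := acquire(ctx, global); err != nil {
		return
	}
	defer func() { <-global }()
	// ... the request body would be buffered here, before taking the per-user slot ...
	if err := acquire(ctx, perUser); err != nil {
		return
	}
	defer func() { <-perUser }()
	fmt.Println("request admitted")
}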
func bufferRequestBody(ctx context.Context, r io.ReadCloser, userName string) (io.ReadCloser, error) {
if r == nil {
// This is a GET request with nil reader.
return nil, nil
}
maxBufSize := max(requestBufferSize.IntN(), maxRequestBodySizeToRetry.IntN())
if maxBufSize <= 0 {
return r, nil
}
lr := ioutil.GetLimitedReader(r, int64(maxBufSize))
defer ioutil.PutLimitedReader(lr)
start := time.Now()
buf, err := io.ReadAll(lr)
bufferRequestBodyDuration.UpdateDuration(start)
if err != nil {
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
rejectSlowClientRequests.Inc()
d := time.Since(start)
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("reject request from the user %s because the request body couldn't be read in -maxQueueDuration=%s; read %d bytes in %s",
userName, *maxQueueDuration, len(buf), d.Truncate(time.Second)),
StatusCode: http.StatusBadRequest,
}
}
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("cannot read request body: %w", err),
StatusCode: http.StatusBadRequest,
}
}
bb := newBufferedBody(r, buf, maxBufSize)
return bb, nil
}
func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
u := normalizeURL(r.URL)
up, hc := ui.getURLPrefixAndHeaders(u, r.Host, r.Header)
@@ -282,9 +367,6 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
isDefault = true
}
rtb := newReadTrackingBody(r.Body, maxRequestBodySizeToRetry.IntN())
r.Body = rtb
maxAttempts := up.getBackendsCount()
for i := 0; i < maxAttempts; i++ {
bu := up.getBackendURL()
@@ -292,18 +374,19 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
break
}
targetURL := bu.url
// Don't change path and add request_path query param for default route.
if isDefault {
// Don't change path and add request_path query param for default route.
query := targetURL.Query()
query.Set("request_path", u.String())
targetURL.RawQuery = query.Encode()
} else { // Update path for regular routes.
} else {
// Update path for regular routes.
targetURL = mergeURLs(targetURL, u, up.dropSrcPathPrefixParts, up.mergeQueryArgs)
}
wasLocalRetry := false
again:
ok, needLocalRetry := tryProcessingRequest(w, r, targetURL, hc, up.retryStatusCodes, ui)
ok, needLocalRetry := tryProcessingRequest(w, r, targetURL, hc, up.retryStatusCodes, ui, bu)
if needLocalRetry && !wasLocalRetry {
wasLocalRetry = true
goto again
@@ -313,6 +396,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
if ok {
return
}
bu.setBroken()
ui.backendErrors.Inc()
}
@@ -324,7 +408,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
ui.requestErrors.Inc()
}
func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int, ui *UserInfo) (bool, bool) {
func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int, ui *UserInfo, bu *backendURL) (bool, bool) {
ui.backendRequests.Inc()
req := sanitizeRequestHeaders(r)
@@ -339,27 +423,19 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
}
}
rtb, rtbOK := req.Body.(*readTrackingBody)
bb, bbOK := req.Body.(*bufferedBody)
canRetry := !bbOK || bb.canRetry()
res, err := ui.rt.RoundTrip(req)
if ctxErr := r.Context().Err(); ctxErr != nil {
// Override the error returned by the RoundTrip with the context error if it isn't non-nil
// This makes sure the proper logging for canceled and timed out requests - log the real cause of the error
// instead of the random error, which could be returned from RoundTrip because of canceled or timed out request.
err = ctxErr
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not retry canceled requests.
clientCanceledRequests.Inc()
return true, false
}
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
// Do not retry canceled or timed out requests
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
if errors.Is(err, context.DeadlineExceeded) {
// Timed out requests must be counted as errors, since this usually means that the backend is slow.
logger.Warnf("remoteAddr: %s; requestURI: %s; timeout while proxying the response from %s: %s", remoteAddr, requestURI, targetURL, err)
}
return false, false
}
if !rtbOK || !rtb.canRetry() {
if !canRetry {
// Request body cannot be re-sent to another backend. Return the error to the client then.
err = &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("cannot proxy the request to %s: %w", targetURL, err),
@@ -368,27 +444,32 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
httpserver.Errorf(w, r, "%s", err)
ui.backendErrors.Inc()
ui.requestErrors.Inc()
bu.setBroken()
return true, false
}
if netutil.IsTrivialNetworkError(err) {
// Retry request at the same backend on trivial network errors, such as proxy idle timeout misconfiguration or socket close by OS
if bbOK {
bb.resetReader()
}
return false, true
}
// Request body wasn't read yet, this usually means that the backend isn't reachable; retry the request at another backend
// Retry the request at another backend
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
// NOTE: do not use httpserver.GetRequestURI
// it explicitly reads request body, which may fail retries.
logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed: %s, retrying the request at another backend", remoteAddr, req.URL, targetURL, err)
requestURI := httpserver.GetRequestURI(r)
logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed: %s, retrying the request at another backend", remoteAddr, requestURI, targetURL, err)
if bbOK {
bb.resetReader()
}
return false, false
}
if slices.Contains(retryStatusCodes, res.StatusCode) {
_ = res.Body.Close()
if !rtbOK || !rtb.canRetry() {
if !canRetry {
// If we get an error from the retry_status_codes list, but cannot execute retry,
// we consider such a request an error as well.
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("got response status code=%d from %s, but cannot retry the request at another backend, because the request has been already consumed",
Err: fmt.Errorf("got response status code=%d from %s, but cannot retry the request at another backend, because the request body has been already consumed",
res.StatusCode, targetURL),
StatusCode: http.StatusServiceUnavailable,
}
@@ -397,13 +478,16 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
ui.requestErrors.Inc()
return true, false
}
// Retry requests at other backends if it matches retryStatusCodes.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4893
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
// NOTE: do not use httpserver.GetRequestURI
// it explicitly reads request body, which may fail retries.
requestURI := httpserver.GetRequestURI(r)
logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed, retrying the request at another backend because response status code=%d belongs to retry_status_codes=%d",
remoteAddr, req.URL, targetURL, res.StatusCode, retryStatusCodes)
remoteAddr, requestURI, targetURL, res.StatusCode, retryStatusCodes)
if bbOK {
bb.resetReader()
}
return false, false
}
removeHopHeaders(res.Header)
@@ -413,10 +497,16 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
err = copyStreamToClient(w, res.Body)
_ = res.Body.Close()
if err != nil && !netutil.IsTrivialNetworkError(err) && !errors.Is(err, context.Canceled) {
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not retry canceled requests.
clientCanceledRequests.Inc()
return true, false
}
if err != nil && !netutil.IsTrivialNetworkError(err) {
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying response body from %s: %s", remoteAddr, requestURI, targetURL, err)
ui.requestErrors.Inc()
return true, false
@@ -546,6 +636,10 @@ var (
configReloadRequests = metrics.NewCounter(`vmauth_http_requests_total{path="/-/reload"}`)
invalidAuthTokenRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="invalid_auth_token"}`)
missingRouteRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
clientCanceledRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="client_canceled"}`)
rejectSlowClientRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="reject_slow_client"}`)
bufferRequestBodyDuration = metrics.NewSummary(`vmauth_buffer_request_body_duration_seconds`)
)
func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, insecureSkipVerifyP *bool) (http.RoundTripper, error) {
@@ -629,10 +723,10 @@ func handleMissingAuthorizationError(w http.ResponseWriter) {
}
func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err error) {
ctx := r.Context()
if errors.Is(ctx.Err(), context.Canceled) {
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not return any response for the request canceled by the client,
// since the connection to the client is already closed.
clientCanceledRequests.Inc()
return
}
@@ -644,123 +738,78 @@ func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err err
httpserver.Errorf(w, r, "%s", err)
}
// readTrackingBody must be obtained via getReadTrackingBody()
type readTrackingBody struct {
// maxBodySize is the maximum body size to cache in buf.
// bufferedBody serves two purposes:
// 1. Enables request retries when the body size does not exceed maxBodySize
// by fully buffering the body in memory.
// 2. Prevents slow clients from reducing effective server capacity by
// buffering the request body before acquiring a per-user concurrency slot.
//
// See bufferRequestBody for details on how bufferedBody is used.
type bufferedBody struct {
// r contains reader for reading the data after buf is read.
//
// Bigger bodies cannot be retried.
maxBodySize int
// r contains reader for initial data reading
// r is nil if buf contains all the data.
r io.ReadCloser
// buf is a buffer for data read from r. Buf size is limited by maxBodySize.
// If more than maxBodySize is read from r, then cannotRetry is set to true.
// buf contains the initial buffer read from r.
buf []byte
// readBuf points to the cached data at buf, which must be read in the next call to Read().
readBuf []byte
// bufOffset is the offset at buf for already read bytes.
bufOffset int
// cannotRetry is set to true when more than maxBodySize bytes are read from r.
// In this case the read data cannot fit buf, so it cannot be re-read from buf.
// cannotRetry is set to true after Close() call on non-nil r.
cannotRetry bool
// bufComplete is set to true when buf contains complete request body read from r.
bufComplete bool
}
func newReadTrackingBody(r io.ReadCloser, maxBodySize int) *readTrackingBody {
// do not use sync.Pool there
// since http.RoundTrip may still use request body after return
// See this issue for details https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051
rtb := &readTrackingBody{}
if maxBodySize < 0 {
maxBodySize = 0
func newBufferedBody(r io.ReadCloser, buf []byte, maxBufSize int) *bufferedBody {
// Do not use sync.Pool here, since http.RoundTrip may still use request body after return.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051
if len(buf) < maxBufSize {
// Read the full request body into buf.
r = nil
}
rtb.maxBodySize = maxBodySize
if r == nil {
// This is GET request without request body
r = (*zeroReader)(nil)
return &bufferedBody{
r: r,
buf: buf,
}
rtb.r = r
return rtb
}
type zeroReader struct{}
func (r *zeroReader) Read(_ []byte) (int, error) {
return 0, io.EOF
}
func (r *zeroReader) Close() error {
return nil
}
// Read implements io.Reader interface.
func (rtb *readTrackingBody) Read(p []byte) (int, error) {
if len(rtb.readBuf) > 0 {
n := copy(p, rtb.readBuf)
rtb.readBuf = rtb.readBuf[n:]
func (bb *bufferedBody) Read(p []byte) (int, error) {
if bb.cannotRetry {
return 0, fmt.Errorf("cannot read already closed body")
}
if bb.bufOffset < len(bb.buf) {
n := copy(p, bb.buf[bb.bufOffset:])
bb.bufOffset += n
return n, nil
}
if rtb.r == nil {
if rtb.bufComplete {
return 0, io.EOF
}
return 0, fmt.Errorf("cannot read client request body after closing client reader")
if bb.r == nil {
return 0, io.EOF
}
n, err := rtb.r.Read(p)
if rtb.cannotRetry {
return n, err
}
if len(rtb.buf)+n > rtb.maxBodySize {
rtb.cannotRetry = true
return n, err
}
rtb.buf = append(rtb.buf, p[:n]...)
if err == io.EOF {
rtb.bufComplete = true
}
return n, err
return bb.r.Read(p)
}
func (rtb *readTrackingBody) canRetry() bool {
if rtb.cannotRetry {
return false
}
if rtb.bufComplete {
return true
}
return rtb.r != nil
func (bb *bufferedBody) canRetry() bool {
return bb.r == nil
}
// Close implements io.Closer interface.
func (rtb *readTrackingBody) Close() error {
if !rtb.cannotRetry {
rtb.readBuf = rtb.buf
} else {
rtb.readBuf = nil
func (bb *bufferedBody) Close() error {
bb.resetReader()
if bb.r != nil {
bb.cannotRetry = true
return bb.r.Close()
}
// Close rtb.r only if the request body is completely read or if it is too big.
// http.Roundtrip performs body.Close call even without any Read calls,
// so this hack allows us to reuse request body.
if rtb.bufComplete || rtb.cannotRetry {
if rtb.r == nil {
return nil
}
err := rtb.r.Close()
rtb.r = nil
return err
}
return nil
}
func (bb *bufferedBody) resetReader() {
bb.bufOffset = 0
}
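Retryability in the new scheme hinges on whether the whole body fit into the buffer: bufferRequestBody reads up to max(-requestBufferSize, -maxRequestBodySizeToRetry) bytes, and canRetry reports true only when the underlying reader was fully drained (r == nil). A tiny illustrative sketch of that decision, using only the standard library and a hypothetical 8-byte limit:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	const maxBufSize = 8 // hypothetical limit
	body := io.NopCloser(bytes.NewBufferString("hello"))   // 5 bytes
	buf, _ := io.ReadAll(io.LimitReader(body, maxBufSize)) // reads at most maxBufSize bytes
	fullyBuffered := len(buf) < maxBufSize                  // same check as in newBufferedBody
	fmt.Println(fullyBuffered)                              // true: the body fits, so it can be replayed on retries
}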
func debugInfo(u *url.URL, r *http.Request) string {
s := &strings.Builder{}
fmt.Fprintf(s, " (host: %q; ", r.Host)

View File

@@ -2,6 +2,7 @@ package main
import (
"bytes"
"context"
"fmt"
"io"
"net"
@@ -10,6 +11,7 @@ import (
"strings"
"sync/atomic"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)
@@ -546,28 +548,300 @@ func (w *fakeResponseWriter) WriteHeader(statusCode int) {
}
}
func TestReadTrackingBody_RetrySuccess(t *testing.T) {
// This is needed for net/http.ResponseController
func (w *fakeResponseWriter) SetReadDeadline(deadline time.Time) error {
return nil
}
func TestBufferRequestBody_Success(t *testing.T) {
defaultRequestBufferSize := requestBufferSize.String()
defer func() {
if err := requestBufferSize.Set(defaultRequestBufferSize); err != nil {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
defaultMaxRequestBodySizeToRetry := maxRequestBodySizeToRetry.String()
defer func() {
if err := maxRequestBodySizeToRetry.Set(defaultMaxRequestBodySizeToRetry); err != nil {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
f := func(body *bytes.Buffer, requestBufferSizeFlag, maxRequestBodySizeToRetryFlag string) {
t.Helper()
expectedResponse := "statusCode=200"
if body.Len() > 0 {
expectedResponse += "\n" + body.String()
}
if err := requestBufferSize.Set(requestBufferSizeFlag); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
if err := maxRequestBodySizeToRetry.Set(maxRequestBodySizeToRetryFlag); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
var backendCalled bool
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
backendCalled = true
b, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, fmt.Sprintf("cannot read body: %s", err), http.StatusBadRequest)
return
}
if _, err := w.Write(b); err != nil {
http.Error(w, fmt.Sprintf("cannot write body: %s", err), http.StatusInternalServerError)
return
}
}))
defer ts.Close()
// regular url_prefix
cfgStr := strings.ReplaceAll(`
unauthorized_user:
url_prefix: {BACKEND}/foo`, "{BACKEND}", ts.URL)
cfgOrigP := authConfigData.Load()
if _, err := reloadAuthConfigData([]byte(cfgStr)); err != nil {
t.Fatalf("cannot load config data: %s", err)
}
defer func() {
cfgOrig := []byte("unauthorized_user:\n url_prefix: http://foo/bar")
if cfgOrigP != nil {
cfgOrig = *cfgOrigP
}
_, err := reloadAuthConfigData(cfgOrig)
if err != nil {
t.Fatalf("cannot load the original config: %s", err)
}
}()
r, err := http.NewRequest(http.MethodPost, `http://some-host.com`, body)
if err != nil {
t.Fatalf("cannot initialize http request: %s", err)
}
w := &fakeResponseWriter{}
if !requestHandlerWithInternalRoutes(w, r) {
t.Fatalf("unexpected false is returned from requestHandler")
}
response := w.getResponse()
response = strings.ReplaceAll(response, "\r\n", "\n")
response = strings.TrimSpace(response)
if response != expectedResponse {
t.Fatalf("unexpected response\ngot\n%s\nwant\n%s", response, expectedResponse)
}
if !backendCalled {
t.Fatalf("backend is not called")
}
}
// no body, no buffering, no retry
f(bytes.NewBuffer(nil), "0", "0")
// no body, buffering on, no retry
f(bytes.NewBuffer(nil), "100", "0")
// no body, no buffering, retry on
f(bytes.NewBuffer(nil), "0", "100")
// no body, buffering on, retry on
f(bytes.NewBuffer(nil), "100", "100")
// body smaller than buffer, retry max on
f(bytes.NewBufferString(strings.Repeat("abcdf", 100)), "101", "101")
// body smaller than buffer
f(bytes.NewBufferString(strings.Repeat("abcdf", 100)), "501", "0")
// body same size as buffer
f(bytes.NewBufferString(strings.Repeat("abcdf", 100)), "500", "0")
// body bigger than a buffer
f(bytes.NewBufferString(strings.Repeat("abcdf", 100)), "499", "0")
// body bigger than tmpBuf 8KiB used in buffering
f(bytes.NewBufferString(strings.Repeat("a", 32*1024)), "16384", "")
f(bytes.NewBufferString(strings.Repeat("a", 32*1024)), "16385", "")
f(bytes.NewBufferString(strings.Repeat("a", 32*1024)), "16383", "")
}
func TestBufferRequestBody_Failure(t *testing.T) {
defaultRequestBufferSize := requestBufferSize.String()
defer func() {
if err := requestBufferSize.Set(defaultRequestBufferSize); err != nil {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
defaultMaxRequestBodySizeToRetry := maxRequestBodySizeToRetry.String()
defer func() {
if err := maxRequestBodySizeToRetry.Set(defaultMaxRequestBodySizeToRetry); err != nil {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
defaultMaxQueueDuration := *maxQueueDuration
defer func() {
*maxQueueDuration = defaultMaxQueueDuration
}()
f := func(body *mockBody, expectedResponse string) {
t.Helper()
if err := maxRequestBodySizeToRetry.Set("0"); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
if err := requestBufferSize.Set("2048"); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
*maxQueueDuration = 100 * time.Millisecond
var backendCalled bool
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
backendCalled = true
b, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, fmt.Sprintf("cannot read body: %s", err), http.StatusBadRequest)
return
}
if _, err := w.Write(b); err != nil {
http.Error(w, fmt.Sprintf("cannot write body: %s", err), http.StatusInternalServerError)
return
}
}))
defer ts.Close()
// regular url_prefix
cfgStr := strings.ReplaceAll(`
unauthorized_user:
url_prefix: {BACKEND}/foo`, "{BACKEND}", ts.URL)
cfgOrigP := authConfigData.Load()
if _, err := reloadAuthConfigData([]byte(cfgStr)); err != nil {
t.Fatalf("cannot load config data: %s", err)
}
defer func() {
cfgOrig := []byte("unauthorized_user:\n url_prefix: http://foo/bar")
if cfgOrigP != nil {
cfgOrig = *cfgOrigP
}
_, err := reloadAuthConfigData(cfgOrig)
if err != nil {
t.Fatalf("cannot load the original config: %s", err)
}
}()
r, err := http.NewRequest(http.MethodPost, `http://some-host.com`, body)
if err != nil {
t.Fatalf("cannot initialize http request: %s", err)
}
w := &fakeResponseWriter{}
if !requestHandlerWithInternalRoutes(w, r) {
t.Fatalf("unexpected false is returned from requestHandler")
}
response := w.getResponse()
response = strings.ReplaceAll(response, "\r\n", "\n")
response = strings.TrimSpace(response)
if response != expectedResponse {
t.Fatalf("unexpected response\ngot\n%s\nwant\n%s", response, expectedResponse)
}
if backendCalled {
t.Fatalf("backend is called")
}
}
// an error at the beginning of reading
f(&mockBody{err: fmt.Errorf("an error")}, `statusCode=400
cannot read request body: an error`)
// an error after reading 1024 bytes, buffer size is 2048 bytes
f(&mockBody{head: make([]byte, 1024), err: fmt.Errorf("an error")}, `statusCode=400
cannot read request body: an error`)
}
type mockBody struct {
head []byte
err error
tail []byte
}
func (r *mockBody) Read(p []byte) (n int, err error) {
if len(r.head) > 0 {
n = copy(p, r.head)
r.head = r.head[n:]
return n, nil
}
if r.err != nil {
return 0, r.err
}
if len(r.tail) > 0 {
n = copy(p, r.tail)
r.tail = r.tail[n:]
return n, nil
}
return 0, io.EOF
}
func TestBufferedBody_RetrySuccess(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defaultRequestBufferSize := requestBufferSize.String()
defer func() {
if err := requestBufferSize.Set(defaultRequestBufferSize); err != nil {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
if !rtb.canRetry() {
defaultMaxRequestBodySizeToRetry := maxRequestBodySizeToRetry.String()
defer func() {
if err := maxRequestBodySizeToRetry.Set(defaultMaxRequestBodySizeToRetry); err != nil {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set("0"); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
ctx := context.Background()
rb, err := bufferRequestBody(ctx, io.NopCloser(bytes.NewBufferString(s)), "foo")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
bb, ok := rb.(*bufferedBody)
canRetry := !ok || bb.canRetry()
if !canRetry {
t.Fatalf("canRetry() must return true before reading anything")
}
for i := 0; i < 5; i++ {
data, err := io.ReadAll(rtb)
data, err := io.ReadAll(rb)
if err != nil {
t.Fatalf("unexpected error when reading all the data at iteration %d: %s", i, err)
}
if string(data) != s {
t.Fatalf("unexpected data read at iteration %d\ngot\n%s\nwant\n%s", i, data, s)
}
if err := rtb.Close(); err != nil {
t.Fatalf("unexpected error when closing readTrackingBody at iteration %d: %s", i, err)
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true at iteration %d", i)
if err := rb.Close(); err != nil {
t.Fatalf("unexpected error when closing bufferedBody at iteration %d: %s", i, err)
}
}
}
@@ -577,19 +851,48 @@ func TestReadTrackingBody_RetrySuccess(t *testing.T) {
f("", 100)
f("foo", 100)
f("foobar", 100)
f(newTestString(1000), 1000)
f(newTestString(1000), 1001)
}
func TestReadTrackingBody_RetrySuccessPartialRead(t *testing.T) {
func TestBufferedBody_RetrySuccessPartialRead(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
// Check the case with partial read
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defaultRequestBufferSize := requestBufferSize.String()
defer func() {
if err := requestBufferSize.Set(defaultRequestBufferSize); err != nil {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
defaultMaxRequestBodySizeToRetry := maxRequestBodySizeToRetry.String()
defer func() {
if err := maxRequestBodySizeToRetry.Set(defaultMaxRequestBodySizeToRetry); err != nil {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set("0"); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
ctx := context.Background()
rb, err := bufferRequestBody(ctx, io.NopCloser(bytes.NewBufferString(s)), "foo")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
bb, ok := rb.(*bufferedBody)
canRetry := !ok || bb.canRetry()
if !canRetry {
t.Fatalf("canRetry must return true")
}
for i := 0; i < len(s); i++ {
buf := make([]byte, i)
n, err := io.ReadFull(rtb, buf)
n, err := io.ReadFull(rb, buf)
if err != nil {
t.Fatalf("unexpected error when reading %d bytes: %s", i, err)
}
@@ -599,26 +902,20 @@ func TestReadTrackingBody_RetrySuccessPartialRead(t *testing.T) {
if string(buf) != s[:i] {
t.Fatalf("unexpected data read with the length %d\ngot\n%s\nwant\n%s", i, buf, s[:i])
}
if err := rtb.Close(); err != nil {
if err := rb.Close(); err != nil {
t.Fatalf("unexpected error when closing reader after reading %d bytes", i)
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true after closing the reader after reading %d bytes", i)
}
}
data, err := io.ReadAll(rtb)
data, err := io.ReadAll(rb)
if err != nil {
t.Fatalf("unexpected error when reading all the data: %s", err)
}
if string(data) != s {
t.Fatalf("unexpected data read\ngot\n%s\nwant\n%s", data, s)
}
if err := rtb.Close(); err != nil {
t.Fatalf("unexpected error when closing readTrackingBody: %s", err)
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true after closing the reader after reading all the input")
if err := rb.Close(); err != nil {
t.Fatalf("unexpected error when closing bufferedBody: %s", err)
}
}
@@ -627,30 +924,53 @@ func TestReadTrackingBody_RetrySuccessPartialRead(t *testing.T) {
f("", 100)
f("foo", 100)
f("foobar", 100)
f(newTestString(1000), 1000)
f(newTestString(1000), 1001)
}
func TestReadTrackingBody_RetryFailureTooBigBody(t *testing.T) {
func TestBufferedBody_RetryFailureTooBigBody(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defaultRequestBufferSize := requestBufferSize.String()
defer func() {
if err := requestBufferSize.Set(defaultRequestBufferSize); err != nil {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set("0"); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true before reading anything")
defaultMaxRequestBodySizeToRetry := maxRequestBodySizeToRetry.String()
defer func() {
if err := maxRequestBodySizeToRetry.Set(defaultMaxRequestBodySizeToRetry); err != nil {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
ctx := context.Background()
rb, err := bufferRequestBody(ctx, io.NopCloser(bytes.NewBufferString(s)), "foo")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
bb, ok := rb.(*bufferedBody)
canRetry := !ok || bb.canRetry()
if canRetry {
t.Fatalf("canRetry() must return false because of too big request body")
}
buf := make([]byte, 1)
n, err := io.ReadFull(rtb, buf)
n, err := io.ReadFull(rb, buf)
if err != nil {
t.Fatalf("unexpected error when reading a single byte: %s", err)
}
if n != 1 {
t.Fatalf("unexpected number of bytes read; got %d; want 1", n)
}
if !rtb.canRetry() {
t.Fatalf("canRetry() must return true after reading one byte")
}
data, err := io.ReadAll(rtb)
data, err := io.ReadAll(rb)
if err != nil {
t.Fatalf("unexpected error when reading all the data: %s", err)
}
@@ -658,14 +978,11 @@ func TestReadTrackingBody_RetryFailureTooBigBody(t *testing.T) {
if dataRead != s {
t.Fatalf("unexpected data read\ngot\n%s\nwant\n%s", dataRead, s)
}
if err := rtb.Close(); err != nil {
t.Fatalf("unexpected error when closing readTrackingBody: %s", err)
}
if rtb.canRetry() {
t.Fatalf("canRetry() must return false after closing the reader")
if err := rb.Close(); err != nil {
t.Fatalf("unexpected error when closing bufferedBody: %s", err)
}
data, err = io.ReadAll(rtb)
data, err = io.ReadAll(rb)
if err == nil {
t.Fatalf("expecting non-nil error")
}
@@ -679,35 +996,48 @@ func TestReadTrackingBody_RetryFailureTooBigBody(t *testing.T) {
f(newTestString(2*maxBodySize), maxBodySize)
}
func TestReadTrackingBody_RetryFailureZeroOrNegativeMaxBodySize(t *testing.T) {
func TestBufferedBody_RetryFailureZeroOrNegativeMaxBodySize(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
rtb := newReadTrackingBody(io.NopCloser(bytes.NewBufferString(s)), maxBodySize)
defaultRequestBufferSize := requestBufferSize.String()
defer func() {
if err := requestBufferSize.Set(defaultRequestBufferSize); err != nil {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
if !rtb.canRetry() {
ctx := context.Background()
rb, err := bufferRequestBody(ctx, io.NopCloser(bytes.NewBufferString(s)), "foo")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
bb, ok := rb.(*bufferedBody)
canRetry := !ok || bb.canRetry()
if !canRetry {
t.Fatalf("canRetry() must return true before reading anything")
}
data, err := io.ReadAll(rtb)
data, err := io.ReadAll(rb)
if err != nil {
t.Fatalf("unexpected error when reading all the data: %s", err)
}
if string(data) != s {
t.Fatalf("unexpected data read\ngot\n%s\nwant\n%s", data, s)
}
if err := rtb.Close(); err != nil {
t.Fatalf("unexpected error when closing readTrackingBody: %s", err)
if err := rb.Close(); err != nil {
t.Fatalf("unexpected error when closing bufferedBody: %s", err)
}
if rtb.canRetry() {
t.Fatalf("canRetry() must return false after closing the reader")
data, err = io.ReadAll(rb)
if err != nil {
t.Fatalf("unexpected error in io.ReadAll: %s", err)
}
data, err = io.ReadAll(rtb)
if err == nil {
t.Fatalf("expecting non-nil error")
}
if len(data) != 0 {
t.Fatalf("unexpected non-empty data read: %q", data)
if string(data) != s {
t.Fatalf("unexpected data read\ngot\n%s\nwant\n%s", data, s)
}
}
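The rewritten tests in this file repeatedly save a flag's current value, set a test-specific value, and restore the original when the test finishes. A generic sketch of that pattern as a reusable helper (the helper and the example flag are hypothetical, not part of the package above), assuming the flag's String()/Set() methods round-trip:

package example

import (
	"flag"
	"testing"
)

var exampleMaxBodySize = flag.Int("example.maxBodySize", 1024, "hypothetical flag used only for this sketch")

// setFlagForTest overrides a flag for the duration of a test and restores
// the previous value via t.Cleanup.
func setFlagForTest(t *testing.T, f *flag.Flag, value string) {
	t.Helper()
	old := f.Value.String()
	if err := f.Value.Set(value); err != nil {
		t.Fatalf("cannot set -%s to %q: %s", f.Name, value, err)
	}
	t.Cleanup(func() {
		if err := f.Value.Set(old); err != nil {
			t.Fatalf("cannot restore -%s to %q: %s", f.Name, old, err)
		}
	})
}

func TestWithOverriddenFlag(t *testing.T) {
	setFlagForTest(t, flag.Lookup("example.maxBodySize"), "0")
	if *exampleMaxBodySize != 0 {
		t.Fatalf("unexpected example.maxBodySize: %d", *exampleMaxBodySize)
	}
}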

View File

@@ -123,32 +123,32 @@ var (
Name: vmExtraLabel,
Value: nil,
Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" +
"will have priority. Flag can be set multiple times, to add few additional labels.",
" will have priority. Flag can be set multiple times, to add few additional labels.",
},
&cli.Int64Flag{
Name: vmRateLimit,
Usage: "Optional data transfer rate limit in bytes per second.\n" +
"By default, the rate limit is disabled. It can be useful for limiting load on configured via '--vmAddr' destination.",
"By default, the rate limit is disabled. It can be useful for limiting load on configured via '--vm-addr' destination.",
},
&cli.StringFlag{
Name: vmCertFile,
Usage: "Optional path to client-side TLS certificate file to use when connecting to '--vmAddr'",
Usage: "Optional path to client-side TLS certificate file to use when connecting to '--vm-addr'",
},
&cli.StringFlag{
Name: vmKeyFile,
Usage: "Optional path to client-side TLS key to use when connecting to '--vmAddr'",
Usage: "Optional path to client-side TLS key to use when connecting to '--vm-addr'",
},
&cli.StringFlag{
Name: vmCAFile,
Usage: "Optional path to TLS CA file to use for verifying connections to '--vmAddr'. By default, system CA is used",
Usage: "Optional path to TLS CA file to use for verifying connections to '--vm-addr'. By default, system CA is used",
},
&cli.StringFlag{
Name: vmServerName,
Usage: "Optional TLS server name to use for connections to '--vmAddr'. By default, the server name from '--vmAddr' is used",
Usage: "Optional TLS server name to use for connections to '--vm-addr'. By default, the server name from '--vm-addr' is used",
},
&cli.BoolFlag{
Name: vmInsecureSkipVerify,
Usage: "Whether to skip tls verification when connecting to '--vmAddr'",
Usage: "Whether to skip tls verification when connecting to '--vm-addr'",
Value: false,
},
&cli.IntFlag{
@@ -468,7 +468,7 @@ var (
Name: vmNativeFilterMatch,
Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " +
"match all series with \"instance\" label different to \"localhost\".\n" +
" See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format",
" See more details here https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format",
Value: `{__name__!=""}`,
},
&cli.StringFlag{
@@ -598,7 +598,7 @@ var (
Name: vmExtraLabel,
Value: nil,
Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" +
"will have priority. Flag can be set multiple times, to add few additional labels.",
" will have priority. Flag can be set multiple times, to add few additional labels.",
},
&cli.Int64Flag{
Name: vmRateLimit,
@@ -625,8 +625,8 @@ var (
&cli.BoolFlag{
Name: vmNativeDisableBinaryProtocol,
Usage: "Whether to use https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-json-line-format " +
"instead of https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format API." +
"Binary export/import API protocol implies less network and resource usage, as it transfers compressed binary data blocks." +
"instead of https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format API. " +
"Binary export/import API protocol implies less network and resource usage, as it transfers compressed binary data blocks. " +
"Non-binary export/import API is less efficient, but supports deduplication if it is configured on vm-native-src-addr side.",
Value: false,
},

View File

@@ -63,10 +63,8 @@ func (ip *influxProcessor) run(ctx context.Context) error {
ip.im.ResetStats()
var wg sync.WaitGroup
wg.Add(ip.cc)
for i := 0; i < ip.cc; i++ {
go func() {
defer wg.Done()
for range ip.cc {
wg.Go(func() {
for s := range seriesCh {
if err := ip.do(s); err != nil {
errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err)
@@ -74,7 +72,7 @@ func (ip *influxProcessor) run(ctx context.Context) error {
}
bar.Increment()
}
}()
})
}
// any error breaks the import
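The loop above, and the matching loops in the processor files that follow, replace manual wg.Add/wg.Done bookkeeping with sync.WaitGroup.Go, which was added in Go 1.25 and increments the counter, starts the goroutine, and calls Done when the function returns. A minimal standalone sketch of the pattern (worker count and job payloads are made up for illustration):

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int)
	var wg sync.WaitGroup
	for range 4 { // four hypothetical workers
		wg.Go(func() {
			for j := range jobs {
				fmt.Println("processed job", j)
			}
		})
	}
	for i := 0; i < 10; i++ {
		jobs <- i
	}
	close(jobs)
	wg.Wait()
}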

View File

@@ -89,10 +89,8 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
bar.Finish()
}(bar)
var wg sync.WaitGroup
wg.Add(op.otsdbcc)
for i := 0; i < op.otsdbcc; i++ {
go func() {
defer wg.Done()
for range op.otsdbcc {
wg.Go(func() {
for s := range seriesCh {
if err := op.do(s); err != nil {
errCh <- fmt.Errorf("couldn't retrieve series for %s : %s", metric, err)
@@ -100,7 +98,7 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
}
bar.Increment()
}
}()
})
}
/*
Loop through all series for this metric, processing all retentions and time ranges

View File

@@ -4,8 +4,10 @@ import (
"context"
"fmt"
"log"
"strings"
"sync"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -61,19 +63,19 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
var it chunkenc.Iterator
for ss.Next() {
var name string
var labels []vm.LabelPair
var labelPairs []vm.LabelPair
series := ss.At()
for _, label := range series.Labels() {
series.Labels().Range(func(label labels.Label) {
if label.Name == "__name__" {
name = label.Value
continue
return
}
labels = append(labels, vm.LabelPair{
Name: label.Name,
Value: label.Value,
labelPairs = append(labelPairs, vm.LabelPair{
Name: strings.Clone(label.Name),
Value: strings.Clone(label.Value),
})
}
})
if name == "" {
return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID)
}
@@ -99,7 +101,7 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
}
ts := vm.TimeSeries{
Name: name,
LabelPairs: labels,
LabelPairs: labelPairs,
Timestamps: timestamps,
Values: values,
}
@@ -122,10 +124,8 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
pp.im.ResetStats()
var wg sync.WaitGroup
wg.Add(pp.cc)
for i := 0; i < pp.cc; i++ {
go func() {
defer wg.Done()
for range pp.cc {
wg.Go(func() {
for br := range blockReadersCh {
if err := pp.do(br); err != nil {
errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
@@ -133,7 +133,7 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
}
bar.Increment()
}
}()
})
}
// any error breaks the import
for _, br := range blocks {

View File

@@ -66,10 +66,8 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
errCh := make(chan error)
var wg sync.WaitGroup
wg.Add(rrp.cc)
for i := 0; i < rrp.cc; i++ {
go func() {
defer wg.Done()
for range rrp.cc {
wg.Go(func() {
for r := range rangeC {
if err := rrp.do(ctx, r); err != nil {
errCh <- fmt.Errorf("request failed for: %s", err)
@@ -77,7 +75,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
}
bar.Increment()
}
}()
})
}
for _, r := range ranges {

View File

@@ -156,15 +156,13 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
cfg.BatchSize = 1e5
}
im.wg.Add(int(cfg.Concurrency))
for i := 0; i < int(cfg.Concurrency); i++ {
for i := range int(cfg.Concurrency) {
pbPrefix := fmt.Sprintf(`{{ green "VM worker %d:" }}`, i)
bar := barpool.AddWithTemplate(pbPrefix+pbTpl, 0)
go func(bar barpool.Bar) {
defer im.wg.Done()
im.wg.Go(func() {
im.startWorker(ctx, bar, cfg.BatchSize, cfg.SignificantFigures, cfg.RoundDigits)
}(bar)
})
}
im.ResetStats()
return im, nil

View File

@@ -249,9 +249,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
var wg sync.WaitGroup
for i := 0; i < p.cc; i++ {
wg.Add(1)
go func() {
defer wg.Done()
wg.Go(func() {
for f := range filterCh {
if !p.disablePerMetricRequests {
if err := p.do(ctx, f, srcURL, dstURL, nil); err != nil {
@@ -266,7 +264,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
}
}
}
}()
})
}
// any error breaks the import

View File

@@ -111,9 +111,7 @@ func InitStreamAggr() {
saCfgTimestamp.Set(fasttime.UnixTimestamp())
// Start config reloader.
saCfgReloaderWG.Add(1)
go func() {
defer saCfgReloaderWG.Done()
saCfgReloaderWG.Go(func() {
for {
select {
case <-sighupCh:
@@ -122,7 +120,7 @@ func InitStreamAggr() {
}
reloadStreamAggrConfig()
}
}()
})
}
func reloadStreamAggrConfig() {

View File

@@ -232,7 +232,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
firehose.WriteSuccessResponse(w, r)
return true
case "zabbixconnector/api/v1/history":
case "/zabbixconnector/api/v1/history":
zabbixconnectorHistoryRequests.Inc()
if err := zabbixconnector.InsertHandlerForHTTP(r); err != nil {
zabbixconnectorHistoryErrors.Inc()
@@ -241,7 +241,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
fmt.Fprintf(w, `{"error":%q}`, err.Error())
return true
}
w.WriteHeader(http.StatusAccepted)
w.WriteHeader(http.StatusOK)
return true
case "/newrelic":
newrelicCheckRequest.Inc()

View File

@@ -3896,27 +3896,9 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
seriesCh := make(chan *series, goroutines)
errCh := make(chan error, 1)
var wg sync.WaitGroup
wg.Add(goroutines)
go func() {
var err error
for {
s, e := nextSeries()
if e != nil || s == nil {
err = e
break
}
seriesCh <- s
}
close(seriesCh)
wg.Wait()
close(resultCh)
errCh <- err
close(errCh)
}()
var skipProcessing atomic.Bool
for i := 0; i < goroutines; i++ {
go func() {
defer wg.Done()
for range goroutines {
wg.Go(func() {
for s := range seriesCh {
if skipProcessing.Load() {
continue
@@ -3934,8 +3916,24 @@ func nextSeriesConcurrentWrapper(nextSeries nextSeriesFunc, f func(s *series) (*
}
}
}
}()
})
}
go func() {
var err error
for {
s, e := nextSeries()
if e != nil || s == nil {
err = e
break
}
seriesCh <- s
}
close(seriesCh)
wg.Wait()
close(resultCh)
errCh <- err
close(errCh)
}()
wrapper := func() (*series, error) {
r := <-resultCh
if r == nil {

View File

@@ -520,7 +520,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
fmt.Fprintf(w, "%s", `{"status":"error","msg":"for accessing vmalert flag '-vmalert.proxyURL' must be configured"}`)
return true
}
proxyVMAlertRequests(w, r)
proxyVMAlertRequests(w, r, path)
return true
}
@@ -558,7 +558,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
case "/api/v1/rules", "/rules":
rulesRequests.Inc()
if len(*vmalertProxyURL) > 0 {
proxyVMAlertRequests(w, r)
proxyVMAlertRequests(w, r, path)
return true
}
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules
@@ -568,7 +568,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
case "/api/v1/alerts", "/alerts":
alertsRequests.Inc()
if len(*vmalertProxyURL) > 0 {
proxyVMAlertRequests(w, r)
proxyVMAlertRequests(w, r, path)
return true
}
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#alerts
@@ -578,7 +578,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
case "/api/v1/notifiers", "/notifiers":
notifiersRequests.Inc()
if len(*vmalertProxyURL) > 0 {
proxyVMAlertRequests(w, r)
proxyVMAlertRequests(w, r, path)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -725,7 +725,7 @@ var (
metricNamesStatsResetErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/admin/status/metric_names_stats/reset"}`)
)
func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request) {
func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request, path string) {
defer func() {
err := recover()
if err == nil || err == http.ErrAbortHandler {
@@ -736,8 +736,10 @@ func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request) {
// Forward other panics to the caller.
panic(err)
}()
r.Host = vmalertProxyHost
vmalertProxy.ServeHTTP(w, r)
req := r.Clone(r.Context())
req.URL.Path = strings.TrimPrefix(path, "prometheus")
req.Host = vmalertProxyHost
vmalertProxy.ServeHTTP(w, req)
}
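The change above clones the incoming request before rewriting its path and host, so the caller's *http.Request is left untouched by the proxying. A minimal sketch of that pattern with the standard reverse proxy (the prefix, addresses, and handler name are illustrative, not the actual vmselect code):

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
)

func newProxyHandler(upstream *url.URL) http.Handler {
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Clone the request so the original stays intact for other handlers.
		req := r.Clone(r.Context())
		req.URL.Path = strings.TrimPrefix(r.URL.Path, "/prometheus")
		req.Host = upstream.Host
		proxy.ServeHTTP(w, req)
	})
}

func main() {
	u, err := url.Parse("http://127.0.0.1:8880") // hypothetical vmalert address
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8427", newProxyHandler(u)))
}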
var (

View File

@@ -296,14 +296,12 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
// Start workers and wait until they finish the work.
var wg sync.WaitGroup
for i := range workChs {
wg.Add(1)
qtChild := qt.NewChild("worker #%d", i)
go func(workerID uint) {
timeseriesWorker(qtChild, workChs, workerID)
for workerID := range workChs {
qtChild := qt.NewChild("worker #%d", workerID)
wg.Go(func() {
timeseriesWorker(qtChild, workChs, uint(workerID))
qtChild.Done()
wg.Done()
}(uint(i))
})
}
wg.Wait()
@@ -514,12 +512,10 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
// Start workers and wait until they finish the work.
var wg sync.WaitGroup
for i := 0; i < workers; i++ {
wg.Add(1)
go func(workerID uint) {
unpackWorker(workChs, workerID)
wg.Done()
}(uint(i))
for workerID := range workers {
wg.Go(func() {
unpackWorker(workChs, uint(workerID))
})
}
wg.Wait()
@@ -1020,12 +1016,10 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
mustStop atomic.Bool
)
var wg sync.WaitGroup
wg.Add(gomaxprocs)
for i := 0; i < gomaxprocs; i++ {
go func(workerID uint) {
defer wg.Done()
for workerID := range gomaxprocs {
wg.Go(func() {
for xw := range workCh {
if err := f(&xw.mn, &xw.b, tr, workerID); err != nil {
if err := f(&xw.mn, &xw.b, tr, uint(workerID)); err != nil {
errGlobalLock.Lock()
if errGlobal == nil {
errGlobal = err
@@ -1036,7 +1030,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
xw.reset()
exportWorkPool.Put(xw)
}
}(uint(i))
})
}
// Feed workers with work

View File

@@ -103,15 +103,13 @@ func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssEx
workersCount := netstorage.MaxWorkers()
tsCh := make(chan *timeseries)
var wg sync.WaitGroup
wg.Add(workersCount)
for i := 0; i < workersCount; i++ {
go func(workerID uint) {
defer wg.Done()
for workerID := range workersCount {
wg.Go(func() {
for ts := range tsCh {
runtime.Gosched() // allow other goroutines performing the work
iafc.updateTimeseries(ts, workerID)
iafc.updateTimeseries(ts, uint(workerID))
}
}(uint(i))
})
}
for _, ts := range tssSrc {
tsCh <- ts

View File

@@ -477,22 +477,18 @@ func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSec
var tssFirst []*timeseries
var errFirst error
qtFirst := qt.NewChild("expr1")
wg.Add(1)
go func() {
defer wg.Done()
wg.Go(func() {
tssFirst, errFirst = evalExpr(qtFirst, ec, exprFirst)
qtFirst.Done()
}()
})
var tssSecond []*timeseries
var errSecond error
qtSecond := qt.NewChild("expr2")
wg.Add(1)
go func() {
defer wg.Done()
wg.Go(func() {
tssSecond, errSecond = evalExpr(qtSecond, ec, exprSecond)
qtSecond.Done()
}()
})
wg.Wait()
if errFirst != nil {
@@ -710,17 +706,13 @@ func evalExprsInParallel(qt *querytracer.Tracer, ec *EvalConfig, es []metricsql.
qt.Printf("eval function args in parallel")
var wg sync.WaitGroup
for i, e := range es {
wg.Add(1)
qtChild := qt.NewChild("eval arg %d", i)
go func(e metricsql.Expr, i int) {
defer func() {
qtChild.Done()
wg.Done()
}()
wg.Go(func() {
defer qtChild.Done()
rv, err := evalExpr(qtChild, ec, e)
rvs[i] = rv
errs[i] = err
}(e, i)
})
}
wg.Wait()
for _, err := range errs {
@@ -785,7 +777,8 @@ func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
// - rollupFunc(m) if iafc is nil
// - aggrFunc(rollupFunc(m)) if iafc isn't nil
func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr,
re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext,
) ([]*timeseries, error) {
if re.At == nil {
return evalRollupFuncWithoutAt(qt, ec, funcName, rf, expr, re, iafc)
}
@@ -835,7 +828,8 @@ func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf
}
func evalRollupFuncWithoutAt(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext,
) ([]*timeseries, error) {
funcName = strings.ToLower(funcName)
ecNew := ec
var offset int64
@@ -1017,16 +1011,14 @@ func doParallel(tss []*timeseries, f func(ts *timeseries, values []float64, time
}
var wg sync.WaitGroup
wg.Add(workers)
for i := 0; i < workers; i++ {
go func(workerID uint) {
defer wg.Done()
for workerID := range workers {
wg.Go(func() {
var tmpValues []float64
var tmpTimestamps []int64
for ts := range workChs[workerID] {
tmpValues, tmpTimestamps = f(ts, tmpValues, tmpTimestamps, workerID)
tmpValues, tmpTimestamps = f(ts, tmpValues, tmpTimestamps, uint(workerID))
}
}(uint(i))
})
}
wg.Wait()
}
@@ -1058,7 +1050,8 @@ func removeNanValues(dstValues []float64, dstTimestamps []int64, values []float6
// evalInstantRollup evaluates instant rollup where ec.Start == ec.End.
func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window int64) ([]*timeseries, error) {
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window int64,
) ([]*timeseries, error) {
if ec.Start != ec.End {
logger.Panicf("BUG: evalInstantRollup cannot be called on non-empty time range; got %s", ec.timeRangeString())
}
@@ -1083,10 +1076,12 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
rollupResultCacheV.DeleteInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
}
getCachedSeries := func(qt *querytracer.Tracer) ([]*timeseries, int64, error) {
rollupResultCacheV.rollupResultCacheRequests.Inc()
again:
offset := int64(0)
tssCached := rollupResultCacheV.GetInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
if len(tssCached) == 0 {
rollupResultCacheV.rollupResultCacheMisses.Inc()
// Cache miss. Re-populate the missing data.
start := int64(fasttime.UnixTimestamp()*1000) - cacheTimestampOffset.Milliseconds()
offset = timestamp - start
@@ -1129,6 +1124,7 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
deleteCachedSeries(qt)
goto again
}
rollupResultCacheV.rollupResultCachePartialHits.Inc()
ec.QueryStats.addSeriesFetched(len(tssCached))
return tssCached, offset, nil
}
@@ -1537,16 +1533,11 @@ func assertInstantValues(tss []*timeseries) {
}
}
var (
rollupResultCacheFullHits = metrics.NewCounter(`vm_rollup_result_cache_full_hits_total`)
rollupResultCachePartialHits = metrics.NewCounter(`vm_rollup_result_cache_partial_hits_total`)
rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
memoryIntensiveQueries = metrics.NewCounter(`vm_memory_intensive_queries_total`)
)
var memoryIntensiveQueries = metrics.NewCounter(`vm_memory_intensive_queries_total`)
func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr,
) ([]*timeseries, error) {
window, err := windowExpr.NonNegativeDuration(ec.Step)
if err != nil {
return nil, fmt.Errorf("cannot parse lookbehind window in square brackets at %s: %w", expr.AppendString(nil), err)
@@ -1582,19 +1573,20 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
}
// Search for cached results.
rollupResultCacheV.rollupResultCacheRequests.Inc()
tssCached, start := rollupResultCacheV.GetSeries(qt, ec, expr, window)
ec.QueryStats.addSeriesFetched(len(tssCached))
if start > ec.End {
qt.Printf("the result is fully cached")
rollupResultCacheFullHits.Inc()
rollupResultCacheV.rollupResultCacheFullHits.Inc()
return tssCached, nil
}
if start > ec.Start {
qt.Printf("partial cache hit")
rollupResultCachePartialHits.Inc()
rollupResultCacheV.rollupResultCachePartialHits.Inc()
} else {
qt.Printf("cache miss")
rollupResultCacheMiss.Inc()
rollupResultCacheV.rollupResultCacheMisses.Inc()
}
// Fetch missing results, which aren't cached yet.
@@ -1630,7 +1622,8 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
//
// pointsPerSeries is used only for estimating the needed memory for query processing
func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window, pointsPerSeries int64) ([]*timeseries, error) {
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window, pointsPerSeries int64,
) ([]*timeseries, error) {
if qt.Enabled() {
qt = qt.NewChild("rollup %s: timeRange=%s, step=%d, window=%d", expr.AppendString(nil), ec.timeRangeString(), ec.Step, window)
defer qt.Done()
@@ -1753,7 +1746,8 @@ func maxSilenceInterval() int64 {
func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool,
iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64,
) ([]*timeseries, error) {
qt = qt.NewChild("rollup %s() with incremental aggregation %s() over %d series; rollupConfigs=%s", funcName, iafc.ae.Name, rss.Len(), rcs)
defer qt.Done()
var samplesScannedTotal atomic.Uint64
@@ -1792,7 +1786,8 @@ func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string,
}
func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64,
) ([]*timeseries, error) {
qt = qt.NewChild("rollup %s() over %d series; rollupConfigs=%s", funcName, rss.Len(), rcs)
defer qt.Done()
@@ -1832,7 +1827,8 @@ func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, k
}
func doRollupForTimeseries(funcName string, keepMetricNames bool, rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName,
valuesSrc []float64, timestampsSrc []int64, sharedTimestamps []int64) uint64 {
valuesSrc []float64, timestampsSrc []int64, sharedTimestamps []int64,
) uint64 {
tsDst.MetricName.CopyFrom(mnSrc)
if len(rc.TagValue) > 0 {
tsDst.MetricName.AddTag("rollup", rc.TagValue)

View File

@@ -534,7 +534,10 @@ type rollupFuncArg struct {
timestamps []int64
// Real value preceding values.
// Is populated if preceding value is within the rc.LookbackDelta.
// Is populated if the preceding sample falls within the rc.LookbackDelta range, or if rc.LookbackDelta is not set.
//
// It provides an additional check and value for rollup functions such as increase(), changes(),
// when the prevValue is NaN due to a gap or a small lookback window.
realPrevValue float64
// Real value which goes after values.
@@ -713,7 +716,11 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
// Extend dstValues in order to remove mallocs below.
dstValues = decimal.ExtendFloat64sCapacity(dstValues, len(rc.Timestamps))
// Use step as the scrape interval for instant queries (when start == end).
// Set maxPrevInterval for subsequent rfa.prevValue calculations in rollupFunc:
// For instant queries, use rc.Step directly as maxPrevInterval.
// For range queries, rc.Step is typically too small to serve as the lookback window between two rollup points.
// Instead, estimate the scrape interval from raw sample timestamps (using the 0.6 quantile of the last 20 intervals)
// and slightly inflate the scrape interval to set maxPrevInterval, allowing for some tolerance to jitter.
maxPrevInterval := rc.Step
if rc.Start < rc.End {
scrapeInterval := getScrapeInterval(timestamps, rc.Step)
@@ -729,22 +736,21 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
}
}
window := rc.Window
// Adjust lookbehind window only if it isn't set explicitly, e.g. rate(foo).
// In the case of missing lookbehind window it should be adjusted in order to return non-empty graph
// when the window doesn't cover at least two raw samples (this is what most users expect).
//
// If the user explicitly sets the lookbehind window to some fixed value, e.g. rate(foo[1s]),
// then it is expected he knows what he is doing. Do not adjust the lookbehind window then.
//
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3483
if window <= 0 {
window = rc.Step
if rc.MayAdjustWindow && window < maxPrevInterval {
// Adjust lookbehind window only if it isn't set explicitly, e.g. rate(foo).
// In the case of missing lookbehind window it should be adjusted in order to return non-empty graph
// when the window doesn't cover at least two raw samples (this is what most users expect).
//
// If the user explicitly sets the lookbehind window to some fixed value, e.g. rate(foo[1s]),
// then it is expected he knows what he is doing. Do not adjust the lookbehind window then.
//
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3483
window = maxPrevInterval
}
// Artificial window cannot exceed explicit rc.LookbackDelta, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/784
if rc.isDefaultRollup && rc.LookbackDelta > 0 && window > rc.LookbackDelta {
// Implicit window exceeds -search.maxStalenessInterval, so limit it to -search.maxStalenessInterval
// according to https://github.com/VictoriaMetrics/VictoriaMetrics/issues/784
window = rc.LookbackDelta
}
}
@@ -869,17 +875,17 @@ func getScrapeInterval(timestamps []int64, defaultInterval int64) int64 {
return defaultInterval
}
// Estimate scrape interval as 0.6 quantile for the first 20 intervals.
tsPrev := timestamps[0]
timestamps = timestamps[1:]
// Estimate scrape interval as 0.6 quantile of the last 20 intervals.
tsPrev := timestamps[len(timestamps)-1]
timestamps = timestamps[:len(timestamps)-1]
if len(timestamps) > 20 {
timestamps = timestamps[:20]
timestamps = timestamps[len(timestamps)-20:]
}
a := getFloat64s()
intervals := a.A[:0]
for _, ts := range timestamps {
intervals = append(intervals, float64(ts-tsPrev))
tsPrev = ts
for i := len(timestamps) - 1; i >= 0; i-- {
intervals = append(intervals, float64(tsPrev-timestamps[i]))
tsPrev = timestamps[i]
}
scrapeInterval := int64(quantile(0.6, intervals))
a.A = intervals
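For reference, a standalone sketch of the revised estimate above — the 0.6 quantile of the gaps between the last up-to-20 sample timestamps — written with the standard sort package instead of the repository's own quantile helper (function and variable names are illustrative):

package main

import (
	"fmt"
	"sort"
)

// estimateScrapeInterval returns the 0.6 quantile of the gaps between the
// last up-to-20 timestamps (milliseconds), falling back to defaultInterval
// when fewer than two samples are available.
func estimateScrapeInterval(timestamps []int64, defaultInterval int64) int64 {
	if len(timestamps) < 2 {
		return defaultInterval
	}
	// At most the last 20 intervals, i.e. the last 21 timestamps.
	if len(timestamps) > 21 {
		timestamps = timestamps[len(timestamps)-21:]
	}
	intervals := make([]float64, 0, len(timestamps)-1)
	for i := 1; i < len(timestamps); i++ {
		intervals = append(intervals, float64(timestamps[i]-timestamps[i-1]))
	}
	sort.Float64s(intervals)
	idx := int(0.6 * float64(len(intervals)-1))
	return int64(intervals[idx])
}

func main() {
	ts := []int64{0, 30000, 60000, 90000, 100000}
	fmt.Println(estimateScrapeInterval(ts, 15000))
}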
@@ -2107,9 +2113,15 @@ func rollupChanges(rfa *rollupFuncArg) float64 {
if len(values) == 0 {
return nan
}
prevValue = values[0]
values = values[1:]
n++
// Assume that the value didn't change during the current gap
// if realPrevValue exists.
if !math.IsNaN(rfa.realPrevValue) {
prevValue = rfa.realPrevValue
} else {
n++
prevValue = values[0]
values = values[1:]
}
}
for _, v := range values {
if v != prevValue {

View File

@@ -83,9 +83,11 @@ func checkRollupResultCacheReset() {
const checkRollupResultCacheResetInterval = 5 * time.Second
var needRollupResultCacheReset atomic.Bool
var checkRollupResultCacheResetOnce sync.Once
var rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]
var (
needRollupResultCacheReset atomic.Bool
checkRollupResultCacheResetOnce sync.Once
rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]
)
var rollupResultCacheV = &rollupResultCache{
c: workingsetcache.New(1024 * 1024), // This is a cache for testing.
@@ -178,6 +180,12 @@ func InitRollupResultCache(cachePath string) {
rollupResultCacheV = &rollupResultCache{
c: c,
rollupResultCacheRequests: metrics.GetOrCreateCounter(`vm_rollup_result_cache_requests_total`),
rollupResultCacheFullHits: metrics.GetOrCreateCounter(`vm_rollup_result_cache_full_hits_total`),
rollupResultCachePartialHits: metrics.GetOrCreateCounter(`vm_rollup_result_cache_partial_hits_total`),
rollupResultCacheMisses: metrics.GetOrCreateCounter(`vm_rollup_result_cache_miss_total`),
rollupResultCacheResets: metrics.GetOrCreateCounter(`vm_rollup_result_cache_resets_total`),
}
}
@@ -193,13 +201,18 @@ func StopRollupResultCache() {
type rollupResultCache struct {
c *workingsetcache.Cache
}
var rollupResultCacheResets = metrics.NewCounter(`vm_cache_resets_total{type="promql/rollupResult"}`)
rollupResultCacheRequests *metrics.Counter
rollupResultCacheFullHits *metrics.Counter
rollupResultCachePartialHits *metrics.Counter
rollupResultCacheMisses *metrics.Counter
rollupResultCacheResets *metrics.Counter
}
// ResetRollupResultCache resets rollup result cache.
func ResetRollupResultCache() {
rollupResultCacheResets.Inc()
rollupResultCacheV.rollupResultCacheResets.Inc()
rollupResultCacheKeyPrefix.Add(1)
logger.Infof("rollupResult cache has been cleared")
}

View File

@@ -232,6 +232,7 @@ func testRollupFunc(t *testing.T, funcName string, args []any, vExpected float64
}
var rfa rollupFuncArg
rfa.prevValue = nan
rfa.realPrevValue = nan
rfa.prevTimestamp = 0
rfa.values = append(rfa.values, testValues...)
rfa.timestamps = append(rfa.timestamps, testTimestamps...)
@@ -1654,7 +1655,7 @@ func TestRollupDeltaWithStaleness(t *testing.T) {
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 7 {
t.Fatalf("expecting 8 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
t.Fatalf("expecting 7 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0}
timestampsExpected := []int64{0, 45e3}
@@ -1674,7 +1675,7 @@ func TestRollupDeltaWithStaleness(t *testing.T) {
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 7 {
t.Fatalf("expecting 8 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
t.Fatalf("expecting 7 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0}
timestampsExpected := []int64{0, 45e3}
@@ -1794,7 +1795,7 @@ func TestRollupIncreasePureWithStaleness(t *testing.T) {
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 7 {
t.Fatalf("expecting 8 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
t.Fatalf("expecting 7 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0}
timestampsExpected := []int64{0, 45e3}
@@ -1814,7 +1815,7 @@ func TestRollupIncreasePureWithStaleness(t *testing.T) {
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 7 {
t.Fatalf("expecting 8 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
t.Fatalf("expecting 7 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0}
timestampsExpected := []int64{0, 45e3}
@@ -1888,3 +1889,126 @@ func TestRollupIncreasePureWithStaleness(t *testing.T) {
testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
})
}
func TestRollupChangesWithStaleness(t *testing.T) {
// there is a gap between samples in the dataset below
timestamps := []int64{0, 15000, 30000, 70000}
values := []float64{1, 1, 1, 1}
// if step > gap, then changes will always respect value before gap
t.Run("step>gap", func(t *testing.T) {
rc := rollupConfig{
Func: rollupChanges,
Start: 0,
End: 70000,
Step: 45000,
Window: 0,
MaxPointsPerSeries: 1e4,
}
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 7 {
t.Fatalf("expecting 7 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0}
timestampsExpected := []int64{0, 45e3}
testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
})
// even if LookbackDelta < gap
t.Run("step>gap;LookbackDelta<gap", func(t *testing.T) {
rc := rollupConfig{
Func: rollupChanges,
Start: 0,
End: 70000,
Step: 45000,
LookbackDelta: 10e3,
Window: 0,
MaxPointsPerSeries: 1e4,
}
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 7 {
t.Fatalf("expecting 7 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0}
timestampsExpected := []int64{0, 45e3}
testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
})
// if step < gap and LookbackDelta>0 then changes will respect value before gap
// only if it is not stale according to LookbackDelta
t.Run("step<gap;LookbackDelta>0", func(t *testing.T) {
rc := rollupConfig{
Func: rollupChanges,
Start: 0,
End: 70000,
Step: 10000,
Window: 0,
MaxPointsPerSeries: 1e4,
LookbackDelta: 30e3,
}
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 8 {
t.Fatalf("expecting 8 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0, 0, 0, 0, 0, 0, 1}
timestampsExpected := []int64{0, 10e3, 20e3, 30e3, 40e3, 50e3, 60e3, 70e3}
testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
})
// there is a staleness marker between samples in the dataset below
timestamps = []int64{0, 10000, 20000, 30000, 40000}
values = []float64{1, 1, 1, decimal.StaleNaN, 1}
t.Run("staleness marker", func(t *testing.T) {
rc := rollupConfig{
Func: rollupChanges,
Start: 0,
End: 40000,
Step: 10000,
Window: 0,
MaxPointsPerSeries: 1e4,
}
rc.Timestamps = rc.getTimestamps()
gotValues, samplesScanned := rc.Do(nil, values, timestamps)
if samplesScanned != 10 {
t.Fatalf("expecting 10 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{1, 0, 0, 1, 1}
timestampsExpected := []int64{0, 10e3, 20e3, 30e3, 40e3}
testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
})
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10280
//
// Gaps between samples may exceed maxPrevInterval,
// either due to changes in the scrape interval or missing scrapes.
// For example, if the scrape interval was initially 30s and later changed to 10s,
// the auto-calculated scrape interval is 10s, with maxPrevInterval inflated to 15s.
//
// At t=30s:
// prevValue is NaN, as the last sample at t=0s is considered stale for t=30s given the maxPrevInterval.
// realPrevValue is 1, taken from t=0s, since LookbackDelta=0 ignores staleness.
// the result should be `changes(1, 1) -> 0` instead of `changes(1, NaN)`.
// At t=100s:
// prevValue is also NaN, as the last sample at t=70s is considered stale for t=100s.
// realPrevValue is 1, taken from t=70s,
// result should be `changes(2, 1) -> 1`.
timestamps = []int64{0, 30000, 40000, 50000, 60000, 70000, 100000}
values = []float64{1, 1, 1, 1, 1, 1, 2}
t.Run("issue-10280", func(t *testing.T) {
rc := rollupConfig{
Func: rollupChanges,
Start: 0,
End: 100e3,
Step: 10e3,
MaxPointsPerSeries: 1e4,
}
rc.Timestamps = rc.getTimestamps()
gotValues, _ := rc.Do(nil, values, timestamps)
valuesExpected := []float64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
timestampsExpected := []int64{0, 10e3, 20e3, 30e3, 40e3, 50e3, 60e3, 70e3, 80e3, 90e3, 100e3}
testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
})
}
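A condensed, standalone illustration of the fallback exercised by the tests above: when prevValue is NaN because of a gap, changes() now assumes the value stayed unchanged across the gap if realPrevValue is available, instead of counting the first sample in the window as a change. This is a sketch of the idea, not the exact production rollupChanges:

package main

import (
	"fmt"
	"math"
)

// changesInWindow counts value changes inside a window. prevValue is the
// sample just before the window when it is within the staleness bound (NaN
// otherwise); realPrevValue is the preceding sample regardless of staleness.
func changesInWindow(values []float64, prevValue, realPrevValue float64) float64 {
	n := float64(0)
	if math.IsNaN(prevValue) {
		if len(values) == 0 {
			return math.NaN()
		}
		if !math.IsNaN(realPrevValue) {
			// Assume the value did not change during the gap.
			prevValue = realPrevValue
		} else {
			n++
			prevValue = values[0]
			values = values[1:]
		}
	}
	for _, v := range values {
		if v != prevValue {
			n++
			prevValue = v
		}
	}
	return n
}

func main() {
	nan := math.NaN()
	fmt.Println(changesInWindow([]float64{1}, nan, 1))   // 0: unchanged across the gap
	fmt.Println(changesInWindow([]float64{2}, nan, 1))   // 1: value changed after the gap
	fmt.Println(changesInWindow([]float64{1}, nan, nan)) // 1: no preceding sample at all
}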

View File

@@ -2,6 +2,7 @@ package promql
import (
"fmt"
"math/rand"
"reflect"
"strconv"
"strings"
@@ -280,6 +281,87 @@ func timeseriesToPromMetrics(tss []*timeseries) string {
return strings.Join(a, "\n")
}
func TestTransformFuncSort(t *testing.T) {
f := func(isDesc bool, metrics, expectedMetrics string) {
t.Helper()
tss := promMetricsToTimeseries(metrics)
// Input tss order is not stable in VictoriaMetrics
// Shuffle tss to reflect that
rand.Shuffle(len(tss), func(i, j int) {
tss[i], tss[j] = tss[j], tss[i]
})
sortFunc := newTransformFuncSort(isDesc)
sorted, err := sortFunc(&transformFuncArg{
args: [][]*timeseries{tss},
})
if err != nil {
t.Fatalf("sort failed: %s", err)
}
result := timeseriesToPromMetrics(sorted)
if result != expectedMetrics {
t.Fatalf("unexpected sort result:\ngot\n%s\nwant\n%s", result, expectedMetrics)
}
}
// Test asc sort with different values
f(
false,
`foo{label="a"} 3 123
foo{label="b"} 2 123
foo{label="c"} 1 123`,
`foo{label="c"} 1 123
foo{label="b"} 2 123
foo{label="a"} 3 123`,
)
// Test desc sort with different values
f(
true,
`foo{label="a"} 3 123
foo{label="b"} 2 123
foo{label="c"} 1 123`,
`foo{label="a"} 3 123
foo{label="b"} 2 123
foo{label="c"} 1 123`,
)
// Test asc sort with mixed values
f(
false,
`foo{label="a"} 1 123
foo{label="b"} 1 123
foo{label="c"} 2 123
foo{label="d"} 2 123
foo{label="e"} 3 123
`,
`foo{label="a"} 1 123
foo{label="b"} 1 123
foo{label="c"} 2 123
foo{label="d"} 2 123
foo{label="e"} 3 123`,
)
// Test desc sort with mixed values
f(
true,
`foo{label="a"} 1 123
foo{label="b"} 1 123
foo{label="c"} 2 123
foo{label="d"} 2 123
foo{label="e"} 3 123`,
`foo{label="e"} 3 123
foo{label="c"} 2 123
foo{label="d"} 2 123
foo{label="a"} 1 123
foo{label="b"} 1 123`,
)
}
func TestGetNumPrefix(t *testing.T) {
f := func(s, prefixExpected string) {
t.Helper()

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -37,10 +37,10 @@
<meta property="og:title" content="UI for VictoriaMetrics">
<meta property="og:url" content="https://victoriametrics.com/">
<meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data">
<script type="module" crossorigin src="./assets/index-Clpj_g75.js"></script>
<link rel="modulepreload" crossorigin href="./assets/vendor-D5YL0cqB.js">
<script type="module" crossorigin src="./assets/index-B6lol36n.js"></script>
<link rel="modulepreload" crossorigin href="./assets/vendor-EZef-S_8.js">
<link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
<link rel="stylesheet" crossorigin href="./assets/index-jEWkrqzO.css">
<link rel="stylesheet" crossorigin href="./assets/index-VQRcNK83.css">
</head>
<body>
<noscript>You need to enable JavaScript to run this app.</noscript>

View File

@@ -29,7 +29,8 @@ import (
)
var (
retentionPeriod = flagutil.NewRetentionDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter")
retentionPeriod = flagutil.NewRetentionDuration("retentionPeriod", "1M", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. "+
"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention. See also -retentionFilter")
snapshotAuthKey = flagutil.NewPassword("snapshotAuthKey", "authKey, which must be passed in query string to /snapshot* pages. It overrides -httpAuth.*")
forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages. It overrides -httpAuth.*")
forceFlushAuthKey = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush pages. It overrides -httpAuth.*")
@@ -388,11 +389,23 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
case "/create":
snapshotsCreateTotal.Inc()
w.Header().Set("Content-Type", "application/json")
snapshotPath := Storage.MustCreateSnapshot()
snapshotName := Storage.MustCreateSnapshot()
// Verify whether the client already closed the connection.
// In this case it is better to drop the created snapshot, since the client isn't interested in it.
if err := r.Context().Err(); err != nil {
logger.Infof("deleting already created snapshot at %s because the client canceled the request", snapshotName)
if err := deleteSnapshot(snapshotName); err != nil {
logger.Infof("cannot delete just created snapshot: %s", err)
return true
}
return true
}
if prometheusCompatibleResponse {
fmt.Fprintf(w, `{"status":"success","data":{"name":%s}}`, stringsutil.JSONString(snapshotPath))
fmt.Fprintf(w, `{"status":"success","data":{"name":%s}}`, stringsutil.JSONString(snapshotName))
} else {
fmt.Fprintf(w, `{"status":"ok","snapshot":%s}`, stringsutil.JSONString(snapshotPath))
fmt.Fprintf(w, `{"status":"ok","snapshot":%s}`, stringsutil.JSONString(snapshotName))
}
return true
case "/list":
@@ -412,23 +425,12 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
snapshotsDeleteTotal.Inc()
w.Header().Set("Content-Type", "application/json")
snapshotName := r.FormValue("snapshot")
snapshots := Storage.MustListSnapshots()
for _, snName := range snapshots {
if snName == snapshotName {
if err := Storage.DeleteSnapshot(snName); err != nil {
err = fmt.Errorf("cannot delete snapshot %q: %w", snName, err)
jsonResponseError(w, err)
snapshotsDeleteErrorsTotal.Inc()
return true
}
fmt.Fprintf(w, `{"status":"ok"}`)
return true
}
if err := deleteSnapshot(snapshotName); err != nil {
jsonResponseError(w, err)
snapshotsDeleteErrorsTotal.Inc()
return true
}
err := fmt.Errorf("cannot find snapshot %q", snapshotName)
jsonResponseError(w, err)
fmt.Fprintf(w, `{"status":"ok"}`)
return true
case "/delete_all":
snapshotsDeleteAllTotal.Inc()
@@ -449,15 +451,26 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
}
func deleteSnapshot(snapshotName string) error {
snapshots := Storage.MustListSnapshots()
for _, snName := range snapshots {
if snName == snapshotName {
if err := Storage.DeleteSnapshot(snName); err != nil {
return fmt.Errorf("cannot delete snapshot %q: %w", snName, err)
}
return nil
}
}
return fmt.Errorf("cannot find snapshot %q", snapshotName)
}
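The handler change above checks r.Context().Err() right after the snapshot is created, so a snapshot is not kept when the client has already disconnected. A minimal sketch of that request-cancellation pattern with hypothetical create/delete helpers (not the storage functions used above):

package main

import (
	"fmt"
	"log"
	"net/http"
)

// createSnapshot and deleteSnapshot are stand-ins for the real storage helpers.
func createSnapshot() string           { return "20260129164444-0123456789ABCDEF" }
func deleteSnapshot(name string) error { return nil }

func snapshotCreateHandler(w http.ResponseWriter, r *http.Request) {
	name := createSnapshot()
	// The client may have canceled the request while the snapshot was being
	// created; in that case drop it instead of leaving an orphaned snapshot.
	if err := r.Context().Err(); err != nil {
		if err := deleteSnapshot(name); err != nil {
			log.Printf("cannot delete just created snapshot: %s", err)
		}
		return
	}
	fmt.Fprintf(w, `{"status":"ok","snapshot":%q}`, name)
}

func main() {
	http.HandleFunc("/snapshot/create", snapshotCreateHandler)
	log.Fatal(http.ListenAndServe(":8482", nil))
}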
func initStaleSnapshotsRemover(strg *storage.Storage) {
staleSnapshotsRemoverCh = make(chan struct{})
if snapshotsMaxAge.Duration() <= 0 {
return
}
snapshotsMaxAgeDur := snapshotsMaxAge.Duration()
staleSnapshotsRemoverWG.Add(1)
go func() {
defer staleSnapshotsRemoverWG.Done()
staleSnapshotsRemoverWG.Go(func() {
d := timeutil.AddJitterToDuration(time.Second * 11)
t := time.NewTicker(d)
defer t.Stop()
@@ -469,7 +482,7 @@ func initStaleSnapshotsRemover(strg *storage.Storage) {
}
strg.MustDeleteStaleSnapshots(snapshotsMaxAgeDur)
}
}()
})
}
func stopStaleSnapshotsRemover() {

View File

@@ -1,4 +1,4 @@
FROM golang:1.25.5 AS build-web-stage
FROM golang:1.25.6 AS build-web-stage
COPY build /build
WORKDIR /build

View File

@@ -1,26 +1,26 @@
# All these commands must run from repository root.
copy-metricsql-docs:
cp docs/victoriametrics/MetricsQL.md app/vmui/packages/vmui/src/assets/MetricsQL.md
vmui-package-base-image:
docker build -t vmui-builder-image -f app/vmui/Dockerfile-build ./app/vmui
vmui-build: copy-metricsql-docs vmui-package-base-image
vmui-run-npm-command: vmui-package-base-image
docker run --rm \
--user $(shell id -u):$(shell id -g) \
--mount type=bind,src="$(shell pwd)/app/vmui",dst=/build \
-w /build/packages/vmui \
--entrypoint=/bin/bash \
vmui-builder-image -c "npm install && npm run build"
vmui-builder-image -c "[ \"$$VMUI_SKIP_INSTALL\" = \"true\" ] || npm ci; $(NPM_COMMAND)"
vmui-anomaly-build: vmui-package-base-image
docker run --rm \
--user $(shell id -u):$(shell id -g) \
--mount type=bind,src="$(shell pwd)/app/vmui",dst=/build \
-w /build/packages/vmui \
--entrypoint=/bin/bash \
vmui-builder-image -c "npm install && npm run build:anomaly"
vmui-install:
NPM_COMMAND="true" $(MAKE) vmui-run-npm-command
vmui-package-base-image:
docker build -t vmui-builder-image -f app/vmui/Dockerfile-build ./app/vmui
vmui-build: copy-metricsql-docs
NPM_COMMAND="npm run build" $(MAKE) vmui-run-npm-command
vmui-release: vmui-build
docker build -t ${DOCKER_NAMESPACE}/vmui:latest -f app/vmui/Dockerfile-web ./app/vmui/packages/vmui
@@ -38,11 +38,11 @@ vmui-update: vmui-build
vmui-install-dependencies:
cd app/vmui/packages/vmui && npm ci
vmui-lint: vmui-install-dependencies
cd app/vmui/packages/vmui && npm run lint
vmui-lint:
NPM_COMMAND="npm run lint" $(MAKE) vmui-run-npm-command
vmui-typecheck: vmui-install-dependencies
cd app/vmui/packages/vmui && npm run typecheck
vmui-typecheck:
NPM_COMMAND="npm run typecheck" $(MAKE) vmui-run-npm-command
vmui-test: vmui-install-dependencies
cd app/vmui/packages/vmui && npm run test
vmui-test:
NPM_COMMAND="npm run test" $(MAKE) vmui-run-npm-command

View File

@@ -1 +0,0 @@
VITE_APP_TYPE=vmanomaly

View File

@@ -1,23 +0,0 @@
import { readFile } from "fs/promises";
import { IndexHtmlTransform } from "vite";
/**
* Vite plugin to dynamically load index.html based on the current mode.
* If a specific mode-based index file (e.g., index.vmanomaly.html) exists, it is used.
* Otherwise, the default index.html is loaded.
*/
export default function dynamicIndexHtmlPlugin({ mode }) {
return {
name: "vm-dynamic-index-html",
transformIndexHtml: {
order: "pre",
handler: async () => {
try {
return await readFile(`./index.${mode}.html`, "utf8");
} catch (error) {
return await readFile("./index.html", "utf8");
}
}
} as IndexHtmlTransform
};
}

View File

@@ -46,7 +46,7 @@ export default [...compat.extends(
settings: {
react: {
pragma: "React",
version: "detect",
version: "19.0",
},
linkComponents: ["Hyperlink", {
@@ -69,10 +69,11 @@ export default [...compat.extends(
"varsIgnorePattern": "^_",
"ignoreRestSiblings": true
}],
"unused-imports/no-unused-imports": "error",
"react/jsx-closing-bracket-location": [1, "line-aligned"],
"object-curly-spacing": [2, "always"],
"react/jsx-max-props-per-line": [1, {
maximum: 1,
@@ -81,13 +82,23 @@ export default [...compat.extends(
"react/jsx-first-prop-new-line": [1, "multiline"],
// Disable core indent rule due to recursion issues in ESLint 9; use JSX-specific rules instead
indent: "off",
indent: ["error", 2, {
SwitchCase: 1,
ignoredNodes: [
"JSXElement",
"JSXElement *",
"JSXFragment",
"JSXFragment *",
],
}],
"react/jsx-indent": ["error", 2],
"react/jsx-indent-props": ["error", 2],
"linebreak-style": ["error", "unix"],
quotes: ["error", "double"],
semi: ["error", "always"],
// Formatting rules moved out of ESLint core; omit here to avoid deprecation noise
"react/prop-types": 0,
"react/react-in-jsx-scope": "off",
},
}];

View File

@@ -1,54 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<link rel="icon" href="/favicon.svg" />
<link rel="apple-touch-icon" href="/favicon.svg" />
<link rel="mask-icon" href="/favicon.svg" color="#000000">
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=5"/>
<meta name="theme-color" content="#000000"/>
<meta name="description" content="Detect anomalies in your metrics with VictoriaMetrics Anomaly Detection UI"/>
<!--
manifest.json provides metadata used when your web app is installed on a
user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
-->
<link rel="manifest" href="/manifest.json" crossorigin="use-credentials"/>
<!--
Notice the use of in the tags above.
It will be replaced with the URL of the `public` folder during the build.
Only files inside the `public` folder can be referenced from the HTML.
Unlike "/favicon.ico" or "favicon.ico", "/favicon.ico" will
work correctly both with client-side routing and a non-root public URL.
Learn how to configure a non-root public URL by running `npm run build`.
-->
<title>UI for VictoriaMetrics Anomaly Detection</title>
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="UI for VictoriaMetrics Anomaly Detection">
<meta name="twitter:site" content="@https://victoriametrics.com/products/enterprise/anomaly-detection/">
<meta name="twitter:description" content="Detect anomalies in your metrics with VictoriaMetrics Anomaly Detection UI">
<meta name="twitter:image" content="/preview.jpg">
<meta property="og:type" content="website">
<meta property="og:title" content="UI for VictoriaMetrics Anomaly Detection">
<meta property="og:url" content="https://victoriametrics.com/products/enterprise/anomaly-detection/">
<meta property="og:description" content="Detect anomalies in your metrics with VictoriaMetrics Anomaly Detection UI">
</head>
<body>
<noscript>You need to enable JavaScript to run this app.</noscript>
<div id="root"></div>
<!--
This HTML file is a template.
If you open it directly in the browser, you will see an empty page.
You can add webfonts, meta tags, or analytics to this file.
The build step will place the bundled scripts into the <body> tag.
To begin the development, run `npm start` or `yarn start`.
To create a production bundle, use `npm run build` or `yarn build`.
-->
<script type="module" src="/src/index.tsx"></script>
</body>
</html>

File diff suppressed because it is too large

View File

@@ -7,10 +7,8 @@
"scripts": {
"prestart": "npm run copy-metricsql-docs",
"start": "vite",
"start:playground": "cross-env PLAYGROUND=METRICS npm run start",
"start:anomaly": "vite --mode vmanomaly",
"start:playground": "cross-env PLAYGROUND=true npm run start",
"build": "vite build",
"build:anomaly": "vite build --mode vmanomaly",
"lint": "eslint --output-file vmui-lint-report.json --format json 'src/**/*.{ts,tsx}'",
"lint:local": "eslint --ext .ts,.tsx -f stylish src",
"lint:fix": "eslint 'src/**/*.{ts,tsx}' --fix",
@@ -18,47 +16,48 @@
"preview": "vite preview",
"typecheck": "tsc --noEmit",
"test": "vitest run",
"test:dev": "vitest"
"test:dev": "vitest",
"precommit": "npm run lint:local && npm run typecheck && npm run test"
},
"dependencies": {
"classnames": "^2.5.1",
"dayjs": "^1.11.13",
"dayjs": "^1.11.19",
"lodash.debounce": "^4.0.8",
"marked": "^16.0.0",
"preact": "^10.26.9",
"qs": "^6.14.0",
"marked": "^17.0.1",
"preact": "^10.28.2",
"qs": "^6.14.1",
"react-input-mask": "^2.0.4",
"react-router-dom": "^7.6.3",
"react-router-dom": "^7.12.0",
"uplot": "^1.6.32",
"vite": "^7.1.11",
"web-vitals": "^5.0.3"
"vite": "^7.3.1",
"web-vitals": "^5.1.0"
},
"devDependencies": {
"@eslint/eslintrc": "^3.3.1",
"@eslint/js": "^9.30.1",
"@eslint/eslintrc": "^3.3.3",
"@eslint/js": "^9.39.2",
"@preact/preset-vite": "^2.10.2",
"@testing-library/jest-dom": "^6.6.3",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/preact": "^3.2.4",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "^24.0.12",
"@types/node": "^25.0.8",
"@types/qs": "^6.14.0",
"@types/react": "^19.1.8",
"@types/react": "^19.2.8",
"@types/react-input-mask": "^3.0.6",
"@types/react-router-dom": "^5.3.3",
"@typescript-eslint/eslint-plugin": "^8.36.0",
"@typescript-eslint/parser": "^8.36.0",
"cross-env": "^7.0.3",
"eslint": "^9.30.1",
"@typescript-eslint/eslint-plugin": "^8.53.0",
"@typescript-eslint/parser": "^8.53.0",
"cross-env": "^10.1.0",
"eslint": "^9.39.2",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-unused-imports": "^4.1.4",
"globals": "^16.3.0",
"eslint-plugin-unused-imports": "^4.3.0",
"globals": "^17.0.0",
"http-proxy-middleware": "^3.0.5",
"jsdom": "^26.1.0",
"jsdom": "^27.4.0",
"postcss": "^8.5.6",
"rollup-plugin-visualizer": "^6.0.3",
"sass-embedded": "^1.89.2",
"typescript": "^5.8.3",
"vitest": "^3.2.4"
"rollup-plugin-visualizer": "^6.0.5",
"sass-embedded": "^1.97.2",
"typescript": "^5.9.3",
"vitest": "^4.0.17"
},
"browserslist": {
"production": [

View File

@@ -1,41 +0,0 @@
import { FC, useState } from "preact/compat";
import { HashRouter, Route, Routes } from "react-router-dom";
import AppContextProvider from "./contexts/AppContextProvider";
import ThemeProvider from "./components/Main/ThemeProvider/ThemeProvider";
import AnomalyLayout from "./layouts/AnomalyLayout/AnomalyLayout";
import ExploreAnomaly from "./pages/ExploreAnomaly/ExploreAnomaly";
import router from "./router";
import CustomPanel from "./pages/CustomPanel";
const AppAnomaly: FC = () => {
const [loadedTheme, setLoadedTheme] = useState(false);
return <>
<HashRouter>
<AppContextProvider>
<>
<ThemeProvider onLoaded={setLoadedTheme}/>
{loadedTheme && (
<Routes>
<Route
path={"/"}
element={<AnomalyLayout/>}
>
<Route
path={"/"}
element={<ExploreAnomaly/>}
/>
<Route
path={router.query}
element={<CustomPanel/>}
/>
</Route>
</Routes>
)}
</>
</AppContextProvider>
</HashRouter>
</>;
};
export default AppAnomaly;

View File

@@ -14,12 +14,11 @@ export type QueryGroup = {
interface LegendProps {
labels: LegendItemType[];
query: string[];
isAnomalyView?: boolean;
isPredefinedPanel?: boolean;
onChange: (item: LegendItemType, metaKey: boolean) => void;
}
const Legend: FC<LegendProps> = ({ labels, query, isAnomalyView, isPredefinedPanel, onChange }) => {
const Legend: FC<LegendProps> = ({ labels, query, isPredefinedPanel, onChange }) => {
const { groupByLabel } = useLegendGroup();
const groupSeries = useGroupSeries({ labels, query, groupByLabel });
@@ -33,7 +32,6 @@ const Legend: FC<LegendProps> = ({ labels, query, isAnomalyView, isPredefinedPan
key={group}
labels={items}
group={group}
isAnomalyView={isAnomalyView}
onChange={onChange}
/>
))}

View File

@@ -13,7 +13,6 @@ import { getFromStorage } from "../../../../utils/storage";
export type LegendProps = {
labels: LegendItemType[];
isAnomalyView?: boolean;
duplicateFields?: string[];
onChange: (item: LegendItemType, metaKey: boolean) => void;
}
@@ -22,7 +21,7 @@ interface LegendGroupProps extends LegendProps {
group: string | number;
}
const LegendGroup: FC<LegendGroupProps> = ({ labels, group, isAnomalyView, onChange }) => {
const LegendGroup: FC<LegendGroupProps> = ({ labels, group, onChange }) => {
const { isTableView } = useLegendView();
const { groupByLabel } = useLegendGroup();
const copyToClipboard = useCopyToClipboard();
@@ -39,14 +38,14 @@ const LegendGroup: FC<LegendGroupProps> = ({ labels, group, isAnomalyView, onCha
const Content = isTableView ? LegendTable : LegendLines;
const disableAutoCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE") === "false"
const defaultExpanded = disableAutoCollapse ? true : sortedLabels.length <= LEGEND_COLLAPSE_SERIES_LIMIT
const disableAutoCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE") === "false";
const defaultExpanded = disableAutoCollapse ? true : sortedLabels.length <= LEGEND_COLLAPSE_SERIES_LIMIT;
const expandedWarning = (
<span className="vm-legend-group-header__warning">
Legend collapsed by default ({sortedLabels.length} series), click to expand.
</span>
)
);
return (
<div
@@ -81,7 +80,6 @@ const LegendGroup: FC<LegendGroupProps> = ({ labels, group, isAnomalyView, onCha
>
<Content
labels={sortedLabels}
isAnomalyView={isAnomalyView}
duplicateFields={duplicateFields}
onChange={onChange}
/>

View File

@@ -13,11 +13,10 @@ import { getLabelAlias } from "../../../../../utils/metric";
interface LegendItemProps {
legend: LegendItemType;
onChange?: (item: LegendItemType, metaKey: boolean) => void;
isAnomalyView?: boolean;
duplicateFields?: string[];
}
const LegendItem: FC<LegendItemProps> = ({ legend, onChange, duplicateFields, isAnomalyView }) => {
const LegendItem: FC<LegendItemProps> = ({ legend, onChange, duplicateFields }) => {
const copyToClipboard = useCopyToClipboard();
const { hideStats } = useShowStats();
@@ -52,12 +51,10 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange, duplicateFields, is
})}
onClick={createHandlerClick(legend)}
>
{!isAnomalyView && (
<div
className="vm-legend-item__marker"
style={{ backgroundColor: legend.color }}
/>
)}
<div
className="vm-legend-item__marker"
style={{ backgroundColor: legend.color }}
/>
<div className="vm-legend-item-info">
<span className="vm-legend-item-info__label">
{legend.hasAlias && legend.label}

View File

@@ -2,7 +2,7 @@ import { FC } from "preact/compat";
import LegendItem from "../LegendItem/LegendItem";
import { LegendProps } from "../LegendGroup";
const LegendLines: FC<LegendProps> = ({ labels, isAnomalyView, duplicateFields, onChange }) => {
const LegendLines: FC<LegendProps> = ({ labels, duplicateFields, onChange }) => {
return (
<div className="vm-legend-item-container">
@@ -10,7 +10,6 @@ const LegendLines: FC<LegendProps> = ({ labels, isAnomalyView, duplicateFields,
<LegendItem
key={legendItem.label}
legend={legendItem}
isAnomalyView={isAnomalyView}
duplicateFields={duplicateFields}
onChange={onChange}
/>

View File

@@ -1,82 +0,0 @@
import { FC, useMemo } from "preact/compat";
import { ForecastType, SeriesItem } from "../../../../types";
import { anomalyColors } from "../../../../utils/color";
import "./style.scss";
type Props = {
series: SeriesItem[];
};
const titles: Partial<Record<ForecastType, string>> = {
[ForecastType.yhat]: "yhat",
[ForecastType.yhatLower]: "yhat_upper - yhat_lower",
[ForecastType.yhatUpper]: "yhat_upper - yhat_lower",
[ForecastType.anomaly]: "anomalies",
[ForecastType.training]: "training data",
[ForecastType.actual]: "y"
};
const LegendAnomaly: FC<Props> = ({ series }) => {
const uniqSeriesStyles = useMemo(() => {
const uniqSeries = series.reduce((accumulator, currentSeries) => {
const hasForecast = Object.prototype.hasOwnProperty.call(currentSeries, "forecast");
const isNotUpper = currentSeries.forecast !== ForecastType.yhatUpper;
const isUniqForecast = !accumulator.find(s => s.forecast === currentSeries.forecast);
if (hasForecast && isUniqForecast && isNotUpper) {
accumulator.push(currentSeries);
}
return accumulator;
}, [] as SeriesItem[]);
const trainingSeries = {
...uniqSeries[0],
forecast: ForecastType.training,
color: anomalyColors[ForecastType.training],
};
uniqSeries.splice(1, 0, trainingSeries);
return uniqSeries.map(s => ({
...s,
color: typeof s.stroke === "string" ? s.stroke : anomalyColors[s.forecast || ForecastType.actual],
}));
}, [series]);
return <>
<div className="vm-legend-anomaly">
{/* TODO: remove .filter() after the correct training data has been added */}
{uniqSeriesStyles.filter(f => f.forecast !== ForecastType.training).map((s, i) => (
<div
key={`${i}_${s.forecast}`}
className="vm-legend-anomaly-item"
>
<svg>
{s.forecast === ForecastType.anomaly ? (
<circle
cx="15"
cy="7"
r="4"
fill={s.color}
stroke={s.color}
strokeWidth="1.4"
/>
) : (
<line
x1="0"
y1="7"
x2="30"
y2="7"
stroke={s.color}
strokeWidth={s.width || 1}
strokeDasharray={s.dash?.join(",")}
/>
)}
</svg>
<div className="vm-legend-anomaly-item__title">{titles[s.forecast || ForecastType.actual]}</div>
</div>
))}
</div>
</>;
};
export default LegendAnomaly;

View File

@@ -1,23 +0,0 @@
@use "src/styles/variables" as *;
.vm-legend-anomaly {
position: relative;
display: flex;
align-items: center;
justify-content: center;
flex-wrap: wrap;
gap: calc($padding-large * 2);
cursor: default;
&-item {
display: flex;
align-items: center;
justify-content: center;
gap: $padding-small;
svg {
width: 30px;
height: 14px;
}
}
}

View File

@@ -13,7 +13,6 @@ import {
getRangeY,
getScales,
handleDestroy,
setBand,
setSelect
} from "../../../../utils/uplot";
import { MetricResult } from "../../../../api/types";
@@ -40,7 +39,6 @@ export interface LineChartProps {
setPeriod: ({ from, to }: { from: Date, to: Date }) => void;
layoutSize: ElementSize;
height?: number;
isAnomalyView?: boolean;
spanGaps?: boolean;
showAllPoints?: boolean;
}
@@ -55,7 +53,6 @@ const LineChart: FC<LineChartProps> = ({
setPeriod,
layoutSize,
height,
isAnomalyView,
spanGaps = false,
showAllPoints = false,
}) => {
@@ -75,7 +72,7 @@ const LineChart: FC<LineChartProps> = ({
seriesFocus,
setCursor,
resetTooltips
} = useLineTooltip({ u: uPlotInst, metrics, series, unit, isAnomalyView });
} = useLineTooltip({ u: uPlotInst, metrics, series, unit });
const options: uPlotOptions = {
...getDefaultOptions({ width: layoutSize.width, height }),
@@ -111,7 +108,6 @@ const LineChart: FC<LineChartProps> = ({
if (!uPlotInst) return;
delSeries(uPlotInst);
addSeries(uPlotInst, series, spanGaps, showAllPoints);
setBand(uPlotInst, series);
uPlotInst.redraw();
}, [series, spanGaps, showAllPoints]);

View File

@@ -29,7 +29,7 @@ const LimitsConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
const { seriesLimits } = useCustomPanelState();
const customPanelDispatch = useCustomPanelDispatch();
const storageCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE")
const storageCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE");
const [legendCollapse, setLegendCollapse] = useState(storageCollapse ? storageCollapse === "true" : true);
const [limits, setLimits] = useState(seriesLimits);
@@ -58,7 +58,7 @@ const LimitsConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
}, [limits]);
useEffect(() => {
saveToStorage("LEGEND_AUTO_COLLAPSE", `${legendCollapse}`)
saveToStorage("LEGEND_AUTO_COLLAPSE", `${legendCollapse}`);
}, [legendCollapse]);
useImperativeHandle(ref, () => ({ handleApply }), [handleApply]);

View File

@@ -9,7 +9,6 @@ import { getFromStorage, removeFromStorage, saveToStorage } from "../../../../ut
import useBoolean from "../../../../hooks/useBoolean";
import { ChildComponentHandle } from "../GlobalSettings";
import { useAppDispatch, useAppState } from "../../../../state/common/StateContext";
import { getTenantIdFromUrl } from "../../../../utils/tenants";
interface ServerConfiguratorProps {
onClose: () => void;
@@ -39,10 +38,6 @@ const ServerConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
};
const handleApply = useCallback(() => {
const tenantIdFromUrl = getTenantIdFromUrl(serverUrl);
if (tenantIdFromUrl !== "") {
dispatch({ type: "SET_TENANT_ID", payload: tenantIdFromUrl });
}
dispatch({ type: "SET_SERVER", payload: serverUrl });
onClose();
}, [serverUrl]);
@@ -60,12 +55,6 @@ const ServerConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
}
}, [enabledStorage]);
useEffect(() => {
if (enabledStorage) {
saveToStorage("SERVER_URL", serverUrl);
}
}, [serverUrl]);
useEffect(() => {
// the tenant selector can change the serverUrl
if (stateServerUrl === serverUrl) return;

View File

@@ -1,4 +1,4 @@
import { FC, useState, useRef, useEffect, useMemo } from "preact/compat";
import { FC, useState, useRef, useMemo } from "preact/compat";
import { useAppDispatch, useAppState } from "../../../../state/common/StateContext";
import { useTimeDispatch } from "../../../../state/time/TimeStateContext";
import { ArrowDownIcon, StorageIcon } from "../../../Main/Icons";
@@ -10,14 +10,14 @@ import { getAppModeEnable } from "../../../../utils/app-mode";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import TextField from "../../../Main/TextField/TextField";
import { getTenantIdFromUrl, replaceTenantId } from "../../../../utils/tenants";
import { replaceTenantId } from "../../../../utils/tenants";
import useBoolean from "../../../../hooks/useBoolean";
const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
const appModeEnable = getAppModeEnable();
const { isMobile } = useDeviceDetect();
const { tenantId: tenantIdState, serverUrl } = useAppState();
const { tenantId, serverUrl } = useAppState();
const dispatch = useAppDispatch();
const timeDispatch = useTimeDispatch();
@@ -48,10 +48,8 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
}, [accountIds]);
const createHandlerChange = (value: string) => () => {
const tenant = value;
dispatch({ type: "SET_TENANT_ID", payload: tenant });
if (serverUrl) {
const updateServerUrl = replaceTenantId(serverUrl, tenant);
const updateServerUrl = replaceTenantId(serverUrl, value);
if (updateServerUrl === serverUrl) return;
dispatch({ type: "SET_SERVER", payload: updateServerUrl });
timeDispatch({ type: "RUN_QUERY" });
@@ -59,16 +57,6 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
handleCloseOptions();
};
useEffect(() => {
const id = getTenantIdFromUrl(serverUrl);
if (tenantIdState && tenantIdState !== id) {
createHandlerChange(tenantIdState)();
} else {
createHandlerChange(id)();
}
}, [serverUrl]);
if (!showTenantSelector) return null;
return (
@@ -83,7 +71,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
<span className="vm-mobile-option__icon"><StorageIcon/></span>
<div className="vm-mobile-option-text">
<span className="vm-mobile-option-text__label">Tenant ID</span>
<span className="vm-mobile-option-text__value">{tenantIdState}</span>
<span className="vm-mobile-option-text__value">{tenantId}</span>
</div>
<span className="vm-mobile-option__arrow"><ArrowDownIcon/></span>
</div>
@@ -106,7 +94,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
)}
onClick={toggleOpenOptions}
>
{tenantIdState}
{tenantId}
</Button>
)}
</div>
@@ -138,7 +126,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
className={classNames({
"vm-list-item": true,
"vm-list-item_mobile": isMobile,
"vm-list-item_active": id === tenantIdState
"vm-list-item_active": id === tenantId
})}
key={id}
onClick={createHandlerChange(id)}
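
The handler above relies on replaceTenantId(serverUrl, value) from utils/tenants, whose implementation is not part of this hunk. As a hedged sketch only, assuming the documented cluster vmselect URL layout http://host:8481/select/&lt;accountID&gt;/prometheus, such a replacement could look like:

// Hedged sketch — the real replaceTenantId lives in utils/tenants and is not shown in this diff.
// It swaps the <accountID> path segment in a cluster vmselect URL.
const replaceTenantIdSketch = (serverUrl: string, tenantId: string): string =>
  serverUrl.replace(/(\/select\/)([^/]+)(\/)/, `$1${tenantId}$3`);

// replaceTenantIdSketch("http://vmselect:8481/select/0/prometheus", "42")
// -> "http://vmselect:8481/select/42/prometheus"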

View File

@@ -3,19 +3,18 @@ import { useEffect, useMemo, useState } from "preact/compat";
import { ErrorTypes } from "../../../../../types";
import { getAccountIds } from "../../../../../api/accountId";
import { getAppModeEnable, getAppModeParams } from "../../../../../utils/app-mode";
import { getTenantIdFromUrl } from "../../../../../utils/tenants";
export const useFetchAccountIds = () => {
const { useTenantID } = getAppModeParams();
const appModeEnable = getAppModeEnable();
const { serverUrl } = useAppState();
const { tenantId, serverUrl } = useAppState();
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();
const [accountIds, setAccountIds] = useState<string[]>([]);
const fetchUrl = useMemo(() => getAccountIds(serverUrl), [serverUrl]);
const isServerUrlWithTenant = useMemo(() => !!getTenantIdFromUrl(serverUrl), [serverUrl]);
const isServerUrlWithTenant = useMemo(() => !!tenantId, [tenantId]);
const preventFetch = appModeEnable ? !useTenantID : !isServerUrlWithTenant;
useEffect(() => {

View File

@@ -17,4 +17,4 @@ export const formatDuration = (raw: number) => {
export const formatEventTime = (raw: string) => {
const t = dayjs(raw);
return t.year() <= 1 ? "Never" : t.format("DD MMM YYYY HH:mm:ss");
}
};

View File

@@ -1,132 +0,0 @@
import { FC, useState } from "preact/compat";
import Button from "../Main/Button/Button";
import TextField from "../Main/TextField/TextField";
import Modal from "../Main/Modal/Modal";
import Spinner from "../Main/Spinner/Spinner";
import { DownloadIcon, ErrorIcon } from "../Main/Icons";
import useBoolean from "../../hooks/useBoolean";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import { useAppState } from "../../state/common/StateContext";
import classNames from "classnames";
import "./style.scss";
import { useQueryState } from "../../state/query/QueryStateContext";
import { useTimeState } from "../../state/time/TimeStateContext";
import { getStepFromDuration } from "../../utils/time";
const AnomalyConfig: FC = () => {
const { serverUrl } = useAppState();
const { isMobile } = useDeviceDetect();
const {
value: isModalOpen,
setTrue: setOpenModal,
setFalse: setCloseModal,
} = useBoolean(false);
const { query } = useQueryState();
const { period } = useTimeState();
const [isLoading, setIsLoading] = useState(false);
const [textConfig, setTextConfig] = useState<string>("");
const [downloadUrl, setDownloadUrl] = useState<string>("");
const [error, setError] = useState<string>("");
const fetchConfig = async () => {
setIsLoading(true);
try {
const queryParam = encodeURIComponent(query[0] || "");
const stepParam = encodeURIComponent(period.step || getStepFromDuration(period.end - period.start, false));
const url = `${serverUrl}/api/vmanomaly/config.yaml?query=${queryParam}&step=${stepParam}`;
const response = await fetch(url);
const contentType = response.headers.get("Content-Type");
if (!response.ok) {
const bodyText = await response.text();
setError(` ${response.status} ${response.statusText}: ${bodyText}`);
} else if (contentType == "application/yaml") {
const blob = await response.blob();
const yamlAsString = await blob.text();
setTextConfig(yamlAsString);
setDownloadUrl(URL.createObjectURL(blob));
} else {
setError("Response Content-Type is not YAML, does `Server URL` point to VMAnomaly server?");
}
} catch (error) {
console.error(error);
setError(String(error));
}
setIsLoading(false);
};
const handleOpenModal = () => {
setOpenModal();
setError("");
URL.revokeObjectURL(downloadUrl);
setTextConfig("");
setDownloadUrl("");
return fetchConfig();
};
return (
<>
<Button
color="secondary"
variant="outlined"
onClick={handleOpenModal}
>
Open Config
</Button>
{isModalOpen && (
<Modal
title="Download config"
onClose={setCloseModal}
>
<div
className={classNames({
"vm-anomaly-config": true,
"vm-anomaly-config_mobile": isMobile,
})}
>
{isLoading && (
<Spinner
containerStyles={{ position: "relative" }}
message={"Loading config..."}
/>
)}
{!isLoading && error && (
<div className="vm-anomaly-config-error">
<div className="vm-anomaly-config-error__icon"><ErrorIcon/></div>
<h3 className="vm-anomaly-config-error__title">Cannot download config</h3>
<p className="vm-anomaly-config-error__text">{error}</p>
</div>
)}
{!isLoading && textConfig && (
<TextField
value={textConfig}
label={"config.yaml"}
type="textarea"
disabled={true}
/>
)}
<div className="vm-anomaly-config-footer">
{downloadUrl && (
<a
href={downloadUrl}
download={"config.yaml"}
>
<Button
variant="contained"
startIcon={<DownloadIcon/>}
>
download
</Button>
</a>
)}
</div>
</div>
</Modal>
)}
</>
);
};
export default AnomalyConfig;

View File

@@ -1,61 +0,0 @@
@use "src/styles/variables" as *;
.vm-anomaly-config {
display: grid;
grid-template-rows: calc(($vh * 70) - 78px - ($padding-medium*3)) auto;
gap: $padding-global;
min-width: 400px;
max-width: 80vw;
min-height: 300px;
&_mobile {
width: 100%;
max-width: none;
min-height: 100%;
grid-template-rows: calc(($vh * 100) - 78px - ($padding-global*3)) auto;
}
textarea {
overflow: auto;
width: 100%;
height: 100%;
max-height: 900px;
}
&-error {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
width: 100%;
gap: $padding-small;
text-align: center;
&__icon {
display: flex;
align-items: center;
justify-content: center;
width: 30px;
height: 30px;
margin-bottom: $padding-small;
color: $color-error;
}
&__title {
font-size: $font-size-medium;
font-weight: bold;
}
&__text {
max-width: 700px;
line-height: 1.3;
}
}
&-footer {
display: flex;
align-items: center;
justify-content: flex-end;
gap: $padding-small;
}
}

View File

@@ -1,4 +1,5 @@
@use "src/styles/variables" as *;
@use 'sass:meta';
$button-radius: 6px;
@@ -42,6 +43,8 @@ $button-radius: 6px;
svg {
width: 14px;
min-width: 14px;
max-width: 14px;
}
}
@@ -51,6 +54,8 @@ $button-radius: 6px;
svg {
width: 16px;
min-width: 16px;
max-width: 16px;
}
}
@@ -60,6 +65,8 @@ $button-radius: 6px;
svg {
width: 18px;
min-width: 18px;
max-width: 18px;
line-height: 16px;
}
}
@@ -128,8 +135,14 @@ $button-radius: 6px;
);
@each $name, $color in $button-colors {
@include contained-button($name, $color, if($name == white, $color-black, $color-white));
@include outlined-button($name, $color, if($name == white, $color-white, $color));
@include text-button($name, if($name == white, $color-white, $color));
@if $name == white {
@include contained-button($name, $color, $color-black);
@include outlined-button($name, $color, $color-white);
@include text-button($name, $color-white);
} @else {
@include contained-button($name, $color, $color-white);
@include outlined-button($name, $color, $color);
@include text-button($name, $color);
}
}
}

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,8 @@
import { getFromStorage, removeFromStorage, saveToStorage, StorageKeys } from "../../utils/storage";
import {
getFromStorage,
saveToStorage,
StorageKeys,
} from "../../utils/storage";
import { QueryHistoryType } from "../../state/query/reducer";
import { MAX_QUERIES_HISTORY, MAX_QUERY_FIELDS } from "../../constants/graph";
@@ -73,17 +77,3 @@ export const getUpdatedHistory = (query: string, queryHistory?: QueryHistoryType
values: newValues
};
};
const migrateMetricsQueryHistoryToHistoryByKey = () => {
const migrateHistory = (type: HistoryType) => {
const queryList = getFromStorage(type) as string;
if (queryList) {
const queryHistory: string[][] = JSON.parse(queryList);
saveHistoryToStorage("METRICS_QUERY_HISTORY", type, queryHistory);
removeFromStorage([type]);
}
};
migrateHistory("QUERY_HISTORY");
migrateHistory("QUERY_FAVORITES");
};
migrateMetricsQueryHistoryToHistoryByKey();

View File

@@ -12,7 +12,7 @@ import {
getMinMaxBuffer,
getTimeSeries,
} from "../../../utils/uplot";
import { TimeParams, SeriesItem, LegendItemType } from "../../../types";
import { TimeParams, LegendItemType } from "../../../types";
import { AxisRange, YaxisState } from "../../../state/graph/reducer";
import { getMathStats } from "../../../utils/math";
import classNames from "classnames";
@@ -23,8 +23,6 @@ import { promValueToNumber } from "../../../utils/metric";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import useElementSize from "../../../hooks/useElementSize";
import { ChartTooltipProps } from "../../Chart/ChartTooltip/ChartTooltip";
import LegendAnomaly from "../../Chart/Line/LegendAnomaly/LegendAnomaly";
import { groupByMultipleKeys } from "../../../utils/array";
import { useGraphDispatch } from "../../../state/graph/GraphStateContext";
import { sameTs } from "../../../utils/time";
import { useLocation } from "react-router-dom";
@@ -44,7 +42,6 @@ export interface GraphViewProps {
fullWidth?: boolean;
height?: number;
isHistogram?: boolean;
isAnomalyView?: boolean;
isPredefinedPanel?: boolean;
spanGaps?: boolean;
showAllPoints?: boolean;
@@ -64,7 +61,6 @@ const GraphView: FC<GraphViewProps> = ({
fullWidth = true,
height,
isHistogram,
isAnomalyView,
isPredefinedPanel,
spanGaps,
showAllPoints
@@ -89,8 +85,8 @@ const GraphView: FC<GraphViewProps> = ({
const [legendValue, setLegendValue] = useState<ChartTooltipProps | null>(null);
const getSeriesItem = useMemo(() => {
return getSeriesItemContext(data, hideSeries, alias, showAllPoints, isAnomalyView, isRawQuery);
}, [data, hideSeries, alias, showAllPoints, isAnomalyView, isRawQuery]);
return getSeriesItemContext(data, hideSeries, alias, showAllPoints, isRawQuery);
}, [data, hideSeries, alias, showAllPoints, isRawQuery]);
const setLimitsYaxis = (minVal: number, maxVal: number) => {
let min = Number.isFinite(minVal) ? minVal : 0;
@@ -102,7 +98,7 @@ const GraphView: FC<GraphViewProps> = ({
};
const onChangeLegend = (legend: LegendItemType, metaKey: boolean) => {
setHideSeries(getHideSeries({ hideSeries, legend, metaKey, series, isAnomalyView }));
setHideSeries(getHideSeries({ hideSeries, legend, metaKey, series }));
};
const prepareHistogramData = (data: (number | null)[][]) => {
@@ -127,20 +123,6 @@ const GraphView: FC<GraphViewProps> = ({
return [null, [xs, ys, counts]];
};
const prepareAnomalyLegend = (legend: LegendItemType[]): LegendItemType[] => {
if (!isAnomalyView) return legend;
// For vmanomaly: Only select the first series per group (due to API specs) and clear __name__ in freeFormFields.
const grouped = groupByMultipleKeys(legend, ["group", "label"]);
return grouped.map((group) => {
const firstEl = group.values[0];
return {
...firstEl,
freeFormFields: { ...firstEl.freeFormFields, __name__: "" }
};
});
};
useEffect(() => {
const dLen = data.length;
@@ -155,7 +137,7 @@ const GraphView: FC<GraphViewProps> = ({
for (let i = 0; i < dLen; i++) {
const d = data[i];
const seriesItem = getSeriesItem(d, i);
const seriesItem = getSeriesItem(d);
tempSeries[i + 1] = seriesItem;
tempLegend[i] = getLegendItem(seriesItem, d.group);
@@ -206,7 +188,7 @@ const GraphView: FC<GraphViewProps> = ({
const avg = Math.abs(Number(avgRaw));
const range = getMinMaxBuffer(min, max);
const rangeStep = Math.abs(range[1] - range[0]);
const needStabilize = (avg > rangeStep * 1e10) && !isAnomalyView;
const needStabilize = (avg > rangeStep * 1e10);
return needStabilize ? results.fill(avg) : results;
});
@@ -214,13 +196,11 @@ const GraphView: FC<GraphViewProps> = ({
timeDataSeries.unshift(timeSeries);
const result = isHistogram ? prepareHistogramData(timeDataSeries) : timeDataSeries;
const legend = prepareAnomalyLegend(tempLegend);
setLimitsYaxis(minVal, maxVal);
setDataChart(result as uPlotData);
setSeries(tempSeries);
setLegend(legend);
isAnomalyView && setHideSeries(legend.map(s => s.label || "").slice(1));
setLegend(tempLegend);
}, [data, timezone, isHistogram, currentStep, isRawQuery]);
useEffect(() => {
@@ -232,13 +212,13 @@ const GraphView: FC<GraphViewProps> = ({
for (let i = 0; i < dLen; i++) {
const d = data[i];
const seriesItem = getSeriesItem(d, i);
const seriesItem = getSeriesItem(d);
tempSeries[i + 1] = seriesItem;
tempLegend[i] = getLegendItem(seriesItem, d.group);
}
setSeries(tempSeries);
setLegend(prepareAnomalyLegend(tempLegend));
setLegend(tempLegend);
}, [hideSeries]);
const hasTimeData = dataChart[0]?.length > 0;
@@ -281,7 +261,6 @@ const GraphView: FC<GraphViewProps> = ({
setPeriod={setPeriod}
layoutSize={containerSize}
height={height}
isAnomalyView={isAnomalyView}
spanGaps={spanGaps}
showAllPoints={isRawQuery ? true : showAllPoints}
/>
@@ -298,12 +277,10 @@ const GraphView: FC<GraphViewProps> = ({
onChangeLegend={setLegendValue}
/>
)}
{isAnomalyView && showLegend && (<LegendAnomaly series={series as SeriesItem[]}/>)}
{!isHistogram && showLegend && (
<Legend
labels={legend}
query={query}
isAnomalyView={isAnomalyView}
onChange={onChangeLegend}
isPredefinedPanel={isPredefinedPanel}
/>

View File

@@ -0,0 +1,69 @@
import { useEffect } from "react";
import { StorageErrorCode } from "./types";
import { useSnack } from "../../contexts/Snackbar";
import { storageErrorInfo } from "./storageErrors";
import "./style.scss";
const classifyStorageException = (e: unknown): StorageErrorCode => {
if (!(e instanceof DOMException)) return StorageErrorCode.UNKNOWN;
switch (e.name) {
case "QuotaExceededError":
return StorageErrorCode.QUOTA_EXCEEDED;
case "SecurityError":
return StorageErrorCode.SECURITY_ERROR;
default:
return StorageErrorCode.UNKNOWN;
}
};
const getStorageError = (storage: Storage | null | undefined): StorageErrorCode | null => {
if (!storage) {
return StorageErrorCode.NO_STORAGE;
}
try {
const key = "__vmui_test__";
storage.setItem(key, "1");
storage.removeItem(key);
return null;
} catch (e) {
return classifyStorageException(e);
}
};
const WebStorageCheck = () => {
const { showInfoMessage } = useSnack();
useEffect(() => {
const error = getStorageError(window.localStorage);
if (error) {
const { title, description, fix } = storageErrorInfo[error];
const text = (
<div className="vm-storage-check">
<h3>{title}</h3>
<p>{description}</p>
{!!fix?.length && (
<div className="vm-storage-check__fix">
<div>Try this:</div>
<ul>
{fix.map((step, i) => (
<li key={`${i}-${step}`}>{step}</li>
))}
</ul>
</div>
)}
</div>
);
showInfoMessage({ text: text, type: "error", timeout: 600000 });
}
}, []);
return null;
};
export default WebStorageCheck;
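
The check above boils down to a throwaway write against localStorage and mapping DOMException names to error codes. A standalone, hedged restatement of that pattern (the names below are illustrative and are not exported by this diff):

// Standalone sketch of the probe-and-classify pattern (illustrative names only).
type ProbeResult = "OK" | "NO_STORAGE" | "QUOTA_EXCEEDED" | "SECURITY_ERROR" | "UNKNOWN";

const probeStorage = (storage: Storage | null | undefined): ProbeResult => {
  if (!storage) return "NO_STORAGE";
  try {
    storage.setItem("__probe__", "1"); // throwaway write
    storage.removeItem("__probe__");
    return "OK";
  } catch (e) {
    if (e instanceof DOMException && e.name === "QuotaExceededError") return "QUOTA_EXCEEDED";
    if (e instanceof DOMException && e.name === "SecurityError") return "SECURITY_ERROR";
    return "UNKNOWN";
  }
};

// e.g. probeStorage(window.localStorage) -> "OK" in a normal tab,
//      "SECURITY_ERROR" when storage access is blocked by settings or extensions.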

View File

@@ -0,0 +1,47 @@
import { StorageError, StorageErrorCode } from "./types";
export const storageErrorInfo: Record<StorageErrorCode, StorageError> = {
[StorageErrorCode.NO_STORAGE]: {
title: "Storage unavailable",
description:
"Browser storage is not available for this website.",
fix: [
"Disable Private/Incognito mode and reload the page.",
"Disable privacy or ad-blocking extensions for this site and reload.",
"Open the site in another browser.",
],
},
[StorageErrorCode.SECURITY_ERROR]: {
title: "Storage access blocked",
description:
"Browser settings or an extension are blocking access to browser storage.",
fix: [
"Disable Private/Incognito mode and reload the page.",
"Disable privacy or ad-blocking extensions for this site and reload.",
"Open the site in a regular browser tab (not embedded).",
],
},
[StorageErrorCode.QUOTA_EXCEEDED]: {
title: "Storage quota exceeded",
description:
"The storage limit for this website has been reached.",
fix: [
"Clear this websites stored data and reload the page.",
"Close other tabs for this website and try again.",
"Use another browser or browser profile.",
],
},
[StorageErrorCode.UNKNOWN]: {
title: "Storage error",
description:
"An unexpected error occurred while accessing browser storage.",
fix: [
"Reload the page.",
"Update the browser and try again.",
"Disable browser extensions and reload.",
],
},
};

View File

@@ -0,0 +1,11 @@
@use "src/styles/variables" as *;
.vm-storage-check {
h3 {
font-weight: bold;
}
p {
margin-bottom: $padding-global
}
}

View File

@@ -0,0 +1,13 @@
export enum StorageErrorCode {
NO_STORAGE = "NO_STORAGE",
SECURITY_ERROR = "SECURITY_ERROR",
QUOTA_EXCEEDED = "QUOTA_EXCEEDED",
UNKNOWN = "UNKNOWN",
}
export type StorageError = {
title: string;
description: string;
fix: string[]
}

View File

@@ -1,8 +0,0 @@
export enum AppType {
victoriametrics = "victoriametrics",
vmanomaly = "vmanomaly",
}
export const APP_TYPE = import.meta.env.VITE_APP_TYPE;
export const APP_TYPE_VM = APP_TYPE === AppType.victoriametrics;
export const APP_TYPE_ANOMALY = APP_TYPE === AppType.vmanomaly;

View File

@@ -13,10 +13,9 @@ interface LineTooltipHook {
metrics: MetricResult[];
series: uPlotSeries[];
unit?: string;
isAnomalyView?: boolean;
}
const useLineTooltip = ({ u, metrics, series, unit, isAnomalyView }: LineTooltipHook) => {
const useLineTooltip = ({ u, metrics, series, unit }: LineTooltipHook) => {
const [showTooltip, setShowTooltip] = useState(false);
const [tooltipIdx, setTooltipIdx] = useState({ seriesIdx: -1, dataIdx: -1 });
const [stickyTooltips, setStickyToolTips] = useState<ChartTooltipProps[]>([]);
@@ -79,7 +78,7 @@ const useLineTooltip = ({ u, metrics, series, unit, isAnomalyView }: LineTooltip
point,
u: u,
id: `${seriesIdx}_${dataIdx}`,
title: groups.size > 1 && !isAnomalyView ? `Query ${group}` : "",
title: groups.size > 1 ? `Query ${group}` : "",
dates: [date ? dayjs(date * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT) : "-"],
value: formatPrettyNumber(value, min, max),
info: getMetricName(metricItem, seriesItem),
@@ -87,7 +86,7 @@ const useLineTooltip = ({ u, metrics, series, unit, isAnomalyView }: LineTooltip
marker: `${seriesItem?.stroke}`,
duplicateCount,
};
}, [u, tooltipIdx, metrics, series, unit, isAnomalyView]);
}, [u, tooltipIdx, metrics, series, unit]);
const handleClick = useCallback(() => {
if (!showTooltip) return;

View File

@@ -1,7 +1,6 @@
import { useAppDispatch, useAppState } from "../state/common/StateContext";
import { useEffect, useState } from "preact/compat";
import { ErrorTypes } from "../types";
import { APP_TYPE_VM } from "../constants/appType";
const useFetchAppConfig = () => {
const { serverUrl } = useAppState();
@@ -12,7 +11,6 @@ const useFetchAppConfig = () => {
useEffect(() => {
const fetchAppConfig = async () => {
if (!APP_TYPE_VM) return;
setError("");
setIsLoading(true);

View File

@@ -5,7 +5,6 @@ import { useTimeDispatch } from "../state/time/TimeStateContext";
import { getFromStorage } from "../utils/storage";
import dayjs from "dayjs";
import { getBrowserTimezone } from "../utils/time";
import { APP_TYPE_VM } from "../constants/appType";
const disabledDefaultTimezone = Boolean(getFromStorage("DISABLED_DEFAULT_TIMEZONE"));
@@ -29,7 +28,7 @@ const useFetchDefaultTimezone = () => {
};
const fetchDefaultTimezone = async () => {
if (!serverUrl || !APP_TYPE_VM) return;
if (!serverUrl) return;
setError("");
setIsLoading(true);

View File

@@ -13,7 +13,6 @@ import { isHistogramData } from "../utils/metric";
import { useGraphState } from "../state/graph/GraphStateContext";
import { getStepFromDuration } from "../utils/time";
import { getQueryStringValue } from "../utils/query-string";
import { APP_TYPE_ANOMALY } from "../constants/appType";
interface FetchQueryParams {
predefinedQuery?: string[]
@@ -135,7 +134,7 @@ export const useFetchQuery = ({
}
const preventChangeType = !!getQueryStringValue("display_mode", null);
isHistogramResult = !APP_TYPE_ANOMALY && isDisplayChart && !preventChangeType && isHistogramData(resp.data.result);
isHistogramResult = isDisplayChart && !preventChangeType && isHistogramData(resp.data.result);
seriesLimit = isHistogramResult ? Infinity : defaultLimit;
const freeTempSize = Math.max(0, seriesLimit - tempData.length);
resp.data.result.slice(0, freeTempSize).forEach((d: MetricBase) => {

View File

@@ -3,20 +3,9 @@ import "./constants/dayjsPlugins";
import App from "./App";
import reportWebVitals from "./reportWebVitals";
import "./styles/style.scss";
import { APP_TYPE, AppType } from "./constants/appType";
import AppAnomaly from "./AppAnomaly";
const getAppComponent = () => {
switch (APP_TYPE) {
case AppType.vmanomaly:
return <AppAnomaly/>;
default:
return <App/>;
}
};
const root = document.getElementById("root");
if (root) render(getAppComponent(), root);
if (root) render(<App/>, root);
// If you want to start measuring performance in your app, pass a function

View File

@@ -1,50 +0,0 @@
import Header from "../Header/Header";
import { FC, useEffect } from "preact/compat";
import { Outlet, useSearchParams } from "react-router-dom";
import qs from "qs";
import "../MainLayout/style.scss";
import { getAppModeEnable } from "../../utils/app-mode";
import classNames from "classnames";
import Footer from "../Footer/Footer";
import useFetchDefaultTimezone from "../../hooks/useFetchDefaultTimezone";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import ControlsAnomalyLayout from "./ControlsAnomalyLayout";
const AnomalyLayout: FC = () => {
const appModeEnable = getAppModeEnable();
const { isMobile } = useDeviceDetect();
const [searchParams, setSearchParams] = useSearchParams();
useFetchDefaultTimezone();
// for support old links with search params
const redirectSearchToHashParams = () => {
const { search, href } = window.location;
if (search) {
const query = qs.parse(search, { ignoreQueryPrefix: true });
Object.entries(query).forEach(([key, value]) => searchParams.set(key, value as string));
setSearchParams(searchParams);
window.location.search = "";
}
const newHref = href.replace(/\/\?#\//, "/#/");
if (newHref !== href) window.location.replace(newHref);
};
useEffect(redirectSearchToHashParams, []);
return <section className="vm-container">
<Header controlsComponent={ControlsAnomalyLayout}/>
<div
className={classNames({
"vm-container-body": true,
"vm-container-body_mobile": isMobile,
"vm-container-body_app": appModeEnable
})}
>
<Outlet/>
</div>
{!appModeEnable && <Footer/>}
</section>;
};
export default AnomalyLayout;

View File

@@ -1,43 +0,0 @@
import { FC } from "preact/compat";
import classNames from "classnames";
import TenantsConfiguration
from "../../components/Configurators/GlobalSettings/TenantsConfiguration/TenantsConfiguration";
import StepConfigurator from "../../components/Configurators/StepConfigurator/StepConfigurator";
import { TimeSelector } from "../../components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector";
import CardinalityDatePicker from "../../components/Configurators/CardinalityDatePicker/CardinalityDatePicker";
import { ExecutionControls } from "../../components/Configurators/TimeRangeSettings/ExecutionControls/ExecutionControls";
import GlobalSettings from "../../components/Configurators/GlobalSettings/GlobalSettings";
import ShortcutKeys from "../../components/Main/ShortcutKeys/ShortcutKeys";
import { ControlsProps } from "../Header/HeaderControls/HeaderControls";
const ControlsAnomalyLayout: FC<ControlsProps> = ({
displaySidebar,
isMobile,
headerSetup,
accountIds,
closeModal,
}) => {
return (
<div
className={classNames({
"vm-header-controls": true,
"vm-header-controls_mobile": isMobile,
})}
>
{headerSetup?.tenant && <TenantsConfiguration accountIds={accountIds || []}/>}
{headerSetup?.stepControl && <StepConfigurator/>}
{headerSetup?.timeSelector && <TimeSelector/>}
{headerSetup?.cardinalityDatePicker && <CardinalityDatePicker/>}
{headerSetup?.executionControls && <ExecutionControls
tooltip={headerSetup?.executionControls?.tooltip}
useAutorefresh={headerSetup?.executionControls?.useAutorefresh}
closeModal={closeModal}
/>}
<GlobalSettings/>
{!displaySidebar && <ShortcutKeys/>}
</div>
);
};
export default ControlsAnomalyLayout;

View File

@@ -2,7 +2,7 @@ import { FC, useMemo } from "preact/compat";
import { useNavigate } from "react-router-dom";
import router from "../../router";
import { getAppModeEnable, getAppModeParams } from "../../utils/app-mode";
import { LogoAnomalyIcon, LogoIcon } from "../../components/Main/Icons";
import { LogoIcon } from "../../components/Main/Icons";
import { getCssVariable } from "../../utils/theme";
import "./style.scss";
import classNames from "classnames";
@@ -13,19 +13,10 @@ import HeaderControls, { ControlsProps } from "./HeaderControls/HeaderControls";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import useWindowSize from "../../hooks/useWindowSize";
import { ComponentType } from "react";
import { APP_TYPE, AppType } from "../../constants/appType";
export interface HeaderProps {
controlsComponent: ComponentType<ControlsProps>
}
const Logo = () => {
switch (APP_TYPE) {
case AppType.vmanomaly:
return <LogoAnomalyIcon/>;
default:
return <LogoIcon/>;
}
};
const Header: FC<HeaderProps> = ({ controlsComponent }) => {
const { isMobile } = useDeviceDetect();
@@ -75,7 +66,7 @@ const Header: FC<HeaderProps> = ({ controlsComponent }) => {
onClick={onClickLogo}
style={{ color }}
>
{<Logo/>}
{<LogoIcon/>}
</div>
{displaySidebar ? (

View File

@@ -12,6 +12,8 @@ import useDeviceDetect from "../../hooks/useDeviceDetect";
import ControlsMainLayout from "./ControlsMainLayout";
import useFetchDefaultTimezone from "../../hooks/useFetchDefaultTimezone";
import useFetchAppConfig from "../../hooks/useFetchAppConfig";
import WebStorageCheck from "../../components/WebStorageCheck/WebStorageCheck";
import { migrateStorageToPrefixedKeys } from "../../utils/storage";
const MainLayout: FC = () => {
const appModeEnable = getAppModeEnable();
@@ -45,6 +47,13 @@ const MainLayout: FC = () => {
useEffect(setDocumentTitle, [pathname]);
useEffect(redirectSearchToHashParams, []);
useEffect(() => {
const migrateStorage = migrateStorageToPrefixedKeys();
if (migrateStorage.removed.length || migrateStorage.migrated.length) {
console.info(migrateStorage);
}
}, []);
return <section className="vm-container">
<Header controlsComponent={ControlsMainLayout}/>
<div
@@ -57,6 +66,8 @@ const MainLayout: FC = () => {
<Outlet/>
</div>
{!appModeEnable && <Footer/>}
<WebStorageCheck/>
</section>;
};
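
MainLayout now calls migrateStorageToPrefixedKeys() from utils/storage and logs the result when anything changed; the implementation is not part of this hunk. The following is only a hedged sketch of what such a one-time key migration might look like — the prefix, key list, and removal list are assumptions, with key names borrowed from elsewhere in this diff:

// Hedged sketch only — prefix, key list, and removal list are assumed for illustration;
// the real migrateStorageToPrefixedKeys in utils/storage is not shown in this diff.
const migrateStorageToPrefixedKeysSketch = (): { migrated: string[]; removed: string[] } => {
  const migrated: string[] = [];
  const removed: string[] = [];
  const legacyKeys = ["SERVER_URL", "LEGEND_AUTO_COLLAPSE"]; // assumed keys to carry over
  const obsoleteKeys = ["QUERY_HISTORY", "QUERY_FAVORITES"]; // assumed keys to drop
  for (const key of legacyKeys) {
    const value = window.localStorage.getItem(key);
    if (value === null) continue;
    window.localStorage.setItem(`vmui_${key}`, value); // assumed prefix
    window.localStorage.removeItem(key);
    migrated.push(key);
  }
  for (const key of obsoleteKeys) {
    if (window.localStorage.getItem(key) === null) continue;
    window.localStorage.removeItem(key);
    removed.push(key);
  }
  return { migrated, removed };
};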

View File

@@ -23,7 +23,6 @@ const CardinalityTotals: FC<CardinalityTotalsProps> = ({
totalSeries = 0,
totalSeriesPrev = 0,
totalSeriesAll = 0,
seriesCountByMetricName = [],
metricNameStats,
isPrometheus,
}) => {
@@ -34,7 +33,7 @@ const CardinalityTotals: FC<CardinalityTotalsProps> = ({
const focusLabel = searchParams.get("focusLabel");
const isMetric = /__name__/.test(match || "");
const progress = seriesCountByMetricName[0]?.value / totalSeriesAll * 100;
const progress = totalSeries / totalSeriesAll * 100;
const diff = totalSeries - totalSeriesPrev;
const dynamic = Math.abs(diff) / totalSeriesPrev * 100;
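
For illustration, with assumed numbers (not taken from this change), the new totals read:

// Illustration only — values are assumed, not from this diff.
const totalSeries = 1_200, totalSeriesPrev = 1_000, totalSeriesAll = 4_000;
const progress = totalSeries / totalSeriesAll * 100;     // 30 (% of all series)
const diff = totalSeries - totalSeriesPrev;              // 200
const dynamic = Math.abs(diff) / totalSeriesPrev * 100;  // 20 (% change vs previous period)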

View File

@@ -7,7 +7,6 @@ import AppConfigurator from "../appConfigurator";
import { useSearchParams } from "react-router-dom";
import dayjs from "dayjs";
import { DATE_FORMAT } from "../../../constants/date";
import { getTenantIdFromUrl } from "../../../utils/tenants";
import usePrevious from "../../../hooks/usePrevious";
export const useFetchQuery = (): {
@@ -27,7 +26,7 @@ export const useFetchQuery = (): {
const prevDate = usePrevious(date);
const prevTotal = useRef<{ data: TSDBStatus }>();
const { serverUrl } = useAppState();
const { tenantId, serverUrl } = useAppState();
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();
const [tsdbStatus, setTSDBStatus] = useState<TSDBStatus>(appConfigurator.defaultTSDBStatus);
@@ -158,9 +157,8 @@ export const useFetchQuery = (): {
}, [error]);
useEffect(() => {
const id = getTenantIdFromUrl(serverUrl);
setIsCluster(!!id);
}, [serverUrl]);
setIsCluster(!!tenantId);
}, [tenantId]);
appConfigurator.tsdbStatusData = tsdbStatus;

View File

@@ -13,10 +13,9 @@ type Props = {
isHistogram: boolean;
graphData: MetricResult[];
controlsRef: RefObject<HTMLDivElement>;
isAnomalyView?: boolean;
}
const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, isAnomalyView }) => {
const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef }) => {
const { isMobile } = useDeviceDetect();
const { customStep, yaxis, spanGaps, showAllPoints } = useGraphState();
@@ -74,7 +73,6 @@ const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, isAnomalyVie
setPeriod={setPeriod}
height={isMobile ? window.innerHeight * 0.5 : 500}
isHistogram={isHistogram}
isAnomalyView={isAnomalyView}
spanGaps={spanGaps}
showAllPoints={showAllPoints}
/>

View File

@@ -26,7 +26,6 @@ import useSearchParamsFromObject from "../../../hooks/useSearchParamsFromObject"
import { QueryStats } from "../../../api/types";
import { usePrettifyQuery } from "./hooks/usePrettifyQuery";
import QueryHistory from "../../../components/QueryHistory/QueryHistory";
import AnomalyConfig from "../../../components/ExploreAnomaly/AnomalyConfig";
import QueryEditorAutocomplete from "../../../components/Configurators/QueryEditor/QueryEditorAutocomplete";
import { getUpdatedHistory } from "../../../components/QueryHistory/utils";
@@ -46,7 +45,6 @@ export interface QueryConfiguratorProps {
prettify?: boolean;
autocomplete?: boolean;
traceQuery?: boolean;
anomalyConfig?: boolean;
disableCache?: boolean;
reduceMemUsage?: boolean;
}
@@ -278,7 +276,6 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
handleSelectQuery={handleSelectHistory}
historyKey={"METRICS_QUERY_HISTORY"}
/>
{hideButtons?.anomalyConfig && <AnomalyConfig/>}
{!hideButtons?.addQuery && stateQuery.length < MAX_QUERY_FIELDS && (
<Button
variant="outlined"

View File

@@ -1,7 +1,6 @@
import { useEffect, useState } from "react";
import { useTimeDispatch, useTimeState } from "../../../state/time/TimeStateContext";
import { useCustomPanelDispatch, useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
import { useAppDispatch, useAppState } from "../../../state/common/StateContext";
import { useQueryDispatch, useQueryState } from "../../../state/query/QueryStateContext";
import { displayTypeTabs } from "../DisplayTypeSwitch";
import { useGraphDispatch, useGraphState } from "../../../state/graph/GraphStateContext";
@@ -15,14 +14,12 @@ import { arrayEquals } from "../../../utils/array";
import { isEqualURLSearchParams } from "../../../utils/url";
export const useSetQueryParams = () => {
const { tenantId } = useAppState();
const { displayType } = useCustomPanelState();
const { query } = useQueryState();
const { duration, relativeTime, period: { date, step } } = useTimeState();
const { customStep } = useGraphState();
const [searchParams, setSearchParams] = useSearchParams();
const dispatch = useAppDispatch();
const timeDispatch = useTimeDispatch();
const graphDispatch = useGraphDispatch();
const queryDispatch = useQueryDispatch();
@@ -72,10 +69,6 @@ export const useSetQueryParams = () => {
if (searchParams.get(`${group}.tab`) !== displayTypeCode) {
newSearchParams.set(`${group}.tab`, `${displayTypeCode}`);
}
if (searchParams.get(`${group}.tenantID`) !== tenantId && tenantId) {
newSearchParams.set(`${group}.tenantID`, tenantId);
}
});
// Remove extra parameters that exceed the request size
@@ -89,7 +82,7 @@ export const useSetQueryParams = () => {
if (isEqualURLSearchParams(newSearchParams, searchParams) || !newSearchParams.size) return;
setSearchParams(newSearchParams);
}, [tenantId, displayType, query, duration, relativeTime, date, step, customStep]);
}, [displayType, query, duration, relativeTime, date, step, customStep]);
useEffect(() => {
const timer = setTimeout(setterSearchParams, 200);
@@ -114,11 +107,6 @@ export const useSetQueryParams = () => {
customPanelDispatch({ type: "SET_DISPLAY_TYPE", payload: displayTypeFromUrl });
}
const tenantIdFromUrl = searchParams.get("g0.tenantID") || "";
if (tenantIdFromUrl !== tenantId) {
dispatch({ type: "SET_TENANT_ID", payload: tenantIdFromUrl });
}
const queryFromUrl = getQueryArray();
if (!arrayEquals(queryFromUrl, query)) {
queryDispatch({ type: "SET_QUERY", payload: queryFromUrl });

View File

@@ -1,135 +0,0 @@
import { FC, useMemo, useRef, useState } from "preact/compat";
import classNames from "classnames";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import { ForecastType } from "../../types";
import { useSetQueryParams } from "../CustomPanel/hooks/useSetQueryParams";
import QueryConfigurator from "../CustomPanel/QueryConfigurator/QueryConfigurator";
import "../CustomPanel/style.scss";
import { useQueryState } from "../../state/query/QueryStateContext";
import { useFetchQuery } from "../../hooks/useFetchQuery";
import { useGraphState } from "../../state/graph/GraphStateContext";
import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import WarningLimitSeries from "../CustomPanel/WarningLimitSeries/WarningLimitSeries";
import GraphTab from "../CustomPanel/CustomPanelTabs/GraphTab";
import { extractFields, isForecast } from "../../utils/uplot";
import { MetricResult } from "../../api/types";
import { promValueToNumber } from "../../utils/metric";
// Hardcoded to 1.0 for now; consider adding a UI slider for threshold adjustment in the future.
const ANOMALY_SCORE_THRESHOLD = 1;
const ExploreAnomaly: FC = () => {
useSetQueryParams();
const { isMobile } = useDeviceDetect();
const { query } = useQueryState();
const { customStep } = useGraphState();
const controlsRef = useRef<HTMLDivElement>(null);
const [hideQuery] = useState<number[]>([]);
const [hideError, setHideError] = useState(!query[0]);
const [showAllSeries, setShowAllSeries] = useState(false);
const {
isLoading,
graphData,
error,
queryErrors,
setQueryErrors,
queryStats,
warning,
} = useFetchQuery({
visible: true,
customStep,
hideQuery,
showAllSeries
});
const data = useMemo(() => {
if (!graphData) return [];
const detectedData = graphData.map(d => ({ ...isForecast(d.metric), ...d }));
const realData = detectedData.filter(d => d.value === ForecastType.actual);
const anomalyScoreData = detectedData.filter(d => d.value === ForecastType.anomaly);
const anomalyData: MetricResult[] = realData.map((d) => {
const id = extractFields(d.metric);
const anomalyScoreDataByLabels = anomalyScoreData.find(du => extractFields(du.metric) === id);
return {
group: 1,
metric: { ...d.metric, __name__: ForecastType.anomaly },
values: d.values.filter(([t]) => {
if (!anomalyScoreDataByLabels) return false;
const anomalyScore = anomalyScoreDataByLabels.values.find(([tMax]) => tMax === t) as [number, string];
return anomalyScore && promValueToNumber(anomalyScore[1]) > ANOMALY_SCORE_THRESHOLD;
})
};
});
const filterData = detectedData.filter(d => (d.value !== ForecastType.anomaly) && d.value) as MetricResult[];
return filterData.concat(anomalyData);
}, [graphData]);
const handleRunQuery = () => {
setHideError(false);
};
return (
<div
className={classNames({
"vm-custom-panel": true,
"vm-custom-panel_mobile": isMobile,
})}
>
<QueryConfigurator
queryErrors={!hideError ? queryErrors : []}
setQueryErrors={setQueryErrors}
setHideError={setHideError}
stats={queryStats}
onRunQuery={handleRunQuery}
hideButtons={{
addQuery: true,
prettify: false,
autocomplete: false,
traceQuery: true,
anomalyConfig: true,
reduceMemUsage: true,
}}
/>
{isLoading && <Spinner/>}
{(!hideError && error) && <Alert variant="error">{error}</Alert>}
{warning && (
<WarningLimitSeries
warning={warning}
query={query}
onChange={setShowAllSeries}
/>
)}
<div
className={classNames({
"vm-custom-panel-body": true,
"vm-custom-panel-body_mobile": isMobile,
"vm-block": true,
"vm-block_mobile": isMobile,
})}
>
<div
className="vm-custom-panel-body-header"
ref={controlsRef}
>
<div/>
</div>
{data && (
<GraphTab
graphData={data}
isHistogram={false}
controlsRef={controlsRef}
isAnomalyView={true}
/>
)}
</div>
</div>
);
};
export default ExploreAnomaly;

View File

@@ -3,7 +3,6 @@ import { DashboardSettings, ErrorTypes } from "../../../types";
import { useAppState } from "../../../state/common/StateContext";
import { useDashboardsDispatch } from "../../../state/dashboards/DashboardsStateContext";
import { getAppModeEnable } from "../../../utils/app-mode";
import { APP_TYPE_VM } from "../../../constants/appType";
const importModule = async (filename: string) => {
const data = await fetch(`./dashboards/${filename}`);
@@ -35,7 +34,7 @@ export const useFetchDashboards = (): {
};
const fetchRemoteDashboards = async () => {
if (!serverUrl || !APP_TYPE_VM) return;
if (!serverUrl) return;
setError("");
setIsLoading(true);

View File

@@ -1,4 +1,3 @@
import { APP_TYPE, AppType } from "../constants/appType";
const router = {
home: "/",
@@ -12,7 +11,6 @@ const router = {
activeQueries: "/active-queries",
queryAnalyzer: "/query-analyzer",
icons: "/icons",
anomaly: "/anomaly",
query: "/query",
rawQuery: "/raw-query",
downsamplingDebug: "/downsampling-filters-debug",
@@ -52,23 +50,11 @@ const routerOptionsDefault = {
},
};
const getDefaultOptions = (appType: AppType) => {
switch (appType) {
case AppType.vmanomaly:
return {
title: "Anomaly exploration",
...routerOptionsDefault,
};
default:
return {
title: "Query",
...routerOptionsDefault,
};
}
};
export const routerOptions: { [key: string]: RouterOptions } = {
[router.home]: getDefaultOptions(APP_TYPE),
[router.home]: {
title: "Query",
...routerOptionsDefault,
},
[router.rawQuery]: {
title: "Raw query",
header: {
@@ -148,7 +134,6 @@ export const routerOptions: { [key: string]: RouterOptions } = {
title: "Icons",
header: {},
},
[router.anomaly]: getDefaultOptions(AppType.vmanomaly),
[router.query]: {
title: "Query",
...routerOptionsDefault,

Some files were not shown because too many files have changed in this diff Show More