Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 08:36:55 +03:00)

Compare commits: roaring-bi...optimize-a (220 commits)
Commit SHAs:
de3690671b, cc3a14b16b, 7ef08b1781, 969cb5b4ae, b9f0e614bd, ed44c08f5f, 3ae44e734b, d3264bd78f, 1f87faafec, 521b73dfc5,
61db79c10a, 460ac6468c, c42023c586, 8a20ccf21d, 1a01dbbec7, 630e413812, b639e7e641, 858c318e1f, b8327ce09c, 7514511c68,
33d524bf13, d07c1c73d1, a896673c42, c60ab2d57a, 49e51611d7, 902ca83177, 66e3f8736b, 532fcc3dfe, b003d6c6ae, 8fa0fae05a,
3fe2ec7bde, 6389979bce, 210fd0ae15, f95b483a13, b71c37e20a, c27b5f5dfe, 0a31eacb3d, 70b0115ea6, dfafd14767, e3fdbc8341,
1bf442537f, 211fb08028, 846124e280, e1a9901654, 5d0cf1d4a5, cd3d297a3d, 52f4d0f055, 72c9e9377c, 0aaa741b5b, 0e9870b7a9,
accb06d131, 1787bce6cb, 141febd413, 256eff061d, fa1dd0ec0a, 6337dfc472, 0a256002e5, b3c03c023c, 80e2f29761, df34ba3ba2,
10dd45c4fd, a3294b5aa2, 4438454567, 71af1ee5f1, e00fb7e605, 5e2ee00504, de2bc4237a, 3e51f277bd, 5723339525, 3b986ad326,
08dd38d4a0, 815cc97952, 93d71e7106, 577b161343, dd2d6807e4, e38e25b756, bc708c8568, cd73472a3e, 527d09653a, 28a87b90bb,
c445e7fcc0, 9494ee103e, 94af588e92, e5c194cc10, 7c65e3daca, a6532c28b2, ec26ebb803, faba8b985b, d233a409d9, 96cbd6fff3,
b1d009b13a, 57ce00a5c6, cfb53cbfb9, febafc1cf1, f1f70e976e, 5aa0a75ff8, d83f142c63, a07cae3279, 8cda999238, 2d6cf8827d,
c59ca79f2b, be5ae9b95c, 60aef0510f, b3b555c09c, c57ea02564, 5983d27b00, d36f7b6b49, 70ab2c1585, c854816642, 285e3d2a63,
95175e00b4, d21d9e8382, 235daa6208, 10f4a86540, 79cfffb984, 23e2379c28, e761f22049, fb579cf592, fd0d764720, fe8aaa8885,
b903fc29ec, a6833ffd08, 4516a58df9, 5ad7b645e6, 51a53014c8, e47abd6385, c04a5a597d, e695d5f425, 2bb03f6e34, 92f03344eb,
e3360b87ff, 4c98b912fa, 225e2e870b, 2b078301c1, 14090c5a07, 66d47f23e4, eacdb80ed7, 504cf31dab, 34d190b32a, 44fa216bb5,
4589442345, 78ad4b974c, d12524749f, 1a5235a18f, 27847dbbb8, 33fab3a2d6, 695b21ecfc, 4ba488f806, 1e046d35a8, 8c9b202c94,
060423141d, 0ee16ff2e5, b22853b97f, b578fe9817, e9b7adc0e5, 82eab5c5b7, 5b4ab4456e, d3ccc8d7a7, eb34bdd8d9, 3139fa1c9b,
8f4cdb8a42, f236801fa4, 2c48133ad8, 1cb634858e, 4b45f909b5, 4ae495bd1d, 925b0ecdc9, 1348b0e424, 83656e544d, 38a76eca7b,
dea915c10d, b3f57c113b, 686c9a21ff, 8f215137e7, ed5dc35876, 13ab8cfb78, f8a101e45e, a1a35fd870, 0d5df2722d, db3353c6e1,
cfbc5ae31d, fdb3c96fc1, 486d923351, f8552bdc96, 893c981c57, 3d7ff783b6, 78543b7f87, f54d22562a, b672e05dce, 847871b916,
2aecca1163, d1efb2dd37, 6882c72075, 60eb543dba, 7db42b0659, 8d924f0631, 791679253d, a745bb797a, 3607c53b7c, 7969647553,
5f887b66c5, d3e2946791, 603dc03c7d, 1cc471a6c1, d40adb1e58, 8056806d5f, 3d67942a65, 23bdd14cee, 18a2955553, 570a9ef627,
40e27fc2c8, befbf9afca, 65d0a8e129, c2841ca36c, cd2026e430, 216821aa1c, ef507d372b, e383b62f59, 8f34284dd2, 8f4eca39f7
.github/copilot-instructions.md (vendored): 23 changed lines
@@ -1,23 +0,0 @@
# Project Overview

VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.

## Folder Structure

- `/app`: Contains the compilable binaries.
- `/lib`: Contains the golang reusable libraries
- `/docs/victoriametrics`: Contains documentation for the project.
- `/apptest/tests`: Contains integration tests.

## Libraries and Frameworks

- Backend: Golang, no framework. Use third-party libraries sparingly.
- Frontend: React.

## Code review guidelines

Ensure the feature or bugfix includes a changelog entry in /docs/victoriametrics/changelog/CHANGELOG.md.
Verify the entry is under the ## tip section and matches the structure and style of existing entries.
Chore-only changes may be omitted from the changelog.
.github/dependabot.yml (vendored): 4 changed lines
@@ -4,6 +4,8 @@ updates:
directory: "/"
schedule:
interval: "daily"
cooldown:
default-days: 21
- package-ecosystem: "gomod"
directory: "/"
schedule:
@@ -23,6 +25,8 @@ updates:
directory: "/"
schedule:
interval: "daily"
cooldown:
default-days: 21
- package-ecosystem: "npm"
directory: "/app/vmui/packages/vmui"
schedule:
.github/workflows/check-commit-signed.yml (vendored): 16 changed lines
@@ -27,11 +27,21 @@ jobs:
exit 0
fi

unsigned=$(git log --pretty="%H %G?" $RANGE | grep -vE " (G|E)$" || true)
# Check raw commit objects for a "gpgsig" header as a fast early signal for
# contributors. Both GPG and SSH signatures use this header.
# This avoids relying on %G? which returns N for SSH commits.
# This check is not a security enforcement — unsigned commits cannot be merged
# anyway due to the GitHub repository merge policy.
unsigned=""
for sha in $(git rev-list $RANGE); do
if ! git cat-file commit "$sha" | grep -q "^gpgsig"; then
unsigned="$unsigned $sha"
fi
done
if [ -n "$unsigned" ]; then
echo "Found unsigned commits:"
echo "$unsigned"
exit 1
fi

echo "All commits in PR are signed (G or E)"

echo "All commits in PR are signed (GPG or SSH)"
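The workflow's `gpgsig` heuristic can be reproduced outside CI. Below is a minimal Go sketch under the same assumption stated in the workflow comment (both GPG- and SSH-signed commits carry a `gpgsig` header in the raw commit object); the commit range `origin/master..HEAD` is a placeholder, not taken from the workflow:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// hasSignatureHeader reports whether the raw commit object carries a
// "gpgsig" header. Header lines end at the first blank line of the object.
func hasSignatureHeader(sha string) (bool, error) {
	out, err := exec.Command("git", "cat-file", "commit", sha).Output()
	if err != nil {
		return false, err
	}
	for _, line := range strings.Split(string(out), "\n") {
		if line == "" {
			break // end of commit headers
		}
		if strings.HasPrefix(line, "gpgsig") {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Placeholder range; the workflow computes $RANGE from the PR.
	out, err := exec.Command("git", "rev-list", "origin/master..HEAD").Output()
	if err != nil {
		panic(err)
	}
	for _, sha := range strings.Fields(string(out)) {
		ok, err := hasSignatureHeader(sha)
		if err != nil {
			panic(err)
		}
		if !ok {
			fmt.Println("unsigned:", sha)
		}
	}
}
```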
.github/workflows/docs.yaml (vendored): 2 changed lines
@@ -28,7 +28,7 @@ jobs:
path: __vm-docs

- name: Import GPG key
uses: crazy-max/ghaction-import-gpg@v6
uses: crazy-max/ghaction-import-gpg@v7
id: import-gpg
with:
gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
.github/workflows/test.yml (vendored): 9 changed lines
@@ -66,8 +66,8 @@ jobs:
strategy:
matrix:
scenario:
- 'test-full'
- 'test-full-386'
- 'test'
- 'test-386'
- 'test-pure'

steps:
@@ -88,11 +88,6 @@
- name: Run tests
run: make ${{ matrix.scenario}}

- name: Publish coverage
uses: codecov/codecov-action@v5
with:
files: ./coverage.txt

apptest:
name: apptest
runs-on: apptest
Makefile: 7 changed lines
@@ -457,6 +457,9 @@ test:
test-race:
go test -tags 'synctest' -race ./lib/... ./app/...

test-386:
GOARCH=386 go test -tags 'synctest' ./lib/... ./app/...

test-pure:
CGO_ENABLED=0 go test -tags 'synctest' ./lib/... ./app/...

@@ -467,10 +470,10 @@ test-full-386:
GOARCH=386 go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

apptest:
$(MAKE) victoria-metrics vmagent vmalert vmauth vmctl vmbackup vmrestore
$(MAKE) victoria-metrics-race vmagent-race vmalert-race vmauth-race vmctl-race vmbackup-race vmrestore-race
go test ./apptest/... -skip="^Test(Cluster|Legacy).*"

apptest-legacy: victoria-metrics vmbackup vmrestore
apptest-legacy: victoria-metrics-race vmbackup-race vmrestore-race
OS=$$(uname | tr '[:upper:]' '[:lower:]'); \
ARCH=$$(uname -m | tr '[:upper:]' '[:lower:]' | sed 's/x86_64/amd64/'); \
VERSION=v1.132.0; \
README.md:
@@ -1,12 +1,11 @@
# VictoriaMetrics

[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)

[](https://hub.docker.com/u/victoriametrics)
[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml)
[](https://app.codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)

[](https://slack.victoriametrics.com)
[](https://x.com/VictoriaMetrics/)
[](https://www.reddit.com/r/VictoriaMetrics/)
SECURITY.md: 25 changed lines
@@ -12,6 +12,31 @@ The following versions of VictoriaMetrics receive regular security fixes:

See [this page](https://victoriametrics.com/security/) for more details.

## Software Bill of Materials (SBOM)

Every VictoriaMetrics container{{% available_from "#" %}} image published to
[Docker Hub](https://hub.docker.com/u/victoriametrics)
and [Quay.io](https://quay.io/organization/victoriametrics)
includes an [SPDX](https://spdx.dev/) SBOM attestation
generated automatically by BuildKit during
`docker buildx build`.

To inspect the SBOM for an image:

```sh
docker buildx imagetools inspect \
docker.io/victoriametrics/victoria-metrics:latest \
--format "{{ json .SBOM }}"
```

To scan an image using its SBOM attestation with
[Trivy](https://github.com/aquasecurity/trivy):

```sh
trivy image --sbom-sources oci \
docker.io/victoriametrics/victoria-metrics:latest
```

## Reporting a Vulnerability

Please report any security issues to <security@victoriametrics.com>
@@ -49,6 +49,11 @@ func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels
Name: "__name__",
Value: m.Name,
})
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10557
labels = append(labels, prompb.Label{
Name: "host",
Value: sketch.Host,
})
for _, label := range m.Labels {
labels = append(labels, prompb.Label{
Name: label.Name,
@@ -57,9 +62,6 @@ func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels
}
for _, tag := range sketch.Tags {
name, value := datadogutil.SplitTag(tag)
if name == "host" {
name = "exported_host"
}
labels = append(labels, prompb.Label{
Name: name,
Value: value,
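The hunk above reserves the `host` label for `sketch.Host` and renames an incoming `host` tag to `exported_host`, avoiding a label collision. A self-contained sketch of that rule; `Label` and `splitTag` here are simplified stand-ins for `prompb.Label` and `datadogutil.SplitTag`, not the actual types:

```go
package main

import (
	"fmt"
	"strings"
)

type Label struct{ Name, Value string }

// splitTag is a stand-in: DataDog tags are "name:value" pairs.
func splitTag(tag string) (string, string) {
	n, v, found := strings.Cut(tag, ":")
	if !found {
		return tag, ""
	}
	return n, v
}

// buildLabels applies the collision rule from the hunk: the sketch-level
// host wins the "host" label; a "host" tag is renamed to "exported_host".
func buildLabels(sketchHost string, tags []string) []Label {
	labels := []Label{{Name: "host", Value: sketchHost}}
	for _, tag := range tags {
		name, value := splitTag(tag)
		if name == "host" {
			name = "exported_host"
		}
		labels = append(labels, Label{Name: name, Value: value})
	}
	return labels
}

func main() {
	fmt.Println(buildLabels("node-1", []string{"host:agent-7", "env:prod"}))
	// [{host node-1} {exported_host agent-7} {env prod}]
}
```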
@@ -13,6 +13,9 @@ import (
"sync/atomic"
"time"

"github.com/VictoriaMetrics/metrics"
"github.com/golang/snappy"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
@@ -21,10 +24,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
"github.com/VictoriaMetrics/metrics"
"github.com/golang/snappy"
)

var (
@@ -290,7 +290,7 @@ func getAWSAPIConfig(argIdx int) (*awsapi.Config, error) {
accessKey := awsAccessKey.GetOptionalArg(argIdx)
secretKey := awsSecretKey.GetOptionalArg(argIdx)
service := awsService.GetOptionalArg(argIdx)
cfg, err := awsapi.NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey, service)
cfg, err := awsapi.NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey, service, "")
if err != nil {
return nil, err
}
@@ -405,8 +405,7 @@ func (c *client) newRequest(url string, body []byte) (*http.Request, error) {
// Otherwise, it tries sending the block to remote storage indefinitely.
func (c *client) sendBlockHTTP(block []byte) bool {
c.rl.Register(len(block))
maxRetryDuration := timeutil.AddJitterToDuration(c.retryMaxInterval)
retryDuration := timeutil.AddJitterToDuration(c.retryMinInterval)
bt := timeutil.NewBackoffTimer(c.retryMinInterval, c.retryMaxInterval)
retriesCount := 0

again:
@@ -415,19 +414,10 @@ again:
c.requestDuration.UpdateDuration(startTime)
if err != nil {
c.errorsCount.Inc()
retryDuration *= 2
if retryDuration > maxRetryDuration {
retryDuration = maxRetryDuration
}
remoteWriteRetryLogger.Warnf("couldn't send a block with size %d bytes to %q: %s; re-sending the block in %.3f seconds",
len(block), c.sanitizedURL, err, retryDuration.Seconds())
t := timerpool.Get(retryDuration)
select {
case <-c.stopCh:
timerpool.Put(t)
remoteWriteRetryLogger.Warnf("couldn't send a block with size %d bytes to %q: %s; re-sending the block in %s",
len(block), c.sanitizedURL, err, bt.CurrentDelay())
if !bt.Wait(c.stopCh) {
return false
case <-t.C:
timerpool.Put(t)
}
c.retriesCount.Inc()
goto again
@@ -493,7 +483,10 @@ again:
// Unexpected status code returned
retriesCount++
retryAfterHeader := parseRetryAfterHeader(resp.Header.Get("Retry-After"))
retryDuration = getRetryDuration(retryAfterHeader, retryDuration, maxRetryDuration)
// retryAfterDuration has the highest priority duration
if retryAfterHeader > 0 {
bt.SetDelay(retryAfterHeader)
}

// Handle response
body, err := io.ReadAll(resp.Body)
@@ -502,15 +495,10 @@ again:
logger.Errorf("cannot read response body from %q during retry #%d: %s", c.sanitizedURL, retriesCount, err)
} else {
logger.Errorf("unexpected status code received after sending a block with size %d bytes to %q during retry #%d: %d; response body=%q; "+
"re-sending the block in %.3f seconds", len(block), c.sanitizedURL, retriesCount, statusCode, body, retryDuration.Seconds())
"re-sending the block in %s", len(block), c.sanitizedURL, retriesCount, statusCode, body, bt.CurrentDelay())
}
t := timerpool.Get(retryDuration)
select {
case <-c.stopCh:
timerpool.Put(t)
if !bt.Wait(c.stopCh) {
return false
case <-t.C:
timerpool.Put(t)
}
c.retriesCount.Inc()
goto again
@@ -519,27 +507,6 @@ again:
var remoteWriteRejectedLogger = logger.WithThrottler("remoteWriteRejected", 5*time.Second)
var remoteWriteRetryLogger = logger.WithThrottler("remoteWriteRetry", 5*time.Second)

// getRetryDuration returns retry duration.
// retryAfterDuration has the highest priority.
// If retryAfterDuration is not specified, retryDuration gets doubled.
// retryDuration can't exceed maxRetryDuration.
//
// Also see: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6097
func getRetryDuration(retryAfterDuration, retryDuration, maxRetryDuration time.Duration) time.Duration {
// retryAfterDuration has the highest priority duration
if retryAfterDuration > 0 {
return timeutil.AddJitterToDuration(retryAfterDuration)
}

// default backoff retry policy
retryDuration *= 2
if retryDuration > maxRetryDuration {
retryDuration = maxRetryDuration
}

return retryDuration
}

// repackBlockFromZstdToSnappy repacks the given zstd-compressed block to snappy-compressed block.
//
// The input block may be corrupted, for example, if vmagent was shut down ungracefully and
@@ -570,24 +537,20 @@ func logBlockRejected(block []byte, sanitizedURL string, resp *http.Response) {
}

// parseRetryAfterHeader parses `Retry-After` value retrieved from HTTP response header.
// retryAfterString should be in either HTTP-date or a number of seconds.
// It will return time.Duration(0) if `retryAfterString` does not follow RFC 7231.
func parseRetryAfterHeader(retryAfterString string) (retryAfterDuration time.Duration) {
if retryAfterString == "" {
return retryAfterDuration
//
// s should be in either HTTP-date or a number of seconds.
// It returns time.Duration(0) if s does not follow RFC 7231.
func parseRetryAfterHeader(s string) time.Duration {
if s == "" {
return 0
}

defer func() {
v := retryAfterDuration.Seconds()
logger.Infof("'Retry-After: %s' parsed into %.2f second(s)", retryAfterString, v)
}()

// Retry-After could be in "Mon, 02 Jan 2006 15:04:05 GMT" format.
if parsedTime, err := time.Parse(http.TimeFormat, retryAfterString); err == nil {
if parsedTime, err := time.Parse(http.TimeFormat, s); err == nil {
return time.Duration(time.Until(parsedTime).Seconds()) * time.Second
}
// Retry-After could be in seconds.
if seconds, err := strconv.Atoi(retryAfterString); err == nil {
if seconds, err := strconv.Atoi(s); err == nil {
return time.Duration(seconds) * time.Second
}
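The retry refactor above replaces the hand-rolled `retryDuration` doubling and `timerpool` waits with a single `timeutil.NewBackoffTimer`. The timer's implementation is not part of this diff; the following is a plausible sketch of such a helper, with names and behavior inferred from the call sites above, not the actual `lib/timeutil` code:

```go
package main

import (
	"fmt"
	"time"
)

// BackoffTimer sketches an exponential-backoff helper: the delay doubles
// after every Wait call and is capped at max. SetDelay lets callers
// override the next delay, e.g. with a server-provided Retry-After value.
type BackoffTimer struct {
	delay, max time.Duration
}

func NewBackoffTimer(minDelay, maxDelay time.Duration) *BackoffTimer {
	return &BackoffTimer{delay: minDelay, max: maxDelay}
}

func (bt *BackoffTimer) CurrentDelay() time.Duration { return bt.delay }

func (bt *BackoffTimer) SetDelay(d time.Duration) { bt.delay = min(d, bt.max) }

// Wait sleeps for the current delay and doubles it for the next retry.
// It returns false if stopCh is closed before the delay elapses.
func (bt *BackoffTimer) Wait(stopCh <-chan struct{}) bool {
	t := time.NewTimer(bt.delay)
	defer t.Stop()
	bt.delay = min(bt.delay*2, bt.max)
	select {
	case <-stopCh:
		return false
	case <-t.C:
		return true
	}
}

func main() {
	bt := NewBackoffTimer(time.Second, time.Minute)
	for i := 0; i < 3; i++ {
		fmt.Println("next delay:", bt.CurrentDelay())
		bt.SetDelay(bt.CurrentDelay() * 2) // simulate doubling without sleeping
	}
	// next delay: 1s, 2s, 4s
}
```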
@@ -6,66 +6,11 @@ import (
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/golang/snappy"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
)

func TestCalculateRetryDuration(t *testing.T) {
// `testFunc` call `calculateRetryDuration` for `n` times
// and evaluate if the result of `calculateRetryDuration` is
// 1. >= expectMinDuration
// 2. <= expectMinDuration + 10% (see timeutil.AddJitterToDuration)
f := func(retryAfterDuration, retryDuration time.Duration, n int, expectMinDuration time.Duration) {
t.Helper()

for range n {
retryDuration = getRetryDuration(retryAfterDuration, retryDuration, time.Minute)
}

expectMaxDuration := helper(expectMinDuration)
expectMinDuration = expectMinDuration - (1000 * time.Millisecond) // Avoid edge case when calculating time.Until(now)

if retryDuration < expectMinDuration || retryDuration > expectMaxDuration {
t.Fatalf(
"incorrect retry duration, want (ms): [%d, %d], got (ms): %d",
expectMinDuration.Milliseconds(), expectMaxDuration.Milliseconds(),
retryDuration.Milliseconds(),
)
}
}

// Call calculateRetryDuration for 1 time.
{
// default backoff policy
f(0, time.Second, 1, 2*time.Second)
// default backoff policy exceed max limit"
f(0, 10*time.Minute, 1, time.Minute)

// retry after > default backoff policy
f(10*time.Second, 1*time.Second, 1, 10*time.Second)
// retry after < default backoff policy
f(1*time.Second, 10*time.Second, 1, 1*time.Second)
// retry after invalid and < default backoff policy
f(0, time.Second, 1, 2*time.Second)

}

// Call calculateRetryDuration for multiple times.
{
// default backoff policy 2 times
f(0, time.Second, 2, 4*time.Second)
// default backoff policy 3 times
f(0, time.Second, 3, 8*time.Second)
// default backoff policy N times exceed max limit
f(0, time.Second, 10, time.Minute)

// retry after 120s 1 times
f(120*time.Second, time.Second, 1, 120*time.Second)
// retry after 120s 2 times
f(120*time.Second, time.Second, 2, 120*time.Second)
}
}

func TestParseRetryAfterHeader(t *testing.T) {
f := func(retryAfterString string, expectResult time.Duration) {
t.Helper()
@@ -91,13 +36,6 @@ func TestParseRetryAfterHeader(t *testing.T) {
f(time.Now().Add(10*time.Second).Format("Mon, 02 Jan 2006 15:04:05 FAKETZ"), 0)
}

// helper calculate the max possible time duration calculated by timeutil.AddJitterToDuration.
func helper(d time.Duration) time.Duration {
dv := min(d/10, 10*time.Second)

return d + dv
}

func TestRepackBlockFromZstdToSnappy(t *testing.T) {
expectedPlainBlock := []byte(`foobar`)
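The deleted `helper` pins down the jitter contract these tests assumed: a jittered duration stays within `[d, d + min(d/10, 10s)]`. A sketch of a jitter function honoring that bound, inferred from the test helper rather than the actual `timeutil.AddJitterToDuration` source:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// addJitter extends d by a random amount of up to 10% of d, capped at
// 10 seconds, matching the bound encoded by the deleted helper().
func addJitter(d time.Duration) time.Duration {
	maxExtra := min(d/10, 10*time.Second)
	return d + time.Duration(rand.Int63n(int64(maxExtra)+1))
}

func main() {
	d := 30 * time.Second
	j := addJitter(d)
	fmt.Println(j >= d && j <= d+3*time.Second) // true: 10% of 30s is 3s
}
```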
@@ -3,6 +3,7 @@ package remotewrite
import (
"flag"
"fmt"
"math"
"net/http"
"net/url"
"path/filepath"
@@ -11,6 +12,10 @@ import (
"sync/atomic"
"time"

"github.com/cespare/xxhash/v2"

"github.com/VictoriaMetrics/metrics"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -23,6 +28,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
@@ -30,8 +36,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
"github.com/VictoriaMetrics/metrics"
"github.com/cespare/xxhash/v2"
)

var (
@@ -80,10 +84,14 @@ var (
`This may be needed for reducing memory usage at remote storage when the order of labels in incoming samples is random. `+
`For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}`+
`Enabled sorting for labels can slow down ingestion performance a bit`)
maxHourlySeries = flag.Int("remoteWrite.maxHourlySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last hour. "+
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/victoriametrics/vmagent/#cardinality-limiter")
maxDailySeries = flag.Int("remoteWrite.maxDailySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. "+
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/victoriametrics/vmagent/#cardinality-limiter")
maxHourlySeries = flag.Int64("remoteWrite.maxHourlySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last hour. "+
"Excess series are logged and dropped. This can be useful for limiting series cardinality. "+
fmt.Sprintf("Setting this flag to '-1' sets limit to maximum possible value (%d) which is useful in order to enable series tracking without enforcing limits. ", math.MaxInt32)+
"See https://docs.victoriametrics.com/victoriametrics/vmagent/#cardinality-limiter")
maxDailySeries = flag.Int64("remoteWrite.maxDailySeries", 0, "The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. "+
"Excess series are logged and dropped. This can be useful for limiting series churn rate. "+
fmt.Sprintf("Setting this flag to '-1' sets limit to maximum possible value (%d) which is useful in order to enable series tracking without enforcing limits. ", math.MaxInt32)+
"See https://docs.victoriametrics.com/victoriametrics/vmagent/#cardinality-limiter")
maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmagent can receive per second. Data ingestion is paused when the limit is exceeded. "+
"By default there are no limits on samples ingestion rate. See also -remoteWrite.rateLimit")

@@ -92,6 +100,8 @@ var (
"See https://docs.victoriametrics.com/victoriametrics/vmagent/#disabling-on-disk-persistence . See also -remoteWrite.dropSamplesOnOverload")
dropSamplesOnOverload = flag.Bool("remoteWrite.dropSamplesOnOverload", false, "Whether to drop samples when -remoteWrite.disableOnDiskQueue is set and if the samples "+
"cannot be pushed into the configured -remoteWrite.url systems in a timely manner. See https://docs.victoriametrics.com/victoriametrics/vmagent/#disabling-on-disk-persistence")
disableMetadataPerURL = flagutil.NewArrayBool("remoteWrite.disableMetadata", "Whether to disable sending metadata to the corresponding -remoteWrite.url. "+
"By default, metadata sending is controlled by the global -enableMetadata flag")
)

var (
@@ -157,8 +167,8 @@ func Init() {
if len(*remoteWriteURLs) == 0 {
logger.Fatalf("at least one `-remoteWrite.url` command-line flag must be set")
}
if *maxHourlySeries > 0 {
hourlySeriesLimiter = bloomfilter.NewLimiter(*maxHourlySeries, time.Hour)
if limit := getMaxHourlySeries(); limit > 0 {
hourlySeriesLimiter = bloomfilter.NewLimiter(limit, time.Hour)
_ = metrics.NewGauge(`vmagent_hourly_series_limit_max_series`, func() float64 {
return float64(hourlySeriesLimiter.MaxItems())
})
@@ -166,8 +176,8 @@ func Init() {
return float64(hourlySeriesLimiter.CurrentItems())
})
}
if *maxDailySeries > 0 {
dailySeriesLimiter = bloomfilter.NewLimiter(*maxDailySeries, 24*time.Hour)
if limit := getMaxDailySeries(); limit > 0 {
dailySeriesLimiter = bloomfilter.NewLimiter(limit, 24*time.Hour)
_ = metrics.NewGauge(`vmagent_daily_series_limit_max_series`, func() float64 {
return float64(dailySeriesLimiter.MaxItems())
})
@@ -540,6 +550,10 @@ func tryPushMetadataToRemoteStorages(rwctxs []*remoteWriteCtx, mms []prompb.Metr
var wg sync.WaitGroup
var anyPushFailed atomic.Bool
for _, rwctx := range rwctxs {
if !rwctx.enableMetadata {
// Skip remote storage with disabled metadata
continue
}
wg.Go(func() {
if !rwctx.tryPushMetadataInternal(mms) {
rwctx.pushFailures.Inc()
@@ -811,6 +825,11 @@ type remoteWriteCtx struct {
streamAggrKeepInput bool
streamAggrDropInput bool

// enableMetadata indicates whether metadata should be sent to this remote storage.
// It is determined by -remoteWrite.enableMetadata per-URL flag if set,
// otherwise by the global -enableMetadata flag.
enableMetadata bool

pss []*pendingSeries
pssNextIdx atomic.Uint64

@@ -822,6 +841,18 @@ type remoteWriteCtx struct {
rowsDroppedOnPushFailure *metrics.Counter
}

// isMetadataEnabledForURL returns true if metadata should be sent to the remote storage at argIdx.
// It checks the per-URL -remoteWrite.disableMetadata flag first.
// If not set, it falls back to the global -enableMetadata flag.
func isMetadataEnabledForURL(argIdx int) bool {
if disableMetadataPerURL.GetOptionalArg(argIdx) {
// Metadata is explicitly disabled for this URL
return false
}
// Use global -enableMetadata value
return prommetadata.IsEnabled()
}

func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, sanitizedURL string) *remoteWriteCtx {
// strip query params, otherwise changing params resets pq
pqURL := *remoteWriteURL
@@ -892,10 +923,11 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, sanitizedURL string)
}

rwctx := &remoteWriteCtx{
idx: argIdx,
fq: fq,
c: c,
pss: pss,
idx: argIdx,
fq: fq,
c: c,
pss: pss,
enableMetadata: isMetadataEnabledForURL(argIdx),

rowsPushedAfterRelabel: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_rows_pushed_after_relabel_total{path=%q,url=%q}`, queuePath, sanitizedURL)),
rowsDroppedByRelabel: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_relabel_metrics_dropped_total{path=%q,url=%q}`, queuePath, sanitizedURL)),
@@ -1116,3 +1148,21 @@ func newMapFromStrings(a []string) map[string]struct{} {
}
return m
}

func getMaxHourlySeries() int {
	limit := *maxHourlySeries
	if limit == -1 || limit > math.MaxInt32 {
		return math.MaxInt32
	}

	return int(limit)
}

func getMaxDailySeries() int {
	limit := *maxDailySeries
	if limit == -1 || limit > math.MaxInt32 {
		return math.MaxInt32
	}

	return int(limit)
}
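`bloomfilter.NewLimiter` itself is outside this diff. As an illustration only, a rolling-window series limiter with a comparable surface (admit-or-reject `Add`, plus `CurrentItems`) can be sketched with a plain map; the real package presumably uses bloom filters to bound memory, which this sketch does not attempt:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// SeriesLimiter is an illustrative stand-in for bloomfilter.Limiter: it
// admits at most maxItems distinct series per window and resets the
// seen-set when the window rolls over.
type SeriesLimiter struct {
	mu       sync.Mutex
	seen     map[uint64]struct{}
	maxItems int
	window   time.Duration
	start    time.Time
}

func NewSeriesLimiter(maxItems int, window time.Duration) *SeriesLimiter {
	return &SeriesLimiter{
		seen:     make(map[uint64]struct{}),
		maxItems: maxItems,
		window:   window,
		start:    time.Now(),
	}
}

// Add registers the series identified by hash h and reports whether it
// fits under the limit; excess new series are rejected (and dropped).
func (sl *SeriesLimiter) Add(h uint64) bool {
	sl.mu.Lock()
	defer sl.mu.Unlock()
	if time.Since(sl.start) > sl.window {
		sl.seen = make(map[uint64]struct{})
		sl.start = time.Now()
	}
	if _, ok := sl.seen[h]; ok {
		return true
	}
	if len(sl.seen) >= sl.maxItems {
		return false
	}
	sl.seen[h] = struct{}{}
	return true
}

func (sl *SeriesLimiter) CurrentItems() int {
	sl.mu.Lock()
	defer sl.mu.Unlock()
	return len(sl.seen)
}

func main() {
	sl := NewSeriesLimiter(2, time.Hour)
	fmt.Println(sl.Add(1), sl.Add(2), sl.Add(3), sl.Add(1)) // true true false true
	fmt.Println(sl.CurrentItems())                          // 2
}
```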
@@ -81,12 +81,9 @@ func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool)
if g.Interval.Duration() < 0 {
return fmt.Errorf("interval shouldn't be lower than 0")
}
if g.EvalOffset.Duration() < 0 {
return fmt.Errorf("eval_offset shouldn't be lower than 0")
}
// if `eval_offset` is set, interval won't use global evaluationInterval flag and must bigger than offset.
if g.EvalOffset.Duration() > g.Interval.Duration() {
return fmt.Errorf("eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
// if `eval_offset` is set, the group interval must be specified explicitly(instead of inherited from global evaluationInterval flag) and must bigger than offset.
if g.EvalOffset.Duration().Abs() > g.Interval.Duration() {
return fmt.Errorf("the abs value of eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
}
if g.EvalOffset != nil && g.EvalDelay != nil {
return fmt.Errorf("eval_offset cannot be used with eval_delay")
@@ -176,11 +176,17 @@ func TestGroupValidate_Failure(t *testing.T) {
}, false, "interval shouldn't be lower than 0")

f(&Group{
Name: "wrong eval_offset",
Name: "too big eval_offset",
Interval: promutil.NewDuration(time.Minute),
EvalOffset: promutil.NewDuration(2 * time.Minute),
}, false, "eval_offset should be smaller than interval")

f(&Group{
Name: "too big negative eval_offset",
Interval: promutil.NewDuration(time.Minute),
EvalOffset: promutil.NewDuration(-2 * time.Minute),
}, false, "eval_offset should be smaller than interval")

limit := -1
f(&Group{
Name: "wrong limit",
@@ -56,7 +56,7 @@ absolute path to all .tpl files in root.
-rule.templates="dir/**/*.tpl". Includes all the .tpl files in "dir" subfolders recursively.
`)

configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule' or '-notifier.config' files. "+
configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule', '-rule.templates' and '-notifier.config' files. "+
"By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes.")

httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "Address to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")
@@ -98,7 +98,7 @@ func (m *manager) close() {
m.wg.Wait()
}

func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) error {
func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) {
id := g.GetID()
g.Init()
m.wg.Go(func() {
@@ -110,7 +110,6 @@ func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) e
})

m.groups[id] = g
return nil
}

func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore bool) error {
@@ -119,7 +118,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
for _, cfg := range groupsCfg {
for _, r := range cfg.Rules {
if rrPresent && arPresent {
continue
break
}
if r.Record != "" {
rrPresent = true
@@ -162,10 +161,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
}
}
for _, ng := range groupsRegistry {
if err := m.startGroup(ctx, ng, restore); err != nil {
m.groupsMu.Unlock()
return err
}
m.startGroup(ctx, ng, restore)
}
m.groupsMu.Unlock()
@@ -13,14 +13,18 @@ import (
"sync"
"time"

"github.com/cespare/xxhash/v2"
"github.com/golang/snappy"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"

"github.com/VictoriaMetrics/metrics"
)

@@ -113,8 +117,10 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
input: make(chan prompb.TimeSeries, cfg.MaxQueueSize),
}

for range cc {
c.run(ctx)
for i := 0; i < cc; i++ {
c.wg.Go(func() {
c.run(ctx, i)
})
}
return c, nil
}
@@ -156,8 +162,7 @@ func (c *Client) Close() error {
return nil
}

func (c *Client) run(ctx context.Context) {
ticker := time.NewTicker(c.flushInterval)
func (c *Client) run(ctx context.Context, id int) {
wr := &prompb.WriteRequest{}
shutdown := func() {
lastCtx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
@@ -174,40 +179,72 @@ func (c *Client) run(ctx context.Context) {
cancel()
}

c.wg.Go(func() {
defer ticker.Stop()
for {
// add jitter to spread remote write flushes over the flush interval to avoid congestion at the remote write destination
h := xxhash.Sum64(bytesutil.ToUnsafeBytes(fmt.Sprintf("%d", id)))
randJitter := uint64(float64(c.flushInterval) * (float64(h) / (1 << 64)))
timer := time.NewTimer(time.Duration(randJitter))
addJitter:
for {
select {
case <-c.doneCh:
timer.Stop()
shutdown()
return
case <-ctx.Done():
timer.Stop()
shutdown()
return
case <-timer.C:
break addJitter
}
}

ticker := time.NewTicker(c.flushInterval)
defer ticker.Stop()
for {
select {
case <-c.doneCh:
shutdown()
return
case <-ctx.Done():
shutdown()
return
case <-ticker.C:
c.flush(ctx, wr)
// drain the potential stale tick to avoid small or empty flushes after a slow flush.
select {
case <-c.doneCh:
shutdown()
return
case <-ctx.Done():
shutdown()
return
case <-ticker.C:
default:
}
case ts, ok := <-c.input:
if !ok {
continue
}
wr.Timeseries = append(wr.Timeseries, ts)
if len(wr.Timeseries) >= c.maxBatchSize {
c.flush(ctx, wr)
case ts, ok := <-c.input:
if !ok {
continue
}
wr.Timeseries = append(wr.Timeseries, ts)
if len(wr.Timeseries) >= c.maxBatchSize {
c.flush(ctx, wr)
}
}
}
})
}
}
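The worker start-up jitter above hashes the worker id and scales the 64-bit hash into `[0, flushInterval)`, so each worker flushes at a deterministic, evenly spread offset rather than all at once. A runnable distillation of just that computation:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cespare/xxhash/v2"
)

// startJitter maps a worker id onto [0, flushInterval) the same way the
// hunk above does: hash the id and scale the 64-bit value into the interval.
func startJitter(id int, flushInterval time.Duration) time.Duration {
	h := xxhash.Sum64([]byte(fmt.Sprintf("%d", id)))
	return time.Duration(float64(flushInterval) * (float64(h) / (1 << 64)))
}

func main() {
	// With a 5s flush interval, each of four workers gets a distinct,
	// deterministic start offset inside the interval.
	for id := 0; id < 4; id++ {
		fmt.Println(id, startJitter(id, 5*time.Second))
	}
}
```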
var (
rwErrors = metrics.NewCounter(`vmalert_remotewrite_errors_total`)
rwTotal = metrics.NewCounter(`vmalert_remotewrite_total`)

sentRows = metrics.NewCounter(`vmalert_remotewrite_sent_rows_total`)
sentBytes = metrics.NewCounter(`vmalert_remotewrite_sent_bytes_total`)
droppedRows = metrics.NewCounter(`vmalert_remotewrite_dropped_rows_total`)
sendDuration = metrics.NewFloatCounter(`vmalert_remotewrite_send_duration_seconds_total`)
bufferFlushDuration = metrics.NewHistogram(`vmalert_remotewrite_flush_duration_seconds`)
// sentRows and sentBytes are historical counters that can now be replaced by flushedRows and flushedBytes histograms. They may be deprecated in the future after the new histograms have been adopted for some time.
sentRows = metrics.NewCounter(`vmalert_remotewrite_sent_rows_total`)
sentBytes = metrics.NewCounter(`vmalert_remotewrite_sent_bytes_total`)
flushedRows = metrics.NewHistogram(`vmalert_remotewrite_sent_rows`)
flushedBytes = metrics.NewHistogram(`vmalert_remotewrite_sent_bytes`)
droppedRows = metrics.NewCounter(`vmalert_remotewrite_dropped_rows_total`)
sendDuration = metrics.NewFloatCounter(`vmalert_remotewrite_send_duration_seconds_total`)
bufferFlushDuration = metrics.NewHistogram(`vmalert_remotewrite_flush_duration_seconds`)
remoteWriteQueueSize = metrics.NewHistogram(`vmalert_remotewrite_queue_size`)

_ = metrics.NewGauge(`vmalert_remotewrite_queue_capacity`, func() float64 {
return float64(*maxQueueSize)
})

_ = metrics.NewGauge(`vmalert_remotewrite_concurrency`, func() float64 {
return float64(*concurrency)
@@ -221,6 +258,7 @@ func GetDroppedRows() int { return int(droppedRows.Get()) }
// it to remote-write endpoint. Flush performs limited amount of retries
// if request fails.
func (c *Client) flush(ctx context.Context, wr *prompb.WriteRequest) {
remoteWriteQueueSize.Update(float64(len(c.input)))
if len(wr.Timeseries) < 1 {
return
}
@@ -230,10 +268,8 @@ func (c *Client) flush(ctx context.Context, wr *prompb.WriteRequest) {
data := wr.MarshalProtobuf(nil)
b := snappy.Encode(nil, data)

retryInterval, maxRetryInterval := *retryMinInterval, *retryMaxTime
if retryInterval > maxRetryInterval {
retryInterval = maxRetryInterval
}
maxRetryInterval := *retryMaxTime
bt := timeutil.NewBackoffTimer(*retryMinInterval, maxRetryInterval)
timeStart := time.Now()
defer func() {
sendDuration.Add(time.Since(timeStart).Seconds())
@@ -251,6 +287,8 @@ L:
if err == nil {
sentRows.Add(len(wr.Timeseries))
sentBytes.Add(len(b))
flushedRows.Update(float64(len(wr.Timeseries)))
flushedBytes.Update(float64(len(b)))
return
}

@@ -276,12 +314,11 @@ L:
break
}

if retryInterval > timeLeftForRetries {
retryInterval = timeLeftForRetries
if bt.CurrentDelay() > timeLeftForRetries {
bt.SetDelay(timeLeftForRetries)
}
// sleeping to prevent remote db hammering
time.Sleep(retryInterval)
retryInterval *= 2
bt.Wait(ctx.Done())

attempts++
}
@@ -381,7 +381,9 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc

if len(g.Rules) < 1 {
g.metrics.iterationDuration.UpdateDuration(start)
g.mu.Lock()
g.LastEvaluation = start
g.mu.Unlock()
return ts
}

@@ -395,7 +397,9 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
}
}
g.metrics.iterationDuration.UpdateDuration(start)
g.mu.Lock()
g.LastEvaluation = start
g.mu.Unlock()
return ts
}

@@ -405,11 +409,11 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
g.mu.Unlock()
defer g.evalCancel()

realEvalTS := eval(evalCtx, evalTS)

t := time.NewTicker(g.Interval)
defer t.Stop()

realEvalTS := eval(evalCtx, evalTS)

// restore the rules state after the first evaluation
// so only active alerts can be restored.
if rr != nil {
@@ -484,8 +488,15 @@ func (g *Group) UpdateWith(newGroup *Group) {
// delayBeforeStart calculates delay based on Group ID, so all groups will start at different moments of time.
func (g *Group) delayBeforeStart(ts time.Time, maxDelay time.Duration) time.Duration {
if g.EvalOffset != nil {
offset := *g.EvalOffset
// adjust the offset for negative evalOffset, the rule is:
// `eval_offset: -x` is equivalent to `eval_offset: y` for `interval: x+y`.
// For example, `eval_offset: -6m` is equivalent to `eval_offset: 4m` for `interval: 10m`.
if offset < 0 {
offset += g.Interval
}
// if offset is specified, ignore the maxDelay and return a duration aligned with offset
currentOffsetPoint := ts.Truncate(g.Interval).Add(*g.EvalOffset)
currentOffsetPoint := ts.Truncate(g.Interval).Add(offset)
if currentOffsetPoint.Before(ts) {
// wait until the next offset point
return currentOffsetPoint.Add(g.Interval).Sub(ts)

@@ -606,6 +606,15 @@ func TestGroupStartDelay(t *testing.T) {
f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")

// test group with negative offset -2min, which is equivalent to 3min offset for 5min interval
offset = -2 * time.Minute
g.EvalOffset = &offset

f("2023-01-01T00:00:15.000+00:00", "2023-01-01T00:03:00.000+00:00")
f("2023-01-01T00:01:00.000+00:00", "2023-01-01T00:03:00.000+00:00")
f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")

maxDelay = time.Minute * 1
g.EvalOffset = nil
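The negative-offset rule above (`eval_offset: -x` behaves like `eval_offset: interval-x`) combined with `delayBeforeStart` condenses to a few lines. In this sketch the branch for an offset point that is not yet in the past is inferred from the test expectations, since the hunk cuts off before it:

```go
package main

import (
	"fmt"
	"time"
)

// delayBeforeStart folds a negative eval_offset into the interval, then
// waits until the next interval-aligned offset point.
func delayBeforeStart(ts time.Time, interval, offset time.Duration) time.Duration {
	if offset < 0 {
		offset += interval
	}
	point := ts.Truncate(interval).Add(offset)
	if point.Before(ts) {
		// wait until the next offset point
		point = point.Add(interval)
	}
	return point.Sub(ts)
}

func main() {
	ts := time.Date(2023, 1, 1, 0, 0, 15, 0, time.UTC)
	// A -2m offset with a 5m interval behaves like a +3m offset, so the
	// group starting at 00:00:15 waits until 00:03:00 (matching the test).
	fmt.Println(delayBeforeStart(ts, 5*time.Minute, -2*time.Minute)) // 2m45s
}
```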
@@ -57,12 +57,8 @@ type ApiGroup struct {
EvalOffset float64 `json:"eval_offset,omitempty"`
// EvalDelay will adjust the `time` parameter of rule evaluation requests to compensate intentional query delay from datasource.
EvalDelay float64 `json:"eval_delay,omitempty"`
// Unhealthy unhealthy rules count
Unhealthy int
// Healthy passing rules count
Healthy int
// NoMatch not matching rules count
NoMatch int
// States represents counts per each rule state
States map[string]int `json:"states"`
}

// APILink returns a link to the group's JSON representation.
@@ -134,6 +130,11 @@ type ApiRule struct {
Updates []StateEntry `json:"-"`
}

// IsNoMatch returns true if rule is in nomatch state
func (r *ApiRule) IsNoMatch() bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}

// ApiAlert represents a notifier.AlertingRule state
// for WEB view
// https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
@@ -235,6 +236,20 @@ func NewAlertAPI(ar *AlertingRule, a *notifier.Alert) *ApiAlert {
return aa
}

func (r *ApiRule) ExtendState() {
if len(r.Alerts) > 0 {
return
}
if r.State == "" {
r.State = "ok"
}
if r.Health != "ok" {
r.State = "unhealthy"
} else if r.IsNoMatch() {
r.State = "nomatch"
}
}

// ToAPI returns ApiGroup representation of g
func (g *Group) ToAPI() *ApiGroup {
g.mu.RLock()
@@ -252,6 +267,7 @@ func (g *Group) ToAPI() *ApiGroup {
Headers: headersToStrings(g.Headers),
NotifierHeaders: headersToStrings(g.NotifierHeaders),
Labels: g.Labels,
States: make(map[string]int),
}
if g.EvalOffset != nil {
ag.EvalOffset = g.EvalOffset.Seconds()
@@ -259,9 +275,10 @@ func (g *Group) ToAPI() *ApiGroup {
if g.EvalDelay != nil {
ag.EvalDelay = g.EvalDelay.Seconds()
}
ag.Rules = make([]ApiRule, 0)
ag.Rules = make([]ApiRule, 0, len(g.Rules))
for _, r := range g.Rules {
ag.Rules = append(ag.Rules, r.ToAPI())
ar := r.ToAPI()
ag.Rules = append(ag.Rules, ar)
}
return &ag
}
@@ -11,7 +11,7 @@
<path d="M224.163 175.27a1.9 1.9 0 0 0 2.8 0l6-5.9a2.1 2.1 0 0 0 .2-2.7 1.9 1.9 0 0 0-3-.2l-2.6 2.6v-5.2c0-1.54-1.667-2.502-3-1.732-.619.357-1 1.017-1 1.732v5.2l-2.6-2.6a1.9 1.9 0 0 0-3 .2 2.1 2.1 0 0 0 .2 2.7zm-16.459-23.297h36c1.54 0 2.502-1.667 1.732-3a2 2 0 0 0-1.732-1h-36c-1.54 0-2.502 1.667-1.732 3 .357.619 1.017 1 1.732 1m36 4h-36c-1.54 0-2.502 1.667-1.732 3 .357.619 1.017 1 1.732 1h36c1.54 0 2.502-1.667 1.732-3a2 2 0 0 0-1.732-1m-16.59-23.517a1.9 1.9 0 0 0-2.8 0l-6 5.9a2.1 2.1 0 0 0-.2 2.7 1.9 1.9 0 0 0 3 .2l2.6-2.6v5.2c0 1.54 1.667 2.502 3 1.732.619-.357 1-1.017 1-1.732v-5.2l2.6 2.6a1.9 1.9 0 0 0 3-.2 2.1 2.1 0 0 0-.2-2.7z"/>
</symbol>

<symbol id="filter" viewBox="-10 -10 320 310">
<symbol id="state" viewBox="-10 -10 320 310">
<path d="M288.953 0h-277c-5.522 0-10 4.478-10 10v49.531c0 5.522 4.478 10 10 10h12.372l91.378 107.397v113.978a10 10 0 0 0 15.547 8.32l49.5-33a10 10 0 0 0 4.453-8.32v-80.978l91.378-107.397h12.372c5.522 0 10-4.478 10-10V10c0-5.522-4.477-10-10-10M167.587 166.77a10 10 0 0 0-2.384 6.48v79.305l-29.5 19.666V173.25a10 10 0 0 0-2.384-6.48L50.585 69.531h199.736zM278.953 49.531h-257V20h257z"/>
</symbol>
@@ -8,9 +8,9 @@ function actionAll(isCollapse) {
});
}

function groupFilter(key) {
function groupForState(key) {
if (key) {
location.href = `?filter=${key}`;
location.href = `?state=${key}`;
} else {
window.location = window.location.pathname;
}
@@ -1,9 +1,11 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strconv"
|
||||
@@ -50,6 +52,13 @@ var (
|
||||
"alert": rule.TypeAlerting,
|
||||
"record": rule.TypeRecording,
|
||||
}
|
||||
|
||||
// The "recovering", "noData", "normal", "error" states are used by Grafana.
|
||||
// Ignore "recovering" since it is not currently acknowledged by vmalert,
|
||||
// treat "noData" as an alias for "nomatch",
|
||||
// treat "normal" as an alias for "inactive",
|
||||
// treat "error" as an alias for "unhealthy"
|
||||
ruleStates = []string{"ok", "nomatch", "inactive", "firing", "pending", "unhealthy", "recovering", "noData", "normal", "error"}
|
||||
)
|
||||
|
||||
type requestHandler struct {
|
||||
@@ -63,6 +72,14 @@ var (
|
||||
staticServer = http.StripPrefix("/vmalert", staticHandler)
|
||||
)
|
||||
|
||||
func marshalJson(v any, kind string) ([]byte, *httpserver.ErrorWithStatusCode) {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, errResponse(fmt.Errorf("failed to marshal %s: %s", kind, err), http.StatusInternalServerError)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
||||
if strings.HasPrefix(r.URL.Path, "/vmalert/static") {
|
||||
staticServer.ServeHTTP(w, r)
|
||||
@@ -94,40 +111,32 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
WriteRuleDetails(w, r, rule)
|
||||
WriteRule(w, r, rule)
|
||||
return true
|
||||
case "/vmalert/groups":
|
||||
// current used by old vmalert UI and Grafana Alerts
|
||||
case "/vmalert/groups", "/rules":
|
||||
rf, err := newRulesFilter(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
data := rh.groups(rf)
|
||||
WriteListGroups(w, r, data, rf.filter)
|
||||
// only support filtering by a single state
|
||||
state := ""
|
||||
if len(rf.states) > 0 {
|
||||
state = rf.states[0]
|
||||
rf.states = rf.states[:1]
|
||||
}
|
||||
lr := rh.groups(rf)
|
||||
WriteListGroups(w, r, lr.Data.Groups, state)
|
||||
return true
|
||||
case "/vmalert/notifiers":
|
||||
WriteListTargets(w, r, notifier.GetTargets())
|
||||
return true
|
||||
|
||||
// special cases for Grafana requests,
|
||||
// served without `vmalert` prefix:
|
||||
case "/rules":
|
||||
// Grafana makes an extra request to `/rules`
|
||||
// handler in addition to `/api/v1/rules` calls in alerts UI
|
||||
var data []*rule.ApiGroup
|
||||
rf, err := newRulesFilter(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
data = rh.groups(rf)
|
||||
WriteListGroups(w, r, data, rf.filter)
|
||||
return true
|
||||
|
||||
case "/vmalert/api/v1/notifiers", "/api/v1/notifiers":
|
||||
data, err := rh.listNotifiers()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
errJson(w, r, err)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -135,15 +144,14 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
||||
return true
|
||||
case "/vmalert/api/v1/rules", "/api/v1/rules":
|
||||
// path used by Grafana for ng alerting
|
||||
var data []byte
|
||||
rf, err := newRulesFilter(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
errJson(w, r, err)
|
||||
return true
|
||||
}
|
||||
data, err = rh.listGroups(rf)
|
||||
data, err := rh.listGroups(rf)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
errJson(w, r, err)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -152,14 +160,14 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
||||
|
||||
case "/vmalert/api/v1/alerts", "/api/v1/alerts":
|
||||
// path used by Grafana for ng alerting
|
||||
rf, err := newRulesFilter(r)
|
||||
gf, err := newGroupsFilter(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
errJson(w, r, err)
|
||||
return true
|
||||
}
|
||||
data, err := rh.listAlerts(rf)
|
||||
data, err := rh.listAlerts(gf)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
errJson(w, r, err)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -168,12 +176,12 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
|
||||
case "/vmalert/api/v1/alert", "/api/v1/alert":
|
||||
alert, err := rh.getAlert(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
errJson(w, r, err)
|
||||
return true
|
||||
}
|
||||
data, err := json.Marshal(alert)
|
||||
data, err := marshalJson(alert, "alert")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "failed to marshal alert: %s", err)
|
||||
errJson(w, r, err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -182,16 +190,16 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/vmalert/api/v1/rule", "/api/v1/rule":
apiRule, err := rh.getRule(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
errJson(w, r, err)
return true
}
rwu := rule.ApiRuleWithUpdates{
ApiRule: apiRule,
StateUpdates: apiRule.Updates,
}
data, err := json.Marshal(rwu)
data, err := marshalJson(rwu, "rule")
if err != nil {
httpserver.Errorf(w, r, "failed to marshal rule: %s", err)
errJson(w, r, err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -200,12 +208,12 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/vmalert/api/v1/group", "/api/v1/group":
group, err := rh.getGroup(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
errJson(w, r, err)
return true
}
data, err := json.Marshal(group)
data, err := marshalJson(group, "group")
if err != nil {
httpserver.Errorf(w, r, "failed to marshal group: %s", err)
errJson(w, r, err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -225,10 +233,10 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
}
}

func (rh *requestHandler) getGroup(r *http.Request) (*rule.ApiGroup, error) {
func (rh *requestHandler) getGroup(r *http.Request) (*rule.ApiGroup, *httpserver.ErrorWithStatusCode) {
groupID, err := strconv.ParseUint(r.FormValue(rule.ParamGroupID), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err)
return nil, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err), http.StatusBadRequest)
}
obj, err := rh.m.groupAPI(groupID)
if err != nil {
@@ -237,14 +245,14 @@ func (rh *requestHandler) getGroup(r *http.Request) (*rule.ApiGroup, error) {
return obj, nil
}

func (rh *requestHandler) getRule(r *http.Request) (rule.ApiRule, error) {
func (rh *requestHandler) getRule(r *http.Request) (rule.ApiRule, *httpserver.ErrorWithStatusCode) {
groupID, err := strconv.ParseUint(r.FormValue(rule.ParamGroupID), 10, 64)
if err != nil {
return rule.ApiRule{}, fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err)
return rule.ApiRule{}, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err), http.StatusBadRequest)
}
ruleID, err := strconv.ParseUint(r.FormValue(rule.ParamRuleID), 10, 64)
if err != nil {
return rule.ApiRule{}, fmt.Errorf("failed to read %q param: %w", rule.ParamRuleID, err)
return rule.ApiRule{}, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamRuleID, err), http.StatusBadRequest)
}
obj, err := rh.m.ruleAPI(groupID, ruleID)
if err != nil {
@@ -253,14 +261,14 @@ func (rh *requestHandler) getRule(r *http.Request) (rule.ApiRule, error) {
return obj, nil
}

func (rh *requestHandler) getAlert(r *http.Request) (*rule.ApiAlert, error) {
func (rh *requestHandler) getAlert(r *http.Request) (*rule.ApiAlert, *httpserver.ErrorWithStatusCode) {
groupID, err := strconv.ParseUint(r.FormValue(rule.ParamGroupID), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err)
return nil, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err), http.StatusBadRequest)
}
alertID, err := strconv.ParseUint(r.FormValue(rule.ParamAlertID), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to read %q param: %w", rule.ParamAlertID, err)
return nil, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamAlertID, err), http.StatusBadRequest)
}
a, err := rh.m.alertAPI(groupID, alertID)
if err != nil {
@@ -270,28 +278,76 @@ func (rh *requestHandler) getAlert(r *http.Request) (*rule.ApiAlert, error) {
}

type listGroupsResponse struct {
Status string `json:"status"`
Data struct {
Status string `json:"status"`
Page int `json:"page,omitempty"`
TotalPages int `json:"total_pages,omitempty"`
TotalGroups int `json:"total_groups,omitempty"`
TotalRules int `json:"total_rules,omitempty"`
Data struct {
Groups []*rule.ApiGroup `json:"groups"`
} `json:"data"`
}

// see https://prometheus.io/docs/prometheus/latest/querying/api/#rules
type rulesFilter struct {
files []string
groupNames []string
ruleNames []string
ruleType string
excludeAlerts bool
filter string
dsType config.Type
type groupsFilter struct {
groupNames []string
files []string
dsType config.Type
}

func newRulesFilter(r *http.Request) (*rulesFilter, error) {
rf := &rulesFilter{}
query := r.URL.Query()
func newGroupsFilter(r *http.Request) (*groupsFilter, *httpserver.ErrorWithStatusCode) {
_ = r.ParseForm()
vs := r.Form
gf := &groupsFilter{
groupNames: vs["rule_group[]"],
files: vs["file[]"],
}
dsType := vs.Get("datasource_type")
if len(dsType) > 0 {
if config.SupportedType(dsType) {
gf.dsType = config.NewRawType(dsType)
} else {
return nil, errResponse(fmt.Errorf(`invalid parameter "datasource_type": not supported value %q`, dsType), http.StatusBadRequest)
}
}
return gf, nil
}

ruleTypeParam := query.Get("type")
func (gf *groupsFilter) matches(group *rule.Group) bool {
if len(gf.groupNames) > 0 && !slices.Contains(gf.groupNames, group.Name) {
return false
}
if len(gf.files) > 0 && !slices.Contains(gf.files, group.File) {
return false
}
if len(gf.dsType.Name) > 0 && gf.dsType.String() != group.Type.String() {
return false
}
return true
}
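
For illustration only (the parameter names are taken from the filter code above; the values are hypothetical): a request such as
/api/v1/rules?rule_group[]=my-group&file[]=rules.yaml&datasource_type=prometheus
passes this filter only for groups named my-group that were loaded from rules.yaml and use the prometheus datasource type.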

// see https://prometheus.io/docs/prometheus/latest/querying/api/#rules
type rulesFilter struct {
gf *groupsFilter
ruleNames []string
ruleType string
excludeAlerts bool
states []string
maxGroups int
pageNum int
search string
extendedStates bool
}

func newRulesFilter(r *http.Request) (*rulesFilter, *httpserver.ErrorWithStatusCode) {
gf, err := newGroupsFilter(r)
if err != nil {
return nil, err
}

var rf rulesFilter
rf.gf = gf
vs := r.Form
ruleTypeParam := vs.Get("type")
if len(ruleTypeParam) > 0 {
if ruleType, ok := ruleTypeMap[ruleTypeParam]; ok {
rf.ruleType = ruleType
@@ -300,102 +356,155 @@ func newRulesFilter(r *http.Request) (*rulesFilter, error) {
}
}

dsType := query.Get("datasource_type")
if len(dsType) > 0 {
if config.SupportedType(dsType) {
rf.dsType = config.NewRawType(dsType)
} else {
return nil, errResponse(fmt.Errorf(`invalid parameter "datasource_type": not supported value %q`, dsType), http.StatusBadRequest)
}
states := vs["state"]
if len(states) == 0 {
states = vs["filter"]
}

filter := strings.ToLower(query.Get("filter"))
if len(filter) > 0 {
if filter == "nomatch" || filter == "unhealthy" {
rf.filter = filter
} else {
return nil, errResponse(fmt.Errorf(`invalid parameter "filter": not supported value %q`, filter), http.StatusBadRequest)
for _, s := range states {
values := strings.Split(s, ",")
for _, v := range values {
if len(v) == 0 {
continue
}
if !slices.Contains(ruleStates, v) {
return nil, errResponse(fmt.Errorf(`invalid parameter "state": contains not supported value %q`, v), http.StatusBadRequest)
}
// Replace grafana states with supported internal states
switch v {
case "noData":
v = "nomatch"
case "normal":
v = "inactive"
case "error":
v = "unhealthy"
}
rf.states = append(rf.states, v)
}
}

rf.excludeAlerts = httputil.GetBool(r, "exclude_alerts")
rf.ruleNames = append([]string{}, r.Form["rule_name[]"]...)
rf.groupNames = append([]string{}, r.Form["rule_group[]"]...)
rf.files = append([]string{}, r.Form["file[]"]...)
return rf, nil
rf.extendedStates = httputil.GetBool(r, "extended_states")
rf.ruleNames = append([]string{}, vs["rule_name[]"]...)
rf.search = strings.ToLower(vs.Get("search"))

pageNum := vs.Get("page_num")
maxGroups := vs.Get("group_limit")
if pageNum != "" {
if maxGroups == "" {
return nil, errResponse(fmt.Errorf(`"group_limit" needs to be present in order to paginate over the groups`), http.StatusBadRequest)
}
v, err := strconv.Atoi(pageNum)
if err != nil || v <= 0 {
return nil, errResponse(fmt.Errorf(`"page_num" is expected to be a positive number, found %q`, pageNum), http.StatusBadRequest)
}
rf.pageNum = v
}
if maxGroups != "" {
v, err := strconv.Atoi(maxGroups)
if err != nil || v <= 0 {
return nil, errResponse(fmt.Errorf(`"group_limit" is expected to be a positive number, found %q`, maxGroups), http.StatusBadRequest)
}
rf.maxGroups = v
}
return &rf, nil
}
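
As a usage sketch grounded in the validation above (values are illustrative):
/api/v1/rules?group_limit=10&page_num=2
returns the second page of up to 10 groups, while page_num without group_limit, or a non-positive value for either parameter, is rejected with HTTP 400.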

func (rf *rulesFilter) matchesGroup(group *rule.Group) bool {
if len(rf.groupNames) > 0 && !slices.Contains(rf.groupNames, group.Name) {
func (rf *rulesFilter) matchesRule(r *rule.ApiRule) bool {
if rf.ruleType != "" && rf.ruleType != r.Type {
return false
}
if len(rf.files) > 0 && !slices.Contains(rf.files, group.File) {
if len(rf.ruleNames) > 0 && !slices.Contains(rf.ruleNames, r.Name) {
return false
}
if len(rf.dsType.Name) > 0 && rf.dsType.String() != group.Type.String() {
return false
if len(rf.states) == 0 {
return true
}
return true
return slices.Contains(rf.states, r.State)
}

func (rh *requestHandler) groups(rf *rulesFilter) []*rule.ApiGroup {
func (rh *requestHandler) groups(rf *rulesFilter) *listGroupsResponse {
rh.m.groupsMu.RLock()
defer rh.m.groupsMu.RUnlock()

groups := make([]*rule.ApiGroup, 0)
skipGroups := (rf.pageNum - 1) * rf.maxGroups
lr := &listGroupsResponse{
Status: "success",
}
lr.Data.Groups = make([]*rule.ApiGroup, 0)
if skipGroups >= len(rh.m.groups) {
return lr
}
// sort list of groups for deterministic output
groups := make([]*rule.Group, 0, len(rh.m.groups))
for _, group := range rh.m.groups {
if !rf.matchesGroup(group) {
groups = append(groups, group)
}

slices.SortFunc(groups, func(a, b *rule.Group) int {
nameCmp := cmp.Compare(a.Name, b.Name)
if nameCmp != 0 {
return nameCmp
}
return cmp.Compare(a.File, b.File)
})
for _, group := range groups {
if !rf.gf.matches(group) {
continue
}
groupFound := len(rf.search) == 0 || strings.Contains(strings.ToLower(group.Name), rf.search) || strings.Contains(strings.ToLower(group.File), rf.search)
g := group.ToAPI()
// the returned list should always be non-nil
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4221
filteredRules := make([]rule.ApiRule, 0)
for _, rule := range g.Rules {
if rf.ruleType != "" && rf.ruleType != rule.Type {
if !groupFound && !strings.Contains(strings.ToLower(rule.Name), rf.search) {
continue
}
if len(rf.ruleNames) > 0 && !slices.Contains(rf.ruleNames, rule.Name) {
continue
if rf.extendedStates {
rule.ExtendState()
}
if (rule.LastError == "" && rf.filter == "unhealthy") || (!isNoMatch(rule) && rf.filter == "nomatch") {
if !rf.matchesRule(&rule) {
continue
}
if rf.excludeAlerts {
rule.Alerts = nil
}
if rule.LastError != "" {
g.Unhealthy++
} else {
g.Healthy++
}
if isNoMatch(rule) {
g.NoMatch++
}
g.States[rule.State]++
filteredRules = append(filteredRules, rule)
}
g.Rules = filteredRules
groups = append(groups, g)
}
// sort list of groups for deterministic output
slices.SortFunc(groups, func(a, b *rule.ApiGroup) int {
if a.Name != b.Name {
return strings.Compare(a.Name, b.Name)
if len(g.Rules) == 0 || len(filteredRules) > 0 {
if rf.maxGroups > 0 {
lr.TotalGroups++
lr.TotalRules += len(filteredRules)
}
if skipGroups > 0 {
skipGroups--
continue
}
if rf.maxGroups == 0 || len(lr.Data.Groups) < rf.maxGroups {
g.Rules = filteredRules
lr.Data.Groups = append(lr.Data.Groups, g)
}
}
return strings.Compare(a.File, b.File)
})
return groups
}
if rf.maxGroups > 0 {
lr.Page = rf.pageNum
lr.TotalPages = max(int(math.Ceil(float64(lr.TotalGroups)/float64(rf.maxGroups))), 1)
}
return lr
}

func (rh *requestHandler) listGroups(rf *rulesFilter) ([]byte, error) {
lr := listGroupsResponse{Status: "success"}
lr.Data.Groups = rh.groups(rf)
func (rh *requestHandler) listGroups(rf *rulesFilter) ([]byte, *httpserver.ErrorWithStatusCode) {
lr := rh.groups(rf)
if rf.pageNum > 1 && len(lr.Data.Groups) == 0 {
return nil, errResponse(fmt.Errorf(`page_num exceeds total amount of pages`), http.StatusBadRequest)
}
if lr.Page > lr.TotalPages {
return nil, errResponse(fmt.Errorf(`page_num=%d exceeds total amount of pages in result=%d`, lr.Page, lr.TotalPages), http.StatusBadRequest)
}
b, err := json.Marshal(lr)
if err != nil {
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
StatusCode: http.StatusInternalServerError,
}
return nil, errResponse(fmt.Errorf(`error encoding list of groups: %w`, err), http.StatusInternalServerError)
}
return b, nil
}
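
A hedged sketch of the payload listGroups produces, derived from the listGroupsResponse struct above (all values are illustrative; the pagination fields are omitempty and appear only when group_limit is set):
{"status":"success","page":2,"total_pages":3,"total_groups":5,"total_rules":12,"data":{"groups":[...]}}
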
@@ -434,14 +543,14 @@ func (rh *requestHandler) groupAlerts() []rule.GroupAlerts {
return gAlerts
}

func (rh *requestHandler) listAlerts(rf *rulesFilter) ([]byte, error) {
func (rh *requestHandler) listAlerts(gf *groupsFilter) ([]byte, *httpserver.ErrorWithStatusCode) {
rh.m.groupsMu.RLock()
defer rh.m.groupsMu.RUnlock()

lr := listAlertsResponse{Status: "success"}
lr.Data.Alerts = make([]*rule.ApiAlert, 0)
for _, group := range rh.m.groups {
if !rf.matchesGroup(group) {
if !gf.matches(group) {
continue
}
g := group.ToAPI()
@@ -460,10 +569,7 @@ func (rh *requestHandler) listAlerts(rf *rulesFilter) ([]byte, error) {

b, err := json.Marshal(lr)
if err != nil {
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
StatusCode: http.StatusInternalServerError,
}
return nil, errResponse(fmt.Errorf(`error encoding list of active alerts: %w`, err), http.StatusInternalServerError)
}
return b, nil
}
@@ -475,7 +581,7 @@ type listNotifiersResponse struct {
} `json:"data"`
}

func (rh *requestHandler) listNotifiers() ([]byte, error) {
func (rh *requestHandler) listNotifiers() ([]byte, *httpserver.ErrorWithStatusCode) {
targets := notifier.GetTargets()

lr := listNotifiersResponse{Status: "success"}
@@ -497,10 +603,7 @@ func (rh *requestHandler) listNotifiers() ([]byte, error) {

b, err := json.Marshal(lr)
if err != nil {
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of notifiers: %w`, err),
StatusCode: http.StatusInternalServerError,
}
return nil, errResponse(fmt.Errorf(`error encoding list of notifiers: %w`, err), http.StatusInternalServerError)
}
return b, nil
}
@@ -511,3 +614,8 @@ func errResponse(err error, sc int) *httpserver.ErrorWithStatusCode {
StatusCode: sc,
}
}

func errJson(w http.ResponseWriter, r *http.Request, err *httpserver.ErrorWithStatusCode) {
w.Header().Set("Content-Type", "application/json")
httpserver.Errorf(w, r, `{"error":%q,"errorType":%d}`, err, err.StatusCode)
}

@@ -12,7 +12,7 @@
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
) %}

{% func Controls(prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) %}
{% func Controls(prefix, currentIcon, currentText string, icons, states map[string]string, search bool) %}
<div class="btn-toolbar mb-3" role="toolbar">
<div class="d-flex gap-2 justify-content-between w-100">
<div class="d-flex gap-2 align-items-center">
@@ -28,10 +28,10 @@
<use href="{%s prefix %}static/icons/icons.svg#expand"/>
</svg>
</a>
{% if len(filters) > 0 %}
{% if len(states) > 0 %}
<span class="d-none d-md-inline-block">Filter by status:</span>
<svg class="d-md-none" width="20" height="20">
<use href="{%s prefix %}static/icons/icons.svg#filter">
<use href="{%s prefix %}static/icons/icons.svg#state">
</svg>
<div class="dropdown">
<button
@@ -46,10 +46,10 @@
</svg>
</button>
<ul class="dropdown-menu">
{% for key, title := range filters %}
{% for key, title := range states %}
{% if title != currentText %}
<li>
<a class="dropdown-item" onclick="groupFilter('{%s key %}')">
<a class="dropdown-item" onclick="groupForState('{%s key %}')">
<span class="d-none d-md-inline-block">{%s title %}</span>
<svg class="d-md-none" width="22" height="22">
<use href="{%s prefix %}static/icons/icons.svg#{%s icons[key] %}"/>
@@ -97,10 +97,10 @@
{%= tpl.Footer(r) %}
{% endfunc %}

{% func ListGroups(r *http.Request, groups []*rule.ApiGroup, filter string) %}
{% func ListGroups(r *http.Request, groups []*rule.ApiGroup, state string) %}
{%code
prefix := vmalertutil.Prefix(r.URL.Path)
filters := map[string]string{
states := map[string]string{
"": "All",
"unhealthy": "Unhealthy",
"nomatch": "No Match",
@@ -110,14 +110,14 @@
"unhealthy": "unhealthy",
"nomatch": "nomatch",
}
currentText := filters[filter]
currentIcon := icons[filter]
currentText := states[state]
currentIcon := icons[state]
%}
{%= tpl.Header(r, navItems, "Groups", getLastConfigError()) %}
{%= Controls(prefix, currentIcon, currentText, icons, filters, true) %}
{%= Controls(prefix, currentIcon, currentText, icons, states, true) %}
{% if len(groups) > 0 %}
{% for _, g := range groups %}
<div id="group-{%s g.ID %}" class="w-100 border-0 flex-column vm-group{% if g.Unhealthy > 0 %} alert-danger{% endif %}">
<div id="group-{%s g.ID %}" class="w-100 border-0 flex-column vm-group{% if g.States["unhealthy"] > 0 %} alert-danger{% endif %}">
<span class="d-flex justify-content-between">
<a
class="vm-group-search"
@@ -130,9 +130,9 @@
data-bs-target="#item-{%s g.ID %}"
>
<span class="d-flex gap-2">
{% if g.Unhealthy > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d g.Unhealthy %}</span> {% endif %}
{% if g.NoMatch > 0 %}<span class="badge bg-warning" title="Number of rules with status NoMatch">{%d g.NoMatch %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules with status Ok">{%d g.Healthy %}</span>
{% if g.States["unhealthy"] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d g.States["unhealthy"] %}</span> {% endif %}
{% if g.States["nomatch"] > 0 %}<span class="badge bg-warning" title="Number of rules with status NoMatch">{%d g.States["nomatch"] %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules with status Ok">{%d g.States["ok"] %}</span>
</span>
</span>
</span>
@@ -189,7 +189,7 @@
<b>record:</b> {%s r.Name %}
{% endif %}
|
{%= seriesFetchedWarn(prefix, r) %}
{%= seriesFetchedWarn(prefix, &r) %}
<span><a target="_blank" href="{%s prefix+r.WebLink() %}">Details</a></span>
</div>
<div class="col-12">
@@ -476,7 +476,7 @@
{% endfunc %}


{% func RuleDetails(r *http.Request, rule rule.ApiRule) %}
{% func Rule(r *http.Request, rule rule.ApiRule) %}
{%code prefix := vmalertutil.Prefix(r.URL.Path) %}
{%= tpl.Header(r, navItems, "", getLastConfigError()) %}
{%code
@@ -661,8 +661,8 @@
<span class="badge bg-warning text-dark" title="This firing state is kept because of `keep_firing_for`">stabilizing</span>
{% endfunc %}

{% func seriesFetchedWarn(prefix string, r rule.ApiRule) %}
{% if isNoMatch(r) %}
{% func seriesFetchedWarn(prefix string, r *rule.ApiRule) %}
{% if r.IsNoMatch() %}
<svg
data-bs-toggle="tooltip"
title="No match! This rule's last evaluation hasn't selected any time series from the datasource.
@@ -673,9 +673,3 @@
</svg>
{% endif %}
{% endfunc %}

{%code
func isNoMatch (r rule.ApiRule) bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}
%}

@@ -31,7 +31,7 @@ var (
)

//line app/vmalert/web.qtpl:15
func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) {
func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText string, icons, states map[string]string, search bool) {
//line app/vmalert/web.qtpl:15
qw422016.N().S(`
<div class="btn-toolbar mb-3" role="toolbar">
@@ -59,7 +59,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
</a>
`)
//line app/vmalert/web.qtpl:31
if len(filters) > 0 {
if len(states) > 0 {
//line app/vmalert/web.qtpl:31
qw422016.N().S(`
<span class="d-none d-md-inline-block">Filter by status:</span>
@@ -68,7 +68,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
//line app/vmalert/web.qtpl:34
qw422016.E().S(prefix)
//line app/vmalert/web.qtpl:34
qw422016.N().S(`static/icons/icons.svg#filter">
qw422016.N().S(`static/icons/icons.svg#state">
</svg>
<div class="dropdown">
<button
@@ -97,7 +97,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
<ul class="dropdown-menu">
`)
//line app/vmalert/web.qtpl:49
for key, title := range filters {
for key, title := range states {
//line app/vmalert/web.qtpl:49
qw422016.N().S(`
`)
@@ -106,7 +106,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
//line app/vmalert/web.qtpl:50
qw422016.N().S(`
<li>
<a class="dropdown-item" onclick="groupFilter('`)
<a class="dropdown-item" onclick="groupForState('`)
//line app/vmalert/web.qtpl:52
qw422016.E().S(key)
//line app/vmalert/web.qtpl:52
@@ -176,22 +176,22 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
}

//line app/vmalert/web.qtpl:77
func WriteControls(qq422016 qtio422016.Writer, prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) {
func WriteControls(qq422016 qtio422016.Writer, prefix, currentIcon, currentText string, icons, states map[string]string, search bool) {
//line app/vmalert/web.qtpl:77
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:77
StreamControls(qw422016, prefix, currentIcon, currentText, icons, filters, search)
StreamControls(qw422016, prefix, currentIcon, currentText, icons, states, search)
//line app/vmalert/web.qtpl:77
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:77
}

//line app/vmalert/web.qtpl:77
func Controls(prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) string {
func Controls(prefix, currentIcon, currentText string, icons, states map[string]string, search bool) string {
//line app/vmalert/web.qtpl:77
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:77
WriteControls(qb422016, prefix, currentIcon, currentText, icons, filters, search)
WriteControls(qb422016, prefix, currentIcon, currentText, icons, states, search)
//line app/vmalert/web.qtpl:77
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:77
@@ -324,13 +324,13 @@ func Welcome(r *http.Request) string {
}

//line app/vmalert/web.qtpl:100
func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule.ApiGroup, filter string) {
func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule.ApiGroup, state string) {
//line app/vmalert/web.qtpl:100
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:102
prefix := vmalertutil.Prefix(r.URL.Path)
filters := map[string]string{
states := map[string]string{
"": "All",
"unhealthy": "Unhealthy",
"nomatch": "No Match",
@@ -340,8 +340,8 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
"unhealthy": "unhealthy",
"nomatch": "nomatch",
}
currentText := filters[filter]
currentIcon := icons[filter]
currentText := states[state]
currentIcon := icons[state]

//line app/vmalert/web.qtpl:115
qw422016.N().S(`
@@ -352,7 +352,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:117
StreamControls(qw422016, prefix, currentIcon, currentText, icons, filters, true)
StreamControls(qw422016, prefix, currentIcon, currentText, icons, states, true)
//line app/vmalert/web.qtpl:117
qw422016.N().S(`
`)
@@ -371,7 +371,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
//line app/vmalert/web.qtpl:120
qw422016.N().S(`" class="w-100 border-0 flex-column vm-group`)
//line app/vmalert/web.qtpl:120
if g.Unhealthy > 0 {
if g.States["unhealthy"] > 0 {
//line app/vmalert/web.qtpl:120
qw422016.N().S(` alert-danger`)
//line app/vmalert/web.qtpl:120
@@ -418,11 +418,11 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
<span class="d-flex gap-2">
`)
//line app/vmalert/web.qtpl:133
if g.Unhealthy > 0 {
if g.States["unhealthy"] > 0 {
//line app/vmalert/web.qtpl:133
qw422016.N().S(`<span class="badge bg-danger" title="Number of rules with status Error">`)
//line app/vmalert/web.qtpl:133
qw422016.N().D(g.Unhealthy)
qw422016.N().D(g.States["unhealthy"])
//line app/vmalert/web.qtpl:133
qw422016.N().S(`</span> `)
//line app/vmalert/web.qtpl:133
@@ -431,11 +431,11 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:134
if g.NoMatch > 0 {
if g.States["nomatch"] > 0 {
//line app/vmalert/web.qtpl:134
qw422016.N().S(`<span class="badge bg-warning" title="Number of rules with status NoMatch">`)
//line app/vmalert/web.qtpl:134
qw422016.N().D(g.NoMatch)
qw422016.N().D(g.States["nomatch"])
//line app/vmalert/web.qtpl:134
qw422016.N().S(`</span> `)
//line app/vmalert/web.qtpl:134
@@ -444,7 +444,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
qw422016.N().S(`
<span class="badge bg-success" title="Number of rules with status Ok">`)
//line app/vmalert/web.qtpl:135
qw422016.N().D(g.Healthy)
qw422016.N().D(g.States["ok"])
//line app/vmalert/web.qtpl:135
qw422016.N().S(`</span>
</span>
@@ -617,7 +617,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
|
`)
//line app/vmalert/web.qtpl:192
streamseriesFetchedWarn(qw422016, prefix, r)
streamseriesFetchedWarn(qw422016, prefix, &r)
//line app/vmalert/web.qtpl:192
qw422016.N().S(`
<span><a target="_blank" href="`)
@@ -750,22 +750,22 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
}

//line app/vmalert/web.qtpl:234
func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []*rule.ApiGroup, filter string) {
func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []*rule.ApiGroup, state string) {
//line app/vmalert/web.qtpl:234
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:234
StreamListGroups(qw422016, r, groups, filter)
StreamListGroups(qw422016, r, groups, state)
//line app/vmalert/web.qtpl:234
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:234
}

//line app/vmalert/web.qtpl:234
func ListGroups(r *http.Request, groups []*rule.ApiGroup, filter string) string {
func ListGroups(r *http.Request, groups []*rule.ApiGroup, state string) string {
//line app/vmalert/web.qtpl:234
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:234
WriteListGroups(qb422016, r, groups, filter)
WriteListGroups(qb422016, r, groups, state)
//line app/vmalert/web.qtpl:234
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:234
@@ -1462,7 +1462,7 @@ func Alert(r *http.Request, alert *rule.ApiAlert) string {
}

//line app/vmalert/web.qtpl:479
func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule rule.ApiRule) {
func StreamRule(qw422016 *qt422016.Writer, r *http.Request, rule rule.ApiRule) {
//line app/vmalert/web.qtpl:479
qw422016.N().S(`
`)
@@ -1859,22 +1859,22 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule rule.Api
}

//line app/vmalert/web.qtpl:642
func WriteRuleDetails(qq422016 qtio422016.Writer, r *http.Request, rule rule.ApiRule) {
func WriteRule(qq422016 qtio422016.Writer, r *http.Request, rule rule.ApiRule) {
//line app/vmalert/web.qtpl:642
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:642
StreamRuleDetails(qw422016, r, rule)
StreamRule(qw422016, r, rule)
//line app/vmalert/web.qtpl:642
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:642
}

//line app/vmalert/web.qtpl:642
func RuleDetails(r *http.Request, rule rule.ApiRule) string {
func Rule(r *http.Request, rule rule.ApiRule) string {
//line app/vmalert/web.qtpl:642
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:642
WriteRuleDetails(qb422016, r, rule)
WriteRule(qb422016, r, rule)
//line app/vmalert/web.qtpl:642
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:642
@@ -2015,12 +2015,12 @@ func badgeStabilizing() string {
}

//line app/vmalert/web.qtpl:664
func streamseriesFetchedWarn(qw422016 *qt422016.Writer, prefix string, r rule.ApiRule) {
func streamseriesFetchedWarn(qw422016 *qt422016.Writer, prefix string, r *rule.ApiRule) {
//line app/vmalert/web.qtpl:664
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:665
if isNoMatch(r) {
if r.IsNoMatch() {
//line app/vmalert/web.qtpl:665
qw422016.N().S(`
<svg
@@ -2045,7 +2045,7 @@ func streamseriesFetchedWarn(qw422016 *qt422016.Writer, prefix string, r rule.Ap
}

//line app/vmalert/web.qtpl:675
func writeseriesFetchedWarn(qq422016 qtio422016.Writer, prefix string, r rule.ApiRule) {
func writeseriesFetchedWarn(qq422016 qtio422016.Writer, prefix string, r *rule.ApiRule) {
//line app/vmalert/web.qtpl:675
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:675
@@ -2056,7 +2056,7 @@ func writeseriesFetchedWarn(qq422016 qtio422016.Writer, prefix string, r rule.Ap
}

//line app/vmalert/web.qtpl:675
func seriesFetchedWarn(prefix string, r rule.ApiRule) string {
func seriesFetchedWarn(prefix string, r *rule.ApiRule) string {
//line app/vmalert/web.qtpl:675
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:675
@@ -2069,8 +2069,3 @@ func seriesFetchedWarn(prefix string, r rule.ApiRule) string {
return qs422016
//line app/vmalert/web.qtpl:675
}

//line app/vmalert/web.qtpl:678
func isNoMatch(r rule.ApiRule) bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}

@@ -210,7 +210,7 @@ func TestHandler(t *testing.T) {
}
})

t.Run("/api/v1/rules&filters", func(t *testing.T) {
t.Run("/api/v1/rules&states", func(t *testing.T) {
check := func(url string, statusCode, expGroups, expRules int) {
t.Helper()
lr := listGroupsResponse{}
@@ -252,9 +252,15 @@ func TestHandler(t *testing.T) {
check("/api/v1/rules?rule_group[]=group&file[]=foo", 200, 0, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml", 200, 3, 6)

check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 200, 3, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 200, 0, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert", 200, 3, 3)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert&rule_name[]=record", 200, 3, 6)

check("/api/v1/rules?group_limit=1", 200, 1, 2)
check("/api/v1/rules?group_limit=1&type=alert", 200, 1, 1)
check("/api/v1/rules?group_limit=1&type=record", 200, 1, 1)
check("/api/v1/rules?group_limit=2", 200, 2, 4)
check(fmt.Sprintf("/api/v1/rules?group_limit=1&page_num=%d", 1), 200, 1, 2)
})
t.Run("/api/v1/rules&exclude_alerts=true", func(t *testing.T) {
// check if response returns active alerts by default

@@ -13,6 +13,7 @@ import (
"net/url"
"os"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@@ -28,6 +29,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
@@ -90,6 +92,8 @@ type UserInfo struct {

MetricLabels map[string]string `yaml:"metric_labels,omitempty"`

AccessLog *AccessLog `yaml:"access_log,omitempty"`

concurrencyLimitCh chan struct{}
concurrencyLimitReached *metrics.Counter

@@ -102,11 +106,40 @@ type UserInfo struct {
requestsDuration *metrics.Summary
}

// AccessLog represents configuration for access log settings.
type AccessLog struct {
Filters *AccessLogFilters `yaml:"filters"`
}

// AccessLogFilters represents list of filters for access logs printing
type AccessLogFilters struct {
// SkipStatusCodes is a list of HTTP status codes for which access logs will be skipped
SkipStatusCodes []int `yaml:"skip_status_codes"`
}
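
A minimal YAML sketch of the corresponding config section, assuming only the yaml tags declared above (the status codes are illustrative):
access_log:
  filters:
    skip_status_codes: [200, 404]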

func (ui *UserInfo) logRequest(r *http.Request, userName string, statusCode int, duration time.Duration) {
if ui.AccessLog == nil {
return
}
filters := ui.AccessLog.Filters
if filters != nil && len(filters.SkipStatusCodes) > 0 {
if slices.Contains(filters.SkipStatusCodes, statusCode) {
return
}
}

remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Infof("access_log request_host=%q request_uri=%q status_code=%d remote_addr=%s user_agent=%q referer=%q duration_ms=%d username=%q",
r.Host, requestURI, statusCode, remoteAddr, r.UserAgent(), r.Referer(), duration.Milliseconds(), userName)
}

// HeadersConf represents config for request and response headers.
type HeadersConf struct {
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
hasAnyPlaceHolders bool
}

func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
@@ -114,7 +147,7 @@ func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
case ui.concurrencyLimitCh <- struct{}{}:
return nil
default:
// The number of concurrently executed requests for the given user equals the limt.
// The number of concurrently executed requests for the given user equals the limit.
// Wait until some of the currently executed requests are finished, so the current request could be executed.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
select {
@@ -349,6 +382,7 @@ func (bus *backendURLs) add(u *url.URL) {
url: u,
healthCheckContext: bus.healthChecksContext,
healthCheckWG: &bus.healthChecksWG,
hasPlaceHolders: hasAnyPlaceholders(u),
})
}

@@ -366,6 +400,8 @@ type backendURL struct {
concurrentRequests atomic.Int32

url *url.URL

hasPlaceHolders bool
}

func (bu *backendURL) isBroken() bool {
@@ -599,7 +635,7 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
// The Load() in front of CompareAndSwap() avoids CAS overhead for items with values bigger than 0.
if bu.concurrentRequests.Load() == 0 && bu.concurrentRequests.CompareAndSwap(0, 1) {
atomicCounter.CompareAndSwap(n+1, idx+1)
// There is no need in the call bu.get(), because we already incremented bu.concrrentRequests above.
// There is no need in the call bu.get(), because we already incremented bu.concurrentRequests above.
return bu
}
}
@@ -842,12 +878,14 @@ func reloadAuthConfigData(data []byte) (bool, error) {
return false, fmt.Errorf("failed to parse auth config: %w", err)
}

jui, err := parseJWTUsers(ac)
jui, oidcDP, err := parseJWTUsers(ac)
if err != nil {
return false, fmt.Errorf("failed to parse JWT users from auth config: %w", err)
}
oidcDP.startDiscovery()
jwtc := &jwtCache{
users: jui,
users: jui,
oidcDP: oidcDP,
}

m, err := parseAuthConfigUsers(ac)
@@ -866,6 +904,11 @@ func reloadAuthConfigData(data []byte) (bool, error) {
}
metrics.RegisterSet(ac.ms)

jwtcPrev := jwtAuthCache.Load()
if jwtcPrev != nil {
jwtcPrev.oidcDP.stopDiscovery()
}

authConfig.Store(ac)
authConfigData.Store(&data)
authUsers.Store(&m)
@@ -903,6 +946,9 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
if ui.Name != "" {
return nil, fmt.Errorf("field name can't be specified for unauthorized_user section")
}
if err := parseJWTPlaceholdersForUserInfo(ui, false); err != nil {
return nil, err
}
if err := ui.initURLs(); err != nil {
return nil, err
}
@@ -960,6 +1006,10 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
at, ui.Username, ui.Name, uiOld.Username, uiOld.Name)
}
}

if err := parseJWTPlaceholdersForUserInfo(ui, false); err != nil {
return nil, err
}
if err := ui.initURLs(); err != nil {
return nil, err
}
@@ -1059,6 +1109,7 @@ func (ui *UserInfo) initURLs() error {
return err
}
}

for _, e := range ui.URLMaps {
if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 && len(e.SrcQueryArgs) == 0 && len(e.SrcHeaders) == 0 {
return fmt.Errorf("missing `src_paths`, `src_hosts`, `src_query_args` and `src_headers` in `url_map`")
@@ -1118,6 +1169,9 @@ func (ui *UserInfo) name() string {
h := xxhash.Sum64([]byte(ui.AuthToken))
return fmt.Sprintf("auth_token:hash:%016X", h)
}
if ui.JWT != nil {
return `jwt`
}
return ""
}


@@ -4,8 +4,11 @@ import (
"bytes"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"testing"
"time"

"gopkg.in/yaml.v2"

@@ -276,6 +279,50 @@ users:
url_prefix: http://foo.bar
metric_labels:
not-prometheus-compatible: value
`)
// placeholder in url_prefix
f(`
users:
- username: foo
password: bar
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
// placeholder in a header
f(`
users:
- username: foo
password: bar
headers:
- 'X-Foo: {{a_placeholder}}'
url_prefix: 'http://ahost'
`)
// placeholder in a header in url_map
f(`
users:
- username: foo
password: bar
url_map:
- src_paths: ["/select/.*"]
headers:
- 'X-Foo: {{a_placeholder}}'
url_prefix: 'http://ahost'
`)

// placeholder in url_prefix in url_map
f(`
users:
- username: foo
password: bar
url_map:
- src_paths: ["/select/.*"]
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
}

@@ -637,6 +684,31 @@ users:
URLPrefix: mustParseURL("http://aaa:343/bbb"),
},
}, nil)

// Multiple users with access logs enabled
f(`
users:
- username: foo
url_prefix: http://foo
access_log: {}
- username: bar
url_prefix: https://bar/x/
access_log:
filters:
skip_status_codes: [404]
`, map[string]*UserInfo{
getHTTPAuthBasicToken("foo", ""): {
Username: "foo",
URLPrefix: mustParseURL("http://foo"),
AccessLog: &AccessLog{},
},
getHTTPAuthBasicToken("bar", ""): {
Username: "bar",
URLPrefix: mustParseURL("https://bar/x/"),
AccessLog: &AccessLog{Filters: &AccessLogFilters{SkipStatusCodes: []int{404}}},
},
}, nil)

}

func TestParseAuthConfigPassesTLSVerificationConfig(t *testing.T) {
@@ -924,6 +996,41 @@ func TestDiscoverBackendIPsWithIPV6(t *testing.T) {

}

func TestLogRequest(t *testing.T) {
ui := &UserInfo{AccessLog: &AccessLog{}}

testOutput := &bytes.Buffer{}
logger.SetOutputForTests(testOutput)
defer logger.ResetOutputForTest()

req, err := http.NewRequest("GET", "http://localhost:8080/select/0/prometheus", nil)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}

f := func(user string, status int, duration time.Duration, expectedLog string) {
t.Helper()

testOutput.Reset()
ui.logRequest(req, user, status, duration)

got := testOutput.String()
if expectedLog == "" && got != "" {
t.Fatalf("expected empty log, got %q", got)
}
if !strings.Contains(got, expectedLog) {
t.Fatalf("output \n%q \nshould contain \n%q", testOutput.String(), expectedLog)
}
}

f("foo", 200, 10*time.Millisecond, `access_log request_host="localhost:8080" request_uri="" status_code=200 remote_addr="" user_agent="" referer="" duration_ms=10 username="foo"`)
f("foo", 404, time.Second, `access_log request_host="localhost:8080" request_uri="" status_code=404 remote_addr="" user_agent="" referer="" duration_ms=1000 username="foo"`)

ui.AccessLog.Filters = &AccessLogFilters{SkipStatusCodes: []int{200}}
f("foo", 200, 10*time.Millisecond, ``)
f("foo", 404, 10*time.Millisecond, `access_log request_host="localhost:8080" request_uri="" status_code=404 remote_addr="" user_agent="" referer="" duration_ms=10 username="foo"`)
}

func getRegexs(paths []string) []*Regex {
var sps []*Regex
for _, path := range paths {

@@ -116,6 +116,20 @@ users:
- "http://default1:8888/unsupported_url_handler"
- "http://default2:8888/unsupported_url_handler"

# JWT token based routing:
# - Requests with a JWT token that has the following structure:
# {"team": "ops", "security": {"read_access": "1"}, "vm_access": {"metrics_account_id": 1000,"metrics_project_id":5}}
# are routed to vmselect nodes, with the request URL placeholder replaced by the metrics tenant identifiers
- name: jwt-opts-team
jwt:
match_claims:
team: ops
security.read_access: "1"
skip_verify: true
url_prefix:
- "http://vmselect1:8481/select/{{.MetricsTenant}}/prometheus"
- "http://vmselect2:8481/select/{{.MetricsTenant}}/prometheus"

# Requests without Authorization header are proxied according to `unauthorized_user` section.
# Requests are proxied in round-robin fashion between `url_prefix` backends.
# The deny_partial_response query arg is added to all the proxied requests.
@@ -125,3 +139,8 @@ unauthorized_user:
- http://vmselect-az1/?deny_partial_response=1
- http://vmselect-az2/?deny_partial_response=1
retry_status_codes: [503, 500]
# log access for requests routed to this user
access_log:
filters:
# except for requests with the status codes below
skip_status_codes: [200, 202]

@@ -2,49 +2,114 @@ package main

import (
"fmt"
"net/url"
"os"
"slices"
"sort"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

const (
metricsTenantPlaceholder = `{{.MetricsTenant}}`
metricsExtraLabelsPlaceholder = `{{.MetricsExtraLabels}}`
metricsExtraFiltersPlaceholder = `{{.MetricsExtraFilters}}`

logsAccountIDPlaceholder = `{{.LogsAccountID}}`
logsProjectIDPlaceholder = `{{.LogsProjectID}}`
logsExtraFiltersPlaceholder = `{{.LogsExtraFilters}}`
logsExtraStreamFiltersPlaceholder = `{{.LogsExtraStreamFilters}}`

placeholderPrefix = `{{`
)

var allPlaceholders = []string{
metricsTenantPlaceholder,
metricsExtraLabelsPlaceholder,
metricsExtraFiltersPlaceholder,
logsAccountIDPlaceholder,
logsProjectIDPlaceholder,
logsExtraFiltersPlaceholder,
logsExtraStreamFiltersPlaceholder,
}

var urlPathPlaceHolders = []string{
metricsTenantPlaceholder,
logsAccountIDPlaceholder,
logsProjectIDPlaceholder,
}

type jwtCache struct {
// users contains UserInfo entries from AuthConfig with JWTConfig set
users []*UserInfo

oidcDP *oidcDiscovererPool
}

type JWTConfig struct {
PublicKeys []string `yaml:"public_keys,omitempty"`
PublicKeyFiles []string `yaml:"public_key_files,omitempty"`
SkipVerify bool `yaml:"skip_verify,omitempty"`
PublicKeys []string `yaml:"public_keys,omitempty"`
PublicKeyFiles []string `yaml:"public_key_files,omitempty"`
SkipVerify bool `yaml:"skip_verify,omitempty"`
OIDC *oidcConfig `yaml:"oidc,omitempty"`
MatchClaims map[string]string `yaml:"match_claims,omitempty"`
parsedMatchClaims []*jwt.Claim

verifierPool *jwt.VerifierPool
// verifierPool is used to verify JWT tokens.
// It is initialized from PublicKeys and/or PublicKeyFiles.
// In this case, it is initialized once at config reload and never updated until next reload
// In case of OIDC, it is initialized on config reload and periodically updated by discovery process.
verifierPool atomic.Pointer[jwt.VerifierPool]
}

func parseJWTUsers(ac *AuthConfig) ([]*UserInfo, error) {
func parseJWTUsers(ac *AuthConfig) ([]*UserInfo, *oidcDiscovererPool, error) {
jui := make([]*UserInfo, 0, len(ac.Users))
for _, ui := range ac.Users {
oidcDP := &oidcDiscovererPool{}

uniqClaims := make(map[string]*UserInfo)
var sortedClaims []string
for idx, ui := range ac.Users {
jwtToken := ui.JWT
if jwtToken == nil {
continue
}

if ui.AuthToken != "" || ui.BearerToken != "" || ui.Username != "" || ui.Password != "" {
return nil, fmt.Errorf("auth_token, bearer_token, username and password cannot be specified if jwt is set")
return nil, nil, fmt.Errorf("auth_token, bearer_token, username and password cannot be specified if jwt is set")
}
if len(jwtToken.PublicKeys) == 0 && len(jwtToken.PublicKeyFiles) == 0 && !jwtToken.SkipVerify {
return nil, fmt.Errorf("jwt must contain at least a single public key, public_key_files or have skip_verify=true")
if len(jwtToken.PublicKeys) == 0 && len(jwtToken.PublicKeyFiles) == 0 && !jwtToken.SkipVerify && jwtToken.OIDC == nil {
return nil, nil, fmt.Errorf("jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true")
}
var claimsString string
sortedClaims = sortedClaims[:0]
parsedClaims := make([]*jwt.Claim, 0, len(jwtToken.MatchClaims))
for ck, cv := range jwtToken.MatchClaims {
sortedClaims = append(sortedClaims, fmt.Sprintf("%s=%s", ck, cv))
pc, err := jwt.NewClaim(ck, cv)
if err != nil {
return nil, nil, fmt.Errorf("incorrect match claim, key=%q, value regex=%q: %w", ck, cv, err)
}
parsedClaims = append(parsedClaims, pc)
}
ui.JWT.parsedMatchClaims = parsedClaims
sort.Strings(sortedClaims)
claimsString = strings.Join(sortedClaims, ",")

if oldUI, ok := uniqClaims[claimsString]; ok {
return nil, nil, fmt.Errorf("duplicate match claims=%q found for name=%q at idx=%d; the previous one is set for name=%q", claimsString, ui.Name, idx, oldUI.Name)
}
uniqClaims[claimsString] = &ui
if len(jwtToken.PublicKeys) > 0 || len(jwtToken.PublicKeyFiles) > 0 {
keys := make([]any, 0, len(jwtToken.PublicKeys)+len(jwtToken.PublicKeyFiles))

for i := range jwtToken.PublicKeys {
k, err := jwt.ParseKey([]byte(jwtToken.PublicKeys[i]))
if err != nil {
return nil, err
return nil, nil, err
}
keys = append(keys, k)
}
@@ -52,30 +117,52 @@ func parseJWTUsers(ac *AuthConfig) ([]*UserInfo, error) {
for _, filePath := range jwtToken.PublicKeyFiles {
keyData, err := os.ReadFile(filePath)
if err != nil {
return nil, fmt.Errorf("cannot read public key from file %q: %w", filePath, err)
return nil, nil, fmt.Errorf("cannot read public key from file %q: %w", filePath, err)
}
k, err := jwt.ParseKey(keyData)
if err != nil {
return nil, fmt.Errorf("cannot parse public key from file %q: %w", filePath, err)
return nil, nil, fmt.Errorf("cannot parse public key from file %q: %w", filePath, err)
}
keys = append(keys, k)
}

vp, err := jwt.NewVerifierPool(keys)
if err != nil {
return nil, err
return nil, nil, err
}

jwtToken.verifierPool = vp
jwtToken.verifierPool.Store(vp)
}
if jwtToken.OIDC != nil {
if len(jwtToken.PublicKeys) > 0 || len(jwtToken.PublicKeyFiles) > 0 || jwtToken.SkipVerify {
return nil, nil, fmt.Errorf("jwt with oidc cannot contain public keys or have skip_verify=true")
}

if jwtToken.OIDC.Issuer == "" {
return nil, nil, fmt.Errorf("oidc issuer cannot be empty")
}
issuerURL, err := url.Parse(jwtToken.OIDC.Issuer)
if err != nil {
return nil, nil, fmt.Errorf("oidc issuer %q must be a valid URL", jwtToken.OIDC.Issuer)
}
if issuerURL.Scheme != "https" && issuerURL.Scheme != "http" {
return nil, nil, fmt.Errorf("oidc issuer %q must have http or https scheme", jwtToken.OIDC.Issuer)
}

oidcDP.createOrAdd(ui.JWT.OIDC.Issuer, &ui.JWT.verifierPool)
}

if err := parseJWTPlaceholdersForUserInfo(&ui, true); err != nil {
return nil, nil, err
}

if err := ui.initURLs(); err != nil {
return nil, err
return nil, nil, err
}

metricLabels, err := ui.getMetricLabels()
if err != nil {
return nil, fmt.Errorf("cannot parse metric_labels: %w", err)
return nil, nil, fmt.Errorf("cannot parse metric_labels: %w", err)
}
ui.requests = ac.ms.GetOrCreateCounter(`vmauth_user_requests_total` + metricLabels)
ui.requestErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_errors_total` + metricLabels)
@@ -94,36 +181,53 @@ func parseJWTUsers(ac *AuthConfig) ([]*UserInfo, error) {

rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
return nil, nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
}
ui.rt = rt

jui = append(jui, &ui)
}

// the limitation will be lifted once claim based matching will be implemented
if len(jui) > 1 {
return nil, fmt.Errorf("multiple users with JWT tokens are not supported; found %d users", len(jui))
}
// sort by amount of matching claims
// it allows the more specific claim to win in case of a clash
sort.SliceStable(jui, func(i, j int) bool {
return len(jui[i].JWT.MatchClaims) > len(jui[j].JWT.MatchClaims)
})

return jui, nil
return jui, oidcDP, nil
}

func getUserInfoByJWTToken(ats []string) *UserInfo {
var tokenPool sync.Pool

func getToken() *jwt.Token {
tkn := tokenPool.Get()
if tkn == nil {
return &jwt.Token{}
}
return tkn.(*jwt.Token)
}

func putToken(tkn *jwt.Token) {
tkn.Reset()
tokenPool.Put(tkn)
}

func getJWTUserInfo(ats []string) (*UserInfo, *jwt.Token) {
js := *jwtAuthCache.Load()
if len(js.users) == 0 {
return nil
return nil, nil
}

tkn := getToken()

for _, at := range ats {
if strings.Count(at, ".") != 2 {
continue
}

at, _ = strings.CutPrefix(at, `http_auth:`)

tkn, err := jwt.NewToken(at, true)
if err != nil {
tkn.Reset()
if err := tkn.Parse(at, true); err != nil {
if *logInvalidAuthTokens {
logger.Infof("cannot parse jwt token: %s", err)
}
@@ -131,26 +235,252 @@ func getUserInfoByJWTToken(ats []string) *UserInfo {
}
if tkn.IsExpired(time.Now()) {
if *logInvalidAuthTokens {
// TODO: add more context:
// token claims with issuer
logger.Infof("jwt token is expired")
}
continue
}

for _, ui := range js.users {
if ui.JWT.SkipVerify {
return ui
}
if ui := getUserInfoByJWTToken(tkn, js.users); ui != nil {
return ui, tkn
}
}

if err := ui.JWT.verifierPool.Verify(tkn); err != nil {
if *logInvalidAuthTokens {
logger.Infof("cannot verify jwt token: %s", err)
}
continue
}
putToken(tkn)
return nil, nil
}

func getUserInfoByJWTToken(tkn *jwt.Token, users []*UserInfo) *UserInfo {
for _, ui := range users {
if !tkn.MatchClaims(ui.JWT.parsedMatchClaims) {
continue
}

if ui.JWT.SkipVerify {
return ui
}

if ui.JWT.OIDC != nil {
// OIDC requires iss claim.
// It must match the discovery issuer URL set in OIDC config.
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
if tkn.Issuer() == "" {
if *logInvalidAuthTokens {
logger.Infof("jwt token must have issuer filed")
|
||||
}
return nil
}
if tkn.Issuer() != ui.JWT.OIDC.Issuer {
if *logInvalidAuthTokens {
logger.Infof("jwt token issuer: %q does not match oidc issuer: %q", tkn.Issuer(), ui.JWT.OIDC.Issuer)
}
return nil
}
}

vp := ui.JWT.verifierPool.Load()
if vp == nil {
if *logInvalidAuthTokens {
logger.Infof("jwt verifier not initialed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := vp.Verify(tkn); err != nil {
|
||||
if *logInvalidAuthTokens {
|
||||
logger.Infof("cannot verify jwt token: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return ui
|
||||
}
|
||||
|
||||
if *logInvalidAuthTokens {
|
||||
logger.Infof("no user match jwt token")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
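
The issuer check above pins a token to a single identity provider. A minimal sketch of the same rule in isolation (hypothetical helper, not vmauth code):

package main

import (
    "errors"
    "fmt"
)

// checkIssuer mirrors the OIDC issuer validation above: the token's iss
// claim must be present and must equal the configured discovery issuer.
func checkIssuer(tokenIss, configIss string) error {
    if tokenIss == "" {
        return errors.New("jwt token must have issuer field")
    }
    if tokenIss != configIss {
        return fmt.Errorf("issuer %q does not match oidc issuer %q", tokenIss, configIss)
    }
    return nil
}

func main() {
    fmt.Println(checkIssuer("https://idp.example.com", "https://idp.example.com")) // <nil>
    fmt.Println(checkIssuer("https://evil.example.com", "https://idp.example.com"))
}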

func replaceJWTPlaceholders(bu *backendURL, hc HeadersConf, vma *jwt.VMAccessClaim) (*url.URL, HeadersConf) {
    if !bu.hasPlaceHolders && !hc.hasAnyPlaceHolders {
        return bu.url, hc
    }
    targetURL := bu.url
    data := jwtClaimsData(vma)
    if bu.hasPlaceHolders {
        // template url params and request path;
        // make a copy of the url
        uCopy := *bu.url
        for _, uph := range urlPathPlaceHolders {
            replacement := data[uph]
            uCopy.Path = strings.ReplaceAll(uCopy.Path, uph, replacement[0])
        }
        query := uCopy.Query()
        var foundAnyQueryPlaceholder bool
        var templatedValues []string
        for param, values := range query {
            templatedValues = templatedValues[:0]
            // Filter values with placeholders in-place and accumulate the replacements.
            // This may change the order of param values, but the order is not
            // guaranteed anyway, and it changes in any case when a param has
            // multiple templated values.
            var cnt int
            for _, value := range values {
                if dv, ok := data[value]; ok {
                    foundAnyQueryPlaceholder = true
                    templatedValues = append(templatedValues, dv...)
                    continue
                }
                values[cnt] = value
                cnt++
            }
            values = values[:cnt]
            values = append(values, templatedValues...)
            query[param] = values
        }
        if foundAnyQueryPlaceholder {
            uCopy.RawQuery = query.Encode()
        }
        targetURL = &uCopy
    }
    if hc.hasAnyPlaceHolders {
        // make a copy of headers and update only the values with placeholders
        rhs := make([]*Header, 0, len(hc.RequestHeaders))
        for _, rh := range hc.RequestHeaders {
            if dv, ok := data[rh.Value]; ok {
                rh := &Header{
                    Name:  rh.Name,
                    Value: strings.Join(dv, ","),
                }
                rhs = append(rhs, rh)
                continue
            }
            rhs = append(rhs, rh)
        }
        hc.RequestHeaders = rhs
    }

    return targetURL, hc
}
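
For a concrete picture of what this function produces, here is a standalone sketch of the same path and query substitution (hypothetical placeholder table; not the vmauth types):

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    // Claim-derived values, as produced by jwtClaimsData below.
    data := map[string][]string{
        "{{.MetricsTenant}}":      {"123:234"},
        "{{.MetricsExtraLabels}}": {"label1=value1", "label2=value2"},
    }
    u, _ := url.Parse("http://backend/select/{{.MetricsTenant}}/?extra_label={{.MetricsExtraLabels}}")
    // Path placeholders take the first value only.
    for ph, v := range data {
        u.Path = strings.ReplaceAll(u.Path, ph, v[0])
    }
    q := u.Query()
    for param, values := range q {
        var out []string
        for _, v := range values {
            if dv, ok := data[v]; ok {
                out = append(out, dv...) // one placeholder may expand into many values
                continue
            }
            out = append(out, v)
        }
        q[param] = out
    }
    u.RawQuery = q.Encode()
    // Prints the expanded URL, e.g.
    // http://backend/select/123:234/?extra_label=label1%3Dvalue1&extra_label=label2%3Dvalue2
    fmt.Println(u)
}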

func jwtClaimsData(vma *jwt.VMAccessClaim) map[string][]string {
    data := map[string][]string{
        // TODO: optimize at parsing stage
        metricsTenantPlaceholder:       {fmt.Sprintf("%d:%d", vma.MetricsAccountID, vma.MetricsProjectID)},
        metricsExtraLabelsPlaceholder:  vma.MetricsExtraLabels,
        metricsExtraFiltersPlaceholder: vma.MetricsExtraFilters,

        // TODO: optimize at parsing stage
        logsAccountIDPlaceholder:          {fmt.Sprintf("%d", vma.LogsAccountID)},
        logsProjectIDPlaceholder:          {fmt.Sprintf("%d", vma.LogsProjectID)},
        logsExtraFiltersPlaceholder:       vma.LogsExtraFilters,
        logsExtraStreamFiltersPlaceholder: vma.LogsExtraStreamFilters,
    }
    return data
}

func parseJWTPlaceholdersForUserInfo(ui *UserInfo, isAllowed bool) error {
    if ui.URLPrefix != nil {
        if err := validateJWTPlaceholdersForURL(ui.URLPrefix, isAllowed); err != nil {
            return err
        }
    }
    if err := parsePlaceholdersForHC(&ui.HeadersConf, isAllowed); err != nil {
        return err
    }
    if ui.DefaultURL != nil {
        if err := validateJWTPlaceholdersForURL(ui.DefaultURL, isAllowed); err != nil {
            return fmt.Errorf("invalid `default_url` placeholders: %w", err)
        }
    }
    for i := range ui.URLMaps {
        e := &ui.URLMaps[i]
        if e.URLPrefix != nil {
            if err := validateJWTPlaceholdersForURL(e.URLPrefix, isAllowed); err != nil {
                return fmt.Errorf("invalid `url_map` `url_prefix` placeholders: %w", err)
            }
        }
        if err := parsePlaceholdersForHC(&e.HeadersConf, isAllowed); err != nil {
            return fmt.Errorf("invalid `url_map` headers placeholders: %w", err)
        }
    }
    return nil
}

func validateJWTPlaceholdersForURL(up *URLPrefix, isAllowed bool) error {
    for _, bu := range up.busOriginal {
        ok := strings.Contains(bu.Path, placeholderPrefix)
        if ok && !isAllowed {
            return fmt.Errorf("placeholder: %q is only allowed at JWT token context", bu.Path)
        }
        if ok {
            p := bu.Path
            for _, ph := range allPlaceholders {
                p = strings.ReplaceAll(p, ph, ``)
            }
            if strings.Contains(p, placeholderPrefix) {
                return fmt.Errorf("invalid placeholder found in URL request path: %q, supported values are: %s", bu.Path, strings.Join(allPlaceholders, ", "))
            }
        }
        for param, values := range bu.Query() {
            for _, value := range values {
                ok := strings.Contains(value, placeholderPrefix)
                if ok && !isAllowed {
                    return fmt.Errorf("query param: %q with placeholder: %q is only allowed at JWT token context", param, value)
                }
                if ok {
                    // possible placeholder
                    if !slices.Contains(allPlaceholders, value) {
                        return fmt.Errorf("query param: %q has unsupported placeholder string: %q, supported values are: %s", param, value, strings.Join(allPlaceholders, ", "))
                    }
                }
            }
        }
    }
    return nil
}

func parsePlaceholdersForHC(hc *HeadersConf, isAllowed bool) error {
    for _, rhs := range hc.RequestHeaders {
        ok := strings.Contains(rhs.Value, placeholderPrefix)
        if ok && !isAllowed {
            return fmt.Errorf("request header: %q placeholder: %q is only supported at JWT context", rhs.Name, rhs.Value)
        }
        if ok {
            if !slices.Contains(allPlaceholders, rhs.Value) {
                return fmt.Errorf("request header: %q has unsupported placeholder: %q, supported values are: %s", rhs.Name, rhs.Value, strings.Join(allPlaceholders, ", "))
            }
            hc.hasAnyPlaceHolders = true
        }
    }
    for _, rhs := range hc.ResponseHeaders {
        if strings.Contains(rhs.Value, placeholderPrefix) {
            return fmt.Errorf("response header placeholders are not supported; found placeholder prefix at header: %q with value: %q", rhs.Name, rhs.Value)
        }
    }
    return nil
}

func hasAnyPlaceholders(u *url.URL) bool {
    if strings.Contains(u.Path, placeholderPrefix) {
        return true
    }
    if len(u.Query()) == 0 {
        return false
    }
    for _, values := range u.Query() {
        for _, value := range values {
            if strings.HasPrefix(value, placeholderPrefix) {
                return true
            }
        }
    }
    return false
}
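
Note the asymmetry above: the path check uses strings.Contains while the query check uses strings.HasPrefix, which is consistent with validation only accepting exact placeholder strings as query values. A quick standalone sketch (the placeholderPrefix value "{{." is an assumption for illustration):

package main

import (
    "fmt"
    "net/url"
    "strings"
)

const placeholderPrefix = "{{." // assumed value, for illustration only

func hasAny(u *url.URL) bool {
    if strings.Contains(u.Path, placeholderPrefix) {
        return true
    }
    for _, values := range u.Query() {
        for _, v := range values {
            if strings.HasPrefix(v, placeholderPrefix) {
                return true
            }
        }
    }
    return false
}

func main() {
    u1, _ := url.Parse("http://x/select/{{.MetricsTenant}}/")
    u2, _ := url.Parse("http://x/?extra_label={{.MetricsExtraLabels}}")
    fmt.Println(hasAny(u1), hasAny(u2)) // true true
}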

@@ -1,7 +1,10 @@
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/http/httptest"
    "os"
    "path/filepath"
    "testing"
@@ -32,18 +35,20 @@ XOtclIk1uhc03oL9nOQ=
    ac, err := parseAuthConfig([]byte(s))
    if err != nil {
        if expErr != err.Error() {
-           t.Fatalf("unexpected error; got %q; want %q", err.Error(), expErr)
+           t.Fatalf("unexpected error; got\n%q\nwant\n%q", err.Error(), expErr)
        }
        return
    }
-   users, err := parseJWTUsers(ac)
-   if err != nil {
-       if expErr != err.Error() {
-           t.Fatalf("unexpected error; got %q; want %q", err.Error(), expErr)
-       }
-       return
-   }
+   users, oidcDP, err := parseJWTUsers(ac)
+   if err == nil {
+       t.Fatalf("expecting non-nil error; got %v", users)
+   }
+   if expErr != err.Error() {
+       t.Fatalf("unexpected error; got\n%q\nwant \n%q", err.Error(), expErr)
+   }
+   if oidcDP != nil {
+       t.Fatalf("expecting nil oidcDP; got %v", oidcDP)
+   }
-   t.Fatalf("expecting non-nil error; got %v", users)
}

    // unauthorized_user cannot be used with jwt
@@ -80,28 +85,28 @@ users:
users:
- jwt: {}
  url_prefix: http://foo.bar
-`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)
+`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)

    // jwt public_keys or skip_verify must be set, part 2
    f(`
users:
- jwt: {public_keys: null}
  url_prefix: http://foo.bar
-`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)
+`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)

    // jwt public_keys or skip_verify must be set, part 3
    f(`
users:
- jwt: {public_keys: []}
  url_prefix: http://foo.bar
-`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)
+`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)

    // jwt public_keys, public_key_files or skip_verify must be set
    f(`
users:
- jwt: {public_key_files: []}
  url_prefix: http://foo.bar
-`, `jwt must contain at least a single public key, public_key_files or have skip_verify=true`)
+`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)

    // invalid public key, part 1
    f(`
@@ -140,7 +145,7 @@ users:
    public_keys:
    - %q
  url_prefix: http://foo.bar
-`, validRSAPublicKey, validECDSAPublicKey), `multiple users with JWT tokens are not supported; found 2 users`)
+`, validRSAPublicKey, validECDSAPublicKey), `duplicate match claims="" found for name="" at idx=1; the previous one is set for name=""`)

    // public key file doesn't exist
    f(`
@@ -164,6 +169,122 @@ users:
    - `+publicKeyFile+`
  url_prefix: http://foo.bar
`, "cannot parse public key from file \""+publicKeyFile+"\": failed to parse key \"invalidPEM\": failed to decode PEM block containing public key")

    // unsupported placeholder in the URL path
    f(`
users:
- jwt:
    skip_verify: true
  url_prefix: http://foo.bar/{{.UnsupportedPlaceholder}}/foo`,
        "invalid placeholder found in URL request path: \"/{{.UnsupportedPlaceholder}}/foo\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
    )
    // unsupported placeholder in a header
    f(`
users:
- jwt:
    skip_verify: true
  headers:
  - "AccountID: {{.UnsupportedPlaceholder}}"
  url_prefix: http://foo.bar
`,
        "request header: \"AccountID\" has unsupported placeholder: \"{{.UnsupportedPlaceholder}}\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
    )

    // spaces in templating not allowed
    f(`
users:
- jwt:
    skip_verify: true
  headers:
  - "AccountID: {{ .LogsAccountID }}"
  url_prefix: http://foo.bar
`,
        "request header: \"AccountID\" has unsupported placeholder: \"{{ .LogsAccountID }}\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
    )

    // oidc is not an object
    f(`
users:
- jwt:
    oidc: "not an object"
  url_prefix: http://foo.bar
`,
        "cannot unmarshal AuthConfig data: yaml: unmarshal errors:\n line 4: cannot unmarshal !!str `not an ...` into main.oidcConfig",
    )

    // oidc issuer empty
    f(`
users:
- jwt:
    oidc: {}
  url_prefix: http://foo.bar
`,
        "oidc issuer cannot be empty",
    )

    // oidc issuer is not a valid URL
    f(`
users:
- jwt:
    oidc:
      issuer: "::invalid-url"
  url_prefix: http://foo.bar
`,
        "oidc issuer \"::invalid-url\" must be a valid URL",
    )

    // oidc issuer without http(s) scheme
    f(`
users:
- jwt:
    oidc:
      issuer: "invalid-url"
  url_prefix: http://foo.bar
`,
        "oidc issuer \"invalid-url\" must have http or https scheme",
    )

    // oidc and public_keys are not allowed
    f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
    oidc:
      issuer: https://example.com
  url_prefix: http://foo.bar
`, validRSAPublicKey),
        "jwt with oidc cannot contain public keys or have skip_verify=true",
    )

    // oidc and skip_verify are not allowed
    f(`
users:
- jwt:
    skip_verify: true
    oidc:
      issuer: https://example.com
  url_prefix: http://foo.bar
`,
        "jwt with oidc cannot contain public keys or have skip_verify=true",
    )
    // duplicate claims
    f(`
users:
- jwt:
    skip_verify: true
    match_claims:
      team: ops
    name: user-1
  url_prefix: http://foo.bar
- jwt:
    skip_verify: true
    match_claims:
      team: ops
    name: user-2
  url_prefix: http://foo.bar`,
        "duplicate match claims=\"team=ops\" found for name=\"user-2\" at idx=1; the previous one is set for name=\"user-1\"",
    )
}
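
The duplicate check reported above ("duplicate match claims=...") implies a canonical string key built from the match_claims map, with the user name tracked separately for the error message. One plausible way to build such a key (a sketch under that assumption; the actual implementation may differ):

package main

import (
    "fmt"
    "sort"
    "strings"
)

// claimsKey builds a canonical "k=v,k=v" key so that two users with the
// same match_claims (in any order) collide, as in the duplicate check above.
func claimsKey(mc map[string]string) string {
    parts := make([]string, 0, len(mc))
    for k, v := range mc {
        parts = append(parts, k+"="+v)
    }
    sort.Strings(parts)
    return strings.Join(parts, ",")
}

func main() {
    a := claimsKey(map[string]string{"team": "ops", "role": "ro"})
    b := claimsKey(map[string]string{"role": "ro", "team": "ops"})
    fmt.Println(a == b) // true -> would be reported as a duplicate
}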

func TestJWTParseAuthConfigSuccess(t *testing.T) {
@@ -193,10 +314,12 @@ XOtclIk1uhc03oL9nOQ=
        t.Fatalf("unexpected error: %s", err)
    }

-   jui, err := parseJWTUsers(ac)
+   jui, oidcDP, err := parseJWTUsers(ac)
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
+   oidcDP.startDiscovery()
+   defer oidcDP.stopDiscovery()

    for _, ui := range jui {
        if ui.JWT == nil {
@@ -204,13 +327,13 @@ XOtclIk1uhc03oL9nOQ=
        }

        if ui.JWT.SkipVerify {
-           if ui.JWT.verifierPool != nil {
+           if ui.JWT.verifierPool.Load() != nil {
                t.Fatalf("unexpected non-nil verifier pool for skip_verify=true")
            }
            continue
        }

-       if ui.JWT.verifierPool == nil {
+       if ui.JWT.verifierPool.Load() == nil {
            t.Fatalf("unexpected nil verifier pool for non-empty public keys")
        }
    }

@@ -301,4 +424,80 @@ users:
    - %q
  url_prefix: http://foo.bar
`, validECDSAPublicKey, rsaKeyFile))

    // oidc stub server
    var ipSrv *httptest.Server
    ipSrv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/.well-known/openid-configuration" {
            w.Header().Set("Content-Type", "application/json")
            _ = json.NewEncoder(w).Encode(map[string]string{
                "issuer":   ipSrv.URL,
                "jwks_uri": fmt.Sprintf("%s/jwks", ipSrv.URL),
            })
            return
        }
        if r.URL.Path == "/jwks" {
            // resp generated by https://jwkset.com/generate
            w.Header().Set("Content-Type", "application/json")
            w.Write([]byte(`
{
  "keys": [
    {
      "kty": "RSA",
      "kid": "f13eee91-f566-4829-80fa-fca847c21f0e",
      "d": "Ua1llEFz3LZ05CrK5a2JxKMUEWJGXhBPPF20hHQjzxd1w0IEJK_mhPZQG8dNtBROBNIi1FC9l6QRw-RTnVIVat5Xy4yDFNKXXL3ZLXejOHY8SXrNEIDqQ-cSwIpK9cK7Umib0PcPeEeeAED5mqDH75D8_YssWFF18kLbNB5Z9pZmn6Fshiht7l2Sh4GN-KcReOW6eiQQwckDte3OGmZCRbtEriLWJt5TUGUvfZVIlcclqNMycNB6jGa9E1pO5Up7Ki3ZbI_-6XmRgZPtqnR9oLJ1zn3fj3hYpCXo-zcqLuOu3qxcslsq5igsfBzgGtfIJHY9LfWmHUsaDEa5cAX1gQ",
      "n": "xbLXXBTNREk70UCMiqZ53_mTzYh89W-UaPU61GZ-RZ5lYcLgyWOb5mdyRbvJpcgfZpsOeGAUWbk3GkQ4vqn8kUMnnWhUum2Qk9kGubOJGLW6yaURd00j3E-ilQ5xO2R_Hzz8bAojxV8GKdGTQ-iTf8z8nsSHH8kR2SERbNJCFFtwtFU7vyFWyoH4Lmvu2UpICTHFCR9RqwQVjyoKB1JjJ6Dh1L4zPTlsvQEnqoeFQHPYr0QcQSMYXdfPvlt_FiLOAOE89fX_9T2r9WbFAoda3uTRE5_aal0jxUU2cFyeVSIgauNtF07fp422XFb4XPkWQWrdNx0KX53laSIYQ9HOpw",
      "e": "AQAB",
      "p": "2JT57AD-Q2lamgjgyn0wL7DgYZ3OoCTTrDm5_NHg6h13uDvyIlXSukuUeWm4tzPSDedpstbS7dgXkLw5eQXBHwPYtByTcEZS8Z37CBnhMOOhfo_U1aNIPPanJACvWBgz47-TxHsxW1YhztZqghRoicBZPSSBAj49MgANJ4jF0zc",
      "q": "6a4MkeSXJI-ZzQ-bgP8hwJqpLFr0AiNGQcjZMH4Nn4CPGdnGiqqe6flhfLimgbNhbb67B0-8fLIji8zGhGKDL_JSIpAAdmfs2vzeEsY2hScrqVbd1VbfRcRh0J6lsn7obxkbvQthp9sX2DQbeDcEeaFEvd9gDKQSATYEqWo7eBE",
      "dp": "haL2yu6Z9RJuuxi7S3YPY33qFZF_y0St71j3L854zzw7gMxMTW9TRWwZQwk-1pv9AmNFzvnK0MNDVyUs-UXZsb932TrApshdqYRnPsppLvdl0GgDVYcYrbUr0IUzrFHSwraVAOlavRbaaXvX4EejcUvkRFvf1nh83fs2Iqy8E-U",
      "dq": "Cnf5qC-Ndd3ZDg688LJ9WJuVKJ-Kfu4Fn7zXvgxnn9Wqk4XmFyA9rk21yFidXQIkQz5gMpun3g48-W5bFmMzbVp1w4af_q35NnZNnJm0p5Jxqkxx87TIm9-IYkg5NB3rW87MJ1PzNAnkr5LmCCSu1qQa6Eaxjt9qzxMUcmKH94E",
      "qi": "saAeU11iaKHmye3cwCAYkegcyWbXV3xIXEVJtS9Af_yM19UhspwY2VhuwRaajcwYZwtvR9_ITmX9M-ea7uLdd7aDYO1fujC8NGbopeC4Hkr7yb5vTly3pfKf4h-3LwGGUucJUetdz1lmMIYiyuG4_gSf1yIEtPDLKzXiedgEMdI"
    }
  ]
}
`))
            return
        }

        http.NotFound(w, r)
    }))
    defer ipSrv.Close()

    f(`
users:
- jwt:
    oidc:
      issuer: ` + ipSrv.URL + `
  url_prefix: http://foo.bar
`)
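
The /jwks stub above serves an RSA key in JWK form; a verifier rebuilds an rsa.PublicKey from the base64url-encoded n and e fields. A minimal sketch of that decoding (not the vmauth implementation):

package main

import (
    "crypto/rsa"
    "encoding/base64"
    "fmt"
    "math/big"
)

// jwkToRSA rebuilds an *rsa.PublicKey from the JWK "n" (modulus) and
// "e" (exponent) fields, which are base64url-encoded without padding.
func jwkToRSA(n, e string) (*rsa.PublicKey, error) {
    nb, err := base64.RawURLEncoding.DecodeString(n)
    if err != nil {
        return nil, err
    }
    eb, err := base64.RawURLEncoding.DecodeString(e)
    if err != nil {
        return nil, err
    }
    return &rsa.PublicKey{
        N: new(big.Int).SetBytes(nb),
        E: int(new(big.Int).SetBytes(eb).Int64()),
    }, nil
}

func main() {
    pub, err := jwkToRSA("xbLXXBTN", "AQAB") // truncated modulus, illustration only
    fmt.Println(pub != nil, err)
}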
    // multiple match claims
    f(fmt.Sprintf(`
users:
- jwt:
    match_claims:
      role: ro
      team: dev
    public_keys:
    - %q
  url_prefix: http://foo.bar
- jwt:
    match_claims:
      role: admin
      team: dev
    public_key_files:
    - %q
    - %q
  url_prefix: http://foo.bar
- jwt:
    match_claims:
      role: viewer
      team: dev
      department: ceo
    skip_verify: true
  url_prefix: http://foo.bar
`, validRSAPublicKey, rsaKeyFile, ecdsaKeyFile))
}

@@ -16,6 +16,7 @@ import (
    "sync"
    "time"

+   "github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
    "github.com/VictoriaMetrics/metrics"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
@@ -47,7 +48,7 @@ var (
    responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")

    requestBufferSize = flagutil.NewBytes("requestBufferSize", 32*1024, "The size of the buffer for reading the request body before proxying the request to backends. "+
-       "This allows reducing the comsumption of backend resources when processing requests from clients connected via slow networks. "+
+       "This allows reducing the consumption of backend resources when processing requests from clients connected via slow networks. "+
        "Set to 0 to disable request buffering. See https://docs.victoriametrics.com/victoriametrics/vmauth/#request-body-buffering")
    maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size to buffer in memory for potential retries at other backends. "+
        "Request bodies larger than this size cannot be retried if the backend fails. Zero or negative value disables request body buffering and retries. "+
@@ -173,7 +174,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
    // Process requests for unauthorized users
    ui := authConfig.Load().UnauthorizedUser
    if ui != nil {
-       processUserRequest(w, r, ui)
+       processUserRequest(w, r, ui, nil)
        return true
    }

@@ -182,17 +183,21 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
    }

    if ui := getUserInfoByAuthTokens(ats); ui != nil {
-       processUserRequest(w, r, ui)
+       processUserRequest(w, r, ui, nil)
        return true
    }
-   if ui := getUserInfoByJWTToken(ats); ui != nil {
-       processUserRequest(w, r, ui)
+   if ui, tkn := getJWTUserInfo(ats); ui != nil {
+       if tkn == nil {
+           logger.Panicf("BUG: unexpected nil jwt token for user %q", ui.name())
+       }
+       defer putToken(tkn)
+       processUserRequest(w, r, ui, tkn)
        return true
    }

    uu := authConfig.Load().UnauthorizedUser
    if uu != nil {
-       processUserRequest(w, r, uu)
+       processUserRequest(w, r, uu, nil)
        return true
    }

@@ -221,7 +226,37 @@ func getUserInfoByAuthTokens(ats []string) *UserInfo {
    return nil
}

-func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
// responseWriterWithStatus is a wrapper around http.ResponseWriter that captures the status code written to the response.
type responseWriterWithStatus struct {
    http.ResponseWriter
    status int
}

// WriteHeader records the status so it can be easily retrieved later
func (rws *responseWriterWithStatus) WriteHeader(status int) {
    rws.status = status
    rws.ResponseWriter.WriteHeader(status)
}

// Flush implements net/http.Flusher interface
//
// This is needed for the copyStreamToClient()
func (rws *responseWriterWithStatus) Flush() {
    flusher, ok := rws.ResponseWriter.(http.Flusher)
    if !ok {
        logger.Panicf("BUG: it is expected http.ResponseWriter (%T) supports http.Flusher interface", rws.ResponseWriter)
    }
    flusher.Flush()
}

// Unwrap returns the original ResponseWriter wrapped by rws.
//
// This is needed for the net/http.ResponseController - see https://pkg.go.dev/net/http#NewResponseController
func (rws *responseWriterWithStatus) Unwrap() http.ResponseWriter {
    return rws.ResponseWriter
}
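
A status-capturing wrapper like this is a common middleware pattern; a standalone sketch of how such a wrapper gets used (hypothetical handler, not vmauth code):

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

type statusWriter struct {
    http.ResponseWriter
    status int
}

func (w *statusWriter) WriteHeader(status int) {
    w.status = status
    w.ResponseWriter.WriteHeader(status)
}

func main() {
    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        http.Error(w, "nope", http.StatusForbidden)
    })
    rec := httptest.NewRecorder()
    sw := &statusWriter{ResponseWriter: rec}
    h.ServeHTTP(sw, httptest.NewRequest("GET", "/", nil))
    fmt.Println(sw.status) // 403 -> available for access logging after the handler returns
}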

+func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *jwt.Token) {
    startTime := time.Now()
    defer ui.requestsDuration.UpdateDuration(startTime)

@@ -230,6 +265,20 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
    ctx, cancel := context.WithTimeout(r.Context(), *maxQueueDuration)
    defer cancel()

+   userName := ui.name()
+   if userName == "" {
+       userName = "unauthorized"
+   }
+
+   if ui.AccessLog != nil {
+       w = &responseWriterWithStatus{ResponseWriter: w}
+       defer func() {
+           rws := w.(*responseWriterWithStatus)
+           duration := time.Since(startTime)
+           ui.logRequest(r, userName, rws.status, duration)
+       }()
+   }
+
    // Acquire global concurrency limit.
    if err := beginConcurrencyLimit(ctx); err != nil {
        handleConcurrencyLimitError(w, r, err)
@@ -248,10 +297,6 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
    }

    // Read the initial chunk for the request body.
-   userName := ui.name()
-   if userName == "" {
-       userName = "unauthorized"
-   }
    bb, err := bufferRequestBody(ctx, r.Body, userName)
    if err != nil {
        httpserver.Errorf(w, r, "%s", err)
@@ -272,7 +317,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
    defer ui.endConcurrencyLimit()

    // Process the request.
-   processRequest(w, r, ui)
+   processRequest(w, r, ui, tkn)
}

func beginConcurrencyLimit(ctx context.Context) error {
@@ -312,6 +357,7 @@ func bufferRequestBody(ctx context.Context, r io.ReadCloser, userName string) (i

    maxBufSize := max(requestBufferSize.IntN(), maxRequestBodySizeToRetry.IntN())
    if maxBufSize <= 0 {
+       // Request buffering is disabled.
        return r, nil
    }

@@ -345,7 +391,7 @@ func bufferRequestBody(ctx context.Context, r io.ReadCloser, userName string) (i
    return bb, nil
}

-func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
+func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *jwt.Token) {
    u := normalizeURL(r.URL)
    up, hc := ui.getURLPrefixAndHeaders(u, r.Host, r.Header)
    isDefault := false
@@ -377,16 +423,21 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
        break
    }
    targetURL := bu.url
+   if tkn != nil {
+       // for security reasons allow templating only for configured url values and headers
+       targetURL, hc = replaceJWTPlaceholders(bu, hc, tkn.VMAccess())
+   }
    if isDefault {
        // Don't change path and add request_path query param for default route.
+       targetURLCopy := *targetURL
        query := targetURL.Query()
        query.Set("request_path", u.String())
-       targetURL.RawQuery = query.Encode()
+       targetURLCopy.RawQuery = query.Encode()
+       targetURL = &targetURLCopy
    } else {
        // Update path for regular routes.
        targetURL = mergeURLs(targetURL, u, up.dropSrcPathPrefixParts, up.mergeQueryArgs)
    }

    wasLocalRetry := false
again:
    ok, needLocalRetry := tryProcessingRequest(w, r, targetURL, hc, up.retryStatusCodes, ui, bu)

@@ -713,7 +764,7 @@ var concurrentRequestsLimitReached = metrics.NewCounter("vmauth_concurrent_reque

func usage() {
    const s = `
-vmauth authenticates and authorizes incoming requests and proxies them to VictoriaMetrics.
+vmauth authenticates and authorizes incoming requests and proxies them to VictoriaMetrics components or any other HTTP backends.

See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
`
@@ -742,10 +793,11 @@ func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err err
}

// bufferedBody serves two purposes:
-// 1. Enables request retries when the body size does not exceed maxBodySize
-//    by fully buffering the body in memory.
-// 2. Prevents slow clients from reducing effective server capacity by
-//    buffering the request body before acquiring a per-user concurrency slot.
+//
+// 1. It enables request retries when the request body size does not exceed maxBufSize
+//    by fully buffering the request body in memory.
+// 2. It prevents slow clients from reducing effective server capacity
+//    by buffering the request body before acquiring a per-user concurrency slot.
//
// See bufferRequestBody for details on how bufferedBody is used.
type bufferedBody struct {
@@ -769,7 +821,7 @@ func newBufferedBody(r io.ReadCloser, buf []byte, maxBufSize int) *bufferedBody
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051

    if len(buf) < maxBufSize {
-       // Read the full request body into buf.
+       // The full request body has been already read into buf.
        r = nil
    }

@@ -782,7 +834,7 @@ func newBufferedBody(r io.ReadCloser, buf []byte, maxBufSize int) *bufferedBody
// Read implements io.Reader interface.
func (bb *bufferedBody) Read(p []byte) (int, error) {
    if bb.cannotRetry {
-       return 0, fmt.Errorf("cannot read already closed body")
+       return 0, fmt.Errorf("cannot read already closed request body")
    }
    if bb.bufOffset < len(bb.buf) {
        n := copy(p, bb.buf[bb.bufOffset:])

@@ -12,11 +12,13 @@ import (
    "encoding/pem"
    "fmt"
    "io"
+   "math/big"
    "net"
    "net/http"
    "net/http/httptest"
    "os"
    "path/filepath"
+   "sort"
    "strings"
    "sync/atomic"
    "testing"
@@ -101,6 +103,35 @@ User-Agent: vmauth
X-Forwarded-For: 12.34.56.78, 42.2.3.84`
    f(cfgStr, requestURL, backendHandler, responseExpected)

    // with default_url
    cfgStr = `
unauthorized_user:
  default_url: {BACKEND}/default
  url_map:
  - src_paths:
    - /empty
    url_prefix: {BACKEND}/empty`
    requestURL = "http://some-host.com/abc/def?some_arg=some_value"
    backendHandler = func(w http.ResponseWriter, r *http.Request) {
        h := w.Header()
        h.Set("Connection", "close")
        h.Set("Foo", "bar")

        var bb bytes.Buffer
        if err := r.Header.Write(&bb); err != nil {
            panic(fmt.Errorf("unexpected error when marshaling headers: %w", err))
        }
        fmt.Fprintf(w, "requested_url=http://%s%s\n%s", r.Host, r.URL, bb.String())
    }
    responseExpected = `
statusCode=200
Foo: bar
requested_url={BACKEND}/default?request_path=http%3A%2F%2Fsome-host.com%2Fabc%2Fdef%3Fsome_arg%3Dsome_value
Pass-Header: abc
User-Agent: vmauth
X-Forwarded-For: 12.34.56.78, 42.2.3.84`
    f(cfgStr, requestURL, backendHandler, responseExpected)

    // routing of all failed to authorize requests to unauthorized_user (issue #7543)
    cfgStr = `
unauthorized_user:
@@ -571,22 +602,41 @@ func TestJWTRequestHandler(t *testing.T) {

        return payload + "." + signatureB64
    }
    genToken(t, nil, false)

    f := func(cfgStr string, r *http.Request, responseExpected string) {
        t.Helper()

        ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-           if _, err := w.Write([]byte(r.RequestURI + "\n")); err != nil {
+           if _, err := w.Write([]byte("path: " + r.URL.Path + "\n")); err != nil {
                panic(fmt.Errorf("cannot write response: %w", err))
            }
-           if v := r.Header.Get(`extra_label`); v != "" {
-               if _, err := w.Write([]byte(`extra_label=` + v + "\n")); err != nil {
+           if _, err := w.Write([]byte("query:\n")); err != nil {
                panic(fmt.Errorf("cannot write response: %w", err))
            }
+           names := make([]string, 0, len(r.URL.Query()))
+           query := r.URL.Query()
+           for n := range query {
+               names = append(names, n)
+           }
+           sort.Strings(names)
+           for _, n := range names {
+               for _, v := range query[n] {
+                   if _, err := w.Write([]byte(" " + n + "=" + v + "\n")); err != nil {
+                       panic(fmt.Errorf("cannot write response: %w", err))
+                   }
+               }
+           }

+           if _, err := w.Write([]byte("headers:\n")); err != nil {
+               panic(fmt.Errorf("cannot write response: %w", err))
+           }
+           if v := r.Header.Get(`AccountID`); v != "" {
+               if _, err := w.Write([]byte(` AccountID=` + v + "\n")); err != nil {
+                   panic(fmt.Errorf("cannot write response: %w", err))
+               }
+           }
-           if v := r.Header.Get(`extra_filters`); v != "" {
-               if _, err := w.Write([]byte(`extra_filters=` + v + "\n")); err != nil {
+           if v := r.Header.Get(`ProjectID`); v != "" {
+               if _, err := w.Write([]byte(` ProjectID=` + v + "\n")); err != nil {
                    panic(fmt.Errorf("cannot write response: %w", err))
                }
            }

@@ -632,7 +682,7 @@ users:
    - %q
  url_prefix: {BACKEND}/foo`, string(publicKeyPEM))
    noVMAccessClaimToken := genToken(t, nil, true)
-   defaultVMAccessClaimToken := genToken(t, map[string]any{
+   minimalToken := genToken(t, map[string]any{
        "exp":       time.Now().Add(10 * time.Minute).Unix(),
        "vm_access": map[string]any{},
    }, true)
@@ -645,6 +695,30 @@ users:
        "vm_access": map[string]any{},
    }, false)

    fullToken := genToken(t, map[string]any{
        "exp": time.Now().Add(10 * time.Minute).Unix(),
        "vm_access": map[string]any{
            "metrics_account_id": 123,
            "metrics_project_id": 234,
            "metrics_extra_labels": []string{
                "label1=value1",
                "label2=value2",
            },
            "metrics_extra_filters": []string{
                `{label3="value3"}`,
                `{label4="value4"}`,
            },
            "logs_account_id": 345,
            "logs_project_id": 456,
            "logs_extra_filters": []string{
                `{"namespace":"my-app","env":"prod"}`,
            },
            "logs_extra_stream_filters": []string{
                `{"team":"dev"}`,
            },
        },
    }, true)

    // missing authorization
    request := httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
    responseExpected := `
@@ -682,7 +756,9 @@ Unauthorized`
    request.Header.Set(`Authorization`, `Bearer `+invalidSignatureToken)
    responseExpected = `
statusCode=200
-/foo/abc`
+path: /foo/abc
+query:
+headers:`
    f(`
users:
- jwt:
@@ -691,15 +767,17 @@ users:

    // token with default valid vm_access claim
    request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
-   request.Header.Set(`Authorization`, `Bearer `+defaultVMAccessClaimToken)
+   request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=200
-/foo/abc`
+path: /foo/abc
+query:
+headers:`
    f(simpleCfgStr, request, responseExpected)

    // jwt token used but no matching user with JWT token in config
    request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
-   request.Header.Set(`Authorization`, `Bearer `+defaultVMAccessClaimToken)
+   request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=401
Unauthorized`
@@ -715,20 +793,747 @@ users:
        t.Fatalf("failed to write public key file: %s", err)
    }
    request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
-   request.Header.Set(`Authorization`, `Bearer `+defaultVMAccessClaimToken)
+   request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=200
-/foo/abc`
+path: /foo/abc
+query:
+headers:`
    f(fmt.Sprintf(`
users:
- jwt:
    public_key_files:
    - %q
-  url_prefix: {BACKEND}/foo`, string(publicKeyFile)), request, responseExpected)
+  url_prefix: {BACKEND}/foo`, publicKeyFile), request, responseExpected)

    // ---- VictoriaMetrics specific tests ----

    // extra_label and extra_filters dropped if empty in vm_access claim
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=200
path: /select/0:0/api/v1/query
query:
headers:`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/?extra_label={{.MetricsExtraLabels}}&extra_filters={{.MetricsExtraFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // extra_label and extra_filters set if present in vm_access claim
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters={label3="value3"}
extra_filters={label4="value4"}
extra_label=label1=value1
extra_label=label2=value2
headers:`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/?extra_label={{.MetricsExtraLabels}}&extra_filters={{.MetricsExtraFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // extra_label and extra_filters from vm_access claim merged with statically defined
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters=aStaticFilter
extra_filters={label3="value3"}
extra_filters={label4="value4"}
extra_label=aStaticLabel
extra_label=label1=value1
extra_label=label2=value2
headers:`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/?extra_label=aStaticLabel&extra_filters=aStaticFilter&extra_label={{.MetricsExtraLabels}}&extra_filters={{.MetricsExtraFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // extra_labels and extra_filters set from vm_access claim should override user provided query args
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query?extra_label=userProvidedLabel&extra_filters=userProvidedFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters={label3="value3"}
extra_filters={label4="value4"}
extra_label=label1=value1
extra_label=label2=value2
headers:`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/?extra_label={{.MetricsExtraLabels}}&extra_filters={{.MetricsExtraFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // merge user provided query args with extra_labels and extra_filters from vm_access claim
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query?extra_label=userProvidedLabel&extra_filters=userProvidedFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters={label3="value3"}
extra_filters={label4="value4"}
extra_filters=userProvidedFilter
extra_label=label1=value1
extra_label=label2=value2
extra_label=userProvidedLabel
headers:`
    f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  merge_query_args: [extra_filters, extra_label]
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/?extra_label={{.MetricsExtraLabels}}&extra_filters={{.MetricsExtraFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // pass user provided query args if vm_access claim has no extra_labels and extra_filters
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query?extra_label=userProvidedLabel&extra_filters=userProvidedFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters=userProvidedFilter
extra_label=userProvidedLabel
headers:`
    f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  merge_query_args: [extra_filters, extra_label]
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // pass user provided query args if vm_access claim has no extra_labels and extra_filters
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query?extra_label=userProvidedLabel&extra_filters=userProvidedFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters=userProvidedFilter
extra_label=userProvidedLabel
headers:`
    f(fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  url_prefix: {BACKEND}/select/{{.MetricsTenant}}/`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // placeholders in url_map
    request = httptest.NewRequest(`GET`, "http://some-host.com/api/v1/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/123:234/api/v1/query
query:
extra_filters={label3="value3"}
extra_filters={label4="value4"}
extra_label=label1=value1
extra_label=label2=value2
headers:`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_map:
  - src_paths: ["/api/.*"]
    url_prefix: {BACKEND}/select/{{.MetricsTenant}}/?extra_label={{.MetricsExtraLabels}}&extra_filters={{.MetricsExtraFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // ---- VictoriaLogs specific tests ----

    // tenant headers not overwritten if set statically
    // extra_filters extra_stream_filters dropped if empty in vm_access claim
    request = httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
headers:
AccountID=555
ProjectID=666`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: 555"
  - "ProjectID: 666"
  url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // tenant headers are overwritten if set as placeholders
    request = httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
headers:
AccountID=0
ProjectID=0`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // tenant headers are overwritten if set as placeholders
    // extra_filters extra_stream_filters from vm_access claim merged with statically defined
    request = httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters=aStaticFilter
extra_filters={"namespace":"my-app","env":"prod"}
extra_stream_filters=aStaticStreamFilter
extra_stream_filters={"team":"dev"}
headers:
AccountID=345
ProjectID=456`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  url_prefix: {BACKEND}/select/logsql/?extra_filters=aStaticFilter&extra_stream_filters=aStaticStreamFilter&extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // claim info should overwrite user provided query args and headers
    request = httptest.NewRequest(`GET`, "http://some-host.com/query?extra_filters=aUserFilter&extra_stream_filters=aUserStreamFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    request.Header.Set(`AccountID`, `aUserAccountID`)
    request.Header.Set(`ProjectID`, `aUserProjectID`)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters={"namespace":"my-app","env":"prod"}
extra_stream_filters={"team":"dev"}
headers:
AccountID=345
ProjectID=456`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // merge user provided query args with extra_filters and extra_stream_filters from vm_access claim
    request = httptest.NewRequest(`GET`, "http://some-host.com/query?extra_filters=aUserFilter&extra_stream_filters=aUserStreamFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters={"namespace":"my-app","env":"prod"}
extra_filters=aUserFilter
extra_stream_filters={"team":"dev"}
extra_stream_filters=aUserStreamFilter
headers:
AccountID=345
ProjectID=456`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  merge_query_args: [extra_filters, extra_stream_filters]
  url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // pass user provided query args if vm_access claim has no extra_labels and extra_filters
    request = httptest.NewRequest(`GET`, "http://some-host.com/query?extra_filters=aUserFilter&extra_stream_filters=aUserStreamFilter", nil)
    request.Header.Set(`Authorization`, `Bearer `+minimalToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters=aUserFilter
extra_stream_filters=aUserStreamFilter
headers:
AccountID=0
ProjectID=0`
    f(
        fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  merge_query_args: [extra_filters, extra_stream_filters]
  url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // placeholders in url_map
    request = httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters={"namespace":"my-app","env":"prod"}
extra_stream_filters={"team":"dev"}
headers:
AccountID=345
ProjectID=456`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_map:
  - src_paths: ["/query"]
    headers:
    - "AccountID: {{.LogsAccountID}}"
    - "ProjectID: {{.LogsProjectID}}"
    url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )

    // multiple placeholders in url_map for the same param
    request = httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters={"namespace":"my-app","env":"prod"}
extra_stream_filters={"team":"dev"}
tenant_info=static=value
tenant_info=345
tenant_info=456
headers:
AccountID=345
ProjectID=456`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_map:
  - src_paths: ["/query"]
    headers:
    - "AccountID: {{.LogsAccountID}}"
    - "ProjectID: {{.LogsProjectID}}"
    url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}&tenant_info=static=value&tenant_info={{.LogsAccountID}}&tenant_info={{.LogsProjectID}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )
    // client request params must be ignored by placeholders
    request = httptest.NewRequest(`GET`, "http://some-host.com/query?template_attack={{.LogsExtraFilters}}", nil)
    request.Header.Set(`Authorization`, `Bearer `+fullToken)
    request.Header.Set(`AccountID`, `{{.LogsAccountID}}`)
    responseExpected = `
statusCode=200
path: /select/logsql/query
query:
extra_filters={"namespace":"my-app","env":"prod"}
extra_stream_filters={"team":"dev"}
template_attack={{.LogsExtraFilters}}
headers:
AccountID={{.LogsAccountID}}`
    f(fmt.Sprintf(
        `
users:
- jwt:
    public_keys:
    - %q
  url_map:
  - src_paths: ["/query"]
    url_prefix: {BACKEND}/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
        request,
        responseExpected,
    )
    nestedToken := genToken(t, map[string]any{
        "exp":  time.Now().Add(10 * time.Minute).Unix(),
        "team": "dev",
        "nested": map[string]any{
            "department_id": 0,
            "scopes":        []string{"metrics", "logs"},
            "team_permissions": map[string]any{
                "read":  0,
                "write": 1,
            },
        },
        "vm_access": map[string]any{
            "metrics_account_id": 123,
            "metrics_project_id": 234,
            "metrics_extra_labels": []string{
                "label1=value1",
                "label2=value2",
            },
            "metrics_extra_filters": []string{
                `{label3="value3"}`,
                `{label4="value4"}`,
            },
            "logs_account_id": 345,
            "logs_project_id": 456,
            "logs_extra_filters": []string{
                `{"namespace":"my-app","env":"prod"}`,
            },
            "logs_extra_stream_filters": []string{
                `{"team":"dev"}`,
            },
        },
    }, true)

    // use claim for routing, most specific match wins
    request = httptest.NewRequest(`GET`, "http://some-host.com/route", nil)
    request.Header.Set(`Authorization`, `Bearer `+nestedToken)
    responseExpected = `
statusCode=200
path: /dev/route
query:
headers:
`
    f(`
users:
- jwt:
    skip_verify: true
    match_claims:
      team: dev
      nested.scopes.1: "logs"
      nested.department_id: "0"
  url_map:
  - src_paths: ["/route"]
    url_prefix: {BACKEND}/dev
- jwt:
    skip_verify: true
    match_claims:
      team: dev
      nested.scopes.1: "logs"
  url_map:
  - src_paths: ["/route"]
    url_prefix: {BACKEND}/ops
`,
        request,
        responseExpected,
    )

    // use claim for routing, most specific not matching
    request = httptest.NewRequest(`GET`, "http://some-host.com/route", nil)
    request.Header.Set(`Authorization`, `Bearer `+nestedToken)
    responseExpected = `
statusCode=200
path: /less_claims/route
query:
headers:
`
    f(`
users:
- jwt:
    skip_verify: true
    match_claims:
      team: ops
      nested.scopes.1: "logs"
      nested.department_id: "0"
  url_map:
  - src_paths: ["/route"]
    url_prefix: {BACKEND}/more_claims
- jwt:
    skip_verify: true
    match_claims:
      team: dev
      nested.team_permissions.write: "1"
  url_map:
  - src_paths: ["/route"]
    url_prefix: {BACKEND}/less_claims
`,
        request,
        responseExpected,
    )

    // use claim for routing, empty claim match
    request = httptest.NewRequest(`GET`, "http://some-host.com/route", nil)
    request.Header.Set(`Authorization`, `Bearer `+nestedToken)
    responseExpected = `
statusCode=200
path: /empty/route
query:
headers:
`
    f(`
users:
- jwt:
    skip_verify: true
  url_map:
  - src_paths: ["/route"]
    url_prefix: {BACKEND}/empty
- jwt:
    skip_verify: true
    match_claims:
      team: ops
      nested.team_permissions.write: "1"
  url_map:
  - src_paths: ["/route"]
    url_prefix: {BACKEND}/ops
`,
        request,
        responseExpected,
    )
}
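
The match_claims keys above address nested claims with a dotted path, where a numeric segment indexes into an array (nested.scopes.1 is the second scope). A standalone sketch of that lookup (assumed semantics based on these tests, not the vmauth implementation):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// lookupClaim resolves a dotted path like "nested.scopes.1" against
// decoded JWT claims; numeric segments index into arrays.
func lookupClaim(claims any, path string) (any, bool) {
    cur := claims
    for _, seg := range strings.Split(path, ".") {
        switch v := cur.(type) {
        case map[string]any:
            c, ok := v[seg]
            if !ok {
                return nil, false
            }
            cur = c
        case []any:
            i, err := strconv.Atoi(seg)
            if err != nil || i < 0 || i >= len(v) {
                return nil, false
            }
            cur = v[i]
        default:
            return nil, false
        }
    }
    return cur, true
}

func main() {
    claims := map[string]any{
        "nested": map[string]any{"scopes": []any{"metrics", "logs"}},
    }
    fmt.Println(lookupClaim(claims, "nested.scopes.1")) // logs true
}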
|
||||
|
||||
func TestOIDCRequestHandler(t *testing.T) {
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		t.Fatalf("cannot generate RSA key: %s", err)
	}

	var oidcSrv *httptest.Server
	oidcRespOK := atomic.Bool{}
	oidcRespOK.Store(true)

	oidcSrv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/.well-known/openid-configuration":
			w.Header().Set("Content-Type", "application/json")
			if err := json.NewEncoder(w).Encode(map[string]string{
				"issuer":   oidcSrv.URL,
				"jwks_uri": oidcSrv.URL + "/jwks",
			}); err != nil {
				panic(fmt.Errorf("cannot write openid-configuration response: %w", err))
			}
		case "/jwks":
			if !oidcRespOK.Load() {
				http.Error(w, "internal server error", http.StatusInternalServerError)
				return
			}

			// Encode the RSA public key in JWK format (base64url, no padding)
			nBytes := privateKey.N.Bytes()
			eBytes := big.NewInt(int64(privateKey.E)).Bytes()
			jwksBody := fmt.Sprintf(`{"keys":[{"kty":"RSA","kid":%q,"n":%q,"e":%q}]}`,
				`test-key-id`,
				base64.RawURLEncoding.EncodeToString(nBytes),
				base64.RawURLEncoding.EncodeToString(eBytes),
			)

			w.Header().Set("Content-Type", "application/json")
			if _, err := w.Write([]byte(jwksBody)); err != nil {
				panic(fmt.Errorf("cannot write jwks response: %w", err))
			}
		default:
			http.NotFound(w, r)
		}
	}))
	defer oidcSrv.Close()

	headerJSON, err := json.Marshal(map[string]any{
		"alg": "RS256",
		"typ": "JWT",
		"iss": oidcSrv.URL,
		"kid": `test-key-id`,
	})
	if err != nil {
		t.Fatalf("cannot marshal JWT header: %s", err)
	}
	headerB64 := base64.RawURLEncoding.EncodeToString(headerJSON)

	bodyJSON, err := json.Marshal(map[string]any{
		"exp":       time.Now().Add(time.Minute).Unix(),
		"iss":       oidcSrv.URL,
		"vm_access": map[string]any{},
	})
	if err != nil {
		t.Fatalf("cannot marshal JWT body: %s", err)
	}
	bodyB64 := base64.RawURLEncoding.EncodeToString(bodyJSON)

	payload := headerB64 + "." + bodyB64

	var signatureB64 string
	hash := crypto.SHA256
	h := hash.New()
	h.Write([]byte(payload))
	digest := h.Sum(nil)

	signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, digest)
	if err != nil {
		t.Fatalf("cannot sign JWT token: %s", err)
	}
	signatureB64 = base64.RawURLEncoding.EncodeToString(signature)

	tkn := payload + "." + signatureB64

	backSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer backSrv.Close()

	f := func(responseExpected string) {
		t.Helper()

		cfgStr := `
users:
- jwt:
    oidc:
      issuer: ` + oidcSrv.URL + `
  url_prefix: ` + backSrv.URL + `/
`

		cfgOrigP := authConfigData.Load()
		if _, err := reloadAuthConfigData([]byte(cfgStr)); err != nil {
			t.Fatalf("cannot load config data: %s", err)
		}
		defer func() {
			cfgOrig := []byte("unauthorized_user:\n url_prefix: http://foo/bar")
			if cfgOrigP != nil {
				cfgOrig = *cfgOrigP
			}
			if _, err := reloadAuthConfigData(cfgOrig); err != nil {
				t.Fatalf("cannot restore original config: %s", err)
			}
		}()

		r := httptest.NewRequest("GET", "http://some-host.com/api/v1/query", nil)
		r.Header.Set("Authorization", "Bearer "+tkn)

		w := &fakeResponseWriter{}
		if !requestHandlerWithInternalRoutes(w, r) {
			t.Fatalf("unexpected false returned from requestHandler")
		}

		if response := w.getResponse(); response != responseExpected {
			t.Fatalf("unexpected response\ngot\n%s\nwant\n%s", response, responseExpected)
		}
	}

	// successful
	f(`statusCode=200
`)

	oidcRespOK.Store(false)
	// OIDC server error
	f(`statusCode=401
Unauthorized
`)
}
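
For reference, the JWKS payload built by the test server above encodes "n" and "e" as base64url (no padding) big-endian integers, so a verifier has to reverse that encoding to rebuild the RSA public key. A minimal sketch of that inverse step (jwkToPublicKey is a hypothetical helper, not part of this diff; it only uses the crypto/rsa, encoding/base64 and math/big imports already present in this file):

func jwkToPublicKey(n, e string) (*rsa.PublicKey, error) {
	// Both fields use base64.RawURLEncoding, matching the test server above.
	nBytes, err := base64.RawURLEncoding.DecodeString(n)
	if err != nil {
		return nil, err
	}
	eBytes, err := base64.RawURLEncoding.DecodeString(e)
	if err != nil {
		return nil, err
	}
	return &rsa.PublicKey{
		N: new(big.Int).SetBytes(nBytes),
		E: int(new(big.Int).SetBytes(eBytes).Int64()),
	}, nil
}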

type fakeResponseWriter struct {
	h          http.Header
	statusCode int

	bb bytes.Buffer
}
@@ -754,6 +1559,7 @@ func (w *fakeResponseWriter) Write(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
func (w *fakeResponseWriter) WriteHeader(statusCode int) {
|
||||
w.statusCode = statusCode
|
||||
fmt.Fprintf(&w.bb, "statusCode=%d\n", statusCode)
|
||||
if w.h == nil {
|
||||
return
|
||||
@@ -774,6 +1580,12 @@ func (w *fakeResponseWriter) SetReadDeadline(deadline time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fakeResponseWriter) reset() {
|
||||
w.bb.Reset()
|
||||
w.statusCode = 0
|
||||
clear(w.h)
|
||||
}
|
||||
|
||||
func TestBufferRequestBody_Success(t *testing.T) {
|
||||
defaultRequestBufferSize := requestBufferSize.String()
|
||||
defer func() {
|
||||
|
||||

app/vmauth/main_timing_test.go (new file, 194 lines)
@@ -0,0 +1,194 @@
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"
)

func BenchmarkJWTRequestHandler(b *testing.B) {
	// Generate RSA key pair for testing
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		b.Fatalf("cannot generate RSA key: %s", err)
	}

	// Generate public key PEM
	publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
	if err != nil {
		b.Fatalf("cannot marshal public key: %s", err)
	}
	publicKeyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: publicKeyBytes,
	})

	genToken := func(t *testing.B, body map[string]any, valid bool) string {
		t.Helper()

		headerJSON, err := json.Marshal(map[string]any{
			"alg": "RS256",
			"typ": "JWT",
		})
		if err != nil {
			t.Fatalf("cannot marshal header: %s", err)
		}
		headerB64 := base64.RawURLEncoding.EncodeToString(headerJSON)

		bodyJSON, err := json.Marshal(body)
		if err != nil {
			t.Fatalf("cannot marshal body: %s", err)
		}
		bodyB64 := base64.RawURLEncoding.EncodeToString(bodyJSON)

		payload := headerB64 + "." + bodyB64

		var signatureB64 string
		if valid {
			// Create real RSA signature
			hash := crypto.SHA256
			h := hash.New()
			h.Write([]byte(payload))
			digest := h.Sum(nil)

			signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, digest)
			if err != nil {
				t.Fatalf("cannot sign token: %s", err)
			}
			signatureB64 = base64.RawURLEncoding.EncodeToString(signature)
		} else {
			signatureB64 = base64.RawURLEncoding.EncodeToString([]byte("invalid_signature"))
		}

		return payload + "." + signatureB64
	}

	f := func(name string, cfgStr string, r *http.Request, statusCodeExpected int) {
		b.Helper()

		b.ReportAllocs()
		b.ResetTimer()
		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
			if _, err := w.Write([]byte("path: " + r.URL.Path + "\n")); err != nil {
				panic(fmt.Errorf("cannot write response: %w", err))
			}
		}))
		defer ts.Close()

		cfgStr = strings.ReplaceAll(cfgStr, "{BACKEND}", ts.URL)

		cfgOrigP := authConfigData.Load()
		if _, err := reloadAuthConfigData([]byte(cfgStr)); err != nil {
			b.Fatalf("cannot load config data: %s", err)
		}
		defer func() {
			cfgOrig := []byte("unauthorized_user:\n url_prefix: http://foo/bar")
			if cfgOrigP != nil {
				cfgOrig = *cfgOrigP
			}
			_, err := reloadAuthConfigData(cfgOrig)
			if err != nil {
				b.Fatalf("cannot load the original config: %s", err)
			}
		}()

		b.Run(name, func(b *testing.B) {
			b.ResetTimer()
			b.ReportAllocs()
			b.RunParallel(func(pb *testing.PB) {
				w := &fakeResponseWriter{}
				for pb.Next() {
					w.reset()
					if !requestHandlerWithInternalRoutes(w, r) {
						b.Fatalf("unexpected false is returned from requestHandler")
					}
					if w.statusCode != statusCodeExpected {
						b.Fatalf("unexpected response code (-%d;+%d)", statusCodeExpected, w.statusCode)
					}
				}
			})
		})
	}

	simpleCfgStr := fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  url_prefix: {BACKEND}/foo`, string(publicKeyPEM))
	noVMAccessClaimToken := genToken(b, nil, true)
	expiredToken := genToken(b, map[string]any{
		"exp":       10,
		"vm_access": map[string]any{},
	}, true)

	fullToken := genToken(b, map[string]any{
		"exp":   time.Now().Add(10 * time.Minute).Unix(),
		"scope": "email id",
		"vm_access": map[string]any{
			"extra_labels": map[string]string{
				"label":  "value1",
				"label2": "value3",
			},
			"extra_filters":      []string{"stream_filter1", "stream_filter2"},
			"metrics_account_id": 123,
			"metrics_project_id": 234,
			"metrics_extra_labels": []string{
				"label1=value1",
				"label2=value2",
			},
			"metrics_extra_filters": []string{
				`{label3="value3"}`,
				`{label4="value4"}`,
			},
			"logs_account_id": 345,
			"logs_project_id": 456,
			"logs_extra_filters": []string{
				`{"namespace":"my-app","env":"prod"}`,
			},
			"logs_extra_stream_filters": []string{
				`{"team":"dev"}`,
			},
		},
	}, true)

	// tenant headers are overwritten if set as placeholders;
	// extra_filters and extra_stream_filters from the vm_access claim are merged with statically defined ones
	request := httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
	request.Header.Set(`Authorization`, `Bearer `+fullToken)
	f("full_template",
		fmt.Sprintf(`
users:
- jwt:
    public_keys:
    - %q
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  url_prefix: {BACKEND}/select/logsql/?extra_filters=aStaticFilter&extra_stream_filters=aStaticStreamFilter&extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
		request,
		http.StatusOK,
	)

	// token without vm_access claim
	request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
	request.Header.Set(`Authorization`, `Bearer `+noVMAccessClaimToken)
	f("token_without_claim", simpleCfgStr, request, http.StatusUnauthorized)

	// expired token
	request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
	request.Header.Set(`Authorization`, `Bearer `+expiredToken)
	f("expired_token", simpleCfgStr, request, http.StatusUnauthorized)
}

app/vmauth/oidc.go (new file, 195 lines)
@@ -0,0 +1,195 @@
package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)

type oidcConfig struct {
	Issuer string `yaml:"issuer"`
}

type oidcDiscovererPool struct {
	ds map[string]*oidcDiscoverer

	context context.Context
	cancel  func()
	wg      *sync.WaitGroup
}

func (dp *oidcDiscovererPool) createOrAdd(issuer string, vp *atomic.Pointer[jwt.VerifierPool]) {
	if dp.ds == nil {
		dp.ds = make(map[string]*oidcDiscoverer)
		dp.context, dp.cancel = context.WithCancel(context.Background())
		dp.wg = &sync.WaitGroup{}
	}

	ds, found := dp.ds[issuer]
	if !found {
		ds = &oidcDiscoverer{
			issuer: issuer,
		}
		dp.ds[issuer] = ds
	}

	ds.vps = append(ds.vps, vp)
}

func (dp *oidcDiscovererPool) startDiscovery() {
	if len(dp.ds) == 0 {
		return
	}

	for _, d := range dp.ds {
		dp.wg.Go(func() {
			if err := d.refreshVerifierPools(dp.context); err != nil {
				logger.Errorf("failed to initialize OIDC verifier pool at start for issuer %q: %s", d.issuer, err)
			}
		})
	}
	dp.wg.Wait()

	for _, d := range dp.ds {
		dp.wg.Go(func() {
			d.run(dp.context)
		})
	}
}

func (dp *oidcDiscovererPool) stopDiscovery() {
	if len(dp.ds) == 0 {
		return
	}

	dp.cancel()
	dp.wg.Wait()
}

type oidcDiscoverer struct {
	issuer string
	vps    []*atomic.Pointer[jwt.VerifierPool]
}

func (d *oidcDiscoverer) run(ctx context.Context) {
	t := time.NewTimer(timeutil.AddJitterToDuration(time.Second * 10))
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if err := d.refreshVerifierPools(ctx); errors.Is(err, context.Canceled) {
				return
			} else if err != nil {
				t.Reset(timeutil.AddJitterToDuration(time.Second * 10))
				logger.Errorf("failed to refresh OIDC verifier pool for issuer %q: %v", d.issuer, err)
				continue
			}
			// OIDC may return a Cache-Control header with a max-age directive.
			// It could be used as the time range for the next refresh.
			// https://openid.net/specs/openid-connect-core-1_0.html#RotateEncKeys
			t.Reset(timeutil.AddJitterToDuration(time.Minute * 5))
		case <-ctx.Done():
			return
		}
	}
}

func (d *oidcDiscoverer) refreshVerifierPools(ctx context.Context) error {
	cfg, err := getOpenIDConfiguration(ctx, d.issuer)
	if err != nil {
		return err
	}
	// The issuer in the OIDC configuration must match the expected issuer.
	// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
	if cfg.Issuer != d.issuer {
		return fmt.Errorf("openid configuration issuer %q does not match expected issuer %q", cfg.Issuer, d.issuer)
	}

	verifierPool, err := fetchAndParseJWKs(ctx, cfg.JWKsURI)
	if err != nil {
		return err
	}

	for _, vp := range d.vps {
		vp.Store(verifierPool)
	}
	return nil
}

// See https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata for details.
type openidConfig struct {
	Issuer  string `json:"issuer"`
	JWKsURI string `json:"jwks_uri"`
}

var oidcHTTPClient = &http.Client{
	Timeout: time.Second * 5,
}

func fetchAndParseJWKs(ctx context.Context, jwksURI string) (*jwt.VerifierPool, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, jwksURI, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request for fetching jwks keys from %q: %w", jwksURI, err)
	}

	resp, err := oidcHTTPClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch jwks keys from %q: %w", jwksURI, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code %d when fetching jwks keys from %q", resp.StatusCode, jwksURI)
	}

	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body from %q: %w", jwksURI, err)
	}

	vp, err := jwt.ParseJWKs(b)
	if err != nil {
		return nil, fmt.Errorf("failed to parse jwks keys from %q: %v", jwksURI, err)
	}

	return vp, nil
}

func getOpenIDConfiguration(ctx context.Context, issuer string) (openidConfig, error) {
	issuer, _ = strings.CutSuffix(issuer, "/")
	configURL := fmt.Sprintf("%s/.well-known/openid-configuration", issuer)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, configURL, nil)
	if err != nil {
		return openidConfig{}, fmt.Errorf("failed to create request for fetching openid config from %q: %w", configURL, err)
	}

	resp, err := oidcHTTPClient.Do(req)
	if err != nil {
		return openidConfig{}, fmt.Errorf("failed to fetch openid config from %q: %w", configURL, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return openidConfig{}, fmt.Errorf("unexpected status code %d when fetching openid config from %q", resp.StatusCode, configURL)
	}

	var cfg openidConfig
	if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
		return openidConfig{}, fmt.Errorf("failed to decode openid config from %q: %s", configURL, err)
	}

	return cfg, nil
}
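
The refresh loop in run() above hard-codes a jittered 5-minute interval, and its inline comment notes that the max-age directive of the JWKS response's Cache-Control header could drive the next refresh instead. A minimal sketch of such parsing, assuming the header is taken from the JWKS HTTP response (parseMaxAge is hypothetical and not part of this diff; it additionally needs the strconv import):

// parseMaxAge extracts the max-age directive from a Cache-Control header.
// It returns fallback when the directive is missing or malformed.
func parseMaxAge(h http.Header, fallback time.Duration) time.Duration {
	for _, directive := range strings.Split(h.Get("Cache-Control"), ",") {
		if v, ok := strings.CutPrefix(strings.TrimSpace(directive), "max-age="); ok {
			if secs, err := strconv.Atoi(v); err == nil && secs > 0 {
				return time.Duration(secs) * time.Second
			}
		}
	}
	return fallback
}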
@@ -21,6 +21,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot/snapshotutil"
)

@@ -416,6 +416,16 @@
	promTemporaryDirPath = "prom-tmp-dir-path"
)

const (
	thanosSnapshot         = "thanos-snapshot"
	thanosConcurrency      = "thanos-concurrency"
	thanosFilterTimeStart  = "thanos-filter-time-start"
	thanosFilterTimeEnd    = "thanos-filter-time-end"
	thanosFilterLabel      = "thanos-filter-label"
	thanosFilterLabelValue = "thanos-filter-label-value"
	thanosAggrTypes        = "thanos-aggr-types"
)

var (
	promFlags = []cli.Flag{
		&cli.StringFlag{
@@ -451,6 +461,43 @@
			Value: os.TempDir(),
		},
	}

	thanosFlags = []cli.Flag{
		&cli.StringFlag{
			Name:     thanosSnapshot,
			Usage:    "Path to Thanos snapshot directory containing raw and/or downsampled blocks.",
			Required: true,
		},
		&cli.IntFlag{
			Name:  thanosConcurrency,
			Usage: "Number of concurrently running snapshot readers",
			Value: 1,
		},
		&cli.StringFlag{
			Name:  thanosFilterTimeStart,
			Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  thanosFilterTimeEnd,
			Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  thanosFilterLabel,
			Usage: "Thanos label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.",
		},
		&cli.StringFlag{
			Name:  thanosFilterLabelValue,
			Usage: fmt.Sprintf("Thanos regular expression to filter label from %q flag.", thanosFilterLabel),
			Value: ".*",
		},
		&cli.StringSliceFlag{
			Name: thanosAggrTypes,
			Usage: "Aggregate types to import from Thanos downsampled blocks. Supported values: count, sum, min, max, counter. " +
				"Each aggregate will be imported as a separate metric with the aggregate type as suffix (e.g., metric_name:5m:count). " +
				"If not specified, all aggregate types will be imported from downsampled blocks.",
			Value: nil,
		},
	}
)

const (
@@ -27,6 +27,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/thanos"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
@@ -285,6 +286,7 @@ func main() {
				if err != nil {
					return fmt.Errorf("failed to create prometheus client: %s", err)
				}

				pp := prometheusProcessor{
					cl: cl,
					im: importer,
@@ -294,6 +296,59 @@ func main() {
				return pp.run(ctx)
			},
		},
		{
			Name:   "thanos",
			Usage:  "Migrate time series from Thanos blocks (supports raw and downsampled data)",
			Flags:  mergeFlags(globalFlags, thanosFlags, vmFlags),
			Before: beforeFn,
			Action: func(c *cli.Context) error {
				fmt.Println("Thanos import mode")

				vmCfg, err := initConfigVM(c)
				if err != nil {
					return fmt.Errorf("failed to init VM configuration: %s", err)
				}

				importer, err = vm.NewImporter(ctx, vmCfg)
				if err != nil {
					return fmt.Errorf("failed to create VM importer: %s", err)
				}

				thanosCfg := thanos.Config{
					Snapshot: c.String(thanosSnapshot),
					Filter: thanos.Filter{
						TimeMin:    c.String(thanosFilterTimeStart),
						TimeMax:    c.String(thanosFilterTimeEnd),
						Label:      c.String(thanosFilterLabel),
						LabelValue: c.String(thanosFilterLabelValue),
					},
				}
				cl, err := thanos.NewClient(thanosCfg)
				if err != nil {
					return fmt.Errorf("failed to create thanos client: %s", err)
				}

				var aggrTypes []thanos.AggrType
				if aggrTypesStr := c.StringSlice(thanosAggrTypes); len(aggrTypesStr) > 0 {
					for _, typeStr := range aggrTypesStr {
						aggrType, err := thanos.ParseAggrType(typeStr)
						if err != nil {
							return fmt.Errorf("failed to parse aggregate type %q: %s", typeStr, err)
						}
						aggrTypes = append(aggrTypes, aggrType)
					}
				}

				tp := thanosProcessor{
					cl:        cl,
					im:        importer,
					cc:        c.Int(thanosConcurrency),
					isVerbose: c.Bool(globalVerbose),
					aggrTypes: aggrTypes,
				}
				return tp.run(ctx)
			},
		},
		{
			Name:  "vm-native",
			Usage: "Migrate time series between VictoriaMetrics installations",

app/vmctl/thanos/aggr_chunk.go (new file, 233 lines)
@@ -0,0 +1,233 @@
package thanos

import (
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// ChunkEncAggr is the top level encoding byte for the AggrChunk.
// It is defined by Thanos as 0xff to prevent collisions with Prometheus encodings.
const ChunkEncAggr = chunkenc.Encoding(0xff)

// AggrType represents an aggregation type in Thanos downsampled blocks.
type AggrType uint8

// AggrTypeNone indicates raw blocks with no aggregation.
// It is used as a sentinel to distinguish raw block processing from downsampled.
const AggrTypeNone AggrType = 255

// Valid aggregation types matching Thanos definitions.
const (
	AggrCount AggrType = iota
	AggrSum
	AggrMin
	AggrMax
	AggrCounter
)

// AllAggrTypes contains all supported aggregation types.
var AllAggrTypes = []AggrType{AggrCount, AggrSum, AggrMin, AggrMax, AggrCounter}

func (t AggrType) String() string {
	switch t {
	case AggrCount:
		return "count"
	case AggrSum:
		return "sum"
	case AggrMin:
		return "min"
	case AggrMax:
		return "max"
	case AggrCounter:
		return "counter"
	}
	return "<unknown>"
}

// ParseAggrType parses aggregate type from string.
func ParseAggrType(s string) (AggrType, error) {
	switch s {
	case "count":
		return AggrCount, nil
	case "sum":
		return AggrSum, nil
	case "min":
		return AggrMin, nil
	case "max":
		return AggrMax, nil
	case "counter":
		return AggrCounter, nil
	}
	return 0, fmt.Errorf("unknown aggregate type: %q", s)
}

// ErrAggrNotExist is returned if a requested aggregation is not present in an AggrChunk.
var ErrAggrNotExist = errors.New("aggregate does not exist")

// AggrChunk is a chunk that is composed of a set of aggregates for the same underlying data.
// Not all aggregates must be present.
// This is a read-only implementation for decoding Thanos downsampled blocks.
type AggrChunk []byte

// IsAggrChunk checks if the encoding byte indicates this is an AggrChunk.
func IsAggrChunk(enc chunkenc.Encoding) bool {
	return enc == ChunkEncAggr
}

// Get returns the sub-chunk for the given aggregate type if it exists.
func (c AggrChunk) Get(t AggrType) (chunkenc.Chunk, error) {
	b := c[:]
	var x []byte

	for i := AggrType(0); i <= t; i++ {
		l, n := binary.Uvarint(b)
		if n < 1 {
			return nil, errors.New("invalid size: failed to read uvarint")
		}
		if l > uint64(len(b[n:])) || l+1 > uint64(len(b[n:])) {
			if l > 0 {
				return nil, errors.New("invalid size: not enough bytes")
			}
		}
		b = b[n:]
		// If length is set to zero explicitly, that means the aggregate is unset.
		if l == 0 {
			if i == t {
				return nil, ErrAggrNotExist
			}
			continue
		}
		chunkLen := int(l) + 1
		x = b[:chunkLen]
		b = b[chunkLen:]
	}
	if len(x) == 0 {
		return nil, ErrAggrNotExist
	}
	return chunkenc.FromData(chunkenc.Encoding(x[0]), x[1:])
}

// Encoding returns the encoding type for AggrChunk.
func (c AggrChunk) Encoding() chunkenc.Encoding {
	return ChunkEncAggr
}

// errIterator wraps a nop iterator but reports an error via Err().
// It embeds chunkenc.Iterator to inherit all methods (including Seek)
// which avoids go vet stdmethods warning about Seek signature.
type errIterator struct {
	chunkenc.Iterator
	err error
}

// Err returns the underlying error.
func (it *errIterator) Err() error {
	return it.err
}

// newAggrChunkIterator creates a new iterator for the specified aggregate type.
// If the aggregate is not present in the chunk (ErrAggrNotExist), a nop iterator
// is returned without error: the caller will simply see zero samples.
// Real decoding/corruption errors are reported via the iterator's Err() method.
func newAggrChunkIterator(data []byte, aggrType AggrType) chunkenc.Iterator {
	chunk := AggrChunk(data)
	subChunk, err := chunk.Get(aggrType)
	if err != nil {
		if errors.Is(err, ErrAggrNotExist) {
			return chunkenc.NewNopIterator()
		}
		return &errIterator{
			Iterator: chunkenc.NewNopIterator(),
			err:      err,
		}
	}
	return subChunk.Iterator(nil)
}

// AggrChunkWrapper wraps AggrChunk to implement chunkenc.Chunk interface.
// It delegates iteration to a specific aggregate type.
type AggrChunkWrapper struct {
	data     []byte
	aggrType AggrType
}

// NewAggrChunkWrapper creates a new AggrChunk wrapper for the specified aggregate type.
func NewAggrChunkWrapper(data []byte, aggrType AggrType) *AggrChunkWrapper {
	return &AggrChunkWrapper{
		data:     data,
		aggrType: aggrType,
	}
}

// Bytes returns the underlying byte slice.
func (c *AggrChunkWrapper) Bytes() []byte {
	return c.data
}

// Encoding returns the AggrChunk encoding.
func (c *AggrChunkWrapper) Encoding() chunkenc.Encoding {
	return ChunkEncAggr
}

// Appender returns an error since AggrChunk is read-only.
func (c *AggrChunkWrapper) Appender() (chunkenc.Appender, error) {
	return nil, errors.New("AggrChunk is read-only")
}

// Iterator returns an iterator for the specified aggregate type.
func (c *AggrChunkWrapper) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
	return newAggrChunkIterator(c.data, c.aggrType)
}

// NumSamples returns the number of samples in the aggregate.
func (c *AggrChunkWrapper) NumSamples() int {
	chunk := AggrChunk(c.data)
	subChunk, err := chunk.Get(c.aggrType)
	if err != nil {
		return 0
	}
	return subChunk.NumSamples()
}

// Compact is a no-op for read-only AggrChunk.
func (c *AggrChunkWrapper) Compact() {}

// Reset resets the chunk with new data.
func (c *AggrChunkWrapper) Reset(stream []byte) {
	c.data = stream
}

// AggrChunkPool is a custom Pool that understands AggrChunk encoding (0xff).
// It delegates standard encodings to the default pool and handles AggrChunk specially.
type AggrChunkPool struct {
	defaultPool chunkenc.Pool
	aggrType    AggrType
}

// NewAggrChunkPool creates a new pool that handles AggrChunk encoding.
func NewAggrChunkPool(aggrType AggrType) *AggrChunkPool {
	return &AggrChunkPool{
		defaultPool: chunkenc.NewPool(),
		aggrType:    aggrType,
	}
}

// Get returns a chunk for the given encoding and data.
func (p *AggrChunkPool) Get(e chunkenc.Encoding, b []byte) (chunkenc.Chunk, error) {
	if e == ChunkEncAggr {
		return NewAggrChunkWrapper(b, p.aggrType), nil
	}
	return p.defaultPool.Get(e, b)
}

// Put returns a chunk to the pool.
func (p *AggrChunkPool) Put(c chunkenc.Chunk) error {
	if c.Encoding() == ChunkEncAggr {
		// AggrChunk wrappers are not pooled
		return nil
	}
	return p.defaultPool.Put(c)
}
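
To make the framing that AggrChunk.Get decodes concrete: the chunk is a sequence of slots in AggrType order, each stored as <uvarint data-length><one encoding byte><data>, with a zero length marking the aggregate as unset. A hedged sketch of the matching encoder, which can be handy in tests (buildAggrChunk is hypothetical; neither Thanos nor this diff ships it):

// buildAggrChunk assembles the byte framing that AggrChunk.Get expects:
// <uvarint len(data)><1 encoding byte><data> per slot, uvarint(0) for unset.
func buildAggrChunk(subChunks []chunkenc.Chunk) AggrChunk {
	var out []byte
	for _, c := range subChunks {
		if c == nil {
			out = binary.AppendUvarint(out, 0) // aggregate unset
			continue
		}
		data := c.Bytes()
		out = binary.AppendUvarint(out, uint64(len(data)))
		out = append(out, byte(c.Encoding()))
		out = append(out, data...)
	}
	return AggrChunk(out)
}

With slots ordered count, sum, min, max, counter, buildAggrChunk([]chunkenc.Chunk{nil, sumChunk, nil, nil, nil}) yields a chunk where Get(AggrCount) returns ErrAggrNotExist while Get(AggrSum) decodes sumChunk.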

app/vmctl/thanos/block_meta.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package thanos

import (
	"encoding/json"
	"os"
	"path/filepath"
)

// BlockMeta extends Prometheus BlockMeta with Thanos-specific fields.
type BlockMeta struct {
	// Thanos-specific metadata
	Thanos ThanosMeta `json:"thanos,omitempty"`
}

// ThanosMeta contains Thanos-specific block metadata.
type ThanosMeta struct {
	// Labels are external labels identifying the producer.
	Labels map[string]string `json:"labels,omitempty"`

	// Downsample contains downsampling information.
	Downsample ThanosDownsample `json:"downsample,omitempty"`

	// Source indicates where the block came from.
	Source string `json:"source,omitempty"`

	// SegmentFiles contains list of segment files in the block.
	SegmentFiles []string `json:"segment_files,omitempty"`

	// Files contains metadata about files in the block.
	Files []ThanosFile `json:"files,omitempty"`
}

// ThanosDownsample contains downsampling resolution info.
type ThanosDownsample struct {
	// Resolution is the downsampling resolution in milliseconds.
	// 0 means raw data (no downsampling).
	// 300000 (5 minutes) or 3600000 (1 hour) for downsampled data.
	Resolution int64 `json:"resolution"`
}

// ThanosFile contains metadata about a file in the block.
type ThanosFile struct {
	RelPath   string `json:"rel_path"`
	SizeBytes int64  `json:"size_bytes,omitempty"`
}

// ResolutionLevel represents the downsampling resolution.
type ResolutionLevel int64

const (
	// ResolutionRaw is for raw, non-downsampled data.
	ResolutionRaw ResolutionLevel = 0
	// Resolution5m is for 5-minute downsampled data (300000 ms).
	Resolution5m ResolutionLevel = 300000
	// Resolution1h is for 1-hour downsampled data (3600000 ms).
	Resolution1h ResolutionLevel = 3600000
)

// String returns human-readable resolution string.
func (r ResolutionLevel) String() string {
	switch r {
	case ResolutionRaw:
		return "raw"
	case Resolution5m:
		return "5m"
	case Resolution1h:
		return "1h"
	default:
		return "unknown"
	}
}

// ReadBlockMeta reads Thanos-extended block metadata from meta.json.
func ReadBlockMeta(blockDir string) (*BlockMeta, error) {
	metaPath := filepath.Join(blockDir, "meta.json")
	data, err := os.ReadFile(metaPath)
	if err != nil {
		return nil, err
	}

	var meta BlockMeta
	if err := json.Unmarshal(data, &meta); err != nil {
		return nil, err
	}

	return &meta, nil
}

// IsDownsampled returns true if the block contains downsampled data.
func (m *BlockMeta) IsDownsampled() bool {
	return m.Thanos.Downsample.Resolution > 0
}

// Resolution returns the block's downsampling resolution.
func (m *BlockMeta) Resolution() ResolutionLevel {
	return ResolutionLevel(m.Thanos.Downsample.Resolution)
}

// ResolutionSuffix returns a suffix string for metric names based on resolution.
// For example: ":5m" or ":1h" for downsampled data, empty for raw data.
func (m *BlockMeta) ResolutionSuffix() string {
	switch m.Resolution() {
	case Resolution5m:
		return ":5m"
	case Resolution1h:
		return ":1h"
	default:
		return ""
	}
}
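
For orientation, the thanos section of a downsampled block's meta.json, as decoded by ReadBlockMeta, looks roughly like the sketch below; the label values and resolution are illustrative, not taken from this diff:

// Illustrative only: a minimal thanos section of meta.json.
const sampleMetaJSON = `{
  "thanos": {
    "labels": {"replica": "r0"},
    "downsample": {"resolution": 300000},
    "source": "compactor"
  }
}`

// Unmarshalling sampleMetaJSON into BlockMeta gives:
//   meta.IsDownsampled()    == true
//   meta.Resolution()       == Resolution5m
//   meta.ResolutionSuffix() == ":5m"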

app/vmctl/thanos/block_reader.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package thanos

import (
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// BlockInfo contains information about a block including Thanos metadata.
type BlockInfo struct {
	Block      tsdb.BlockReader
	Resolution ResolutionLevel
	IsThanos   bool
	// Closer releases the block's resources (file descriptors, mmap).
	// Must be called only after all queriers on this block have been closed.
	Closer io.Closer
}

// OpenBlocksWithInfo opens all blocks and returns them with their metadata.
// snapshotDir must be a snapshot directory containing block directories.
func OpenBlocksWithInfo(snapshotDir string, aggrType AggrType) ([]BlockInfo, error) {
	entries, err := os.ReadDir(snapshotDir)
	if err != nil {
		return nil, fmt.Errorf("failed to read snapshot directory: %w", err)
	}

	var blocks []BlockInfo
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}

		blockDir := filepath.Join(snapshotDir, entry.Name())
		metaPath := filepath.Join(blockDir, "meta.json")

		// Check if this is a valid block directory (has meta.json)
		if _, err := os.Stat(metaPath); os.IsNotExist(err) {
			continue
		}

		meta, err := ReadBlockMeta(blockDir)
		if err != nil {
			CloseBlocks(blocks)
			return nil, fmt.Errorf("failed to read Thanos metadata for block %s: %w", blockDir, err)
		}

		var pool chunkenc.Pool
		if meta.IsDownsampled() {
			// Use AggrChunkPool for downsampled blocks
			pool = NewAggrChunkPool(aggrType)
		}

		block, err := tsdb.OpenBlock(nil, blockDir, pool, nil)
		if err != nil {
			// Close previously opened blocks before returning error
			CloseBlocks(blocks)
			return nil, fmt.Errorf("failed to open block %s: %w", blockDir, err)
		}

		blocks = append(blocks, BlockInfo{
			Block:      block,
			Resolution: meta.Resolution(),
			IsThanos:   true,
			Closer:     block,
		})
	}

	return blocks, nil
}

// CloseBlocks closes all blocks in the slice.
// Must be called only after all queriers on these blocks have been closed.
func CloseBlocks(blocks []BlockInfo) {
	for _, bi := range blocks {
		if bi.Closer != nil {
			_ = bi.Closer.Close()
		}
	}
}

app/vmctl/thanos/client.go (new file, 198 lines)
@@ -0,0 +1,198 @@
package thanos

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// Config contains parameters for reading Thanos snapshots.
type Config struct {
	Snapshot string
	Filter   Filter
}

// Filter contains configuration for filtering the timeseries.
type Filter struct {
	TimeMin    string
	TimeMax    string
	Label      string
	LabelValue string
}

// Client reads Thanos snapshot blocks, including downsampled blocks with AggrChunk encoding.
type Client struct {
	snapshotPath string
	filter       filter
	statsPrinted bool
}

type filter struct {
	min, max   int64
	label      string
	labelValue string
}

func (f filter) inRange(minV, maxV int64) bool {
	fmin, fmax := f.min, f.max
	if fmin == 0 {
		fmin = minV
	}
	if fmax == 0 {
		fmax = maxV
	}
	return minV <= fmax && fmin <= maxV
}

// NewClient creates a new Thanos snapshot client.
func NewClient(cfg Config) (*Client, error) {
	minTime, maxTime, err := parseTime(cfg.Filter.TimeMin, cfg.Filter.TimeMax)
	if err != nil {
		return nil, fmt.Errorf("failed to parse time in filter: %s", err)
	}
	return &Client{
		snapshotPath: cfg.Snapshot,
		filter: filter{
			min:        minTime,
			max:        maxTime,
			label:      cfg.Filter.Label,
			labelValue: cfg.Filter.LabelValue,
		},
	}, nil
}

// Explore fetches all available blocks from the snapshot with support for
// Thanos AggrChunk (downsampled blocks). It opens blocks with a custom pool
// that can decode AggrChunk encoding (0xff).
func (c *Client) Explore(aggrType AggrType) ([]BlockInfo, error) {
	blockInfos, err := OpenBlocksWithInfo(c.snapshotPath, aggrType)
	if err != nil {
		return nil, fmt.Errorf("failed to open blocks: %w", err)
	}

	s := &Stats{
		Filtered: c.filter.min != 0 || c.filter.max != 0 || c.filter.label != "",
		Blocks:   len(blockInfos),
	}

	var blocksToImport []BlockInfo
	for _, bi := range blockInfos {
		meta := bi.Block.Meta()

		if s.MinTime == 0 || meta.MinTime < s.MinTime {
			s.MinTime = meta.MinTime
		}
		if s.MaxTime == 0 || meta.MaxTime > s.MaxTime {
			s.MaxTime = meta.MaxTime
		}

		if !c.filter.inRange(meta.MinTime, meta.MaxTime) {
			s.SkippedBlocks++
			if bi.Closer != nil {
				_ = bi.Closer.Close()
			}
			continue
		}

		s.Samples += meta.Stats.NumSamples
		s.Series += meta.Stats.NumSeries
		blocksToImport = append(blocksToImport, bi)
	}
	if !c.statsPrinted {
		fmt.Println(s)
		c.statsPrinted = true
	}
	return blocksToImport, nil
}

// querierSeriesSet wraps a SeriesSet and its underlying Querier, ensuring
// the querier is closed once the SeriesSet has been fully consumed.
// This releases the querier's read reference on the block, which is required
// for Block.Close() to complete without hanging.
type querierSeriesSet struct {
	storage.SeriesSet
	q      storage.Querier
	closed bool
}

// Next advances the iterator. When the underlying SeriesSet is exhausted,
// it closes the querier to release resources.
func (s *querierSeriesSet) Next() bool {
	if s.SeriesSet.Next() {
		return true
	}
	if !s.closed {
		_ = s.q.Close()
		s.closed = true
	}
	return false
}

// Close explicitly closes the underlying querier.
// This must be called if iteration is stopped early (before Next returns false)
// to release block read references and prevent Block.Close() from hanging.
func (s *querierSeriesSet) Close() {
	if !s.closed {
		_ = s.q.Close()
		s.closed = true
	}
}

// ClosableSeriesSet extends storage.SeriesSet with a Close method for explicit cleanup.
type ClosableSeriesSet interface {
	storage.SeriesSet
	Close()
}

// Read reads the given BlockInfo according to configured time and label filters.
// The returned ClosableSeriesSet automatically closes the underlying querier when fully consumed,
// but Close() should be called explicitly (e.g., via defer) to handle early returns.
func (c *Client) Read(bi BlockInfo) (ClosableSeriesSet, error) {
	minTime, maxTime := bi.Block.Meta().MinTime, bi.Block.Meta().MaxTime
	if c.filter.min != 0 {
		minTime = c.filter.min
	}
	if c.filter.max != 0 {
		maxTime = c.filter.max
	}
	q, err := tsdb.NewBlockQuerier(bi.Block, minTime, maxTime)
	if err != nil {
		return nil, err
	}
	ss := q.Select(
		context.Background(),
		false,
		nil,
		labels.MustNewMatcher(labels.MatchRegexp, c.filter.label, c.filter.labelValue),
	)
	return &querierSeriesSet{
		SeriesSet: ss,
		q:         q,
	}, nil
}

func parseTime(start, end string) (int64, int64, error) {
	var s, e int64
	if start == "" && end == "" {
		return 0, 0, nil
	}
	if start != "" {
		v, err := time.Parse(time.RFC3339, start)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse %q: %s", start, err)
		}
		s = v.UnixNano() / int64(time.Millisecond)
	}
	if end != "" {
		v, err := time.Parse(time.RFC3339, end)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse %q: %s", end, err)
		}
		e = v.UnixNano() / int64(time.Millisecond)
	}
	return s, e, nil
}
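
The inRange predicate above is an inclusive interval-overlap test in which a zero bound means "unbounded" (it is substituted with the block's own bound before comparing). Illustrative values, not taken from the diff:

// f := filter{min: 1000, max: 2000}
// f.inRange(500, 1500)       == true  // [500,1500] overlaps [1000,2000]
// f.inRange(2500, 3000)      == false // the block starts after the filter's max
// filter{}.inRange(500, 600) == true  // no time filter set, everything passes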

app/vmctl/thanos/stats.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package thanos

import (
	"fmt"
	"time"
)

// Stats represents data migration stats for Thanos blocks.
type Stats struct {
	Filtered      bool
	MinTime       int64
	MaxTime       int64
	Samples       uint64
	Series        uint64
	Blocks        int
	SkippedBlocks int
}

// String returns string representation for s.
func (s Stats) String() string {
	str := fmt.Sprintf("Thanos snapshot stats:\n"+
		"  blocks found: %d;\n"+
		"  blocks skipped by time filter: %d;\n"+
		"  min time: %d (%v);\n"+
		"  max time: %d (%v);\n"+
		"  samples: %d;\n"+
		"  series: %d.",
		s.Blocks, s.SkippedBlocks,
		s.MinTime, time.Unix(s.MinTime/1e3, 0).Format(time.RFC3339),
		s.MaxTime, time.Unix(s.MaxTime/1e3, 0).Format(time.RFC3339),
		s.Samples, s.Series)

	if s.Filtered {
		str += "\n* Stats numbers are based on blocks meta info and don't account for applied filters."
	}

	return str
}

app/vmctl/thanos_processor.go (new file, 309 lines)
@@ -0,0 +1,309 @@
package main

import (
	"context"
	"fmt"
	"log"
	"strings"
	"sync"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/chunkenc"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/thanos"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

type thanosProcessor struct {
	cl *thanos.Client
	im *vm.Importer
	cc int

	isVerbose bool
	aggrTypes []thanos.AggrType
}

func (tp *thanosProcessor) run(ctx context.Context) error {
	if len(tp.aggrTypes) == 0 {
		tp.aggrTypes = thanos.AllAggrTypes
	}

	log.Printf("Processing blocks with aggregate types: %v", tp.aggrTypes)

	// Use the first aggregate type to explore blocks (block list is the same for all types)
	blocks, err := tp.cl.Explore(tp.aggrTypes[0])
	if err != nil {
		return fmt.Errorf("explore failed: %s", err)
	}
	if len(blocks) < 1 {
		return fmt.Errorf("found no blocks to import")
	}

	// Separate blocks into raw (resolution=0) and downsampled (resolution>0)
	var rawBlocks, downsampledBlocks []thanos.BlockInfo
	for _, block := range blocks {
		if block.Resolution == thanos.ResolutionRaw {
			rawBlocks = append(rawBlocks, block)
		} else {
			downsampledBlocks = append(downsampledBlocks, block)
		}
	}

	log.Printf("Found %d raw blocks and %d downsampled blocks", len(rawBlocks), len(downsampledBlocks))

	question := fmt.Sprintf("Found %d blocks to import (%d raw + %d downsampled with %d aggregate types). Continue?",
		len(blocks), len(rawBlocks), len(downsampledBlocks), len(tp.aggrTypes))
	if !prompt(ctx, question) {
		return nil
	}

	// Calculate total number of block processing passes for the progress bar:
	// raw blocks are processed once, downsampled blocks are processed once per aggregate type.
	totalPasses := len(rawBlocks) + len(downsampledBlocks)*len(tp.aggrTypes)
	thanosBlocksTotal.Add(totalPasses)
	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), totalPasses)
	if err := barpool.Start(); err != nil {
		return err
	}
	defer barpool.Stop()

	tp.im.ResetStats()

	type phaseStats struct {
		name    string
		series  uint64
		samples uint64
	}
	var phases []phaseStats

	// Process raw blocks first (no aggregate suffix)
	if len(rawBlocks) > 0 {
		log.Println("Processing raw blocks (resolution=0)...")
		stats, err := tp.processBlocks(rawBlocks, thanos.AggrTypeNone, bar)
		if err != nil {
			return fmt.Errorf("migration failed for raw blocks: %s", err)
		}
		phases = append(phases, phaseStats{
			name:    "raw",
			series:  stats.series,
			samples: stats.samples,
		})
	}

	// Close blocks from the initial Explore. The querierSeriesSet wrapper
	// has already released all querier read references, so Close won't hang.
	thanos.CloseBlocks(blocks)

	// Process downsampled blocks for each aggregate type.
	// Each type needs its own AggrChunkPool, so we reopen blocks per type.
	for _, aggrType := range tp.aggrTypes {
		if len(downsampledBlocks) < 1 {
			break
		}

		log.Printf("Processing downsampled blocks with aggregate type: %s", aggrType)

		aggrBlocks, err := tp.cl.Explore(aggrType)
		if err != nil {
			return fmt.Errorf("explore failed for aggr type %s: %s", aggrType, err)
		}

		var downsampledOnly []thanos.BlockInfo
		for _, block := range aggrBlocks {
			if block.Resolution != thanos.ResolutionRaw {
				downsampledOnly = append(downsampledOnly, block)
			}
		}

		if len(downsampledOnly) < 1 {
			log.Printf("No downsampled blocks found for aggregate type %s, skipping", aggrType)
			thanos.CloseBlocks(aggrBlocks)
			continue
		}

		log.Printf("Processing %d blocks for aggregate type: %s", len(downsampledOnly), aggrType)
		stats, err := tp.processBlocks(downsampledOnly, aggrType, bar)
		thanos.CloseBlocks(aggrBlocks)
		if err != nil {
			return fmt.Errorf("migration failed for aggr type %s: %s", aggrType, err)
		}
		phases = append(phases, phaseStats{
			name:    aggrType.String(),
			series:  stats.series,
			samples: stats.samples,
		})
	}

	// Print per-phase and total statistics
	var totalSeries, totalSamples uint64
	log.Printf("Migration statistics (%d raw blocks, %d downsampled blocks):", len(rawBlocks), len(downsampledBlocks))
	for _, p := range phases {
		log.Printf("  %s: %d series, %d samples", p.name, p.series, p.samples)
		totalSeries += p.series
		totalSamples += p.samples
	}
	log.Printf("  total: %d series, %d samples", totalSeries, totalSamples)

	// Wait for all buffers to flush
	tp.im.Close()
	// Drain import errors channel
	for vmErr := range tp.im.Errors() {
		if vmErr.Err != nil {
			thanosErrorsTotal.Inc()
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, tp.isVerbose))
		}
	}

	log.Println("Import finished!")
	log.Println(tp.im.Stats())
	return nil
}

// processBlocksStats holds statistics collected during block processing.
type processBlocksStats struct {
	blocks  uint64
	series  uint64
	samples uint64
}

func (tp *thanosProcessor) processBlocks(blocks []thanos.BlockInfo, aggrType thanos.AggrType, bar barpool.Bar) (processBlocksStats, error) {
	blockReadersCh := make(chan thanos.BlockInfo)
	errCh := make(chan error, tp.cc)

	var processedBlocks, totalSeries, totalSamples uint64
	var mu sync.Mutex

	var wg sync.WaitGroup
	for i := range tp.cc {
		workerID := i
		wg.Go(func() {
			for bi := range blockReadersCh {
				seriesCount, samplesCount, err := tp.do(bi, aggrType)
				if err != nil {
					thanosErrorsTotal.Inc()
					errCh <- fmt.Errorf("read failed for block %q with aggr %s: %s", bi.Block.Meta().ULID, aggrType, err)
					return
				}

				mu.Lock()
				processedBlocks++
				totalSeries += seriesCount
				totalSamples += samplesCount
				log.Printf("[Worker %d] Block %s: %d series, %d samples | Total: %d/%d blocks, %d series, %d samples",
					workerID, bi.Block.Meta().ULID.String()[:8], seriesCount, samplesCount,
					processedBlocks, len(blocks), totalSeries, totalSamples)
				mu.Unlock()

				thanosBlocksProcessed.Inc()
				bar.Increment()
			}
		})
	}

	// any error breaks the import
	for _, bi := range blocks {
		select {
		case thanosErr := <-errCh:
			close(blockReadersCh)
			wg.Wait()
			return processBlocksStats{}, fmt.Errorf("thanos error: %s", thanosErr)
		case vmErr := <-tp.im.Errors():
			close(blockReadersCh)
			wg.Wait()
			thanosErrorsTotal.Inc()
			return processBlocksStats{}, fmt.Errorf("import process failed: %s", wrapErr(vmErr, tp.isVerbose))
		case blockReadersCh <- bi:
		}
	}

	close(blockReadersCh)
	wg.Wait()
	close(errCh)
	for err := range errCh {
		return processBlocksStats{}, fmt.Errorf("import process failed: %s", err)
	}

	return processBlocksStats{
		blocks:  processedBlocks,
		series:  totalSeries,
		samples: totalSamples,
	}, nil
}

func (tp *thanosProcessor) do(bi thanos.BlockInfo, aggrType thanos.AggrType) (uint64, uint64, error) {
	ss, err := tp.cl.Read(bi)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to read block: %s", err)
	}
	defer ss.Close() // Ensure querier is closed even on early returns

	var it chunkenc.Iterator
	var seriesCount, samplesCount uint64

	for ss.Next() {
		var name string
		var labelPairs []vm.LabelPair
		series := ss.At()

		series.Labels().Range(func(label labels.Label) {
			if label.Name == "__name__" {
				name = label.Value
				return
			}
			labelPairs = append(labelPairs, vm.LabelPair{
				Name:  strings.Clone(label.Name),
				Value: strings.Clone(label.Value),
			})
		})
		if name == "" {
			return seriesCount, samplesCount, fmt.Errorf("failed to find `__name__` label in labelset for block %v", bi.Block.Meta().ULID)
		}

		// Add resolution and aggregate type suffix to metric name for downsampled blocks
		if bi.Resolution != thanos.ResolutionRaw && aggrType != thanos.AggrTypeNone {
			name = fmt.Sprintf("%s:%s:%s", name, bi.Resolution.String(), aggrType.String())
		}

		var timestamps []int64
		var values []float64
		it = series.Iterator(it)
		for {
			typ := it.Next()
			if typ == chunkenc.ValNone {
				break
			}
			if typ != chunkenc.ValFloat {
				continue
			}
			t, v := it.At()
			timestamps = append(timestamps, t)
			values = append(values, v)
		}
		if err := it.Err(); err != nil {
			return seriesCount, samplesCount, err
		}

		samplesCount += uint64(len(timestamps))
		seriesCount++

		ts := vm.TimeSeries{
			Name:       name,
			LabelPairs: labelPairs,
			Timestamps: timestamps,
			Values:     values,
		}
		if err := tp.im.Input(&ts); err != nil {
			return seriesCount, samplesCount, err
		}
	}
	return seriesCount, samplesCount, ss.Err()
}

var (
	thanosBlocksTotal     = metrics.NewCounter(`vmctl_thanos_migration_blocks_total`)
	thanosBlocksProcessed = metrics.NewCounter(`vmctl_thanos_migration_blocks_processed`)
	thanosErrorsTotal     = metrics.NewCounter(`vmctl_thanos_migration_errors_total`)
)
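
As the suffixing logic in do() shows, a downsampled series keeps its base name and gains resolution and aggregate suffixes, matching the naming promised by the -thanos-aggr-types flag description. An illustrative example (the metric name is made up):

// name = "http_requests_total", bi.Resolution = Resolution5m, aggrType = AggrSum:
// fmt.Sprintf("%s:%s:%s", name, bi.Resolution.String(), aggrType.String())
//   -> "http_requests_total:5m:sum"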
@@ -55,7 +55,7 @@
	deduplicator *streamaggr.Deduplicator
)

-// CheckStreamAggrConfig checks config pointed by -stramaggr.config
+// CheckStreamAggrConfig checks config pointed by -streamaggr.config
func CheckStreamAggrConfig() error {
	if *streamAggrConfig == "" {
		return nil
@@ -45,15 +45,14 @@ func insertRows(sketches []*datadogsketches.Sketch, extraLabels []prompb.Label)
		ms := sketch.ToSummary()
		for _, m := range ms {
			ctx.Labels = ctx.Labels[:0]
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10557
			ctx.AddLabel("host", sketch.Host) // newly added
			ctx.AddLabel("", m.Name)
			for _, label := range m.Labels {
				ctx.AddLabel(label.Name, label.Value)
			}
			for _, tag := range sketch.Tags {
				name, value := datadogutil.SplitTag(tag)
				if name == "host" {
					name = "exported_host"
				}
				ctx.AddLabel(name, value)
			}
			for j := range extraLabels {
@@ -77,7 +77,7 @@ func push(ctx *common.InsertCtx, tss []prompb.TimeSeries) {
		r := &ts.Samples[i]
		metricNameRaw, err = ctx.WriteDataPointExt(metricNameRaw, ctx.Labels, r.Timestamp, r.Value)
		if err != nil {
-			logger.Errorf("cannot write promscape data to storage: %s", err)
+			logger.Errorf("cannot write promscrape data to storage: %s", err)
			return
		}
	}
@@ -30,6 +30,7 @@ var (
	concurrency             = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce restore duration")
	maxBytesPerSecond       = flagutil.NewBytes("maxBytesPerSecond", 0, "The maximum download speed. There is no limit if it is set to 0")
	skipBackupCompleteCheck = flag.Bool("skipBackupCompleteCheck", false, "Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file")
	SkipPreallocation       = flag.Bool("skipFilePreallocation", false, "Whether to skip pre-allocated files. This will likely be slower in most cases, but allows restores to resume mid file on failure")
)

func main() {
@@ -63,6 +64,7 @@ func main() {
		Src:                     srcFS,
		Dst:                     dstFS,
		SkipBackupCompleteCheck: *skipBackupCompleteCheck,
		SkipPreallocation:       *SkipPreallocation,
	}
	pushmetrics.Init()
	if err := a.Run(ctx); err != nil {
@@ -321,19 +321,23 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
return true
|
||||
case "/tags/tagSeries":
|
||||
graphiteTagsTagSeriesRequests.Inc()
|
||||
if err := graphite.TagsTagSeriesHandler(startTime, w, r); err != nil {
|
||||
graphiteTagsTagSeriesErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
err := &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("graphite tag registration has been disabled and is planned to be removed in future. " +
|
||||
"See: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10544"),
|
||||
StatusCode: http.StatusNotImplemented,
|
||||
}
|
||||
graphiteTagsTagSeriesErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
case "/tags/tagMultiSeries":
|
||||
graphiteTagsTagMultiSeriesRequests.Inc()
|
||||
if err := graphite.TagsTagMultiSeriesHandler(startTime, w, r); err != nil {
|
||||
graphiteTagsTagMultiSeriesErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
err := &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("graphite tag registration has been disabled and is planned to be removed in future. " +
|
||||
"See: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10544"),
|
||||
StatusCode: http.StatusNotImplemented,
|
||||
}
|
||||
graphiteTagsTagMultiSeriesErrors.Inc()
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
case "/tags":
|
||||
graphiteTagsRequests.Inc()
|
||||
@@ -739,6 +743,26 @@ func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request, path string) {
 	req := r.Clone(r.Context())
 	req.URL.Path = strings.TrimPrefix(path, "prometheus")
 	req.Host = vmalertProxyHost
+
+	if strings.HasPrefix(r.Header.Get(`User-Agent`), `Grafana`) {
+		// Grafana currently supports only Prometheus-style alerts. If other alert types
+		// (e.g. logs or traces) are returned, it may fail with "Error loading alerts".
+		//
+		// Grafana queries the vmalert API directly, bypassing the VictoriaMetrics datasource,
+		// so query params (such as datasource_type) cannot be enforced on the Grafana side.
+		//
+		// To ensure compatibility, we detect Grafana requests via the User-Agent and enforce
+		// `datasource_type=prometheus`.
+		//
+		// See:
+		// - https://github.com/VictoriaMetrics/victoriametrics-datasource/issues/329#issuecomment-3847585443
+		// - https://github.com/VictoriaMetrics/victoriametrics-datasource/issues/59
+		q := req.URL.Query()
+		q.Set("datasource_type", "prometheus")
+		req.URL.RawQuery = q.Encode()
+		req.RequestURI = ""
+	}

 	vmalertProxy.ServeHTTP(w, req)
 }

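The effect of the User-Agent check can be seen in isolation. A minimal sketch of the same rewrite (the target URL and version string are hypothetical):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// If the caller identifies itself as Grafana, force datasource_type=prometheus
// on the proxied URL, mirroring the handler logic above.
func main() {
	req, err := http.NewRequest(http.MethodGet, "http://vmalert:8880/api/v1/rules", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("User-Agent", "Grafana/10.4.3")

	if strings.HasPrefix(req.Header.Get("User-Agent"), "Grafana") {
		q := req.URL.Query()
		q.Set("datasource_type", "prometheus")
		req.URL.RawQuery = q.Encode()
	}
	fmt.Println(req.URL.String())
	// Output: http://vmalert:8880/api/v1/rules?datasource_type=prometheus
}

Note that the handler also clears req.RequestURI after modifying the URL; Go's HTTP client requires RequestURI to be empty on outbound requests.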
@@ -22,6 +22,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricnamestats"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
 )

@@ -1362,7 +1363,7 @@ func applyGraphiteRegexpFilter(filter string, ss []string) ([]string, error) {
 const maxFastAllocBlockSize = 32 * 1024

 // GetMetricNamesStats returns statistics for time series metric names usage.
-func GetMetricNamesStats(qt *querytracer.Tracer, limit, le int, matchPattern string) (storage.MetricNamesStatsResponse, error) {
+func GetMetricNamesStats(qt *querytracer.Tracer, limit, le int, matchPattern string) (metricnamestats.StatsResult, error) {
 	qt = qt.NewChild("get metric names usage statistics with limit: %d, less or equal to: %d, match pattern=%q", limit, le, matchPattern)
 	defer qt.Done()
 	return vmstorage.GetMetricNamesStats(qt, limit, le, matchPattern)

@@ -11,6 +11,16 @@

{% stripspace %}

+{% func ExportCSVHeader(fieldNames []string) %}
+	{% if len(fieldNames) == 0 %}{% return %}{% endif %}
+	{%s= fieldNames[0] %}
+	{% for _, fieldName := range fieldNames[1:] %}
+		,
+		{%s= fieldName %}
+	{% endfor %}
+	{% newline %}
+{% endfunc %}
+
{% func ExportCSVLine(xb *exportBlock, fieldNames []string) %}
	{% if len(xb.timestamps) == 0 || len(fieldNames) == 0 %}{% return %}{% endif %}
	{% for i, timestamp := range xb.timestamps %}

[File diff suppressed because it is too large]

app/vmselect/prometheus/export_test.go (new file, 132 lines)
@@ -0,0 +1,132 @@
package prometheus

import (
	"strings"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func TestExportCSVHeader(t *testing.T) {
	f := func(fieldNames []string, expected string) {
		t.Helper()
		got := ExportCSVHeader(fieldNames)
		if got != expected {
			t.Fatalf("ExportCSVHeader(%v): got %q; want %q", fieldNames, got, expected)
		}
	}

	f(nil, "")
	f([]string{}, "")

	f([]string{"__value__"}, "__value__\n")
	f([]string{"__timestamp__"}, "__timestamp__\n")
	f([]string{"__timestamp__:rfc3339"}, "__timestamp__:rfc3339\n")
	f([]string{"__name__"}, "__name__\n")
	f([]string{"job"}, "job\n")

	f([]string{"__timestamp__:rfc3339", "__value__"}, "__timestamp__:rfc3339,__value__\n")
	f([]string{"__value__", "__timestamp__"}, "__value__,__timestamp__\n")
	f([]string{"job", "instance"}, "job,instance\n")

	f([]string{"__name__", "__value__", "__timestamp__:unix_s"}, "__name__,__value__,__timestamp__:unix_s\n")
	f([]string{"job", "instance", "__value__", "__timestamp__:unix_ms"}, "job,instance,__value__,__timestamp__:unix_ms\n")
	f([]string{"__timestamp__:custom:2006-01-02", "__value__", "host", "dc", "env"},
		"__timestamp__:custom:2006-01-02,__value__,host,dc,env\n")

	// duplicate fields
	f([]string{"__value__", "__value__"}, "__value__,__value__\n")
	f([]string{"__timestamp__", "__timestamp__:rfc3339"}, "__timestamp__,__timestamp__:rfc3339\n")
}

func TestExportCSVLine(t *testing.T) {
	localBak := time.Local
	time.Local = time.UTC
	defer func() { time.Local = localBak }()

	f := func(mn *storage.MetricName, timestamps []int64, values []float64, fieldNames []string, expected string) {
		t.Helper()
		xb := &exportBlock{
			mn:         mn,
			timestamps: timestamps,
			values:     values,
		}
		got := ExportCSVLine(xb, fieldNames)
		if got != expected {
			t.Fatalf("ExportCSVLine: got %q; want %q", got, expected)
		}
	}

	mn := &storage.MetricName{
		MetricGroup: []byte("cpu_usage"),
		Tags: []storage.Tag{
			{Key: []byte("job"), Value: []byte("node")},
			{Key: []byte("instance"), Value: []byte("localhost:9090")},
		},
	}

	// empty inputs
	f(mn, nil, nil, []string{"__value__"}, "")
	f(mn, []int64{}, []float64{}, []string{"__value__"}, "")
	f(mn, []int64{1000}, []float64{1.5}, nil, "")
	f(mn, []int64{1000}, []float64{1.5}, []string{}, "")

	f(mn, []int64{1000}, []float64{42.5}, []string{"__value__"}, "42.5\n")
	f(mn, []int64{1704067200000}, []float64{1}, []string{"__timestamp__"}, "1704067200000\n")
	f(mn, []int64{1704067200000}, []float64{1}, []string{"__timestamp__:unix_s"}, "1704067200\n")
	f(mn, []int64{1704067200000}, []float64{1}, []string{"__timestamp__:unix_ms"}, "1704067200000\n")
	f(mn, []int64{1704067200000}, []float64{1}, []string{"__timestamp__:unix_ns"}, "1704067200000000000\n")
	f(mn, []int64{1704067200000}, []float64{1}, []string{"__timestamp__:rfc3339"}, "2024-01-01T00:00:00Z\n")

	f(mn, []int64{1000}, []float64{1}, []string{"__name__"}, "cpu_usage\n")
	f(mn, []int64{1000}, []float64{1}, []string{"job"}, "node\n")
	f(mn, []int64{1000}, []float64{1}, []string{"instance"}, "localhost:9090\n")
	f(mn, []int64{1000}, []float64{1}, []string{"missing_label"}, "\n")

	// multiple fields
	f(mn, []int64{1704067200000}, []float64{99.9},
		[]string{"__timestamp__:unix_s", "__value__", "job"},
		"1704067200,99.9,node\n")

	// multiple rows
	f(mn, []int64{1000, 2000}, []float64{1.1, 2.2},
		[]string{"__value__", "__timestamp__"},
		"1.1,1000\n2.2,2000\n")
	f(mn, []int64{1000, 2000, 3000}, []float64{10, 20, 30},
		[]string{"__timestamp__:unix_s", "__value__"},
		"1,10\n2,20\n3,30\n")

	// escaping for special characters in tag values
	f(&storage.MetricName{
		MetricGroup: []byte("m"),
		Tags:        []storage.Tag{{Key: []byte("desc"), Value: []byte("a,b")}},
	}, []int64{1000}, []float64{1}, []string{"desc"}, "\"a,b\"\n")

	f(&storage.MetricName{
		MetricGroup: []byte("m"),
		Tags:        []storage.Tag{{Key: []byte("desc"), Value: []byte(`say "hello"`)}},
	}, []int64{1000}, []float64{1}, []string{"desc"}, "\"say \\\"hello\\\"\"\n")

	f(&storage.MetricName{
		MetricGroup: []byte("m"),
		Tags:        []storage.Tag{{Key: []byte("desc"), Value: []byte("line1\nline2")}},
	}, []int64{1000}, []float64{1}, []string{"desc"}, "\"line1\\nline2\"\n")

	// header and data line field counts must match
	fieldNames := []string{"__name__", "job", "instance", "__value__", "__timestamp__:unix_s"}
	header := ExportCSVHeader(fieldNames)
	line := ExportCSVLine(&exportBlock{
		mn:         mn,
		timestamps: []int64{1704067200000},
		values:     []float64{99.9},
	}, fieldNames)
	headerCommas := strings.Count(header, ",")
	lineCommas := strings.Count(line, ",")
	if headerCommas != lineCommas {
		t.Fatalf("header has %d commas, data line has %d commas", headerCommas, lineCommas)
	}
	if headerCommas != len(fieldNames)-1 {
		t.Fatalf("expected %d commas in header, got %d", len(fieldNames)-1, headerCommas)
	}
}

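The escaping cases above imply a quoting rule that differs from RFC 4180: double quotes are backslash-escaped rather than doubled, and a newline becomes the two characters \n. The sketch below reproduces the expected strings; it is an inference from the tests, not the project's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// quoteCSV wraps a value in quotes when it contains a comma, double quote or
// newline, escaping `"` as \" and a newline as the literal two-character \n.
func quoteCSV(s string) string {
	if !strings.ContainsAny(s, ",\"\n") {
		return s
	}
	s = strings.ReplaceAll(s, `"`, `\"`)
	s = strings.ReplaceAll(s, "\n", `\n`)
	return `"` + s + `"`
}

func main() {
	fmt.Println(quoteCSV("a,b"))          // "a,b"
	fmt.Println(quoteCSV(`say "hello"`))  // "say \"hello\""
	fmt.Println(quoteCSV("line1\nline2")) // "line1\nline2"
}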
@@ -12,6 +12,7 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
+	"unicode/utf8"

 	"github.com/VictoriaMetrics/metrics"
 	"github.com/VictoriaMetrics/metricsql"
@@ -174,6 +175,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 	w.Header().Set("Content-Type", "text/csv; charset=utf-8")
 	bw := bufferedwriter.Get(w)
 	defer bufferedwriter.Put(bw)
+	WriteExportCSVHeader(bw, fieldNames)
 	sw := newScalableWriter(bw)
 	writeCSVLine := func(xb *exportBlock, workerID uint) error {
 		if len(xb.timestamps) == 0 {
@@ -528,6 +530,14 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName s
 		return err
 	}
 	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxLabelsAPISeries)

+	if strings.HasPrefix(labelName, "U__") {
+		// This label seems to be Unicode-encoded according to the Prometheus spec.
+		// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
+		// Spec: https://github.com/prometheus/proposals/blob/main/proposals/0028-utf8.md
+		labelName = unescapePrometheusLabelName(labelName)
+	}

 	labelValues, err := netstorage.LabelValues(qt, labelName, sq, limit, cp.deadline)
 	if err != nil {
 		return fmt.Errorf("cannot obtain values for label %q: %w", labelName, err)
@@ -1330,3 +1340,70 @@ func calculateMaxUniqueTimeSeriesForResource(maxConcurrentRequests, remainingMem
 func GetMaxUniqueTimeSeries() int {
 	return maxUniqueTimeseriesValue
 }

+// copied from https://github.com/prometheus/common/blob/adea6285c1c7447fcb7bfdeb6abfc6eff893e0a7/model/metric.go#L483
+// It's not possible to import it directly, since that would increase the binary size.
+func unescapePrometheusLabelName(name string) string {
+	// lower function taken from strconv.atoi.
+	lower := func(c byte) byte {
+		return c | ('x' - 'X')
+	}
+	if len(name) == 0 {
+		return name
+	}
+	escapedName, found := strings.CutPrefix(name, "U__")
+	if !found {
+		return name
+	}
+
+	var unescaped strings.Builder
+TOP:
+	for i := 0; i < len(escapedName); i++ {
+		// All non-underscores are treated normally.
+		if escapedName[i] != '_' {
+			unescaped.WriteByte(escapedName[i])
+			continue
+		}
+		i++
+		if i >= len(escapedName) {
+			return name
+		}
+		// A double underscore is a single underscore.
+		if escapedName[i] == '_' {
+			unescaped.WriteByte('_')
+			continue
+		}
+		// We think we are in a UTF-8 code, process it.
+		var utf8Val uint
+		for j := 0; i < len(escapedName); j++ {
+			// This is too many characters for a utf8 value based on the MaxRune
+			// value of '\U0010FFFF'.
+			if j >= 6 {
+				return name
+			}
+			// Found a closing underscore, convert to a rune, check validity, and append.
+			if escapedName[i] == '_' {
+				utf8Rune := rune(utf8Val)
+				if !utf8.ValidRune(utf8Rune) {
+					return name
+				}
+				unescaped.WriteRune(utf8Rune)
+				continue TOP
+			}
+			r := lower(escapedName[i])
+			utf8Val *= 16
+			switch {
+			case r >= '0' && r <= '9':
+				utf8Val += uint(r) - '0'
+			case r >= 'a' && r <= 'f':
+				utf8Val += uint(r) - 'a' + 10
+			default:
+				return name
+			}
+			i++
+		}
+		// Didn't find a closing underscore; the name is invalid.
+		return name
+	}
+	return unescaped.String()
+}

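For reference, the inverse (escaping) side of this scheme works roughly as sketched below. This is an illustration of the encoding described in the linked spec, not code from prometheus/common, and the sample label name is made up:

package main

import "fmt"

// escapePrometheusLabelName shows how a UTF-8 label name such as
// "http.request.count" becomes a legal classic label name: the result is
// prefixed with U__, literal underscores are doubled, and every other
// disallowed rune is written as _<hex>_ .
func escapePrometheusLabelName(name string) string {
	var b []byte
	b = append(b, "U__"...)
	for _, r := range name {
		switch {
		case r == '_':
			b = append(b, "__"...)
		case (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9'):
			b = append(b, byte(r))
		default:
			b = append(b, fmt.Sprintf("_%x_", r)...)
		}
	}
	return string(b)
}

func main() {
	// '.' is U+002E, so it becomes _2e_:
	fmt.Println(escapePrometheusLabelName("http.request.count"))
	// Output: U__http_2e_request_2e_count
}

Feeding the result through unescapePrometheusLabelName above recovers the original name.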
@@ -1,6 +1,7 @@
{% import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricnamestats"
) %}

{% stripspace %}
@@ -34,9 +35,9 @@ TSDBStatusResponse generates response for /api/v1/status/tsdb .
]
{% endfunc %}

-{% func tsdbStatusMetricNameEntries(a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) %}
+{% func tsdbStatusMetricNameEntries(a []storage.TopHeapEntry, queryStats []metricnamestats.StatRecord) %}
{% code
-	queryStatsByMetricName := make(map[string]storage.MetricNamesStatsRecord,len(queryStats))
+	queryStatsByMetricName := make(map[string]metricnamestats.StatRecord,len(queryStats))
	for _, record := range queryStats{
		queryStatsByMetricName[record.MetricName] = record
	}

@@ -8,228 +8,229 @@ package prometheus
import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricnamestats"
)

// TSDBStatusResponse generates response for /api/v1/status/tsdb .

//line app/vmselect/prometheus/tsdb_status_response.qtpl:8
//line app/vmselect/prometheus/tsdb_status_response.qtpl:9
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/tsdb_status_response.qtpl:8
//line app/vmselect/prometheus/tsdb_status_response.qtpl:9
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/tsdb_status_response.qtpl:8
//line app/vmselect/prometheus/tsdb_status_response.qtpl:9
func StreamTSDBStatusResponse(qw422016 *qt422016.Writer, status *storage.TSDBStatus, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:8
//line app/vmselect/prometheus/tsdb_status_response.qtpl:9
	qw422016.N().S(`{"status":"success","data":{"totalSeries":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:12
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13
	qw422016.N().DUL(status.TotalSeries)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:12
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13
	qw422016.N().S(`,"totalLabelValuePairs":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13
//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
	qw422016.N().DUL(status.TotalLabelValuePairs)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13
//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
	qw422016.N().S(`,"seriesCountByMetricName":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
	streamtsdbStatusMetricNameEntries(qw422016, status.SeriesCountByMetricName, status.SeriesQueryStatsByMetricName)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
	qw422016.N().S(`,"seriesCountByLabelName":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
	streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelName)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
	qw422016.N().S(`,"seriesCountByFocusLabelValue":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
	streamtsdbStatusEntries(qw422016, status.SeriesCountByFocusLabelValue)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
	qw422016.N().S(`,"seriesCountByLabelValuePair":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
//line app/vmselect/prometheus/tsdb_status_response.qtpl:18
	streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelValuePair)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
//line app/vmselect/prometheus/tsdb_status_response.qtpl:18
	qw422016.N().S(`,"labelValueCountByLabelName":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:18
//line app/vmselect/prometheus/tsdb_status_response.qtpl:19
	streamtsdbStatusEntries(qw422016, status.LabelValueCountByLabelName)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:18
//line app/vmselect/prometheus/tsdb_status_response.qtpl:19
	qw422016.N().S(`}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:20
//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
	qt.Done()

//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
	streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
	qw422016.N().S(`}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
func WriteTSDBStatusResponse(qq422016 qtio422016.Writer, status *storage.TSDBStatus, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	StreamTSDBStatusResponse(qw422016, status, qt)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
func TSDBStatusResponse(status *storage.TSDBStatus, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	WriteTSDBStatusResponse(qb422016, status, qt)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
	return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:25
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
func streamtsdbStatusEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:25
//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
	qw422016.N().S(`[`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:27
//line app/vmselect/prometheus/tsdb_status_response.qtpl:28
	for i, e := range a {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:27
//line app/vmselect/prometheus/tsdb_status_response.qtpl:28
		qw422016.N().S(`{"name":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
//line app/vmselect/prometheus/tsdb_status_response.qtpl:30
		qw422016.N().Q(e.Name)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
//line app/vmselect/prometheus/tsdb_status_response.qtpl:30
		qw422016.N().S(`,"value":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:30
//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
		qw422016.N().D(int(e.Count))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:30
//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
		qw422016.N().S(`}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
		if i+1 < len(a) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
			qw422016.N().S(`,`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
		}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
	}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
	qw422016.N().S(`]`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
func writetsdbStatusEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	streamtsdbStatusEntries(qw422016, a)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
func tsdbStatusEntries(a []storage.TopHeapEntry) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	writetsdbStatusEntries(qb422016, a)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
	return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
//line app/vmselect/prometheus/tsdb_status_response.qtpl:36
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:37
func streamtsdbStatusMetricNameEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:39
	queryStatsByMetricName := make(map[string]storage.MetricNamesStatsRecord, len(queryStats))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:38
func streamtsdbStatusMetricNameEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry, queryStats []metricnamestats.StatRecord) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:40
	queryStatsByMetricName := make(map[string]metricnamestats.StatRecord, len(queryStats))
	for _, record := range queryStats {
		queryStatsByMetricName[record.MetricName] = record
	}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:43
//line app/vmselect/prometheus/tsdb_status_response.qtpl:44
	qw422016.N().S(`[`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:45
//line app/vmselect/prometheus/tsdb_status_response.qtpl:46
	for i, e := range a {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:45
//line app/vmselect/prometheus/tsdb_status_response.qtpl:46
		qw422016.N().S(`{`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:48
//line app/vmselect/prometheus/tsdb_status_response.qtpl:49
		entry, ok := queryStatsByMetricName[e.Name]

//line app/vmselect/prometheus/tsdb_status_response.qtpl:49
//line app/vmselect/prometheus/tsdb_status_response.qtpl:50
		qw422016.N().S(`"name":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:50
//line app/vmselect/prometheus/tsdb_status_response.qtpl:51
		qw422016.N().Q(e.Name)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:50
//line app/vmselect/prometheus/tsdb_status_response.qtpl:51
		qw422016.N().S(`,`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:51
		if !ok {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:51
			qw422016.N().S(`"value":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:52
			qw422016.N().D(int(e.Count))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:53
		} else {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:53
		if !ok {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:52
			qw422016.N().S(`"value":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:54
//line app/vmselect/prometheus/tsdb_status_response.qtpl:53
			qw422016.N().D(int(e.Count))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:54
		} else {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:54
			qw422016.N().S(`"value":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:55
			qw422016.N().D(int(e.Count))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:55
			qw422016.N().S(`,"requestsCount":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:55
			qw422016.N().D(int(entry.RequestsCount))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:55
			qw422016.N().S(`,"lastRequestTimestamp":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:56
			qw422016.N().D(int(entry.RequestsCount))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:56
			qw422016.N().S(`,"lastRequestTimestamp":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:57
			qw422016.N().D(int(entry.LastRequestTs))
//line app/vmselect/prometheus/tsdb_status_response.qtpl:57
//line app/vmselect/prometheus/tsdb_status_response.qtpl:58
		}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:57
//line app/vmselect/prometheus/tsdb_status_response.qtpl:58
		qw422016.N().S(`}`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:59
//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
		if i+1 < len(a) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:59
//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
			qw422016.N().S(`,`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:59
//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
		}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
//line app/vmselect/prometheus/tsdb_status_response.qtpl:61
	}
//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
//line app/vmselect/prometheus/tsdb_status_response.qtpl:61
	qw422016.N().S(`]`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
func writetsdbStatusMetricNameEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
func writetsdbStatusMetricNameEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry, queryStats []metricnamestats.StatRecord) {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	streamtsdbStatusMetricNameEntries(qw422016, a, queryStats)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
}

//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
func tsdbStatusMetricNameEntries(a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
func tsdbStatusMetricNameEntries(a []storage.TopHeapEntry, queryStats []metricnamestats.StatRecord) string {
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	writetsdbStatusMetricNameEntries(qb422016, a, queryStats)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
	return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:62
//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
}

@@ -1166,6 +1166,61 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
 			},
 		}
 		return evalExpr(qt, ec, be)
+	// the cached rate result could be inaccurate in edge cases, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10098
+	case "rate":
+		if iafc != nil {
+			if !strings.EqualFold(iafc.ae.Name, "sum") {
+				qt.Printf("do not apply instant rollup optimization for incremental aggregate %s()", iafc.ae.Name)
+				return evalAt(qt, timestamp, window)
+			}
+			qt.Printf("optimized calculation for sum(rate(m[d])) as (sum(increase(m[d])) / d)")
+			afe := expr.(*metricsql.AggrFuncExpr)
+			fe := afe.Args[0].(*metricsql.FuncExpr)
+			feIncrease := *fe
+			feIncrease.Name = "increase"
+			// copy RollupExpr to drop possible offset,
+			// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
+			newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
+			newArg.Offset = nil
+			feIncrease.Args = []metricsql.Expr{newArg}
+			d := newArg.Window.Duration(ec.Step)
+			if d == 0 {
+				d = ec.Step
+			}
+			afeIncrease := *afe
+			afeIncrease.Args = []metricsql.Expr{&feIncrease}
+			be := &metricsql.BinaryOpExpr{
+				Op:              "/",
+				KeepMetricNames: true,
+				Left:            &afeIncrease,
+				Right: &metricsql.NumberExpr{
+					N: float64(d) / 1000,
+				},
+			}
+			return evalExpr(qt, ec, be)
+		}
+		qt.Printf("optimized calculation for instant rollup rate(m[d]) as (increase(m[d]) / d)")
+		fe := expr.(*metricsql.FuncExpr)
+		feIncrease := *fe
+		feIncrease.Name = "increase"
+		// copy RollupExpr to drop possible offset,
+		// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
+		newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
+		newArg.Offset = nil
+		feIncrease.Args = []metricsql.Expr{newArg}
+		d := newArg.Window.Duration(ec.Step)
+		if d == 0 {
+			d = ec.Step
+		}
+		be := &metricsql.BinaryOpExpr{
+			Op:              "/",
+			KeepMetricNames: fe.KeepMetricNames,
+			Left:            &feIncrease,
+			Right: &metricsql.NumberExpr{
+				N: float64(d) / 1000,
+			},
+		}
+		return evalExpr(qt, ec, be)
 	case "max_over_time":
 		if iafc != nil {
 			if !strings.EqualFold(iafc.ae.Name, "max") {

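Note the unit handling in the rewrite: newArg.Window.Duration(ec.Step) yields milliseconds, so the divisor is float64(d) / 1000 seconds. A minimal sketch of the identity being exploited (the window and counter values below are made up):

package main

import "fmt"

func main() {
	const dMillis = 300000.0 // 5m lookbehind window, in milliseconds
	increase := 1500.0       // hypothetical counter increase over the window

	// rate(m[5m]) == increase(m[5m]) / (window in seconds)
	rate := increase / (dMillis / 1000)
	fmt.Println(rate) // 5 (samples per second)
}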
@@ -4018,6 +4018,12 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(scalar)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(123, 456, time())`
+		resultExpected := []netstorage.Result{}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-no-le)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6, label_set(100, "foo", "bar"))`
@@ -4030,6 +4036,12 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(single-value-no-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(123,456, label_set(100, "foo", "bar"))`
+		resultExpected := []netstorage.Result{}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-invalid-le)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6, label_set(100, "le", "foobar"))`
@@ -4042,6 +4054,12 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(single-value-invalid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(50, 60, label_set(100, "le", "foobar"))`
+		resultExpected := []netstorage.Result{}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-inf-le)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6, label_set(100, "le", "+Inf"))`
@@ -4183,6 +4201,28 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(single-value-valid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(0, 100, label_set(100, "le", "200"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.5, 0.5, 0.5, 0.5, 0.5, 0.5},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_fraction(single-value-valid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(200, 300, label_set(100, "le", "200"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0, 0, 0, 0, 0, 0},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram_quantile(0.6, label_set(100, "le", "200"), "foobar"))`
@@ -4212,7 +4252,7 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r1, r2, r3}
 		f(q, resultExpected)
 	})
-	t.Run(`histogram_quantile(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
+	t.Run(`histogram_share(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram_share(120, label_set(100, "le", "200"), "foobar"))`
 		r1 := netstorage.Result{
@@ -4311,7 +4351,37 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
-	t.Run(`histogram_share(single-value-valid-le-mid-le)`, func(t *testing.T) {
+	t.Run(`histogram_fraction(single-value-valid-le-max-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(0,100, (
+			label_set(100, "le", "100"),
+			label_set(40, "le", "50"),
+			label_set(0, "le", "10"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{1, 1, 1, 1, 1, 1},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_fraction(single-value-valid-le-min-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(0,10, (
+			label_set(100, "le", "100"),
+			label_set(40, "le", "50"),
+			label_set(0, "le", "10"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0, 0, 0, 0, 0, 0},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_share(single-value-valid-le-mid-le-1)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_share(105, (
 			label_set(100, "le", "200"),
@@ -4325,6 +4395,34 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_share(single-value-valid-le-mid-le-2)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_share(55, (
+			label_set(100, "le", "200"),
+			label_set(0, "le", "55"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0, 0, 0, 0, 0, 0},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`histogram_fraction(single-value-valid-le-mid-le)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(55,105, (
+			label_set(100, "le", "200"),
+			label_set(0, "le", "55"),
+		))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(single-value-valid-le-min-phi-no-zero-bucket)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0, label_set(100, "le", "200"))`
@@ -4358,6 +4456,17 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(scalar-phi)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(25, time() / 8, label_set(100, "le", "200"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.5, 0.625, 0.75, 0.875, 0.875, 0.875},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(duplicate-le)`, func(t *testing.T) {
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
 		t.Parallel()
@@ -4439,6 +4548,36 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r1, r2}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(valid)`, func(t *testing.T) {
+		t.Parallel()
+		q := `sort(histogram_fraction(0, 25,
+			label_set(90, "foo", "bar", "le", "10")
+			or label_set(100, "foo", "bar", "le", "30")
+			or label_set(300, "foo", "bar", "le", "+Inf")
+			or label_set(200, "tag", "xx", "le", "10")
+			or label_set(300, "tag", "xx", "le", "30")
+		))`
+		r1 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.325, 0.325, 0.325, 0.325, 0.325, 0.325},
+			Timestamps: timestampsExpected,
+		}
+		r1.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("foo"),
+			Value: []byte("bar"),
+		}}
+		r2 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666},
+			Timestamps: timestampsExpected,
+		}
+		r2.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("tag"),
+			Value: []byte("xx"),
+		}}
+		resultExpected := []netstorage.Result{r1, r2}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(negative-bucket-count)`, func(t *testing.T) {
 		t.Parallel()
 		q := `histogram_quantile(0.6,
@@ -4555,6 +4694,25 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`histogram_fraction(normal-bucket-count)`, func(t *testing.T) {
+		t.Parallel()
+		q := `histogram_fraction(22,35,
+			label_set(0, "foo", "bar", "le", "10")
+			or label_set(100, "foo", "bar", "le", "30")
+			or label_set(300, "foo", "bar", "le", "+Inf")
+		)`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333},
+			Timestamps: timestampsExpected,
+		}
+		r.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("foo"),
+			Value: []byte("bar"),
+		}}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`histogram_quantile(normal-bucket-count, boundsLabel)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram_quantile(0.2,

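The `histogram_fraction(normal-bucket-count)` expectation above can be cross-checked by hand. Assuming cumulative buckets {le=10: 0, le=30: 100, le=+Inf: 300}, share(35) falls into the +Inf bucket and returns only the lower bound vPrev/vLast, while share(22) interpolates linearly inside the 10..30 bucket:

package main

import "fmt"

func main() {
	share35 := 100.0 / 300.0                                // +Inf upper bucket: vPrev/vLast
	share22 := 0 + (100.0-0)/300.0*(22.0-10.0)/(30.0-10.0)  // linear interpolation
	fmt.Println(share35 - share22)                          // ≈ 0.1333333333333333
}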
@@ -51,6 +51,7 @@ var transformFuncs = map[string]transformFunc{
 	"exp":                 newTransformFuncOneArg(transformExp),
 	"floor":               newTransformFuncOneArg(transformFloor),
 	"histogram_avg":       transformHistogramAvg,
+	"histogram_fraction":  transformHistogramFraction,
 	"histogram_quantile":  transformHistogramQuantile,
 	"histogram_quantiles": transformHistogramQuantiles,
 	"histogram_share":     transformHistogramShare,
@@ -662,13 +663,13 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
 		if math.IsNaN(leReq) || len(xss) == 0 {
 			return nan, nan, nan
 		}
-		fixBrokenBuckets(i, xss)
 		if leReq < 0 {
 			return 0, 0, 0
 		}
 		if math.IsInf(leReq, 1) {
 			return 1, 1, 1
 		}
+		fixBrokenBuckets(i, xss)
 		var vPrev, lePrev float64
 		for _, xs := range xss {
 			v := xs.ts.Values[i]
@@ -729,6 +730,85 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
 	return rvs, nil
 }

+// histogram_fraction is a shortcut for `histogram_share(upperLe, buckets) - histogram_share(lowerLe, buckets)`;
+// histogram_fraction(x, y) = histogram_fraction(-Inf, y) - histogram_fraction(-Inf, x) = histogram_share(y) - histogram_share(x).
+// This function is supported by PromQL.
+func transformHistogramFraction(tfa *transformFuncArg) ([]*timeseries, error) {
+	args := tfa.args
+	if err := expectTransformArgsNum(args, 3); err != nil {
+		return nil, err
+	}
+	lowerles, err := getScalar(args[0], 0)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse lower le: %w", err)
+	}
+	upperles, err := getScalar(args[1], 1)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse upper le: %w", err)
+	}
+	if lowerles[0] >= upperles[0] {
+		return nil, fmt.Errorf("lower le must be smaller than upper le; got lower le: %f, upper le: %f", lowerles[0], upperles[0])
+	}
+
+	// Convert buckets with `vmrange` labels to buckets with `le` labels.
+	tss := vmrangeBucketsToLE(args[2])
+
+	// Group metrics by all tags excluding "le"
+	m := groupLeTimeseries(tss)
+
+	fraction := func(i int, lowerle, upperle float64, xss []leTimeseries) (q float64) {
+		if math.IsNaN(lowerle) || math.IsNaN(upperle) || len(xss) == 0 {
+			return nan
+		}
+		fixBrokenBuckets(i, xss)
+		share := func(leReq float64) float64 {
+			if leReq < 0 {
+				return 0
+			}
+			if math.IsInf(leReq, 1) {
+				return 1
+			}
+			var vPrev, lePrev float64
+			for _, xs := range xss {
+				v := xs.ts.Values[i]
+				le := xs.le
+				if leReq >= le {
+					vPrev = v
+					lePrev = le
+					continue
+				}
+				// precondition: lePrev <= leReq < le
+				vLast := xss[len(xss)-1].ts.Values[i]
+				lower := vPrev / vLast
+				if math.IsInf(le, 1) {
+					return lower
+				}
+				if lePrev == leReq {
+					return lower
+				}
+				q = lower + (v-vPrev)/vLast*(leReq-lePrev)/(le-lePrev)
+				return q
+			}
+			return 1
+		}
+		return share(upperle) - share(lowerle)
+	}
+	rvs := make([]*timeseries, 0, len(m))
+	for _, xss := range m {
+		sort.Slice(xss, func(i, j int) bool {
+			return xss[i].le < xss[j].le
+		})
+		xss = mergeSameLE(xss)
+		dst := xss[0].ts
+		for i := range dst.Values {
+			q := fraction(i, lowerles[i], upperles[i], xss)
+			dst.Values[i] = q
+		}
+		rvs = append(rvs, dst)
+	}
+	return rvs, nil
+}

 func transformHistogramAvg(tfa *transformFuncArg) ([]*timeseries, error) {
 	args := tfa.args
 	if err := expectTransformArgsNum(args, 1); err != nil {

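To see the linear interpolation at work, take the `histogram_fraction(single-value-valid-le-mid-le)` expectation from the tests above, with cumulative buckets le=55: 0 and le=200: 100. A simplified, self-contained sketch of the same share computation (the +Inf and negative-le special cases are omitted for brevity):

package main

import "fmt"

// share mirrors the interpolation loop of transformHistogramFraction above
// for one point in time; les/vals form a sorted cumulative bucket set.
func share(leReq float64, les, vals []float64) float64 {
	var vPrev, lePrev float64
	vLast := vals[len(vals)-1]
	for k, le := range les {
		v := vals[k]
		if leReq >= le {
			vPrev, lePrev = v, le
			continue
		}
		lower := vPrev / vLast
		if lePrev == leReq {
			return lower
		}
		return lower + (v-vPrev)/vLast*(leReq-lePrev)/(le-lePrev)
	}
	return 1
}

func main() {
	les := []float64{55, 200}
	vals := []float64{0, 100}
	// share(55) sits exactly on a bucket boundary and returns vPrev/vLast = 0;
	// share(105) interpolates inside the 55..200 bucket: (100/100)*(50/145).
	fmt.Println(share(105, les, vals) - share(55, les, vals)) // 0.3448275862068966
}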
@@ -1,11 +1,11 @@
{% import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricnamestats"
) %}

{% stripspace %}
MetricNamesStatsResponse generates response for /api/v1/status/metric_names_stats .
-{% func MetricNamesStatsResponse(stats *storage.MetricNamesStatsResponse, qt *querytracer.Tracer) %}
+{% func MetricNamesStatsResponse(stats *metricnamestats.StatsResult, qt *querytracer.Tracer) %}
{
	"status":"success",
	"statsCollectedSince": {%dul= stats.CollectedSinceTs %},

@@ -7,7 +7,7 @@ package stats
//line app/vmselect/stats/metric_names_usage_response.qtpl:1
import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricnamestats"
)

// MetricNamesStatsResponse generates response for /api/v1/status/metric_names_stats .
@@ -26,7 +26,7 @@ var (
)

//line app/vmselect/stats/metric_names_usage_response.qtpl:8
-func StreamMetricNamesStatsResponse(qw422016 *qt422016.Writer, stats *storage.MetricNamesStatsResponse, qt *querytracer.Tracer) {
+func StreamMetricNamesStatsResponse(qw422016 *qt422016.Writer, stats *metricnamestats.StatsResult, qt *querytracer.Tracer) {
//line app/vmselect/stats/metric_names_usage_response.qtpl:8
	qw422016.N().S(`{"status":"success","statsCollectedSince":`)
//line app/vmselect/stats/metric_names_usage_response.qtpl:11
@@ -91,7 +91,7 @@ func StreamMetricNamesStatsResponse(qw422016 *qt422016.Writer, stats *storage.Me
}

//line app/vmselect/stats/metric_names_usage_response.qtpl:31
-func WriteMetricNamesStatsResponse(qq422016 qtio422016.Writer, stats *storage.MetricNamesStatsResponse, qt *querytracer.Tracer) {
+func WriteMetricNamesStatsResponse(qq422016 qtio422016.Writer, stats *metricnamestats.StatsResult, qt *querytracer.Tracer) {
//line app/vmselect/stats/metric_names_usage_response.qtpl:31
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/stats/metric_names_usage_response.qtpl:31
@@ -102,7 +102,7 @@ func WriteMetricNamesStatsResponse(qq422016 qtio422016.Writer, stats *storage.Me
}

//line app/vmselect/stats/metric_names_usage_response.qtpl:31
-func MetricNamesStatsResponse(stats *storage.MetricNamesStatsResponse, qt *querytracer.Tracer) string {
+func MetricNamesStatsResponse(stats *metricnamestats.StatsResult, qt *querytracer.Tracer) string {
//line app/vmselect/stats/metric_names_usage_response.qtpl:31
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/stats/metric_names_usage_response.qtpl:31

@@ -1227,7 +1227,10 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke
 #### buckets_limit

 `buckets_limit(limit, buckets)` is a [transform function](#transform-functions), which limits the number
 of [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) to the given `limit`.
+
+The result will preserve the first and the last bucket to improve accuracy for min and max values.
+So, if the `limit` is greater than 0 and less than 3, the function will still return 3 buckets: the first bucket, the last bucket, and a selected bucket.

 See also [prometheus_buckets](#prometheus_buckets) and [histogram_quantile](#histogram_quantile).

@@ -1381,6 +1384,15 @@ It can be used for calculating the average over the given time range across mul
 For example, `histogram_avg(sum(histogram_over_time(response_time_duration_seconds[5m])) by (vmrange,job))` would return the average response time
 per each `job` over the last 5 minutes.

+#### histogram_fraction
+
+`histogram_fraction(lowerLe, upperLe, buckets)` is a [transform function](#transform-functions), which calculates the share (in the range `[0...1]`) for `buckets` that fall between `lowerLe` and `upperLe`.
+The result of `histogram_fraction(lowerLe, upperLe, buckets)` is equivalent to `histogram_share(upperLe, buckets) - histogram_share(lowerLe, buckets)`.
+
+This function is supported by PromQL.
+
+See also [histogram_share](#histogram_share).
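
For example, `histogram_fraction(0.1, 0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))` would return the share of requests with a duration between 0.1 and 0.5 seconds over the last 5 minutes (the metric name in this query is illustrative).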

#### histogram_quantile

`histogram_quantile(phi, buckets)` is a [transform function](#transform-functions), which calculates `phi`-[percentile](https://en.wikipedia.org/wiki/Percentile)
[File diff suppressed because one or more lines are too long]
app/vmselect/vmui/assets/index-C24BPpD_.js (new file, 197 lines)

[File diff suppressed because one or more lines are too long]
app/vmselect/vmui/assets/index-D2OEy8Ra.css (new file, 1 line)

[File diff suppressed because one or more lines are too long]

app/vmselect/vmui/assets/rolldown-runtime-COnpUsM8.js (new file, 1 line)
@@ -0,0 +1 @@
+var e=Object.create,t=Object.defineProperty,n=Object.getOwnPropertyDescriptor,r=Object.getOwnPropertyNames,i=Object.getPrototypeOf,a=Object.prototype.hasOwnProperty,o=(e,t)=>()=>(e&&(t=e(e=0)),t),s=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports),c=(e,n)=>{let r={};for(var i in e)t(r,i,{get:e[i],enumerable:!0});return n||t(r,Symbol.toStringTag,{value:`Module`}),r},l=(e,i,o,s)=>{if(i&&typeof i==`object`||typeof i==`function`)for(var c=r(i),l=0,u=c.length,d;l<u;l++)d=c[l],!a.call(e,d)&&d!==o&&t(e,d,{get:(e=>i[e]).bind(null,d),enumerable:!(s=n(i,d))||s.enumerable});return e},u=(n,r,a)=>(a=n==null?{}:e(i(n)),l(r||!n||!n.__esModule?t(a,`default`,{value:n,enumerable:!0}):a,n)),d=e=>a.call(e,`module.exports`)?e[`module.exports`]:l(t({},`__esModule`,{value:!0}),e);export{u as a,d as i,o as n,c as r,s as t};

[File diff suppressed because one or more lines are too long]
app/vmselect/vmui/assets/vendor-BWBgVCcr.js (new file, 66 lines)

app/vmselect/vmui/assets/vendor-CnsZ1jie.css (new file, 1 line)
@@ -0,0 +1 @@
+.uplot,.uplot *,.uplot :before,.uplot :after{box-sizing:border-box}.uplot{width:min-content;font-family:system-ui,-apple-system,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;line-height:1.5}.u-title{text-align:center;font-size:18px;font-weight:700}.u-wrap{-webkit-user-select:none;user-select:none;position:relative}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{width:100%;height:100%;display:block;position:relative}.u-axis{position:absolute}.u-legend{text-align:center;margin:auto;font-size:14px}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{vertical-align:middle;display:inline-block}.u-legend .u-marker{width:1em;height:1em;margin-right:4px;background-clip:padding-box!important}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{pointer-events:none;background:#00000012;position:absolute}.u-cursor-x,.u-cursor-y{pointer-events:none;will-change:transform;position:absolute;top:0;left:0}.u-hz .u-cursor-x,.u-vt .u-cursor-y{border-right:1px dashed #607d8b;height:100%}.u-hz .u-cursor-y,.u-vt .u-cursor-x{border-bottom:1px dashed #607d8b;width:100%}.u-cursor-pt{pointer-events:none;will-change:transform;border:0 solid;border-radius:50%;position:absolute;top:0;left:0;background-clip:padding-box!important}.u-axis.u-off,.u-select.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-cursor-pt.u-off{display:none}
@@ -1 +0,0 @@
-.uplot,.uplot *,.uplot *:before,.uplot *:after{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";line-height:1.5;width:min-content}.u-title{text-align:center;font-size:18px;font-weight:700}.u-wrap{position:relative;-webkit-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;position:relative;width:100%;height:100%}.u-axis{position:absolute}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{vertical-align:middle;display:inline-block}.u-legend .u-marker{width:1em;height:1em;margin-right:4px;background-clip:padding-box!important}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:#00000012;position:absolute;pointer-events:none}.u-cursor-x,.u-cursor-y{position:absolute;left:0;top:0;pointer-events:none;will-change:transform}.u-hz .u-cursor-x,.u-vt .u-cursor-y{height:100%;border-right:1px dashed #607D8B}.u-hz .u-cursor-y,.u-vt .u-cursor-x{width:100%;border-bottom:1px dashed #607D8B}.u-cursor-pt{position:absolute;top:0;left:0;border-radius:50%;border:0 solid;pointer-events:none;will-change:transform;background-clip:padding-box!important}.u-axis.u-off,.u-select.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-cursor-pt.u-off{display:none}
@@ -37,10 +37,11 @@
<meta property="og:title" content="UI for VictoriaMetrics">
<meta property="og:url" content="https://victoriametrics.com/">
<meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data">
<script type="module" crossorigin src="./assets/index-C1hTBemk.js"></script>
<link rel="modulepreload" crossorigin href="./assets/vendor-BR6Q0Fin.js">
<link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
<link rel="stylesheet" crossorigin href="./assets/index-D7CzMv1O.css">
<script type="module" crossorigin src="./assets/index-C24BPpD_.js"></script>
<link rel="modulepreload" crossorigin href="./assets/rolldown-runtime-COnpUsM8.js">
<link rel="modulepreload" crossorigin href="./assets/vendor-BWBgVCcr.js">
<link rel="stylesheet" crossorigin href="./assets/vendor-CnsZ1jie.css">
<link rel="stylesheet" crossorigin href="./assets/index-D2OEy8Ra.css">
</head>
<body>
<noscript>You need to enable JavaScript to run this app.</noscript>
@@ -5,6 +5,7 @@ import (
"flag"
"fmt"
"io"
"math"
"net/http"
"strconv"
"strings"
@@ -22,6 +23,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricnamestats"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
@@ -55,11 +57,13 @@ var (
denyQueriesOutsideRetention = flag.Bool("denyQueriesOutsideRetention", false, "Whether to deny queries outside the configured -retentionPeriod. "+
"When set, then /api/v1/query_range would return '503 Service Unavailable' error for queries with 'from' value outside -retentionPeriod. "+
"This may be useful when multiple data sources with distinct retentions are hidden behind query-tee")
maxHourlySeries = flag.Int("storage.maxHourlySeries", 0, "The maximum number of unique series can be added to the storage during the last hour. "+
maxHourlySeries = flag.Int64("storage.maxHourlySeries", 0, "The maximum number of unique series can be added to the storage during the last hour. "+
"Excess series are logged and dropped. This can be useful for limiting series cardinality. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#cardinality-limiter . "+
fmt.Sprintf("Setting this flag to '-1' sets limit to maximum possible value (%d) which is useful in order to enable series tracking without enforcing limits. ", math.MaxInt32)+
"See also -storage.maxDailySeries")
maxDailySeries = flag.Int("storage.maxDailySeries", 0, "The maximum number of unique series can be added to the storage during the last 24 hours. "+
maxDailySeries = flag.Int64("storage.maxDailySeries", 0, "The maximum number of unique series can be added to the storage during the last 24 hours. "+
"Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#cardinality-limiter . "+
fmt.Sprintf("Setting this flag to '-1' sets limit to maximum possible value (%d) which is useful in order to enable series tracking without enforcing limits. ", math.MaxInt32)+
"See also -storage.maxHourlySeries")

minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 100e6, "The minimum free disk space at -storageDataPath after which the storage stops accepting new data")
@@ -141,8 +145,8 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
WG = syncwg.WaitGroup{}
opts := storage.OpenOptions{
Retention: retentionPeriod.Duration(),
MaxHourlySeries: *maxHourlySeries,
MaxDailySeries: *maxDailySeries,
MaxHourlySeries: getMaxHourlySeries(),
MaxDailySeries: getMaxDailySeries(),
DisablePerDayIndex: *disablePerDayIndex,
TrackMetricNamesStats: *trackMetricNamesStats,
IDBPrefillStart: *idbPrefillStart,
@@ -233,7 +237,7 @@ func DeleteSeries(qt *querytracer.Tracer, tfss []*storage.TagFilters, maxMetrics
}

// GetMetricNamesStats returns metric names usage stats with give limit and lte predicate
func GetMetricNamesStats(qt *querytracer.Tracer, limit, le int, matchPattern string) (storage.MetricNamesStatsResponse, error) {
func GetMetricNamesStats(qt *querytracer.Tracer, limit, le int, matchPattern string) (metricnamestats.StatsResult, error) {
WG.Add(1)
r := Storage.GetMetricNamesStats(qt, limit, le, matchPattern)
WG.Done()
@@ -319,6 +323,7 @@ func Stop() {
Storage.MustClose()
logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())

fs.MustStopDirRemover()
logger.Infof("the storage has been stopped")
}

@@ -601,10 +606,10 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="big_timestamp"}`, m.TooBigTimestampRows)
metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="small_timestamp"}`, m.TooSmallTimestampRows)
metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="invalid_raw_metric_name"}`, m.InvalidRawMetricNames)
if *maxHourlySeries > 0 {
if getMaxHourlySeries() > 0 {
metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="hourly_limit_exceeded"}`, m.HourlySeriesLimitRowsDropped)
}
if *maxDailySeries > 0 {
if getMaxDailySeries() > 0 {
metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="daily_limit_exceeded"}`, m.DailySeriesLimitRowsDropped)
}

@@ -614,13 +619,13 @@ func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
metrics.WriteCounterUint64(w, `vm_slow_row_inserts_total`, m.SlowRowInserts)
metrics.WriteCounterUint64(w, `vm_slow_per_day_index_inserts_total`, m.SlowPerDayIndexInserts)

if *maxHourlySeries > 0 {
if getMaxHourlySeries() > 0 {
metrics.WriteGaugeUint64(w, `vm_hourly_series_limit_current_series`, m.HourlySeriesLimitCurrentSeries)
metrics.WriteGaugeUint64(w, `vm_hourly_series_limit_max_series`, m.HourlySeriesLimitMaxSeries)
metrics.WriteCounterUint64(w, `vm_hourly_series_limit_rows_dropped_total`, m.HourlySeriesLimitRowsDropped)
}

if *maxDailySeries > 0 {
if getMaxDailySeries() > 0 {
metrics.WriteGaugeUint64(w, `vm_daily_series_limit_current_series`, m.DailySeriesLimitCurrentSeries)
metrics.WriteGaugeUint64(w, `vm_daily_series_limit_max_series`, m.DailySeriesLimitMaxSeries)
metrics.WriteCounterUint64(w, `vm_daily_series_limit_rows_dropped_total`, m.DailySeriesLimitRowsDropped)
@@ -745,3 +750,21 @@ func jsonResponseError(w http.ResponseWriter, err error) {
errStr := err.Error()
fmt.Fprintf(w, `{"status":"error","msg":%s}`, stringsutil.JSONString(errStr))
}

func getMaxHourlySeries() int {
limit := *maxHourlySeries
if limit == -1 || limit > math.MaxInt32 {
return math.MaxInt32
}

return int(limit)
}

func getMaxDailySeries() int {
limit := *maxDailySeries
if limit == -1 || limit > math.MaxInt32 {
return math.MaxInt32
}

return int(limit)
}
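
Worth noting on the hunks above: with the flags widened to flag.Int64, passing -storage.maxHourlySeries=-1 (or any value above 2147483647) is clamped by the new getMaxHourlySeries()/getMaxDailySeries() helpers to math.MaxInt32, so series tracking stays enabled without enforcing a practical limit, exactly as the updated help text states; and because the clamped value is still > 0, the checks in writeStorageMetrics keep exporting the limiter metrics in that mode.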
@@ -1,4 +1,4 @@
FROM golang:1.26.0 AS build-web-stage
FROM golang:1.26.2 AS build-web-stage
COPY build /build

WORKDIR /build
app/vmui/packages/vmui/package-lock.json (generated; 3724 lines changed)
File diff suppressed because it is too large
@@ -21,43 +21,42 @@
},
"dependencies": {
"classnames": "^2.5.1",
"dayjs": "^1.11.19",
"dayjs": "^1.11.20",
"lodash.debounce": "^4.0.8",
"marked": "^17.0.1",
"preact": "^10.28.3",
"qs": "^6.14.1",
"marked": "^17.0.5",
"preact": "^10.29.0",
"qs": "^6.15.0",
"react-input-mask": "^2.0.4",
"react-router-dom": "^7.13.0",
"react-router-dom": "^7.13.2",
"uplot": "^1.6.32",
"vite": "^7.3.1",
"vite": "^8.0.7",
"web-vitals": "^5.1.0"
},
"devDependencies": {
"@eslint/eslintrc": "^3.3.3",
"@eslint/eslintrc": "^3.3.5",
"@eslint/js": "^9.39.2",
"@preact/preset-vite": "^2.10.3",
"@preact/preset-vite": "^2.10.5",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/preact": "^3.2.4",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "^25.2.0",
"@types/qs": "^6.14.0",
"@types/react": "^19.2.10",
"@types/node": "^25.5.0",
"@types/qs": "^6.15.0",
"@types/react": "^19.2.14",
"@types/react-input-mask": "^3.0.6",
"@types/react-router-dom": "^5.3.3",
"@typescript-eslint/eslint-plugin": "^8.54.0",
"@typescript-eslint/parser": "^8.54.0",
"@typescript-eslint/eslint-plugin": "^8.57.2",
"@typescript-eslint/parser": "^8.57.2",
"cross-env": "^10.1.0",
"eslint": "^9.39.2",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-unused-imports": "^4.3.0",
"globals": "^17.3.0",
"eslint-plugin-unused-imports": "^4.4.1",
"globals": "^17.4.0",
"http-proxy-middleware": "^3.0.5",
"jsdom": "^28.0.0",
"postcss": "^8.5.6",
"rollup-plugin-visualizer": "^6.0.5",
"sass-embedded": "^1.97.3",
"jsdom": "^29.0.1",
"postcss": "^8.5.8",
"sass-embedded": "^1.98.0",
"typescript": "^5.9.3",
"vitest": "^4.0.18"
"vitest": "^4.1.1"
},
"browserslist": {
"production": [
@@ -1,5 +1,5 @@
export const getGroupsUrl = (server: string): string => {
return `${server}/vmalert/api/v1/rules?datasource_type=prometheus`;
export const getGroupsUrl = (server: string, search: string, type: string, states: string[], maxGroups: number): string => {
return `${server}/vmalert/api/v1/rules?datasource_type=prometheus&search=${encodeURIComponent(search)}&type=${encodeURIComponent(type)}&state=${states.map(encodeURIComponent).join(",")}&group_limit=${maxGroups}&extended_states=true`;
};

export const getItemUrl = (
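
For reference, a sketch of the URL the reworked getGroupsUrl builds (illustrative arguments, not taken from the diff):

// Illustrative only: sample arguments for the new five-parameter signature.
const url = getGroupsUrl("http://localhost:8428", "cpu", "alert", ["firing", "pending"], 100);
// => "http://localhost:8428/vmalert/api/v1/rules?datasource_type=prometheus
//     &search=cpu&type=alert&state=firing,pending&group_limit=100&extended_states=true"

Each state is URI-encoded individually and joined with a literal comma, and extended_states=true is always appended.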
@@ -16,23 +16,29 @@ export const getExportDataUrl = (server: string, query: string, period: TimePara
return `${server}/api/v1/export?${params}`;
};

export const getExportCSVDataUrl = (server: string, query: string[], period: TimeParams, reduceMemUsage: boolean): string => {
const getBaseParams = (period: TimeParams, query: string[]): URLSearchParams => {
const params = new URLSearchParams({
start: period.start.toString(),
end: period.end.toString(),
format: "__name__,__value__,__timestamp__:unix_ms",
});
query.forEach((q => params.append("match[]", q)));
return params;
};

export const getLabelsUrl = (server: string, query: string[], period: TimeParams): string => {
const params = getBaseParams(period, query);
return `${server}/api/v1/labels?${params}`;
};

export const getExportCSVDataUrl = (server: string, query: string[], period: TimeParams, reduceMemUsage: boolean, format: string): string => {
const params = getBaseParams(period, query);
params.set("format", format);
if (reduceMemUsage) params.set("reduce_mem_usage", "1");
return `${server}/api/v1/export/csv?${params}`;
};

export const getExportJSONDataUrl = (server: string, query: string[], period: TimeParams, reduceMemUsage: boolean): string => {
const params = new URLSearchParams({
start: period.start.toString(),
end: period.end.toString(),
});
query.forEach((q => params.append("match[]", q)));
const params = getBaseParams(period, query);
if (reduceMemUsage) params.set("reduce_mem_usage", "1");
return `${server}/api/v1/export?${params}`;
};
app/vmui/packages/vmui/src/api/raw-query.test.ts (new file; 29 lines)
@@ -0,0 +1,29 @@
import { describe, expect, it, vi } from "vitest";
import { fetchRawQueryCSVExport } from "./raw-query";

describe("fetchRawQueryCSVExport", () => {
it.skip("requests all label columns before exporting CSV data", async () => {
const fetchMock = vi.fn()
.mockResolvedValueOnce({
ok: true,
json: async () => ({ data: ["job", "__name__", "instance"] }),
})
.mockResolvedValueOnce({
ok: true,
text: async () => "up,localhost:9100,node_exporter,1,1710000000000",
});

const result = await fetchRawQueryCSVExport(
"http://localhost:8428",
["up"],
{ start: 1710000000, end: 1710000300, step: "15s", date: "2024-03-09T16:05:00Z" },
false,
fetchMock as unknown as typeof fetch,
);

expect(fetchMock).toHaveBeenCalledTimes(2);
expect(fetchMock.mock.calls[0][0]).toBe("http://localhost:8428/api/v1/labels?start=1710000000&end=1710000300&match%5B%5D=up");
expect(fetchMock.mock.calls[1][0]).toBe("http://localhost:8428/api/v1/export/csv?start=1710000000&end=1710000300&match%5B%5D=up&format=__name__%2Cinstance%2Cjob%2C__value__%2C__timestamp__%3Aunix_ms");
expect(result).toBe("up,localhost:9100,node_exporter,1,1710000000000");
});
});
app/vmui/packages/vmui/src/api/raw-query.ts (new file; 31 lines)
@@ -0,0 +1,31 @@
import { getExportCSVDataUrl, getLabelsUrl } from "./query-range";
import { TimeParams } from "../types";
import { getCSVExportColumns } from "../utils/csv";

interface LabelsResponse {
data?: string[];
}

export const fetchRawQueryCSVExport = async (
serverUrl: string,
query: string[],
period: TimeParams,
reduceMemUsage: boolean,
fetchFn: typeof fetch = fetch,
): Promise<string> => {
const labelsResponse = await fetchFn(getLabelsUrl(serverUrl, query, period));
if (!labelsResponse.ok) {
throw new Error(await labelsResponse.text());
}

const { data = [] } = (await labelsResponse.json()) as LabelsResponse;
const columns = getCSVExportColumns(data);
const format = columns.join(",");

const response = await fetchFn(getExportCSVDataUrl(serverUrl, query, period, reduceMemUsage, format));
if (!response.ok) {
throw new Error(await response.text());
}

return await response.text();
};
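
Taken together with the skipped test above, the intended flow is a two-request sequence: fetchRawQueryCSVExport first queries /api/v1/labels for the label names matching the series, orders them into a column list via getCSVExportColumns (per the test fixture, __name__ leads, the remaining labels follow alphabetically, and __value__,__timestamp__:unix_ms close the list), and only then requests /api/v1/export/csv with that list as the format parameter.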
@@ -1227,7 +1227,10 @@ Metric names are stripped from the resulting series. Add [keep_metric_names](#ke
#### buckets_limit

`buckets_limit(limit, buckets)` is a [transform function](#transform-functions), which limits the number
of [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) to the given `limit`.
of [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) to the given `limit`.

The result will preserve the first and the last bucket to improve accuracy for min and max values.
So, if the `limit` is greater than 0 and less than 3, the function will still return 3 buckets: the first bucket, the last bucket, and a selected bucket.

See also [prometheus_buckets](#prometheus_buckets) and [histogram_quantile](#histogram_quantile).

@@ -1381,6 +1384,15 @@ It can be used for calculating the average over the given time range across mult
For example, `histogram_avg(sum(histogram_over_time(response_time_duration_seconds[5m])) by (vmrange,job))` would return the average response time
per each `job` over the last 5 minutes.

#### histogram_fraction

`histogram_fraction(lowerLe, upperLe, buckets)` is a [transform function](#transform-functions), which calculates the share (in the range `[0...1]`) for `buckets` that fall between `lowerLe` and `upperLe`.
The result of `histogram_fraction(lowerLe, upperLe, buckets)` is equivalent to `histogram_share(upperLe, buckets) - histogram_share(lowerLe, buckets)`.

This function is supported by PromQL.

See also [histogram_share](#histogram_share).

#### histogram_quantile

`histogram_quantile(phi, buckets)` is a [transform function](#transform-functions), which calculates `phi`-[percentile](https://en.wikipedia.org/wiki/Percentile)
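
Two worked illustrations of the documentation added above, using a hypothetical `request_duration_seconds` histogram: `histogram_fraction(0.1, 0.5, sum(rate(request_duration_seconds_bucket[5m])) by (vmrange))` returns the share of requests lasting between 0.1s and 0.5s, which by the stated equivalence equals `histogram_share(0.5, buckets) - histogram_share(0.1, buckets)`; and, per the `buckets_limit` docs, `buckets_limit(2, buckets)` still returns 3 buckets, since the first and the last bucket are always preserved.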
@@ -60,7 +60,7 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
const options = useMemo(() => {
switch (context) {
case QueryContextType.metricsql:
return [...metrics, ...metricsqlFunctions];
return includeFunctions ? [...metrics, ...metricsqlFunctions] : metrics;
case QueryContextType.label:
return labels;
case QueryContextType.labelValue:
@@ -68,7 +68,7 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
default:
return [];
}
}, [context, metrics, labels, labelValues, metricsqlFunctions]);
}, [context, metrics, labels, labelValues, metricsqlFunctions, includeFunctions]);

const handleSelect = useCallback((insert: string) => {
// Find the start and end of valueByContext in the query string
@@ -1,7 +1,7 @@
import "./style.scss";
import { ReactNode } from "react";

export type BadgeColor = "firing" | "inactive" | "pending" | "no-match" | "unhealthy" | "ok" | "passive";
export type BadgeColor = "firing" | "inactive" | "pending" | "nomatch" | "unhealthy" | "ok" | "passive";

interface BadgeItem {
value?: number | string;
@@ -4,7 +4,7 @@ $badge-colors: (
"firing": $color-error,
"inactive": $color-success,
"pending": $color-warning,
"no-match": $color-notice,
"nomatch": $color-notice,
"unhealthy": $color-broken,
"ok": $color-info,
"passive": $color-passive,
@@ -1,7 +1,8 @@
import { useMemo } from "preact/compat";
import "./style.scss";
import { Group as APIGroup } from "../../../types";
import { formatDuration, formatEventTime } from "../helpers";
import ItemHeader from "../ItemHeader";
import { getStates, formatDuration, formatEventTime } from "../helpers";
import Badges, { BadgeColor } from "../Badges";

interface BaseGroupProps {
@@ -117,6 +118,21 @@ const BaseGroup = ({ group }: BaseGroupProps) => {
)}
</tbody>
</table>
<div className="vm-explore-alerts-rule-item">
<span className="vm-alerts-title">Rules</span>
{group.rules.map((rule) => (
<ItemHeader
classes={["vm-badge-item", rule.state]}
key={rule.id}
entity="rule"
type={rule.type}
groupId={rule.group_id}
states={getStates(rule)}
id={rule.id}
name={rule.name}
/>
))}
</div>
</div>
);
};
@@ -18,6 +18,7 @@ import {
import Button from "../../Main/Button/Button";

interface ItemHeaderControlsProps {
classes?: string[];
entity: string;
type?: string;
groupId: string;
@@ -27,12 +28,19 @@ interface ItemHeaderControlsProps {
onClose?: () => void;
}

const ItemHeader: FC<ItemHeaderControlsProps> = ({ name, id, groupId, entity, type, states, onClose }) => {
const ItemHeader: FC<ItemHeaderControlsProps> = ({ name, id, groupId, entity, type, states, onClose, classes }) => {
const { isMobile } = useDeviceDetect();
const { serverUrl } = useAppState();
const navigate = useNavigate();
const copyToClipboard = useCopyToClipboard();

const openGroupLink = () => {
navigate({
pathname: "/rules",
search: `group_id=${groupId}`,
});
};

const openItemLink = () => {
navigate({
pathname: "/rules",
@@ -49,7 +57,7 @@ const ItemHeader: FC<ItemHeaderControlsProps> = ({ name, id, groupId, entity, ty
const headerClasses = classNames({
"vm-explore-alerts-item-header": true,
"vm-explore-alerts-item-header_mobile": isMobile,
});
}, classes);

const renderIcon = () => {
switch(entity) {
@@ -105,16 +113,30 @@ const ItemHeader: FC<ItemHeaderControlsProps> = ({ name, id, groupId, entity, ty
items={badgesItems}
/>
{onClose ? (
<Button
className="vm-back-button"
size="small"
variant="outlined"
color="gray"
startIcon={<LinkIcon />}
onClick={copyLink}
>
<span className="vm-button-text">Copy Link</span>
</Button>
<>
{id && (
<Button
className="vm-back-button"
size="small"
variant="outlined"
color="gray"
startIcon={<GroupIcon />}
onClick={openGroupLink}
>
<span className="vm-button-text">Open Group</span>
</Button>
)}
<Button
className="vm-back-button"
size="small"
variant="outlined"
color="gray"
startIcon={<LinkIcon />}
onClick={copyLink}
>
<span className="vm-button-text">Copy Link</span>
</Button>
</>
) : (
<Button
className="vm-button-borderless"
@@ -6,6 +6,10 @@
justify-content: space-between;
gap: $padding-global;

&:is(.vm-badge-item) {
padding: 6px 0 6px 6px;
}

.vm-button_small {
padding: 4px;
}
@@ -0,0 +1,94 @@
import Button from "../../Main/Button/Button";
import { ArrowDownIcon } from "../../Main/Icons";
import "./style.scss";
import classNames from "classnames";

interface PaginationProps {
page: number;
totalPages: number;
totalRules: number;
totalGroups: number;
pageRules: number;
pageGroups: number;
onPageChange: (num: number) => () => void;
}

const getButtons = (page: number, totalPages: number) => {
const result: number[] = [];
if (totalPages < 2) return result;
result.push(1);
if (page > 3) result.push(0);
if (page > 2) result.push(page - 1);
if (page > 1 && page < totalPages) result.push(page);
if (page > 0 && page < totalPages - 1) result.push(page + 1);
if (totalPages - page > 2) result.push(0);
result.push(totalPages);
return result;
};

const Pagination = ({
page,
totalPages,
onPageChange,
totalGroups,
totalRules,
pageGroups,
pageRules,
}: PaginationProps) => {

const buttons = getButtons(page, totalPages);
return (
<>
<div
className="vm-pagination"
>
<span className="vm-pagination-stats">
<span>Page rules/groups:</span> <b>{pageRules}</b> / <b>{pageGroups}</b>
</span>
{!!buttons.length && (
<div className="vm-pagination-buttons">
<Button
className="vm-button-borderless vm-pagination-prev"
size="small"
color="gray"
disabled={page == 1}
variant="outlined"
startIcon={<ArrowDownIcon />}
onClick={onPageChange(page-1)}
/>
{buttons.map((button, index) => {
return button ? (
<Button
className={classNames({
"vm-button-borderless": page !== button,
})}
key={index}
size="small"
color="gray"
variant="outlined"
onClick={onPageChange(button)}
>{button}</Button>
) : (
<span className="vm-pagination-more">...</span>
);
})}
<Button
className="vm-button-borderless vm-pagination-next"
size="small"
color="gray"
disabled={page==totalPages}
variant="outlined"
startIcon={<ArrowDownIcon />}
onClick={onPageChange(page+1)}
/>
</div>
)}
<span className="vm-pagination-stats">
<span>Total rules/groups:</span> <b>{totalRules}</b> / <b>{totalGroups}</b>
</span>
</div>
</>
);
};

export default Pagination;
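
A quick hand-traced sanity check of the getButtons windowing above, where 0 is the sentinel the component renders as an ellipsis:

getButtons(5, 9); // [1, 0, 4, 5, 6, 0, 9] -> rendered as "1 ... 4 5 6 ... 9"
getButtons(1, 3); // [1, 2, 3]
getButtons(1, 1); // [] (fewer than 2 pages yields no page buttons at all)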
@@ -0,0 +1,33 @@
@use "src/styles/variables" as *;

.vm-pagination {
display: flex;
min-height: 24px;
justify-content: space-between;
&-stats {
display: flex;
align-items: center;
color: var(--color-text-secondary);
column-gap: $padding-tiny;
}
&-buttons {
display: flex;
column-gap: $padding-small;
}
.vm-button-borderless {
border: 0;
}
&-more {
align-self: center;
}
&-prev {
svg {
transform: rotate(90deg);
}
}
&-next {
svg {
transform: rotate(-90deg);
}
}
}
@@ -1,4 +1,4 @@
import { FC, useMemo } from "preact/compat";
import { useMemo } from "preact/compat";
import Select from "../../Main/Select/Select";
import { SearchIcon } from "../../Main/Icons";
import TextField from "../../Main/TextField/TextField";
@@ -8,25 +8,25 @@ import useDeviceDetect from "../../../hooks/useDeviceDetect";

interface RulesHeaderProps {
types: string[];
allTypes: string[];
allRuleTypes: string[];
allStates: string[];
states: string[];
search: string;
onChangeTypes: (input: string) => void;
onChangeRuleType: (input: string) => void;
onChangeStates: (input: string) => void;
onChangeSearch: (input: string) => void;
}

const RulesHeader: FC<RulesHeaderProps> = ({
const RulesHeader = ({
types,
allTypes,
allRuleTypes,
allStates,
states,
search,
onChangeTypes,
onChangeRuleType,
onChangeStates,
onChangeSearch,
}) => {
}: RulesHeaderProps) => {
const noStateText = useMemo(
() => (types.length ? "" : "No states. Please select rule states"),
[types],
@@ -46,10 +46,10 @@ const RulesHeader: FC<RulesHeaderProps> = ({
<div className="vm-explore-alerts-header__rule_type">
<Select
value={types}
list={allTypes}
label="Rules type"
list={allRuleTypes}
label="Rule type"
placeholder="Please select rule type"
onChange={onChangeTypes}
onChange={onChangeRuleType}
autofocus={!!types.length && !isMobile}
includeAll
searchable
@@ -1,4 +1,5 @@
import dayjs from "dayjs";
import { Rule } from "../../types";

export const formatDuration = (raw: number) => {
const duration = dayjs.duration(Math.round(raw * 1000));
@@ -18,3 +19,13 @@ export const formatEventTime = (raw: string) => {
const t = dayjs(raw);
return t.year() <= 1 ? "Never" : t.format("DD MMM YYYY HH:mm:ss");
};

export const getStates = (rule: Rule) => {
if (!rule.alerts?.length) {
return { [rule.state]: 1 };
}
return rule.alerts.reduce((acc, alert) => {
acc[alert.state] = (acc[alert.state] ?? 0) + 1;
return acc;
}, {} as Record<string, number>);
};
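
Behaviour sketch for the new getStates helper, using hypothetical rule objects pared down to the fields it reads:

getStates({ state: "inactive", alerts: [] } as unknown as Rule);
// => { inactive: 1 } (no alerts, so the rule's own state counts once)
getStates({ state: "firing", alerts: [{ state: "firing" }, { state: "firing" }, { state: "pending" }] } as unknown as Rule);
// => { firing: 2, pending: 1 }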
@@ -55,7 +55,7 @@ const ExploreMetricItem: FC<ExploreMetricItemGraphProps> = ({

const base = `{${params.join(",")}}`;
if (isBucket) {
return [`sum(rate(${base})) by (vmrange, le)`];
return [`sum(increase_pure(${base})) by (vmrange, le)`];
}
const queryBase = rateEnabled ? `rollup_rate(${base})` : `rollup(${base})`;
return [`
@@ -27,6 +27,7 @@ interface TextFieldProps {
endIcon?: ReactNode
startIcon?: ReactNode
disabled?: boolean
readonly?: boolean
autofocus?: boolean
helperText?: string
inputmode?: "search" | "text" | "email" | "tel" | "url" | "none" | "numeric" | "decimal"
@@ -50,6 +51,7 @@ const TextField: FC<TextFieldProps> = ({
endIcon,
startIcon,
disabled = false,
readonly = false,
autofocus = false,
inputmode = "text",
caretPosition,
@@ -148,6 +150,7 @@ const TextField: FC<TextFieldProps> = ({
<textarea
className={inputClasses}
disabled={disabled}
readOnly={readonly}
ref={textareaRef}
value={value}
rows={1}
@@ -166,6 +169,7 @@ const TextField: FC<TextFieldProps> = ({
<input
className={inputClasses}
disabled={disabled}
readOnly={readonly}
ref={inputRef}
value={value}
type={type}
@@ -72,9 +72,9 @@ const useGetMetricsQL = (includeFunctions: boolean) => {
}
};
fetchMarkdown();
}, []);
}, [includeFunctions, metricsQLFunctions.length, queryDispatch]);

return includeFunctions ? metricsQLFunctions : [];
return metricsQLFunctions;
};

export default useGetMetricsQL;
@@ -80,7 +80,7 @@ export default class AppConfigurator {

let keys: string[] = [];
if (focusLabel || isMetricWithLabel) {
keys = keys.concat("seriesCountByFocusLabelValue");
keys = keys.concat("seriesCountByMetricName", "seriesCountByFocusLabelValue");
} else if (isMetric) {
keys = keys.concat("labelValueCountByLabelName");
} else if (isLabel) {
@@ -115,16 +115,20 @@ const DownsamplingFilters: FC = () => {
</div>
<div className="vm-downsampling-filters-body-top">
<a
className="vm-link vm-link_with-icon"
target="_blank"
href="https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#downsampling"
rel="help noreferrer"
>
<WikiIcon/>
Documentation
<Button
variant="text"
color="gray"
startIcon={<WikiIcon/>}
>
Documentation
</Button>
</a>
<Button
variant="text"
variant="outlined"
onClick={handleRunExample}
>
Try example
@@ -134,7 +138,7 @@ const DownsamplingFilters: FC = () => {
onClick={handleApplyFilters}
startIcon={<PlayIcon/>}
>
Apply
Preview
</Button>
</div>
</div>
@@ -6,7 +6,7 @@ import { Rule as APIRule } from "../../types";
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
import BaseRule from "../../components/ExploreAlerts/BaseRule";
import Modal from "../../components/Main/Modal/Modal";
import { getStates } from "./helpers";
import { getStates } from "../../components/ExploreAlerts/helpers";

interface ExploreRuleProps {
groupId: string;
@@ -7,30 +7,36 @@ import Accordion from "../../components/Main/Accordion/Accordion";
import { useFetchGroups } from "./hooks/useFetchGroups";
import "./style.scss";
import RulesHeader from "../../components/ExploreAlerts/RulesHeader";
import Pagination from "../../components/ExploreAlerts/Pagination";
import GroupHeader from "../../components/ExploreAlerts/GroupHeader";
import Rule from "../../components/ExploreAlerts/Rule";
import ExploreRule from "../../pages/ExploreAlerts/ExploreRule";
import ExploreAlert from "../../pages/ExploreAlerts/ExploreAlert";
import ExploreGroup from "../../pages/ExploreAlerts/ExploreGroup";
import { getQueryStringValue } from "../../utils/query-string";
import { getStates, getChanges, filterGroups } from "./helpers";
import { getChanges } from "./helpers";
import debounce from "lodash.debounce";
import { getStates } from "../../components/ExploreAlerts/helpers";

const defaultTypesStr = getQueryStringValue("types", "") as string;
const defaultTypes = defaultTypesStr.split("&").filter((rt) => rt) as string[];
const defaultRuleType = getQueryStringValue("type", "") as string;
const defaultStatesStr = getQueryStringValue("states", "") as string;
const defaultStates = defaultStatesStr.split("&").filter((s) => s) as string[];
const defaultSearchInput = getQueryStringValue("search", "") as string;
const TYPE_STATES: Record<string, string[]> = {
alert: ["inactive", "firing", "nomatch", "pending", "unhealthy"],
record: ["unhealthy", "nomatch", "ok"],
};

const ExploreRules: FC = () => {
const pageNum = getQueryStringValue("page_num", "1") as string;
const groupId = getQueryStringValue("group_id", "") as string;
const ruleId = getQueryStringValue("rule_id", "") as string;
const alertId = getQueryStringValue("alert_id", "") as string;

const [searchInput, setSearchInput] = useState(defaultSearchInput);
const [types, setTypes] = useState(defaultTypes);
const [ruleType, setRuleType] = useState(defaultRuleType);
const [states, setStates] = useState(defaultStates);
const [modalOpen, setModalOpen] = useState(true);
const [modalOpen, setModalOpen] = useState(false);
const [searchParams, setSearchParams] = useSearchParams();

useEffect(() => {
@@ -38,7 +44,7 @@ const ExploreRules: FC = () => {
}, [groupId]);

useSetQueryParams({
types: types.join("&"),
type: ruleType,
states: states.join("&"),
search: searchInput,
group_id: groupId,
@@ -47,12 +53,11 @@ const ExploreRules: FC = () => {
});

const handleChangeSearch = useCallback((input: string) => {
if (!input) {
setSearchInput("");
} else {
setSearchInput(input);
}
}, [searchInput]);
const newParams = new URLSearchParams(searchParams);
newParams.set("page_num", "1");
setSearchParams(newParams);
setSearchInput(input || "");
}, [searchInput, searchParams]);

const getModal = () => {
if (ruleId) {
@@ -94,55 +99,79 @@ const ExploreRules: FC = () => {
setModalOpen(false);
};

const onPageChange = (num: number) => {
return () => {
const newParams = new URLSearchParams(searchParams);
newParams.set("page_num", num.toString());
setSearchParams(newParams);
};
};

const allRuleTypes = Object.keys(TYPE_STATES);
const allStates = useMemo(
() => Array.from(ruleType === "" ? new Set(Object.values(TYPE_STATES).flat()) : TYPE_STATES[ruleType] || []),
[ruleType]
);
const selectedRuleTypes = [ruleType].filter(Boolean);
useEffect(() => {
if (!states.every(v => allStates.includes(v))) {
setStates([]);
}
}, [states, allStates]);

const pageNumInt: number = Math.max(1, parseInt(pageNum, 10) || 1);
const {
groups,
isLoading,
error,
} = useFetchGroups({ blockFetch: modalOpen });

const { filteredGroups, allTypes, allStates } = useMemo(
() => filterGroups(groups || [], types, states, searchInput),
[groups, types, states, searchInput]
);

if (!types.every(v => allTypes.has(v))) {
setTypes([]);
}
const selectedTypes = allTypes.size === types.length ? [] : types;

if (!states.every(v => allStates.has(v))) {
setStates([]);
}
const selectedStates = allStates.size === states.length ? [] : states;
pageInfo,
} = useFetchGroups({ blockFetch: modalOpen, search: searchInput, ruleType, states, pageNum: pageNumInt, onPageChange });

const handleChangeStates = useCallback((title: string) => {
setStates(getChanges(title, selectedStates));
}, [states]);
const newParams = new URLSearchParams(searchParams);
newParams.set("page_num", "1");
setSearchParams(newParams);
const changes = getChanges(title, states);
setStates(changes.length == allStates.length ? [] : changes);
}, [states, searchParams]);

const handleChangeTypes = useCallback((title: string) => {
setTypes(getChanges(title, selectedTypes));
}, [types]);
const handleChangeRuleType = useCallback((title: string) => {
const newParams = new URLSearchParams(searchParams);
newParams.set("page_num", "1");
setSearchParams(newParams);
const changes = getChanges(title, selectedRuleTypes);
setRuleType(changes.length && changes.length !== allRuleTypes.length ? changes[0] : "");
}, [ruleType, searchParams]);

return (
<>
{modalOpen && getModal()}
{(!modalOpen || !!allStates?.size) && (
{(!modalOpen || !!allStates?.length) && (
<div className="vm-explore-alerts">
<RulesHeader
types={selectedTypes}
allTypes={Array.from(allTypes)}
states={selectedStates}
allStates={Array.from(allStates)}
types={selectedRuleTypes}
allRuleTypes={allRuleTypes}
states={states}
allStates={allStates}
search={searchInput}
onChangeTypes={handleChangeTypes}
onChangeRuleType={handleChangeRuleType}
onChangeStates={handleChangeStates}
onChangeSearch={debounce(handleChangeSearch, 500)}
/>
<Pagination
page={pageInfo.page}
totalPages={pageInfo.total_pages}
pageRules={groups.reduce((total, g) => total + g?.rules.length, 0)}
pageGroups={groups.length}
totalRules={pageInfo.total_rules}
totalGroups={pageInfo.total_groups}
onPageChange={onPageChange}
/>
{(isLoading && <Spinner />) || (error && <Alert variant="error">{error}</Alert>) || (
!filteredGroups.length && <Alert variant="info">{noRuleFound}</Alert>
!groups.length && <Alert variant="info">{noRuleFound}</Alert>
) || (
<div className="vm-explore-alerts-body">
{filteredGroups.map((group) => (
{groups.map((group) => (
<div
key={group.id}
className="vm-explore-alert-group vm-block vm-block_empty-padding"
@@ -1,5 +1,3 @@
import { Rule, Group } from "../../types";

export const getChanges = (title: string, prevValues: string[]): string[] => {
if (title === "All") return [];

@@ -12,77 +10,3 @@ export const getChanges = (title: string, prevValues: string[]): string[] => {

return Array.from(newValues);
};

export const getState = (rule: Rule) => {
let state = rule?.state || "ok";
if (rule?.health !== "ok") {
state = "unhealthy";
} else if (!rule?.lastSamples && !rule?.lastSeriesFetched) {
state = "no match";
}
return state;
};

export const getStates = (rule: Rule) => {
const output: Record<string, number> = {};
const alertsCount = rule?.alerts?.length || 0;
if (alertsCount > 0) {
rule.alerts.forEach((alert) => {
if (alert.state in output) {
output[alert.state] += 1;
} else {
output[alert.state] = 1;
}
});
} else {
output[getState(rule)] = 1;
}
return output;
};

export const filterGroups = (groups: Group[], types: string[], states: string[], searchInput: string) => {
const allTypes: Set<string> = new Set();
const allStates: Set<string> = new Set();
const filteredGroups: Group[] = [];

groups.forEach((group) => {
const filteredRules: Rule[] = [];
const statesPerGroup: Record<string, number> = {};
group.rules.forEach((rule) => {
const ruleType = rule.type.charAt(0).toUpperCase() + rule.type.slice(1);
allTypes.add(ruleType);
if (types?.length && !types.includes(ruleType)) return;

const state = getState(rule);
const stateName = state.charAt(0).toUpperCase() + state.slice(1);
allStates.add(stateName);
if (states?.length && !states.includes(stateName)) return;

if (
searchInput &&
!rule.name.toLowerCase().includes(searchInput.toLowerCase()) &&
!group.name.toLowerCase().includes(searchInput.toLowerCase()) &&
!group.file.toLowerCase().includes(searchInput.toLowerCase())
)
return;

filteredRules.push(rule);
if (state !== "no match" && state !== "unhealthy" && state !== "firing" && state !== "pending")
return;

const count = state === "firing" || state === "pending" ? rule?.alerts?.length : 1;
if (stateName in statesPerGroup) {
statesPerGroup[stateName] += count;
} else {
statesPerGroup[stateName] = count;
}
});
if (filteredRules.length) {
const g = Object.assign({}, group);
g.rules = filteredRules;
g.states = statesPerGroup;
filteredGroups.push(g);
}
});
return { filteredGroups, allTypes, allStates };
};
@@ -1,46 +1,75 @@
import { useTimeState } from "../../../state/time/TimeStateContext";
import { useEffect, useMemo, useState } from "preact/compat";
import { useMemo, useEffect, useState } from "preact/compat";
import { getGroupsUrl } from "../../../api/explore-alerts";
import { useAppState } from "../../../state/common/StateContext";
import { ErrorTypes, Group } from "../../../types";
import { useTimeState } from "../../../state/time/TimeStateContext";

interface FetchGroupsReturn {
groups: Group[];
isLoading: boolean;
error?: ErrorTypes | string;
pageInfo: PageInfo;
}

interface FetchGroupsProps {
blockFetch: boolean
blockFetch: boolean;
search: string;
ruleType: string;
states: string[];
pageNum: number;
onPageChange: (num: number) => () => void;
}

export const useFetchGroups = ({ blockFetch }: FetchGroupsProps): FetchGroupsReturn => {
interface PageInfo {
page: number;
total_pages: number;
total_groups: number;
total_rules: number;
}

const MAX_GROUPS = 100;

export const useFetchGroups = ({ blockFetch, pageNum, search, ruleType, states, onPageChange }: FetchGroupsProps): FetchGroupsReturn => {
const { serverUrl } = useAppState();
const { period } = useTimeState();

const [groups, setGroups] = useState<Group[]>([]);
const [isLoading, setIsLoading] = useState(false);
const [pageInfo, setPageInfo] = useState<PageInfo>({
page: pageNum,
total_pages: 1,
total_groups: 0,
total_rules: 0,
});
const [error, setError] = useState<ErrorTypes | string>();

const fetchUrl = useMemo(
() => getGroupsUrl(serverUrl),
[serverUrl],
() => getGroupsUrl(serverUrl, search, ruleType, states, MAX_GROUPS),
[serverUrl, search, ruleType, states],
);

const loaded = !!groups.length || !blockFetch;

useEffect(() => {
if (blockFetch) return;
const fetchData = async () => {
setIsLoading(true);
try {
const response = await fetch(fetchUrl);
const url = `${fetchUrl}&page_num=${pageNum}`;
const response = await fetch(url);
const resp = await response.json();

if (response.ok) {
const data = (resp.data.groups || []) as Group[];
setGroups(data.sort((a, b) => a.name.localeCompare(b.name)));
const loadedGroups = (resp.data.groups || []) as Group[];
setGroups(loadedGroups);
setPageInfo({
page: resp.page || 1,
total_pages: resp.total_pages || 1,
total_groups: resp.total_groups || 0,
total_rules: resp.total_rules || 0,
});
setError(undefined);
} else if (response.status === 400 && resp?.error?.includes("exceeds total amount of pages")) {
onPageChange(1)();
setError(`${resp.errorType}\r\n${resp?.error}`);
} else {
setError(`${resp.errorType}\r\n${resp?.error}`);
}
@@ -51,9 +80,8 @@ export const useFetchGroups = ({ blockFetch }: FetchGroupsProps): FetchGroupsRet
}
setIsLoading(false);
};

fetchData().catch(console.error);
}, [fetchUrl, period, loaded]);
}, [fetchUrl, period, loaded, pageNum]);

return { groups, isLoading, error };
return { groups, isLoading, error, pageInfo };
};
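
Net effect of the hook rework above: each page load requests `${fetchUrl}&page_num=${pageNum}` with group_limit pinned to MAX_GROUPS (100), sorting now happens server-side rather than via the removed localeCompare call, and a 400 response whose error mentions "exceeds total amount of pages" snaps the UI back to page 1 through onPageChange(1)() while still surfacing the error text.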
Some files were not shown because too many files have changed in this diff.