Compare commits

..

2 Commits

1300 changed files with 90512 additions and 75275 deletions

.github/copilot-instructions.md (new vendored file)

@@ -0,0 +1,23 @@
# Project Overview
VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
## Folder Structure
- `/app`: Contains the compilable binaries.
- `/lib`: Contains the reusable Go libraries.
- `/docs/victoriametrics`: Contains documentation for the project.
- `/apptest/tests`: Contains integration tests.
## Libraries and Frameworks
- Backend: Golang, no framework. Use third-party libraries sparingly.
- Frontend: React.
## Code review guidelines
Ensure the feature or bugfix includes a changelog entry in /docs/victoriametrics/changelog/CHANGELOG.md.
Verify the entry is under the ## tip section and matches the structure and style of existing entries.
Chore-only changes may be omitted from the changelog.


@@ -71,8 +71,7 @@ jobs:
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
go-version: stable
- name: Build victoria-metrics for ${{ matrix.os }}-${{ matrix.arch }}
run: make victoria-metrics-${{ matrix.os }}-${{ matrix.arch }}


@@ -27,21 +27,11 @@ jobs:
exit 0
fi
# Check raw commit objects for a "gpgsig" header as a fast early signal for
# contributors. Both GPG and SSH signatures use this header.
# This avoids relying on %G? which returns N for SSH commits.
# This check is not a security enforcement — unsigned commits cannot be merged
# anyway due to the GitHub repository merge policy.
unsigned=""
for sha in $(git rev-list $RANGE); do
if ! git cat-file commit "$sha" | grep -q "^gpgsig"; then
unsigned="$unsigned $sha"
fi
done
unsigned=$(git log --pretty="%H %G?" $RANGE | grep -vE " (G|E)$" || true)
if [ -n "$unsigned" ]; then
echo "Found unsigned commits:"
echo "$unsigned"
exit 1
fi
echo "All commits in PR are signed (GPG or SSH)"
echo "All commits in PR are signed (G or E)"


@@ -21,11 +21,9 @@ jobs:
id: go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version: stable
cache: false
- run: go version
- name: Cache Go artifacts
uses: actions/cache@v4
with:
@@ -34,7 +32,7 @@ jobs:
~/go/pkg/mod
~/go/bin
key: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-check-licenses-${{ steps.go.outputs.go-version }}-
restore-keys: go-artifacts-${{ runner.os }}-check-licenses-
- name: Check License
run: make check-licenses


@@ -36,8 +36,7 @@ jobs:
uses: actions/setup-go@v6
with:
cache: false
go-version-file: 'go.mod'
- run: go version
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
@@ -47,7 +46,7 @@ jobs:
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-
- name: Initialize CodeQL
uses: github/codeql-action/init@v4


@@ -28,7 +28,7 @@ jobs:
path: __vm-docs
- name: Import GPG key
uses: crazy-max/ghaction-import-gpg@v7
uses: crazy-max/ghaction-import-gpg@v6
id: import-gpg
with:
gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}


@@ -42,9 +42,8 @@ jobs:
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
go-version: stable
- run: go version
- name: Cache golangci-lint
uses: actions/cache@v4
@@ -52,7 +51,7 @@ jobs:
path: |
~/.cache/golangci-lint
~/go/bin
key: golangci-lint-${{ runner.os }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('.golangci.yml') }}
key: golangci-lint-${{ runner.os }}-${{ hashFiles('.golangci.yml') }}
- name: Run check-all
run: |
@@ -82,20 +81,19 @@ jobs:
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
go-version: stable
- name: Run tests
run: make ${{ matrix.scenario}}
run: GOGC=10 make ${{ matrix.scenario}}
- name: Publish coverage
uses: codecov/codecov-action@v6
uses: codecov/codecov-action@v5
with:
files: ./coverage.txt
apptest:
name: apptest
runs-on: apptest
integration:
name: integration
runs-on: ubuntu-latest
steps:
- name: Code checkout
@@ -109,8 +107,7 @@ jobs:
go.sum
Makefile
app/**/Makefile
go-version-file: 'go.mod'
- run: go version
go-version: stable
- name: Run app tests
run: make apptest
- name: Run integration tests
run: make integration-test


@@ -17,7 +17,7 @@ EXTRA_GO_BUILD_TAGS ?=
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
TAR_OWNERSHIP ?= --owner=1000 --group=1000
GOLANGCI_LINT_VERSION := 2.9.0
GOLANGCI_LINT_VERSION := 2.7.2
.PHONY: $(MAKECMDGOALS)
@@ -443,7 +443,7 @@ fmt:
gofmt -l -w -s ./apptest
vet:
go vet -tags 'synctest' ./lib/...
GOEXPERIMENT=synctest go vet ./lib/...
go vet ./app/...
go vet ./apptest/...
@@ -452,25 +452,28 @@ check-all: fmt vet golangci-lint govulncheck
clean-checkers: remove-golangci-lint remove-govulncheck
test:
go test -tags 'synctest' ./lib/... ./app/...
GOEXPERIMENT=synctest go test ./lib/... ./app/...
test-race:
go test -tags 'synctest' -race ./lib/... ./app/...
GOEXPERIMENT=synctest go test -race ./lib/... ./app/...
test-pure:
CGO_ENABLED=0 go test -tags 'synctest' ./lib/... ./app/...
GOEXPERIMENT=synctest CGO_ENABLED=0 go test ./lib/... ./app/...
test-full:
go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
GOEXPERIMENT=synctest go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
test-full-386:
GOARCH=386 go test -tags 'synctest' -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
GOEXPERIMENT=synctest GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
integration-test:
$(MAKE) apptest
apptest:
$(MAKE) victoria-metrics vmagent vmalert vmauth vmctl vmbackup vmrestore
go test ./apptest/... -skip="^Test(Cluster|Legacy).*"
apptest-legacy: victoria-metrics vmbackup vmrestore
integration-test-legacy: victoria-metrics vmbackup vmrestore
OS=$$(uname | tr '[:upper:]' '[:lower:]'); \
ARCH=$$(uname -m | tr '[:upper:]' '[:lower:]' | sed 's/x86_64/amd64/'); \
VERSION=v1.132.0; \
@@ -487,17 +490,17 @@ apptest-legacy: victoria-metrics vmbackup vmrestore
go test ./apptest/tests -run="^TestLegacySingle.*"
benchmark:
go test -run=NO_TESTS -bench=. ./lib/...
go test -run=NO_TESTS -bench=. ./app/...
GOEXPERIMENT=synctest go test -bench=. ./lib/...
go test -bench=. ./app/...
benchmark-pure:
CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./lib/...
CGO_ENABLED=0 go test -run=NO_TESTS -bench=. ./app/...
GOEXPERIMENT=synctest CGO_ENABLED=0 go test -bench=. ./lib/...
CGO_ENABLED=0 go test -bench=. ./app/...
vendor-update:
go get -u ./lib/...
go get -u ./app/...
go mod tidy -compat=1.26
go mod tidy -compat=1.24
go mod vendor
app-local:
@@ -521,7 +524,7 @@ install-qtc:
golangci-lint: install-golangci-lint
golangci-lint run --build-tags 'synctest'
GOEXPERIMENT=synctest golangci-lint run
install-golangci-lint:
which golangci-lint && (golangci-lint --version | grep -q $(GOLANGCI_LINT_VERSION)) || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v$(GOLANGCI_LINT_VERSION)


@@ -1,12 +1,12 @@
# VictoriaMetrics
[![Latest Release](https://img.shields.io/github/v/release/VictoriaMetrics/VictoriaMetrics?sort=semver&label=&filter=!*-victorialogs&logo=github&labelColor=gray&color=gray&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Freleases%2Flatest)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
[![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics?label=&logo=docker&logoColor=white&labelColor=2496ED&color=2496ED&link=https%3A%2F%2Fhub.docker.com%2Fr%2Fvictoriametrics%2Fvictoria-metrics)](https://hub.docker.com/u/victoriametrics)
![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics?label=&logo=docker&logoColor=white&labelColor=2496ED&color=2496ED&link=https%3A%2F%2Fhub.docker.com%2Fr%2Fvictoriametrics%2Fvictoria-metrics)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics?link=https%3A%2F%2Fgoreportcard.com%2Freport%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
[![Build Status](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml/badge.svg?branch=master&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Factions)](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml)
[![codecov](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics/branch/master/graph/badge.svg?link=https%3A%2F%2Fcodecov.io%2Fgh%2FVictoriaMetrics%2FVictoriaMetrics)](https://app.codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
[![License](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics?labelColor=green&label=&link=https%3A%2F%2Fgithub.com%2FVictoriaMetrics%2FVictoriaMetrics%2Fblob%2Fmaster%2FLICENSE)](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[![Join Slack](https://img.shields.io/badge/Join%20Slack-4A154B?logo=slack)](https://slack.victoriametrics.com)
![Slack](https://img.shields.io/badge/Join-4A154B?logo=slack&link=https%3A%2F%2Fslack.victoriametrics.com)
[![X](https://img.shields.io/twitter/follow/VictoriaMetrics?style=flat&label=Follow&color=black&logo=x&labelColor=black&link=https%3A%2F%2Fx.com%2FVictoriaMetrics)](https://x.com/VictoriaMetrics/)
[![Reddit](https://img.shields.io/reddit/subreddit-subscribers/VictoriaMetrics?style=flat&label=Join&labelColor=red&logoColor=white&logo=reddit&link=https%3A%2F%2Fwww.reddit.com%2Fr%2FVictoriaMetrics)](https://www.reddit.com/r/VictoriaMetrics/)
@@ -16,21 +16,16 @@
<img src="docs/victoriametrics/logo.webp" width="300" alt="VictoriaMetrics logo">
</picture>
VictoriaMetrics is a fast, cost-effective, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
Here are some resources and information about VictoriaMetrics:
- **Case studies**: [Grammarly, Roblox, Wix, Spotify,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
- **Available**: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), Docker images on [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
- **Deployment types**: [Single-node version](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) under [Apache License 2.0](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE).
- **Getting started:** Read [key concepts](https://docs.victoriametrics.com/victoriametrics/keyconcepts/) and follow the
[quick start guide](https://docs.victoriametrics.com/victoriametrics/quick-start/).
- **Community**: [Slack](https://slack.victoriametrics.com/) (join via [Slack Inviter](https://slack.victoriametrics.com/)), [X (Twitter)](https://x.com/VictoriaMetrics), [YouTube](https://www.youtube.com/@VictoriaMetrics). See full list [here](https://docs.victoriametrics.com/victoriametrics/#community-and-contributions).
- **Changelog**: Project evolves fast - check the [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics).
- **Enterprise support:** [Contact us](mailto:info@victoriametrics.com) for commercial support with additional [enterprise features](https://docs.victoriametrics.com/victoriametrics/enterprise/).
- **Enterprise releases:** Enterprise and [long-term support releases (LTS)](https://docs.victoriametrics.com/victoriametrics/lts-releases/) are publicly available and can be evaluated for free
using a [free trial license](https://victoriametrics.com/products/enterprise/trial/).
- **Security:** we achieved [security certifications](https://victoriametrics.com/security/) for Database Software Development and Software-Based Monitoring Services.
- Documentation: [docs.victoriametrics.com](https://docs.victoriametrics.com)
- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), docker images [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/victoriametrics/enterprise/)
- Changelog: [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics)
- Community: [Slack](https://slack.victoriametrics.com/), [X (Twitter)](https://x.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)
Yes, we open-source both the single-node VictoriaMetrics and the cluster version.


@@ -12,31 +12,6 @@ The following versions of VictoriaMetrics receive regular security fixes:
See [this page](https://victoriametrics.com/security/) for more details.
## Software Bill of Materials (SBOM)
Every VictoriaMetrics container{{% available_from "#" %}} image published to
[Docker Hub](https://hub.docker.com/u/victoriametrics)
and [Quay.io](https://quay.io/organization/victoriametrics)
includes an [SPDX](https://spdx.dev/) SBOM attestation
generated automatically by BuildKit during
`docker buildx build`.
To inspect the SBOM for an image:
```sh
docker buildx imagetools inspect \
docker.io/victoriametrics/victoria-metrics:latest \
--format "{{ json .SBOM }}"
```
To scan an image using its SBOM attestation with
[Trivy](https://github.com/aquasecurity/trivy):
```sh
trivy image --sbom-sources oci \
docker.io/victoriametrics/victoria-metrics:latest
```
## Reporting a Vulnerability
Please report any security issues to <security@victoriametrics.com>


@@ -33,13 +33,13 @@ func PopulateTimeTpl(b []byte, tGlobal time.Time) []byte {
}
switch strings.TrimSpace(parts[0]) {
case `TIME_S`:
return fmt.Appendf(nil, "%d", t.Unix())
return []byte(fmt.Sprintf("%d", t.Unix()))
case `TIME_MSZ`:
return fmt.Appendf(nil, "%d", t.Unix()*1e3)
return []byte(fmt.Sprintf("%d", t.Unix()*1e3))
case `TIME_MS`:
return fmt.Appendf(nil, "%d", timeToMillis(t))
return []byte(fmt.Sprintf("%d", timeToMillis(t)))
case `TIME_NS`:
return fmt.Appendf(nil, "%d", t.UnixNano())
return []byte(fmt.Sprintf("%d", t.UnixNano()))
default:
log.Fatalf("unknown time pattern %s in %s", parts[0], repl)
}
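
For context on this hunk: `fmt.Appendf` (available since Go 1.19) formats directly into a byte slice, while `[]byte(fmt.Sprintf(...))` builds an intermediate string first. A minimal standalone sketch with a hypothetical timestamp, showing the two forms produce identical bytes:

```go
package main

import (
	"bytes"
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1700000000, 0) // hypothetical fixed timestamp
	a := fmt.Appendf(nil, "%d", t.Unix())    // formats straight into a fresh []byte
	b := []byte(fmt.Sprintf("%d", t.Unix())) // allocates a string first, then copies it
	fmt.Println(bytes.Equal(a, b))           // true: identical output, one fewer allocation via Appendf
}
```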


@@ -49,11 +49,6 @@ func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels
Name: "__name__",
Value: m.Name,
})
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10557
labels = append(labels, prompb.Label{
Name: "host",
Value: sketch.Host,
})
for _, label := range m.Labels {
labels = append(labels, prompb.Label{
Name: label.Name,
@@ -62,6 +57,9 @@ func insertRows(at *auth.Token, sketches []*datadogsketches.Sketch, extraLabels
}
for _, tag := range sketch.Tags {
name, value := datadogutil.SplitTag(tag)
if name == "host" {
name = "exported_host"
}
labels = append(labels, prompb.Label{
Name: name,
Value: value,


@@ -18,7 +18,7 @@ func TestCalculateRetryDuration(t *testing.T) {
f := func(retryAfterDuration, retryDuration time.Duration, n int, expectMinDuration time.Duration) {
t.Helper()
for range n {
for i := 0; i < n; i++ {
retryDuration = getRetryDuration(retryAfterDuration, retryDuration, time.Minute)
}
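
Many hunks in this compare toggle between the classic counted loop and the range-over-int form added in Go 1.22. The two are interchangeable whenever the bound is not mutated inside the body; a minimal sketch:

```go
package main

import "fmt"

func main() {
	n := 3
	for i := 0; i < n; i++ { // classic counted loop
		fmt.Println("classic", i)
	}
	for i := range n { // equivalent since Go 1.22 (range over an int)
		fmt.Println("range", i)
	}
	for range n { // the index can be dropped entirely when unused
		fmt.Println("tick")
	}
}
```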


@@ -51,9 +51,9 @@ func testPushWriteRequest(t *testing.T, rowsCount, expectedBlockLenProm, expecte
func newTestWriteRequest(seriesCount, labelsCount int) *prompb.WriteRequest {
var wr prompb.WriteRequest
for i := range seriesCount {
for i := 0; i < seriesCount; i++ {
var labels []prompb.Label
for j := range labelsCount {
for j := 0; j < labelsCount; j++ {
labels = append(labels, prompb.Label{
Name: fmt.Sprintf("label_%d_%d", i, j),
Value: fmt.Sprintf("value_%d_%d", i, j),


@@ -20,7 +20,8 @@ import (
)
var (
unparsedLabelsGlobal = flagutil.NewArrayString("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to all -remoteWrite.url.")
unparsedLabelsGlobal = flagutil.NewArrayString("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
"Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage")
relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabeling configs, which are applied "+
"to all the metrics before sending them to -remoteWrite.url. See also -remoteWrite.urlRelabelConfig. "+
"The path can point either to local file or to http url. "+
@@ -38,7 +39,7 @@ var (
labelsGlobal []prompb.Label
remoteWriteRelabelConfigData atomic.Pointer[[]byte]
remoteWriteURLRelabelConfigData atomic.Pointer[[]any]
remoteWriteURLRelabelConfigData atomic.Pointer[[]interface{}]
relabelConfigReloads *metrics.Counter
relabelConfigReloadErrors *metrics.Counter
@@ -90,8 +91,8 @@ func WriteURLRelabelConfigData(w io.Writer) {
return
}
type urlRelabelCfg struct {
Url string `yaml:"url"`
RelabelConfig any `yaml:"relabel_config"`
Url string `yaml:"url"`
RelabelConfig interface{} `yaml:"relabel_config"`
}
var cs []urlRelabelCfg
for i, url := range *remoteWriteURLs {
@@ -144,7 +145,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
len(*relabelConfigPaths), (len(*remoteWriteURLs)))
}
var urlRelabelCfgs []any
var urlRelabelCfgs []interface{}
rcs.perURL = make([]*promrelabel.ParsedConfigs, len(*remoteWriteURLs))
for i, path := range *relabelConfigPaths {
if len(path) == 0 {
@@ -157,7 +158,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
}
rcs.perURL[i] = prc
var parsedCfg any
var parsedCfg interface{}
_ = yaml.Unmarshal(rawCfg, &parsedCfg)
urlRelabelCfgs = append(urlRelabelCfgs, parsedCfg)
}
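
The `any`/`interface{}` churn in this hunk is purely cosmetic: since Go 1.18 the builtin package declares `type any = interface{}`, so the two spellings are interchangeable everywhere, including as the type argument of `atomic.Pointer`. A tiny illustration:

```go
package main

import "fmt"

func main() {
	// builtin (Go 1.18+): type any = interface{}
	var x any = 42
	var y interface{} = x // assignable both ways; same type, different spelling
	fmt.Println(x, y)
}
```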


@@ -1080,7 +1080,7 @@ func (rwctx *remoteWriteCtx) tryPushTimeSeriesInternal(tss []prompb.TimeSeries)
}()
if len(labelsGlobal) > 0 {
// Make a copy of tss before adding extra labels to prevent
// Make a copy of tss before adding extra labels in order to prevent
// from affecting time series for other remoteWrite.url configs.
rctx = getRelabelCtx()
v = tssPool.Get().(*[]prompb.TimeSeries)


@@ -28,12 +28,12 @@ func TestGetLabelsHash_Distribution(t *testing.T) {
itemsCount := 1_000 * bucketsCount
m := make([]int, bucketsCount)
var labels []prompb.Label
for i := range itemsCount {
for i := 0; i < itemsCount; i++ {
labels = append(labels[:0], prompb.Label{
Name: "__name__",
Value: fmt.Sprintf("some_name_%d", i),
})
for j := range 10 {
for j := 0; j < 10; j++ {
labels = append(labels, prompb.Label{
Name: fmt.Sprintf("label_%d", j),
Value: fmt.Sprintf("value_%d_%d", i, j),
@@ -248,7 +248,7 @@ func TestShardAmountRemoteWriteCtx(t *testing.T) {
seriesCount := 100000
// build 1000000 series
tssBlock := make([]prompb.TimeSeries, 0, seriesCount)
for i := range seriesCount {
for i := 0; i < seriesCount; i++ {
tssBlock = append(tssBlock, prompb.TimeSeries{
Labels: []prompb.Label{
{
@@ -269,7 +269,7 @@ func TestShardAmountRemoteWriteCtx(t *testing.T) {
// build active time series set
nodes := make([]string, 0, remoteWriteCount)
activeTimeSeriesByNodes := make([]map[string]struct{}, remoteWriteCount)
for i := range remoteWriteCount {
for i := 0; i < remoteWriteCount; i++ {
nodes = append(nodes, fmt.Sprintf("node%d", i))
activeTimeSeriesByNodes[i] = make(map[string]struct{})
}


@@ -41,7 +41,7 @@ func TestParseInputValue_Success(t *testing.T) {
if len(outputExpected) != len(output) {
t.Fatalf("unexpected output length; got %d; want %d", len(outputExpected), len(output))
}
for i := range outputExpected {
for i := 0; i < len(outputExpected); i++ {
if outputExpected[i].Omitted != output[i].Omitted {
t.Fatalf("unexpected Omitted field in the output\ngot\n%v\nwant\n%v", output, outputExpected)
}


@@ -4,7 +4,6 @@ import (
"context"
"flag"
"fmt"
"maps"
"net"
"net/http"
"net/http/httptest"
@@ -13,7 +12,6 @@ import (
"os/signal"
"path/filepath"
"reflect"
"slices"
"sort"
"strings"
"syscall"
@@ -350,7 +348,9 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
for k := range alertEvalTimesMap {
alertEvalTimes = append(alertEvalTimes, k)
}
slices.Sort(alertEvalTimes)
sort.Slice(alertEvalTimes, func(i, j int) bool {
return alertEvalTimes[i] < alertEvalTimes[j]
})
// sort group eval order according to the given "group_eval_order".
sort.Slice(testGroups, func(i, j int) bool {
@@ -361,8 +361,12 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
var groups []*rule.Group
for _, group := range testGroups {
mergedExternalLabels := make(map[string]string)
maps.Copy(mergedExternalLabels, tg.ExternalLabels)
maps.Copy(mergedExternalLabels, externalLabels)
for k, v := range tg.ExternalLabels {
mergedExternalLabels[k] = v
}
for k, v := range externalLabels {
mergedExternalLabels[k] = v
}
ng := rule.NewGroup(group, q, time.Minute, mergedExternalLabels)
ng.Init()
groups = append(groups, ng)
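
This hunk trades hand-rolled loops for generic stdlib helpers: `slices.Sort` replaces the `sort.Slice` closure and `maps.Copy` (Go 1.21) replaces the key-by-key copy loops. A minimal sketch of the equivalence, with hypothetical values:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	evalTimes := []float64{3, 1, 2}
	slices.Sort(evalTimes) // same result as sort.Slice with an a[i] < a[j] comparator

	merged := make(map[string]string)
	maps.Copy(merged, map[string]string{"env": "prod", "dc": "eu"}) // like a for-range copy loop
	maps.Copy(merged, map[string]string{"env": "staging"})          // later copies win on key clashes

	fmt.Println(evalTimes, merged) // [1 2 3] map[dc:eu env:staging]
}
```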


@@ -81,9 +81,12 @@ func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool)
if g.Interval.Duration() < 0 {
return fmt.Errorf("interval shouldn't be lower than 0")
}
// if `eval_offset` is set, the group interval must be specified explicitly (instead of being inherited from the global evaluationInterval flag) and must be bigger than the offset.
if g.EvalOffset.Duration().Abs() > g.Interval.Duration() {
return fmt.Errorf("the abs value of eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
if g.EvalOffset.Duration() < 0 {
return fmt.Errorf("eval_offset shouldn't be lower than 0")
}
// if `eval_offset` is set, interval won't use the global evaluationInterval flag and must be bigger than the offset.
if g.EvalOffset.Duration() > g.Interval.Duration() {
return fmt.Errorf("eval_offset should be smaller than interval; now eval_offset: %v, interval: %v", g.EvalOffset.Duration(), g.Interval.Duration())
}
if g.EvalOffset != nil && g.EvalDelay != nil {
return fmt.Errorf("eval_offset cannot be used with eval_delay")


@@ -176,17 +176,11 @@ func TestGroupValidate_Failure(t *testing.T) {
}, false, "interval shouldn't be lower than 0")
f(&Group{
Name: "too big eval_offset",
Name: "wrong eval_offset",
Interval: promutil.NewDuration(time.Minute),
EvalOffset: promutil.NewDuration(2 * time.Minute),
}, false, "eval_offset should be smaller than interval")
f(&Group{
Name: "too big negative eval_offset",
Interval: promutil.NewDuration(time.Minute),
EvalOffset: promutil.NewDuration(-2 * time.Minute),
}, false, "eval_offset should be smaller than interval")
limit := -1
f(&Group{
Name: "wrong limit",


@@ -2,7 +2,6 @@ package config
import (
"fmt"
"slices"
"strings"
"github.com/VictoriaMetrics/VictoriaLogs/lib/logstorage"
@@ -81,8 +80,12 @@ func (t *Type) ValidateExpr(expr string) error {
if err != nil {
return fmt.Errorf("cannot obtain labels from LogsQL expr: %q, err: %w", expr, err)
}
if slices.Contains(labels, "_time") {
return fmt.Errorf("bad LogsQL expr: %q, err: cannot contain time buckets stats pipe `stats by (_time:step)`", expr)
for i := range labels {
// VictoriaLogs inserts the `_time` field as a label in the result when querying with `stats by (_time:step)`,
// making the result meaningless and possibly leading to cardinality issues.
if labels[i] == "_time" {
return fmt.Errorf("bad LogsQL expr: %q, err: cannot contain time buckets stats pipe `stats by (_time:step)`", expr)
}
}
default:
return fmt.Errorf("unknown datasource type=%q", t.Name)


@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"net/url"
"strings"
@@ -92,7 +91,9 @@ func (c *Client) Clone() *Client {
ns.extraHeaders = make([]keyValue, len(c.extraHeaders))
copy(ns.extraHeaders, c.extraHeaders)
}
maps.Copy(ns.extraParams, c.extraParams)
for k, v := range c.extraParams {
ns.extraParams[k] = v
}
return ns
}


@@ -34,7 +34,7 @@ type promResponse struct {
// Stats supported by VictoriaMetrics since v1.90
Stats struct {
SeriesFetched *string `json:"seriesFetched,omitempty"`
} `json:"stats"`
} `json:"stats,omitempty"`
// IsPartial supported by VictoriaMetrics
IsPartial *bool `json:"isPartial,omitempty"`
}


@@ -134,7 +134,7 @@ func (ls Labels) String() string {
func LabelCompare(a, b Labels) int {
l := min(len(b), len(a))
for i := range l {
for i := 0; i < l; i++ {
if a[i].Name != b[i].Name {
if a[i].Name < b[i].Name {
return -1


@@ -13,7 +13,7 @@ func BenchmarkPromInstantUnmarshal(b *testing.B) {
// BenchmarkParsePrometheusResponse/Instant_std+fastjson-10 1760 668959 ns/op 280147 B/op 5781 allocs/op
b.Run("Instant std+fastjson", func(b *testing.B) {
for range b.N {
for i := 0; i < b.N; i++ {
var pi promInstant
err = pi.Unmarshal(data)
if err != nil {


@@ -56,7 +56,7 @@ absolute path to all .tpl files in root.
-rule.templates="dir/**/*.tpl". Includes all the .tpl files in "dir" subfolders recursively.
`)
configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule', '-rule.templates' and '-notifier.config' files. "+
configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule' or '-notifier.config' files. "+
"By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes.")
httpListenAddrs = flagutil.NewArrayString("httpListenAddr", "Address to listen for incoming http requests. See also -tls and -httpListenAddr.useProxyProtocol")


@@ -98,7 +98,7 @@ func (m *manager) close() {
m.wg.Wait()
}
func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) {
func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) error {
id := g.GetID()
g.Init()
m.wg.Go(func() {
@@ -110,6 +110,7 @@ func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) {
})
m.groups[id] = g
return nil
}
func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore bool) error {
@@ -118,7 +119,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
for _, cfg := range groupsCfg {
for _, r := range cfg.Rules {
if rrPresent && arPresent {
break
continue
}
if r.Record != "" {
rrPresent = true
@@ -161,7 +162,10 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
}
}
for _, ng := range groupsRegistry {
m.startGroup(ctx, ng, restore)
if err := m.startGroup(ctx, ng, restore); err != nil {
m.groupsMu.Unlock()
return err
}
}
m.groupsMu.Unlock()


@@ -69,7 +69,7 @@ func TestManagerUpdateConcurrent(t *testing.T) {
for n := range workers {
wg.Go(func() {
r := rand.New(rand.NewSource(int64(n)))
for range iterations {
for i := 0; i < iterations; i++ {
rnd := r.Intn(len(paths))
cfg, err := config.Parse([]string{paths[rnd]}, notifier.ValidateTemplates, true)
if err != nil { // update can fail and this is expected
@@ -259,7 +259,7 @@ func compareGroups(t *testing.T, a, b *rule.Group) {
for i, r := range a.Rules {
got, want := r, b.Rules[i]
if a.CreateID() != b.CreateID() {
t.Fatalf("expected to have rule %d; got %d", want.ID(), got.ID())
t.Fatalf("expected to have rule %q; got %q", want.ID(), got.ID())
}
if err := rule.CompareRules(t, want, got); err != nil {
t.Fatalf("comparison error: %s", err)


@@ -216,7 +216,7 @@ consul_sd_configs:
for n := range workers {
wg.Go(func() {
r := rand.New(rand.NewSource(int64(n)))
for range iterations {
for i := 0; i < iterations; i++ {
rnd := r.Intn(len(paths))
_ = cw.reload(paths[rnd]) // update can fail and this is expected
_ = cw.notifiers()


@@ -11,6 +11,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@@ -290,7 +291,7 @@ func GetTargets() map[TargetType][]Target {
}
// Send sends alerts to all active notifiers
func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) chan error {
func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) *vmalertutil.ErrGroup {
alertsToSend := make([]Alert, 0, len(alerts))
lblss := make([][]prompb.Label, 0, len(alerts))
// apply global relabel config first without modifying original alerts in alerts
@@ -303,18 +304,17 @@ func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string
lblss = append(lblss, lbls)
}
errGr := new(vmalertutil.ErrGroup)
wg := sync.WaitGroup{}
activeNotifiers := getActiveNotifiers()
errCh := make(chan error, len(activeNotifiers))
defer close(errCh)
for i := range activeNotifiers {
nt := activeNotifiers[i]
wg.Go(func() {
if err := nt.Send(ctx, alertsToSend, lblss, notifierHeaders); err != nil {
errCh <- fmt.Errorf("failed to send alerts to addr %q: %w", nt.Addr(), err)
errGr.Add(fmt.Errorf("failed to send alerts to addr %q: %w", nt.Addr(), err))
}
})
}
wg.Wait()
return errCh
return errGr
}
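
`vmalertutil.ErrGroup` itself is not part of this compare, but the call sites above (`Add`, `Err`) and the `Error` hunk further down imply a concurrency-safe error collector roughly like the following sketch (an assumption-based reconstruction, not the actual implementation):

```go
package vmalertutil

import (
	"fmt"
	"strings"
	"sync"
)

// ErrGroup collects errors from concurrently running goroutines.
type ErrGroup struct {
	mu   sync.Mutex
	errs []error
}

// Add appends err to the group.
func (eg *ErrGroup) Add(err error) {
	eg.mu.Lock()
	eg.errs = append(eg.errs, err)
	eg.mu.Unlock()
}

// Err returns eg as an error if at least one error was added, nil otherwise.
func (eg *ErrGroup) Err() error {
	eg.mu.Lock()
	defer eg.mu.Unlock()
	if len(eg.errs) == 0 {
		return nil
	}
	return eg
}

// Error renders all collected errors as a single message,
// matching the "errors(N): ..." format seen in the hunks below.
func (eg *ErrGroup) Error() string {
	eg.mu.Lock()
	defer eg.mu.Unlock()
	if len(eg.errs) == 0 {
		return ""
	}
	var b strings.Builder
	fmt.Fprintf(&b, "errors(%d): ", len(eg.errs))
	for i, err := range eg.errs {
		b.WriteString(err.Error())
		if i != len(eg.errs)-1 {
			b.WriteString("\n")
		}
	}
	return b.String()
}
```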


@@ -205,9 +205,7 @@ alert_relabel_configs:
},
}
errG := Send(context.Background(), firingAlerts, nil)
for err := range errG {
if err != nil {
t.Errorf("unexpected error when sending alerts: %s", err)
}
if errG.Err() != nil {
t.Fatalf("unexpected error when sending alerts: %s", err)
}
}


@@ -113,7 +113,7 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
input: make(chan prompb.TimeSeries, cfg.MaxQueueSize),
}
for range cc {
for i := 0; i < cc; i++ {
c.run(ctx)
}
return c, nil
@@ -186,11 +186,6 @@ func (c *Client) run(ctx context.Context) {
return
case <-ticker.C:
c.flush(ctx, wr)
// drain the potential stale tick to avoid small or empty flushes after a slow flush.
select {
case <-ticker.C:
default:
}
case ts, ok := <-c.input:
if !ok {
continue
@@ -243,10 +238,8 @@ func (c *Client) flush(ctx context.Context, wr *prompb.WriteRequest) {
defer func() {
sendDuration.Add(time.Since(timeStart).Seconds())
}()
attempts := 0
L:
for {
for attempts := 0; ; attempts++ {
err := c.send(ctx, b)
if err != nil && (errors.Is(err, io.EOF) || netutil.IsTrivialNetworkError(err)) {
// Something in the middle between client and destination might be closing
@@ -288,7 +281,6 @@ L:
time.Sleep(retryInterval)
retryInterval *= 2
attempts++
}
rwErrors.Inc()


@@ -44,7 +44,7 @@ func TestClient_Push(t *testing.T) {
r := rand.New(rand.NewSource(1))
const rowsN = int(1e4)
for range rowsN {
for i := 0; i < rowsN; i++ {
s := prompb.TimeSeries{
Samples: []prompb.Sample{{
Value: r.Float64(),
@@ -102,7 +102,7 @@ func TestClient_run_maxBatchSizeDuringShutdown(t *testing.T) {
}
// push time series to the client.
for range pushCnt {
for i := 0; i < pushCnt; i++ {
if err = rwClient.Push(prompb.TimeSeries{}); err != nil {
t.Fatalf("cannot time series to the client: %s", err)
}


@@ -22,7 +22,7 @@ func TestDebugClient_Push(t *testing.T) {
const rowsN = 100
var sent int
for i := range rowsN {
for i := 0; i < rowsN; i++ {
s := prompb.TimeSeries{
Samples: []prompb.Sample{{
Value: float64(i),


@@ -789,7 +789,16 @@ func firingAlertStaleTimeSeries(ls map[string]string, timestamp int64) []prompb.
// restore restores the value of ActiveAt field for active alerts,
// based on previously written time series `alertForStateMetricName`.
// Only rules with For > 0 can be restored.
func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts time.Time, lookback time.Duration) error {
if ar.For < 1 {
return nil
}
if len(ar.alerts) < 1 {
return nil
}
nameStr := fmt.Sprintf("%s=%q", alertNameLabel, ar.Name)
if !*disableAlertGroupLabel {
nameStr = fmt.Sprintf("%s=%q,%s=%q", alertGroupNameLabel, ar.GroupName, alertNameLabel, ar.Name)
@@ -809,9 +818,7 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
expr := fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
alertForStateMetricName, nameStr, labelsFilter, int(lookback.Seconds()))
// query ALERTS_FOR_STATE at `ts-1s` instead of `ts` to avoid retrieving data written in the current run,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10335
res, _, err := q.Query(ctx, expr, ts.Add(-1*time.Second))
res, _, err := q.Query(ctx, expr, ts)
if err != nil {
return fmt.Errorf("failed to execute restore query %q: %w ", expr, err)
}


@@ -1,106 +0,0 @@
//go:build synctest
package rule
import (
"context"
"strings"
"testing"
"testing/synctest"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
)
// TestAlertingRule_ActiveAtPreservedInAnnotations ensures that the fix for
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543 is preserved
// while allowing query templates in labels (https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9783)
func TestAlertingRule_ActiveAtPreservedInAnnotations(t *testing.T) {
// wrap into synctest because of time manipulations
synctest.Test(t, func(t *testing.T) {
fq := &datasource.FakeQuerier{}
ar := &AlertingRule{
Name: "TestActiveAtPreservation",
Labels: map[string]string{
"test_query_in_label": `{{ "static_value" }}`,
},
Annotations: map[string]string{
"description": "Alert active since {{ $activeAt }}",
},
alerts: make(map[uint64]*notifier.Alert),
q: fq,
state: &ruleState{
entries: make([]StateEntry, 10),
},
}
// Mock query result - return empty result to make suppress_for_mass_alert = false
// (no need to add anything to fq for empty result)
// Add a metric that should trigger the alert
fq.Add(metricWithValueAndLabels(t, 1, "instance", "server1"))
// First execution - creates new alert
ts1 := time.Now()
_, err := ar.exec(context.TODO(), ts1, 0)
if err != nil {
t.Fatalf("unexpected error on first exec: %s", err)
}
if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}
firstAlert := ar.GetAlerts()[0]
// Verify first execution: activeAt should be ts1 and annotation should reflect it
if !firstAlert.ActiveAt.Equal(ts1) {
t.Fatalf("expected activeAt to be %v, got %v", ts1, firstAlert.ActiveAt)
}
// Extract time from annotation (format will be like "Alert active since 2025-09-30 08:55:13.638551611 -0400 EDT m=+0.002928464")
expectedTimeStr := ts1.Format("2006-01-02 15:04:05")
if !strings.Contains(firstAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("first exec annotation should contain time %s, got: %s", expectedTimeStr, firstAlert.Annotations["description"])
}
// Second execution - should preserve activeAt in annotation
// Ensure different timestamp with different seconds
// sleep is non-blocking thanks to synctest
time.Sleep(2 * time.Second)
ts2 := time.Now()
_, err = ar.exec(context.TODO(), ts2, 0)
if err != nil {
t.Fatalf("unexpected error on second exec: %s", err)
}
// Get the alert again (should be the same alert)
if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}
secondAlert := ar.GetAlerts()[0]
// Critical test: activeAt should still be ts1, not ts2
if !secondAlert.ActiveAt.Equal(ts1) {
t.Fatalf("activeAt should be preserved as %v, but got %v", ts1, secondAlert.ActiveAt)
}
// Critical test: annotation should still contain ts1 time, not ts2
if !strings.Contains(secondAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("second exec annotation should still contain original time %s, got: %s", expectedTimeStr, secondAlert.Annotations["description"])
}
// Additional verification: annotation should NOT contain ts2 time
ts2TimeStr := ts2.Format("2006-01-02 15:04:05")
if strings.Contains(secondAlert.Annotations["description"], ts2TimeStr) {
t.Fatalf("annotation should NOT contain new eval time %s, got: %s", ts2TimeStr, secondAlert.Annotations["description"])
}
// Verify query template in labels still works (this would fail if query templates were broken)
if firstAlert.Labels["test_query_in_label"] != "static_value" {
t.Fatalf("expected test_query_in_label=static_value, got %s", firstAlert.Labels["test_query_in_label"])
}
})
}


@@ -10,6 +10,7 @@ import (
"strings"
"sync"
"testing"
"testing/synctest"
"time"
"github.com/VictoriaMetrics/metrics"
@@ -1478,3 +1479,95 @@ func TestAlertingRule_QueryTemplateInLabels(t *testing.T) {
t.Fatalf("expected 'suppress_for_mass_alert' label to be 'true' or 'false', got '%s'", suppressLabel)
}
}
// TestAlertingRule_ActiveAtPreservedInAnnotations ensures that the fix for
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543 is preserved
// while allowing query templates in labels (https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9783)
func TestAlertingRule_ActiveAtPreservedInAnnotations(t *testing.T) {
// wrap into synctest because of time manipulations
synctest.Test(t, func(t *testing.T) {
fq := &datasource.FakeQuerier{}
ar := &AlertingRule{
Name: "TestActiveAtPreservation",
Labels: map[string]string{
"test_query_in_label": `{{ "static_value" }}`,
},
Annotations: map[string]string{
"description": "Alert active since {{ $activeAt }}",
},
alerts: make(map[uint64]*notifier.Alert),
q: fq,
state: &ruleState{
entries: make([]StateEntry, 10),
},
}
// Mock query result - return empty result to make suppress_for_mass_alert = false
// (no need to add anything to fq for empty result)
// Add a metric that should trigger the alert
fq.Add(metricWithValueAndLabels(t, 1, "instance", "server1"))
// First execution - creates new alert
ts1 := time.Now()
_, err := ar.exec(context.TODO(), ts1, 0)
if err != nil {
t.Fatalf("unexpected error on first exec: %s", err)
}
if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}
firstAlert := ar.GetAlerts()[0]
// Verify first execution: activeAt should be ts1 and annotation should reflect it
if !firstAlert.ActiveAt.Equal(ts1) {
t.Fatalf("expected activeAt to be %v, got %v", ts1, firstAlert.ActiveAt)
}
// Extract time from annotation (format will be like "Alert active since 2025-09-30 08:55:13.638551611 -0400 EDT m=+0.002928464")
expectedTimeStr := ts1.Format("2006-01-02 15:04:05")
if !strings.Contains(firstAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("first exec annotation should contain time %s, got: %s", expectedTimeStr, firstAlert.Annotations["description"])
}
// Second execution - should preserve activeAt in annotation
// Ensure different timestamp with different seconds
// sleep is non-blocking thanks to synctest
time.Sleep(2 * time.Second)
ts2 := time.Now()
_, err = ar.exec(context.TODO(), ts2, 0)
if err != nil {
t.Fatalf("unexpected error on second exec: %s", err)
}
// Get the alert again (should be the same alert)
if len(ar.alerts) != 1 {
t.Fatalf("expected 1 alert, got %d", len(ar.alerts))
}
secondAlert := ar.GetAlerts()[0]
// Critical test: activeAt should still be ts1, not ts2
if !secondAlert.ActiveAt.Equal(ts1) {
t.Fatalf("activeAt should be preserved as %v, but got %v", ts1, secondAlert.ActiveAt)
}
// Critical test: annotation should still contain ts1 time, not ts2
if !strings.Contains(secondAlert.Annotations["description"], expectedTimeStr) {
t.Fatalf("second exec annotation should still contain original time %s, got: %s", expectedTimeStr, secondAlert.Annotations["description"])
}
// Additional verification: annotation should NOT contain ts2 time
ts2TimeStr := ts2.Format("2006-01-02 15:04:05")
if strings.Contains(secondAlert.Annotations["description"], ts2TimeStr) {
t.Fatalf("annotation should NOT contain new eval time %s, got: %s", ts2TimeStr, secondAlert.Annotations["description"])
}
// Verify query template in labels still works (this would fail if query templates were broken)
if firstAlert.Labels["test_query_in_label"] != "static_value" {
t.Fatalf("expected test_query_in_label=static_value, got %s", firstAlert.Labels["test_query_in_label"])
}
})
}


@@ -6,9 +6,7 @@ import (
"flag"
"fmt"
"hash/fnv"
"maps"
"net/url"
"os"
"sync"
"time"
@@ -20,7 +18,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
@@ -32,8 +29,8 @@ var (
"0 means no limit.")
ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param.")
resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier.")
maxResolveDuration = flag.Duration("rule.maxResolveDuration", 0, "Limits the maximum duration for automatic alert expiration, "+
resendDelay = flag.Duration("rule.resendDelay", 0, "MiniMum amount of time to wait before resending an alert to notifier.")
maxResolveDuration = flag.Duration("rule.maxResolveDuration", 0, "Limits the maxiMum duration for automatic alert expiration, "+
"which by default is 4 times evaluationInterval of the parent group")
evalDelay = flag.Duration("rule.evalDelay", 30*time.Second, "Adjustment of the 'time' parameter for rule evaluation requests to compensate intentional data delay from the datasource. "+
"Normally, should be equal to '-search.latencyOffset' (cmd-line flag configured for VictoriaMetrics single-node or vmselect). "+
@@ -99,7 +96,9 @@ type groupMetrics struct {
// set2 has priority over set1.
func mergeLabels(groupName, ruleName string, set1, set2 map[string]string) map[string]string {
r := map[string]string{}
maps.Copy(r, set1)
for k, v := range set1 {
r[k] = v
}
for k, v := range set2 {
if prevV, ok := r[k]; ok {
logger.Infof("label %q=%q for rule %q.%q overwritten with external label %q=%q",
@@ -214,7 +213,6 @@ func (g *Group) CreateID() uint64 {
// restore restores alerts state for group rules
func (g *Group) restore(ctx context.Context, qb datasource.QuerierBuilder, ts time.Time, lookback time.Duration) error {
for _, rule := range g.Rules {
// Only alerting rules with `for > 0` that have active alerts from the first evaluation can be restored
ar, ok := rule.(*AlertingRule)
if !ok {
continue
@@ -222,9 +220,6 @@ func (g *Group) restore(ctx context.Context, qb datasource.QuerierBuilder, ts ti
if ar.For < 1 {
continue
}
if len(ar.alerts) < 1 {
return nil
}
q := qb.BuildWithParams(datasource.QuerierParams{
EvaluationInterval: g.Interval,
QueryParams: g.Params,
@@ -338,11 +333,6 @@ func (g *Group) Init() {
// Start starts group's evaluation
func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasource.QuerierBuilder) {
defer func() { close(g.finishedCh) }()
e := &executor{
Rw: rw,
notifierHeaders: g.NotifierHeaders,
}
evalTS := time.Now()
// sleep random duration to spread group rules evaluation
// over maxStartDelay to reduce the load on datasource.
@@ -377,19 +367,22 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
evalTS = evalTS.Add(sleepBeforeStart)
}
e := &executor{
Rw: rw,
notifierHeaders: g.NotifierHeaders,
}
g.infof("started")
eval := func(ctx context.Context, ts time.Time) time.Time {
eval := func(ctx context.Context, ts time.Time) {
g.metrics.iterationTotal.Inc()
start := time.Now()
if len(g.Rules) < 1 {
g.metrics.iterationDuration.UpdateDuration(start)
g.mu.Lock()
g.LastEvaluation = start
g.mu.Unlock()
return ts
return
}
resolveDuration := getResolveDuration(g.Interval, *resendDelay, *maxResolveDuration)
@@ -402,33 +395,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
}
}
g.metrics.iterationDuration.UpdateDuration(start)
g.mu.Lock()
g.LastEvaluation = start
g.mu.Unlock()
if g.EvalOffset != nil && e.Rw != nil {
hostname, err := os.Hostname()
if err != nil {
hostname = "unknown"
}
labels := map[string]string{
"__name__": "vmalert_eval_timestamp",
"host": hostname,
"group": g.Name,
"file": g.File,
}
var ls []prompb.Label
for k, v := range labels {
ls = append(ls, prompb.Label{
Name: k,
Value: v,
})
}
ts := newTimeSeries([]float64{float64(ts.Unix())}, []int64{start.Unix()}, ls)
if err := e.Rw.Push(ts); err != nil {
logger.Errorf("group %q: failed to push evaluation timestamp: %s", g.Name, err)
}
}
return ts
}
evalCtx, cancel := context.WithCancel(ctx)
@@ -437,15 +404,15 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
g.mu.Unlock()
defer g.evalCancel()
eval(evalCtx, evalTS)
t := time.NewTicker(g.Interval)
defer t.Stop()
realEvalTS := eval(evalCtx, evalTS)
// restore the rules state after the first evaluation
// so only active alerts can be restored.
if rr != nil {
err := g.restore(ctx, rr, realEvalTS, *remoteReadLookBack)
err := g.restore(ctx, rr, evalTS, *remoteReadLookBack)
if err != nil {
logger.Errorf("error while restoring ruleState for group %q: %s", g.Name, err)
}
@@ -516,15 +483,8 @@ func (g *Group) UpdateWith(newGroup *Group) {
// delayBeforeStart calculates delay based on Group ID, so all groups will start at different moments of time.
func (g *Group) delayBeforeStart(ts time.Time, maxDelay time.Duration) time.Duration {
if g.EvalOffset != nil {
offset := *g.EvalOffset
// adjust the offset for negative evalOffset, the rule is:
// `eval_offset: -x` is equivalent to `eval_offset: y` for `interval: x+y`.
// For example, `eval_offset: -6m` is equivalent to `eval_offset: 4m` for `interval: 10m`.
if offset < 0 {
offset += g.Interval
}
// if offset is specified, ignore the maxDelay and return a duration aligned with offset
currentOffsetPoint := ts.Truncate(g.Interval).Add(offset)
currentOffsetPoint := ts.Truncate(g.Interval).Add(*g.EvalOffset)
if currentOffsetPoint.Before(ts) {
// wait until the next offset point
return currentOffsetPoint.Add(g.Interval).Sub(ts)
@@ -533,8 +493,11 @@ func (g *Group) delayBeforeStart(ts time.Time, maxDelay time.Duration) time.Dura
}
// otherwise, return a random duration between [0..min(interval, maxDelay)] based on group ID
// artificially limit interval, so groups with big intervals could start sooner.
interval := min(g.Interval, maxDelay)
interval := g.Interval
if interval > maxDelay {
// artificially limit interval, so groups with big intervals could start sooner.
interval = maxDelay
}
var randSleep time.Duration
randSleep = time.Duration(float64(interval) * (float64(g.GetID()) / (1 << 64)))
sleepOffset := time.Duration(ts.UnixNano() % interval.Nanoseconds())
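
The offset branch above aligns each evaluation to `ts.Truncate(interval) + offset`, waiting for the next aligned point when the current one has already passed. A standalone sketch of that arithmetic, with hypothetical values matching the delay tests below (5m interval, 3m offset):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 5 * time.Minute
	offset := 3 * time.Minute // e.g. eval_offset: 3m (or the normalized form of -2m)
	ts := time.Date(2023, 1, 1, 0, 1, 0, 0, time.UTC)

	point := ts.Truncate(interval).Add(offset) // 00:03:00, the offset point of the current interval
	if point.Before(ts) {
		point = point.Add(interval) // the point already passed: wait for the next one
	}
	fmt.Println(point.Sub(ts)) // 2m0s of delay before the first evaluation
}
```
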
@@ -792,7 +755,6 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
return fmt.Errorf("rule %q: failed to execute: %w", r, err)
}
var errG vmalertutil.ErrGroup
if e.Rw != nil {
pushToRW := func(tss []prompb.TimeSeries) error {
var lastErr error
@@ -804,26 +766,20 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
return lastErr
}
if err := pushToRW(tss); err != nil {
errG.Add(err)
return err
}
}
ar, ok := r.(*AlertingRule)
if !ok {
return errG.Err()
return nil
}
alerts := ar.alertsToSend(resolveDuration, *resendDelay)
if len(alerts) < 1 {
return errG.Err()
return nil
}
notifierErr := notifier.Send(ctx, alerts, e.notifierHeaders)
for err := range notifierErr {
if err != nil {
errG.Add(fmt.Errorf("rule %q: notifier failure: %w", r, err))
}
}
return errG.Err()
errGr := notifier.Send(ctx, alerts, e.notifierHeaders)
return errGr.Err()
}


@@ -405,8 +405,7 @@ func TestGroupStart(t *testing.T) {
var cur uint64
prev := g.metrics.iterationTotal.Get()
i := 0
for {
for i := 0; ; i++ {
if i > 40 {
t.Fatalf("group wasn't able to perform %d evaluations during %d eval intervals", n, i)
}
@@ -415,7 +414,6 @@ func TestGroupStart(t *testing.T) {
return
}
time.Sleep(interval)
i++
}
}
@@ -606,15 +604,6 @@ func TestGroupStartDelay(t *testing.T) {
f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")
// test group with negative offset -2min, which is equivalent to 3min offset for 5min interval
offset = -2 * time.Minute
g.EvalOffset = &offset
f("2023-01-01T00:00:15.000+00:00", "2023-01-01T00:03:00.000+00:00")
f("2023-01-01T00:01:00.000+00:00", "2023-01-01T00:03:00.000+00:00")
f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")
maxDelay = time.Minute * 1
g.EvalOffset = nil


@@ -121,7 +121,7 @@ func (s *ruleState) add(e StateEntry) {
func replayRule(r Rule, start, end time.Time, rw remotewrite.RWClient, replayRuleRetryAttempts int) (int, error) {
var err error
var tss []prompb.TimeSeries
for i := range replayRuleRetryAttempts {
for i := 0; i < replayRuleRetryAttempts; i++ {
tss, err = r.execRange(context.Background(), start, end)
if err == nil {
break


@@ -40,7 +40,7 @@ func TestRule_state(t *testing.T) {
}
var last time.Time
for range stateEntriesN * 2 {
for i := 0; i < stateEntriesN*2; i++ {
last = time.Now()
r.state.add(StateEntry{At: last})
}
@@ -68,7 +68,7 @@ func TestRule_stateConcurrent(_ *testing.T) {
var wg sync.WaitGroup
for range workers {
wg.Go(func() {
for range iterations {
for i := 0; i < iterations; i++ {
r.state.add(StateEntry{At: time.Now()})
r.state.getAll()
r.state.getLast()


@@ -19,13 +19,13 @@ func CompareRules(t *testing.T, a, b Rule) error {
case *AlertingRule:
br, ok := b.(*AlertingRule)
if !ok {
return fmt.Errorf("rule %d supposed to be of type AlertingRule", b.ID())
return fmt.Errorf("rule %q supposed to be of type AlertingRule", b.ID())
}
return compareAlertingRules(t, v, br)
case *RecordingRule:
br, ok := b.(*RecordingRule)
if !ok {
return fmt.Errorf("rule %d supposed to be of type RecordingRule", b.ID())
return fmt.Errorf("rule %q supposed to be of type RecordingRule", b.ID())
}
return compareRecordingRules(t, v, br)
default:


@@ -57,8 +57,12 @@ type ApiGroup struct {
EvalOffset float64 `json:"eval_offset,omitempty"`
// EvalDelay will adjust the `time` parameter of rule evaluation requests to compensate intentional query delay from datasource.
EvalDelay float64 `json:"eval_delay,omitempty"`
// States represents counts per each rule state
States map[string]int `json:"states"`
// Unhealthy unhealthy rules count
Unhealthy int
// Healthy passing rules count
Healthy int
// NoMatch not matching rules count
NoMatch int
}
// APILink returns a link to the group's JSON representation.
@@ -130,11 +134,6 @@ type ApiRule struct {
Updates []StateEntry `json:"-"`
}
// IsNoMatch returns true if rule is in nomatch state
func (r *ApiRule) IsNoMatch() bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}
// ApiAlert represents a notifier.AlertingRule state
// for WEB view
// https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
@@ -236,20 +235,6 @@ func NewAlertAPI(ar *AlertingRule, a *notifier.Alert) *ApiAlert {
return aa
}
func (r *ApiRule) ExtendState() {
if len(r.Alerts) > 0 {
return
}
if r.State == "" {
r.State = "ok"
}
if r.Health != "ok" {
r.State = "unhealthy"
} else if r.IsNoMatch() {
r.State = "nomatch"
}
}
// ToAPI returns ApiGroup representation of g
func (g *Group) ToAPI() *ApiGroup {
g.mu.RLock()
@@ -267,7 +252,6 @@ func (g *Group) ToAPI() *ApiGroup {
Headers: headersToStrings(g.Headers),
NotifierHeaders: headersToStrings(g.NotifierHeaders),
Labels: g.Labels,
States: make(map[string]int),
}
if g.EvalOffset != nil {
ag.EvalOffset = g.EvalOffset.Seconds()
@@ -275,10 +259,9 @@ func (g *Group) ToAPI() *ApiGroup {
if g.EvalDelay != nil {
ag.EvalDelay = g.EvalDelay.Seconds()
}
ag.Rules = make([]ApiRule, 0, len(g.Rules))
ag.Rules = make([]ApiRule, 0)
for _, r := range g.Rules {
ar := r.ToAPI()
ag.Rules = append(ag.Rules, ar)
ag.Rules = append(ag.Rules, r.ToAPI())
}
return &ag
}


@@ -11,7 +11,7 @@
<path d="M224.163 175.27a1.9 1.9 0 0 0 2.8 0l6-5.9a2.1 2.1 0 0 0 .2-2.7 1.9 1.9 0 0 0-3-.2l-2.6 2.6v-5.2c0-1.54-1.667-2.502-3-1.732-.619.357-1 1.017-1 1.732v5.2l-2.6-2.6a1.9 1.9 0 0 0-3 .2 2.1 2.1 0 0 0 .2 2.7zm-16.459-23.297h36c1.54 0 2.502-1.667 1.732-3a2 2 0 0 0-1.732-1h-36c-1.54 0-2.502 1.667-1.732 3 .357.619 1.017 1 1.732 1m36 4h-36c-1.54 0-2.502 1.667-1.732 3 .357.619 1.017 1 1.732 1h36c1.54 0 2.502-1.667 1.732-3a2 2 0 0 0-1.732-1m-16.59-23.517a1.9 1.9 0 0 0-2.8 0l-6 5.9a2.1 2.1 0 0 0-.2 2.7 1.9 1.9 0 0 0 3 .2l2.6-2.6v5.2c0 1.54 1.667 2.502 3 1.732.619-.357 1-1.017 1-1.732v-5.2l2.6 2.6a1.9 1.9 0 0 0 3-.2 2.1 2.1 0 0 0-.2-2.7z"/>
</symbol>
<symbol id="state" viewBox="-10 -10 320 310">
<symbol id="filter" viewBox="-10 -10 320 310">
<path d="M288.953 0h-277c-5.522 0-10 4.478-10 10v49.531c0 5.522 4.478 10 10 10h12.372l91.378 107.397v113.978a10 10 0 0 0 15.547 8.32l49.5-33a10 10 0 0 0 4.453-8.32v-80.978l91.378-107.397h12.372c5.522 0 10-4.478 10-10V10c0-5.522-4.477-10-10-10M167.587 166.77a10 10 0 0 0-2.384 6.48v79.305l-29.5 19.666V173.25a10 10 0 0 0-2.384-6.48L50.585 69.531h199.736zM278.953 49.531h-257V20h257z"/>
</symbol>



@@ -8,9 +8,9 @@ function actionAll(isCollapse) {
});
}
function groupForState(key) {
function groupFilter(key) {
if (key) {
location.href = `?state=${key}`;
location.href = `?filter=${key}`;
} else {
window.location = window.location.pathname;
}


@@ -45,7 +45,7 @@ func (eg *ErrGroup) Error() string {
return ""
}
var b strings.Builder
fmt.Fprintf(&b, "errors(%d): \n", len(eg.errs))
fmt.Fprintf(&b, "errors(%d): ", len(eg.errs))
for i, err := range eg.errs {
b.WriteString(err.Error())
if i != len(eg.errs)-1 {


@@ -30,8 +30,8 @@ func TestErrGroup(t *testing.T) {
}
f(nil, "")
f([]error{errors.New("timeout")}, "errors(1): \ntimeout")
f([]error{errors.New("timeout"), errors.New("deadline")}, "errors(2): \ntimeout\ndeadline")
f([]error{errors.New("timeout")}, "errors(1): timeout")
f([]error{errors.New("timeout"), errors.New("deadline")}, "errors(2): timeout\ndeadline")
}
// TestErrGroupConcurrent supposed to test concurrent
@@ -42,7 +42,7 @@ func TestErrGroupConcurrent(_ *testing.T) {
const writersN = 4
payload := make(chan error, writersN)
for range writersN {
for i := 0; i < writersN; i++ {
go func() {
for err := range payload {
eg.Add(err)
@@ -51,7 +51,7 @@ func TestErrGroupConcurrent(_ *testing.T) {
}
const iterations = 500
for i := range iterations {
for i := 0; i < iterations; i++ {
payload <- fmt.Errorf("error %d", i)
if i%10 == 0 {
_ = eg.Err()


@@ -1,11 +1,9 @@
package main
import (
"cmp"
"embed"
"encoding/json"
"fmt"
"math"
"net/http"
"slices"
"strconv"
@@ -52,7 +50,6 @@ var (
"alert": rule.TypeAlerting,
"record": rule.TypeRecording,
}
ruleStates = []string{"ok", "nomatch", "inactive", "firing", "pending", "unhealthy"}
)
type requestHandler struct {
@@ -66,14 +63,6 @@ var (
staticServer = http.StripPrefix("/vmalert", staticHandler)
)
func marshalJson(v any, kind string) ([]byte, *httpserver.ErrorWithStatusCode) {
data, err := json.Marshal(v)
if err != nil {
return nil, errResponse(fmt.Errorf("failed to marshal %s: %s", kind, err), http.StatusInternalServerError)
}
return data, nil
}
func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
if strings.HasPrefix(r.URL.Path, "/vmalert/static") {
staticServer.ServeHTTP(w, r)
@@ -105,32 +94,40 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
httpserver.Errorf(w, r, "%s", err)
return true
}
WriteRule(w, r, rule)
WriteRuleDetails(w, r, rule)
return true
// currently used by the old vmalert UI and Grafana Alerts
case "/vmalert/groups", "/rules":
case "/vmalert/groups":
rf, err := newRulesFilter(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
// only support filtering by a single state
state := ""
if len(rf.states) > 0 {
state = rf.states[0]
rf.states = rf.states[:1]
}
lr := rh.groups(rf)
WriteListGroups(w, r, lr.Data.Groups, state)
data := rh.groups(rf)
WriteListGroups(w, r, data, rf.filter)
return true
case "/vmalert/notifiers":
WriteListTargets(w, r, notifier.GetTargets())
return true
// special cases for Grafana requests,
// served without `vmalert` prefix:
case "/rules":
// Grafana makes an extra request to the `/rules`
// handler in addition to its `/api/v1/rules` calls in the alerts UI
var data []*rule.ApiGroup
rf, err := newRulesFilter(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
}
data = rh.groups(rf)
WriteListGroups(w, r, data, rf.filter)
return true
case "/vmalert/api/v1/notifiers", "/api/v1/notifiers":
data, err := rh.listNotifiers()
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -138,14 +135,15 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
return true
case "/vmalert/api/v1/rules", "/api/v1/rules":
// path used by Grafana for ng alerting
var data []byte
rf, err := newRulesFilter(r)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
data, err := rh.listGroups(rf)
data, err = rh.listGroups(rf)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -154,14 +152,14 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/vmalert/api/v1/alerts", "/api/v1/alerts":
// path used by Grafana for ng alerting
gf, err := newGroupsFilter(r)
rf, err := newRulesFilter(r)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
data, err := rh.listAlerts(gf)
data, err := rh.listAlerts(rf)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -170,12 +168,12 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/vmalert/api/v1/alert", "/api/v1/alert":
alert, err := rh.getAlert(r)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
data, err := marshalJson(alert, "alert")
data, err := json.Marshal(alert)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "failed to marshal alert: %s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -184,16 +182,16 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/vmalert/api/v1/rule", "/api/v1/rule":
apiRule, err := rh.getRule(r)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
rwu := rule.ApiRuleWithUpdates{
ApiRule: apiRule,
StateUpdates: apiRule.Updates,
}
data, err := marshalJson(rwu, "rule")
data, err := json.Marshal(rwu)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "failed to marshal rule: %s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -202,12 +200,12 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/vmalert/api/v1/group", "/api/v1/group":
group, err := rh.getGroup(r)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "%s", err)
return true
}
data, err := marshalJson(group, "group")
data, err := json.Marshal(group)
if err != nil {
errJson(w, r, err)
httpserver.Errorf(w, r, "failed to marshal group: %s", err)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -227,10 +225,10 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
}
}
func (rh *requestHandler) getGroup(r *http.Request) (*rule.ApiGroup, *httpserver.ErrorWithStatusCode) {
func (rh *requestHandler) getGroup(r *http.Request) (*rule.ApiGroup, error) {
groupID, err := strconv.ParseUint(r.FormValue(rule.ParamGroupID), 10, 64)
if err != nil {
return nil, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err), http.StatusBadRequest)
return nil, fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err)
}
obj, err := rh.m.groupAPI(groupID)
if err != nil {
@@ -239,14 +237,14 @@ func (rh *requestHandler) getGroup(r *http.Request) (*rule.ApiGroup, *httpserver
return obj, nil
}
func (rh *requestHandler) getRule(r *http.Request) (rule.ApiRule, *httpserver.ErrorWithStatusCode) {
func (rh *requestHandler) getRule(r *http.Request) (rule.ApiRule, error) {
groupID, err := strconv.ParseUint(r.FormValue(rule.ParamGroupID), 10, 64)
if err != nil {
return rule.ApiRule{}, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err), http.StatusBadRequest)
return rule.ApiRule{}, fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err)
}
ruleID, err := strconv.ParseUint(r.FormValue(rule.ParamRuleID), 10, 64)
if err != nil {
return rule.ApiRule{}, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamRuleID, err), http.StatusBadRequest)
return rule.ApiRule{}, fmt.Errorf("failed to read %q param: %w", rule.ParamRuleID, err)
}
obj, err := rh.m.ruleAPI(groupID, ruleID)
if err != nil {
@@ -255,14 +253,14 @@ func (rh *requestHandler) getRule(r *http.Request) (rule.ApiRule, *httpserver.Er
return obj, nil
}
func (rh *requestHandler) getAlert(r *http.Request) (*rule.ApiAlert, *httpserver.ErrorWithStatusCode) {
func (rh *requestHandler) getAlert(r *http.Request) (*rule.ApiAlert, error) {
groupID, err := strconv.ParseUint(r.FormValue(rule.ParamGroupID), 10, 64)
if err != nil {
return nil, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err), http.StatusBadRequest)
return nil, fmt.Errorf("failed to read %q param: %w", rule.ParamGroupID, err)
}
alertID, err := strconv.ParseUint(r.FormValue(rule.ParamAlertID), 10, 64)
if err != nil {
return nil, errResponse(fmt.Errorf("failed to read %q param: %w", rule.ParamAlertID, err), http.StatusBadRequest)
return nil, fmt.Errorf("failed to read %q param: %w", rule.ParamAlertID, err)
}
a, err := rh.m.alertAPI(groupID, alertID)
if err != nil {
@@ -272,76 +270,28 @@ func (rh *requestHandler) getAlert(r *http.Request) (*rule.ApiAlert, *httpserver
}
type listGroupsResponse struct {
Status string `json:"status"`
Page int `json:"page,omitempty"`
TotalPages int `json:"total_pages,omitempty"`
TotalGroups int `json:"total_groups,omitempty"`
TotalRules int `json:"total_rules,omitempty"`
Data struct {
Status string `json:"status"`
Data struct {
Groups []*rule.ApiGroup `json:"groups"`
} `json:"data"`
}
type groupsFilter struct {
groupNames []string
files []string
dsType config.Type
}
func newGroupsFilter(r *http.Request) (*groupsFilter, *httpserver.ErrorWithStatusCode) {
_ = r.ParseForm()
vs := r.Form
gf := &groupsFilter{
groupNames: vs["rule_group[]"],
files: vs["file[]"],
}
dsType := vs.Get("datasource_type")
if len(dsType) > 0 {
if config.SupportedType(dsType) {
gf.dsType = config.NewRawType(dsType)
} else {
return nil, errResponse(fmt.Errorf(`invalid parameter "datasource_type": not supported value %q`, dsType), http.StatusBadRequest)
}
}
return gf, nil
}
func (gf *groupsFilter) matches(group *rule.Group) bool {
if len(gf.groupNames) > 0 && !slices.Contains(gf.groupNames, group.Name) {
return false
}
if len(gf.files) > 0 && !slices.Contains(gf.files, group.File) {
return false
}
if len(gf.dsType.Name) > 0 && gf.dsType.String() != group.Type.String() {
return false
}
return true
}
// see https://prometheus.io/docs/prometheus/latest/querying/api/#rules
type rulesFilter struct {
gf *groupsFilter
ruleNames []string
ruleType string
excludeAlerts bool
states []string
maxGroups int
pageNum int
search string
extendedStates bool
files []string
groupNames []string
ruleNames []string
ruleType string
excludeAlerts bool
filter string
dsType config.Type
}
func newRulesFilter(r *http.Request) (*rulesFilter, *httpserver.ErrorWithStatusCode) {
gf, err := newGroupsFilter(r)
if err != nil {
return nil, err
}
func newRulesFilter(r *http.Request) (*rulesFilter, error) {
rf := &rulesFilter{}
query := r.URL.Query()
var rf rulesFilter
rf.gf = gf
vs := r.Form
ruleTypeParam := vs.Get("type")
ruleTypeParam := query.Get("type")
if len(ruleTypeParam) > 0 {
if ruleType, ok := ruleTypeMap[ruleTypeParam]; ok {
rf.ruleType = ruleType
@@ -350,146 +300,102 @@ func newRulesFilter(r *http.Request) (*rulesFilter, *httpserver.ErrorWithStatusC
}
}
states := vs["state"]
if len(states) == 0 {
states = vs["filter"]
dsType := query.Get("datasource_type")
if len(dsType) > 0 {
if config.SupportedType(dsType) {
rf.dsType = config.NewRawType(dsType)
} else {
return nil, errResponse(fmt.Errorf(`invalid parameter "datasource_type": not supported value %q`, dsType), http.StatusBadRequest)
}
}
for _, s := range states {
values := strings.Split(s, ",")
for _, v := range values {
if len(v) == 0 {
continue
}
if !slices.Contains(ruleStates, v) {
return nil, errResponse(fmt.Errorf(`invalid parameter "state": contains not supported value %q`, v), http.StatusBadRequest)
}
rf.states = append(rf.states, v)
filter := strings.ToLower(query.Get("filter"))
if len(filter) > 0 {
if filter == "nomatch" || filter == "unhealthy" {
rf.filter = filter
} else {
return nil, errResponse(fmt.Errorf(`invalid parameter "filter": not supported value %q`, filter), http.StatusBadRequest)
}
}
rf.excludeAlerts = httputil.GetBool(r, "exclude_alerts")
rf.extendedStates = httputil.GetBool(r, "extended_states")
rf.ruleNames = append([]string{}, vs["rule_name[]"]...)
rf.search = strings.ToLower(vs.Get("search"))
pageNum := vs.Get("page_num")
maxGroups := vs.Get("group_limit")
if pageNum != "" {
if maxGroups == "" {
return nil, errResponse(fmt.Errorf(`"group_limit" needs to be present in order to paginate over the groups`), http.StatusBadRequest)
}
v, err := strconv.Atoi(pageNum)
if err != nil || v <= 0 {
return nil, errResponse(fmt.Errorf(`"page_num" is expected to be a positive number, found %q`, pageNum), http.StatusBadRequest)
}
rf.pageNum = v
}
if maxGroups != "" {
v, err := strconv.Atoi(maxGroups)
if err != nil || v <= 0 {
return nil, errResponse(fmt.Errorf(`"group_limit" is expected to be a positive number, found %q`, maxGroups), http.StatusBadRequest)
}
rf.maxGroups = v
}
return &rf, nil
rf.ruleNames = append([]string{}, r.Form["rule_name[]"]...)
rf.groupNames = append([]string{}, r.Form["rule_group[]"]...)
rf.files = append([]string{}, r.Form["file[]"]...)
return rf, nil
}
func (rf *rulesFilter) matchesRule(r *rule.ApiRule) bool {
if rf.ruleType != "" && rf.ruleType != r.Type {
func (rf *rulesFilter) matchesGroup(group *rule.Group) bool {
if len(rf.groupNames) > 0 && !slices.Contains(rf.groupNames, group.Name) {
return false
}
if len(rf.ruleNames) > 0 && !slices.Contains(rf.ruleNames, r.Name) {
if len(rf.files) > 0 && !slices.Contains(rf.files, group.File) {
return false
}
if len(rf.states) == 0 {
return true
if len(rf.dsType.Name) > 0 && rf.dsType.String() != group.Type.String() {
return false
}
return slices.Contains(rf.states, r.State)
return true
}
func (rh *requestHandler) groups(rf *rulesFilter) *listGroupsResponse {
func (rh *requestHandler) groups(rf *rulesFilter) []*rule.ApiGroup {
rh.m.groupsMu.RLock()
defer rh.m.groupsMu.RUnlock()
skipGroups := (rf.pageNum - 1) * rf.maxGroups
lr := &listGroupsResponse{
Status: "success",
}
lr.Data.Groups = make([]*rule.ApiGroup, 0)
if skipGroups >= len(rh.m.groups) {
return lr
}
// sort list of groups for deterministic output
groups := make([]*rule.Group, 0, len(rh.m.groups))
groups := make([]*rule.ApiGroup, 0)
for _, group := range rh.m.groups {
groups = append(groups, group)
}
slices.SortFunc(groups, func(a, b *rule.Group) int {
nameCmp := cmp.Compare(a.Name, b.Name)
if nameCmp != 0 {
return nameCmp
}
return cmp.Compare(a.File, b.File)
})
for _, group := range groups {
if !rf.gf.matches(group) {
if !rf.matchesGroup(group) {
continue
}
groupFound := len(rf.search) == 0 || strings.Contains(strings.ToLower(group.Name), rf.search) || strings.Contains(strings.ToLower(group.File), rf.search)
g := group.ToAPI()
// the returned list should always be non-nil
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4221
filteredRules := make([]rule.ApiRule, 0)
for _, rule := range g.Rules {
if !groupFound && !strings.Contains(strings.ToLower(rule.Name), rf.search) {
if rf.ruleType != "" && rf.ruleType != rule.Type {
continue
}
if rf.extendedStates {
rule.ExtendState()
if len(rf.ruleNames) > 0 && !slices.Contains(rf.ruleNames, rule.Name) {
continue
}
if !rf.matchesRule(&rule) {
if (rule.LastError == "" && rf.filter == "unhealthy") || (!isNoMatch(rule) && rf.filter == "nomatch") {
continue
}
if rf.excludeAlerts {
rule.Alerts = nil
}
g.States[rule.State]++
if rule.LastError != "" {
g.Unhealthy++
} else {
g.Healthy++
}
if isNoMatch(rule) {
g.NoMatch++
}
filteredRules = append(filteredRules, rule)
}
if len(g.Rules) == 0 || len(filteredRules) > 0 {
if rf.maxGroups > 0 {
lr.TotalGroups++
lr.TotalRules += len(filteredRules)
}
if skipGroups > 0 {
skipGroups--
continue
}
if rf.maxGroups == 0 || len(lr.Data.Groups) < rf.maxGroups {
g.Rules = filteredRules
lr.Data.Groups = append(lr.Data.Groups, g)
}
g.Rules = filteredRules
groups = append(groups, g)
}
// sort list of groups for deterministic output
slices.SortFunc(groups, func(a, b *rule.ApiGroup) int {
if a.Name != b.Name {
return strings.Compare(a.Name, b.Name)
}
}
if rf.maxGroups > 0 {
lr.Page = rf.pageNum
lr.TotalPages = max(int(math.Ceil(float64(lr.TotalGroups)/float64(rf.maxGroups))), 1)
}
return lr
return strings.Compare(a.File, b.File)
})
return groups
}
func (rh *requestHandler) listGroups(rf *rulesFilter) ([]byte, *httpserver.ErrorWithStatusCode) {
lr := rh.groups(rf)
if rf.pageNum > 1 && len(lr.Data.Groups) == 0 {
return nil, errResponse(fmt.Errorf(`page_num exceeds total amount of pages`), http.StatusBadRequest)
}
if lr.Page > lr.TotalPages {
return nil, errResponse(fmt.Errorf(`page_num=%d exceeds total amount of pages in result=%d`, lr.Page, lr.TotalPages), http.StatusBadRequest)
}
func (rh *requestHandler) listGroups(rf *rulesFilter) ([]byte, error) {
lr := listGroupsResponse{Status: "success"}
lr.Data.Groups = rh.groups(rf)
b, err := json.Marshal(lr)
if err != nil {
return nil, errResponse(fmt.Errorf(`error encoding list of groups: %w`, err), http.StatusInternalServerError)
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
StatusCode: http.StatusInternalServerError,
}
}
return b, nil
}
@@ -528,14 +434,14 @@ func (rh *requestHandler) groupAlerts() []rule.GroupAlerts {
return gAlerts
}
func (rh *requestHandler) listAlerts(gf *groupsFilter) ([]byte, *httpserver.ErrorWithStatusCode) {
func (rh *requestHandler) listAlerts(rf *rulesFilter) ([]byte, error) {
rh.m.groupsMu.RLock()
defer rh.m.groupsMu.RUnlock()
lr := listAlertsResponse{Status: "success"}
lr.Data.Alerts = make([]*rule.ApiAlert, 0)
for _, group := range rh.m.groups {
if !gf.matches(group) {
if !rf.matchesGroup(group) {
continue
}
g := group.ToAPI()
@@ -554,7 +460,10 @@ func (rh *requestHandler) listAlerts(gf *groupsFilter) ([]byte, *httpserver.Erro
b, err := json.Marshal(lr)
if err != nil {
return nil, errResponse(fmt.Errorf(`error encoding list of active alerts: %w`, err), http.StatusInternalServerError)
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
StatusCode: http.StatusInternalServerError,
}
}
return b, nil
}
@@ -566,7 +475,7 @@ type listNotifiersResponse struct {
} `json:"data"`
}
func (rh *requestHandler) listNotifiers() ([]byte, *httpserver.ErrorWithStatusCode) {
func (rh *requestHandler) listNotifiers() ([]byte, error) {
targets := notifier.GetTargets()
lr := listNotifiersResponse{Status: "success"}
@@ -588,7 +497,10 @@ func (rh *requestHandler) listNotifiers() ([]byte, *httpserver.ErrorWithStatusCo
b, err := json.Marshal(lr)
if err != nil {
return nil, errResponse(fmt.Errorf(`error encoding list of notifiers: %w`, err), http.StatusInternalServerError)
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of notifiers: %w`, err),
StatusCode: http.StatusInternalServerError,
}
}
return b, nil
}
@@ -599,8 +511,3 @@ func errResponse(err error, sc int) *httpserver.ErrorWithStatusCode {
StatusCode: sc,
}
}
func errJson(w http.ResponseWriter, r *http.Request, err *httpserver.ErrorWithStatusCode) {
w.Header().Set("Content-Type", "application/json")
httpserver.Errorf(w, r, `{"error":%q,"errorType":%d}`, err, err.StatusCode)
}
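As a quick reference for the reworked query handling, here is a self-contained sketch of the validation rule introduced above (parseFilter is a hypothetical stand-in, not the vmalert API): the filter parameter accepts only nomatch and unhealthy, and anything else is rejected with a 400-level error.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseFilter mirrors the "filter" validation in newRulesFilter above:
// an empty value means "no filtering"; only "nomatch" and "unhealthy"
// are accepted, any other value is an error.
func parseFilter(rawQuery string) (string, error) {
	q, err := url.ParseQuery(rawQuery)
	if err != nil {
		return "", err
	}
	filter := strings.ToLower(q.Get("filter"))
	switch filter {
	case "", "nomatch", "unhealthy":
		return filter, nil
	default:
		return "", fmt.Errorf(`invalid parameter "filter": not supported value %q`, filter)
	}
}

func main() {
	for _, q := range []string{"filter=unhealthy", "filter=NoMatch", "filter=firing"} {
		f, err := parseFilter(q)
		fmt.Printf("%-16s -> %q, err=%v\n", q, f, err)
	}
}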


@@ -12,7 +12,7 @@
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
) %}
{% func Controls(prefix, currentIcon, currentText string, icons, states map[string]string, search bool) %}
{% func Controls(prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) %}
<div class="btn-toolbar mb-3" role="toolbar">
<div class="d-flex gap-2 justify-content-between w-100">
<div class="d-flex gap-2 align-items-center">
@@ -28,10 +28,10 @@
<use href="{%s prefix %}static/icons/icons.svg#expand"/>
</svg>
</a>
{% if len(states) > 0 %}
{% if len(filters) > 0 %}
<span class="d-none d-md-inline-block">Filter by status:</span>
<svg class="d-md-none" width="20" height="20">
<use href="{%s prefix %}static/icons/icons.svg#state">
<use href="{%s prefix %}static/icons/icons.svg#filter">
</svg>
<div class="dropdown">
<button
@@ -46,10 +46,10 @@
</svg>
</button>
<ul class="dropdown-menu">
{% for key, title := range states %}
{% for key, title := range filters %}
{% if title != currentText %}
<li>
<a class="dropdown-item" onclick="groupForState('{%s key %}')">
<a class="dropdown-item" onclick="groupFilter('{%s key %}')">
<span class="d-none d-md-inline-block">{%s title %}</span>
<svg class="d-md-none" width="22" height="22">
<use href="{%s prefix %}static/icons/icons.svg#{%s icons[key] %}"/>
@@ -97,10 +97,10 @@
{%= tpl.Footer(r) %}
{% endfunc %}
{% func ListGroups(r *http.Request, groups []*rule.ApiGroup, state string) %}
{% func ListGroups(r *http.Request, groups []*rule.ApiGroup, filter string) %}
{%code
prefix := vmalertutil.Prefix(r.URL.Path)
states := map[string]string{
filters := map[string]string{
"": "All",
"unhealthy": "Unhealthy",
"nomatch": "No Match",
@@ -110,14 +110,14 @@
"unhealthy": "unhealthy",
"nomatch": "nomatch",
}
currentText := states[state]
currentIcon := icons[state]
currentText := filters[filter]
currentIcon := icons[filter]
%}
{%= tpl.Header(r, navItems, "Groups", getLastConfigError()) %}
{%= Controls(prefix, currentIcon, currentText, icons, states, true) %}
{%= Controls(prefix, currentIcon, currentText, icons, filters, true) %}
{% if len(groups) > 0 %}
{% for _, g := range groups %}
<div id="group-{%s g.ID %}" class="w-100 border-0 flex-column vm-group{% if g.States["unhealthy"] > 0 %} alert-danger{% endif %}">
<div id="group-{%s g.ID %}" class="w-100 border-0 flex-column vm-group{% if g.Unhealthy > 0 %} alert-danger{% endif %}">
<span class="d-flex justify-content-between">
<a
class="vm-group-search"
@@ -130,9 +130,9 @@
data-bs-target="#item-{%s g.ID %}"
>
<span class="d-flex gap-2">
{% if g.States["unhealthy"] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d g.States["unhealthy"] %}</span> {% endif %}
{% if g.States["nomatch"] > 0 %}<span class="badge bg-warning" title="Number of rules with status NoMatch">{%d g.States["nomatch"] %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules with status Ok">{%d g.States["ok"] %}</span>
{% if g.Unhealthy > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d g.Unhealthy %}</span> {% endif %}
{% if g.NoMatch > 0 %}<span class="badge bg-warning" title="Number of rules with status NoMatch">{%d g.NoMatch %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules with status Ok">{%d g.Healthy %}</span>
</span>
</span>
</span>
@@ -189,7 +189,7 @@
<b>record:</b> {%s r.Name %}
{% endif %}
|
{%= seriesFetchedWarn(prefix, &r) %}
{%= seriesFetchedWarn(prefix, r) %}
<span><a target="_blank" href="{%s prefix+r.WebLink() %}">Details</a></span>
</div>
<div class="col-12">
@@ -476,7 +476,7 @@
{% endfunc %}
{% func Rule(r *http.Request, rule rule.ApiRule) %}
{% func RuleDetails(r *http.Request, rule rule.ApiRule) %}
{%code prefix := vmalertutil.Prefix(r.URL.Path) %}
{%= tpl.Header(r, navItems, "", getLastConfigError()) %}
{%code
@@ -661,8 +661,8 @@
<span class="badge bg-warning text-dark" title="This firing state is kept because of `keep_firing_for`">stabilizing</span>
{% endfunc %}
{% func seriesFetchedWarn(prefix string, r *rule.ApiRule) %}
{% if r.IsNoMatch() %}
{% func seriesFetchedWarn(prefix string, r rule.ApiRule) %}
{% if isNoMatch(r) %}
<svg
data-bs-toggle="tooltip"
title="No match! This rule's last evaluation hasn't selected any time series from the datasource.
@@ -673,3 +673,9 @@
</svg>
{% endif %}
{% endfunc %}
{%code
func isNoMatch(r rule.ApiRule) bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}
%}
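For readers following the template changes, this standalone sketch shows how the per-group badge counters relate to rule state after this change (the apiRule struct below is a trimmed, assumed stand-in for rule.ApiRule):

package main

import "fmt"

// Each rule increments exactly one of Healthy/Unhealthy depending on its
// LastError, and additionally counts as NoMatch when its last evaluation
// produced no samples and fetched zero series (see isNoMatch above).
type apiRule struct {
	LastError         string
	LastSamples       int
	LastSeriesFetched *int
}

type groupCounters struct {
	Healthy, Unhealthy, NoMatch int
}

func countRules(rules []apiRule) (c groupCounters) {
	for _, r := range rules {
		if r.LastError != "" {
			c.Unhealthy++
		} else {
			c.Healthy++
		}
		if r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0 {
			c.NoMatch++
		}
	}
	return c
}

func main() {
	zero := 0
	rules := []apiRule{
		{LastError: "exec error"},                  // unhealthy
		{LastSamples: 0, LastSeriesFetched: &zero}, // healthy, but no match
	}
	fmt.Printf("%+v\n", countRules(rules)) // {Healthy:1 Unhealthy:1 NoMatch:1}
}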


@@ -31,7 +31,7 @@ var (
)
//line app/vmalert/web.qtpl:15
func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText string, icons, states map[string]string, search bool) {
func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) {
//line app/vmalert/web.qtpl:15
qw422016.N().S(`
<div class="btn-toolbar mb-3" role="toolbar">
@@ -59,7 +59,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
</a>
`)
//line app/vmalert/web.qtpl:31
if len(states) > 0 {
if len(filters) > 0 {
//line app/vmalert/web.qtpl:31
qw422016.N().S(`
<span class="d-none d-md-inline-block">Filter by status:</span>
@@ -68,7 +68,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
//line app/vmalert/web.qtpl:34
qw422016.E().S(prefix)
//line app/vmalert/web.qtpl:34
qw422016.N().S(`static/icons/icons.svg#state">
qw422016.N().S(`static/icons/icons.svg#filter">
</svg>
<div class="dropdown">
<button
@@ -97,7 +97,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
<ul class="dropdown-menu">
`)
//line app/vmalert/web.qtpl:49
for key, title := range states {
for key, title := range filters {
//line app/vmalert/web.qtpl:49
qw422016.N().S(`
`)
@@ -106,7 +106,7 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
//line app/vmalert/web.qtpl:50
qw422016.N().S(`
<li>
<a class="dropdown-item" onclick="groupForState('`)
<a class="dropdown-item" onclick="groupFilter('`)
//line app/vmalert/web.qtpl:52
qw422016.E().S(key)
//line app/vmalert/web.qtpl:52
@@ -176,22 +176,22 @@ func StreamControls(qw422016 *qt422016.Writer, prefix, currentIcon, currentText
}
//line app/vmalert/web.qtpl:77
func WriteControls(qq422016 qtio422016.Writer, prefix, currentIcon, currentText string, icons, states map[string]string, search bool) {
func WriteControls(qq422016 qtio422016.Writer, prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) {
//line app/vmalert/web.qtpl:77
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:77
StreamControls(qw422016, prefix, currentIcon, currentText, icons, states, search)
StreamControls(qw422016, prefix, currentIcon, currentText, icons, filters, search)
//line app/vmalert/web.qtpl:77
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:77
}
//line app/vmalert/web.qtpl:77
func Controls(prefix, currentIcon, currentText string, icons, states map[string]string, search bool) string {
func Controls(prefix, currentIcon, currentText string, icons, filters map[string]string, search bool) string {
//line app/vmalert/web.qtpl:77
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:77
WriteControls(qb422016, prefix, currentIcon, currentText, icons, states, search)
WriteControls(qb422016, prefix, currentIcon, currentText, icons, filters, search)
//line app/vmalert/web.qtpl:77
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:77
@@ -324,13 +324,13 @@ func Welcome(r *http.Request) string {
}
//line app/vmalert/web.qtpl:100
func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule.ApiGroup, state string) {
func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule.ApiGroup, filter string) {
//line app/vmalert/web.qtpl:100
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:102
prefix := vmalertutil.Prefix(r.URL.Path)
states := map[string]string{
filters := map[string]string{
"": "All",
"unhealthy": "Unhealthy",
"nomatch": "No Match",
@@ -340,8 +340,8 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
"unhealthy": "unhealthy",
"nomatch": "nomatch",
}
currentText := states[state]
currentIcon := icons[state]
currentText := filters[filter]
currentIcon := icons[filter]
//line app/vmalert/web.qtpl:115
qw422016.N().S(`
@@ -352,7 +352,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:117
StreamControls(qw422016, prefix, currentIcon, currentText, icons, states, true)
StreamControls(qw422016, prefix, currentIcon, currentText, icons, filters, true)
//line app/vmalert/web.qtpl:117
qw422016.N().S(`
`)
@@ -371,7 +371,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
//line app/vmalert/web.qtpl:120
qw422016.N().S(`" class="w-100 border-0 flex-column vm-group`)
//line app/vmalert/web.qtpl:120
if g.States["unhealthy"] > 0 {
if g.Unhealthy > 0 {
//line app/vmalert/web.qtpl:120
qw422016.N().S(` alert-danger`)
//line app/vmalert/web.qtpl:120
@@ -418,11 +418,11 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
<span class="d-flex gap-2">
`)
//line app/vmalert/web.qtpl:133
if g.States["unhealthy"] > 0 {
if g.Unhealthy > 0 {
//line app/vmalert/web.qtpl:133
qw422016.N().S(`<span class="badge bg-danger" title="Number of rules with status Error">`)
//line app/vmalert/web.qtpl:133
qw422016.N().D(g.States["unhealthy"])
qw422016.N().D(g.Unhealthy)
//line app/vmalert/web.qtpl:133
qw422016.N().S(`</span> `)
//line app/vmalert/web.qtpl:133
@@ -431,11 +431,11 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:134
if g.States["nomatch"] > 0 {
if g.NoMatch > 0 {
//line app/vmalert/web.qtpl:134
qw422016.N().S(`<span class="badge bg-warning" title="Number of rules with status NoMatch">`)
//line app/vmalert/web.qtpl:134
qw422016.N().D(g.States["nomatch"])
qw422016.N().D(g.NoMatch)
//line app/vmalert/web.qtpl:134
qw422016.N().S(`</span> `)
//line app/vmalert/web.qtpl:134
@@ -444,7 +444,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
qw422016.N().S(`
<span class="badge bg-success" title="Number of rules with status Ok">`)
//line app/vmalert/web.qtpl:135
qw422016.N().D(g.States["ok"])
qw422016.N().D(g.Healthy)
//line app/vmalert/web.qtpl:135
qw422016.N().S(`</span>
</span>
@@ -617,7 +617,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
|
`)
//line app/vmalert/web.qtpl:192
streamseriesFetchedWarn(qw422016, prefix, &r)
streamseriesFetchedWarn(qw422016, prefix, r)
//line app/vmalert/web.qtpl:192
qw422016.N().S(`
<span><a target="_blank" href="`)
@@ -750,22 +750,22 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*rule
}
//line app/vmalert/web.qtpl:234
func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []*rule.ApiGroup, state string) {
func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []*rule.ApiGroup, filter string) {
//line app/vmalert/web.qtpl:234
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:234
StreamListGroups(qw422016, r, groups, state)
StreamListGroups(qw422016, r, groups, filter)
//line app/vmalert/web.qtpl:234
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:234
}
//line app/vmalert/web.qtpl:234
func ListGroups(r *http.Request, groups []*rule.ApiGroup, state string) string {
func ListGroups(r *http.Request, groups []*rule.ApiGroup, filter string) string {
//line app/vmalert/web.qtpl:234
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:234
WriteListGroups(qb422016, r, groups, state)
WriteListGroups(qb422016, r, groups, filter)
//line app/vmalert/web.qtpl:234
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:234
@@ -1462,7 +1462,7 @@ func Alert(r *http.Request, alert *rule.ApiAlert) string {
}
//line app/vmalert/web.qtpl:479
func StreamRule(qw422016 *qt422016.Writer, r *http.Request, rule rule.ApiRule) {
func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule rule.ApiRule) {
//line app/vmalert/web.qtpl:479
qw422016.N().S(`
`)
@@ -1859,22 +1859,22 @@ func StreamRule(qw422016 *qt422016.Writer, r *http.Request, rule rule.ApiRule) {
}
//line app/vmalert/web.qtpl:642
func WriteRule(qq422016 qtio422016.Writer, r *http.Request, rule rule.ApiRule) {
func WriteRuleDetails(qq422016 qtio422016.Writer, r *http.Request, rule rule.ApiRule) {
//line app/vmalert/web.qtpl:642
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:642
StreamRule(qw422016, r, rule)
StreamRuleDetails(qw422016, r, rule)
//line app/vmalert/web.qtpl:642
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:642
}
//line app/vmalert/web.qtpl:642
func Rule(r *http.Request, rule rule.ApiRule) string {
func RuleDetails(r *http.Request, rule rule.ApiRule) string {
//line app/vmalert/web.qtpl:642
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:642
WriteRule(qb422016, r, rule)
WriteRuleDetails(qb422016, r, rule)
//line app/vmalert/web.qtpl:642
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:642
@@ -2015,12 +2015,12 @@ func badgeStabilizing() string {
}
//line app/vmalert/web.qtpl:664
func streamseriesFetchedWarn(qw422016 *qt422016.Writer, prefix string, r *rule.ApiRule) {
func streamseriesFetchedWarn(qw422016 *qt422016.Writer, prefix string, r rule.ApiRule) {
//line app/vmalert/web.qtpl:664
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:665
if r.IsNoMatch() {
if isNoMatch(r) {
//line app/vmalert/web.qtpl:665
qw422016.N().S(`
<svg
@@ -2045,7 +2045,7 @@ func streamseriesFetchedWarn(qw422016 *qt422016.Writer, prefix string, r *rule.A
}
//line app/vmalert/web.qtpl:675
func writeseriesFetchedWarn(qq422016 qtio422016.Writer, prefix string, r *rule.ApiRule) {
func writeseriesFetchedWarn(qq422016 qtio422016.Writer, prefix string, r rule.ApiRule) {
//line app/vmalert/web.qtpl:675
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:675
@@ -2056,7 +2056,7 @@ func writeseriesFetchedWarn(qq422016 qtio422016.Writer, prefix string, r *rule.A
}
//line app/vmalert/web.qtpl:675
func seriesFetchedWarn(prefix string, r *rule.ApiRule) string {
func seriesFetchedWarn(prefix string, r rule.ApiRule) string {
//line app/vmalert/web.qtpl:675
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:675
@@ -2069,3 +2069,8 @@ func seriesFetchedWarn(prefix string, r *rule.ApiRule) string {
return qs422016
//line app/vmalert/web.qtpl:675
}
//line app/vmalert/web.qtpl:678
func isNoMatch(r rule.ApiRule) bool {
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
}


@@ -210,7 +210,7 @@ func TestHandler(t *testing.T) {
}
})
t.Run("/api/v1/rules&states", func(t *testing.T) {
t.Run("/api/v1/rules&filters", func(t *testing.T) {
check := func(url string, statusCode, expGroups, expRules int) {
t.Helper()
lr := listGroupsResponse{}
@@ -252,15 +252,9 @@ func TestHandler(t *testing.T) {
check("/api/v1/rules?rule_group[]=group&file[]=foo", 200, 0, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml", 200, 3, 6)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 200, 0, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 200, 3, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert", 200, 3, 3)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert&rule_name[]=record", 200, 3, 6)
check("/api/v1/rules?group_limit=1", 200, 1, 2)
check("/api/v1/rules?group_limit=1&type=alert", 200, 1, 1)
check("/api/v1/rules?group_limit=1&type=record", 200, 1, 1)
check("/api/v1/rules?group_limit=2", 200, 2, 4)
check(fmt.Sprintf("/api/v1/rules?group_limit=1&page_num=%d", 1), 200, 1, 2)
})
t.Run("/api/v1/rules&exclude_alerts=true", func(t *testing.T) {
// check if response returns active alerts by default
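For reference, a sketch of the envelope these tests decode after the change — the pagination fields (page, total_pages, total_groups, total_rules) are gone, leaving the Prometheus-compatible shape (raw JSON stands in for the real []*rule.ApiGroup to stay self-contained):

package main

import (
	"encoding/json"
	"fmt"
)

type listGroupsResponse struct {
	Status string `json:"status"`
	Data   struct {
		Groups []json.RawMessage `json:"groups"` // []*rule.ApiGroup in vmalert
	} `json:"data"`
}

func main() {
	raw := `{"status":"success","data":{"groups":[{"name":"group"}]}}`
	var lr listGroupsResponse
	if err := json.Unmarshal([]byte(raw), &lr); err != nil {
		panic(err)
	}
	fmt.Println(lr.Status, len(lr.Data.Groups)) // success 1
}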


@@ -13,7 +13,6 @@ import (
"net/url"
"os"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@@ -29,7 +28,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fscore"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
@@ -67,11 +65,10 @@ type AuthConfig struct {
type UserInfo struct {
Name string `yaml:"name,omitempty"`
BearerToken string `yaml:"bearer_token,omitempty"`
JWT *JWTConfig `yaml:"jwt,omitempty"`
AuthToken string `yaml:"auth_token,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`
BearerToken string `yaml:"bearer_token,omitempty"`
AuthToken string `yaml:"auth_token,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
DiscoverBackendIPs *bool `yaml:"discover_backend_ips,omitempty"`
@@ -92,8 +89,6 @@ type UserInfo struct {
MetricLabels map[string]string `yaml:"metric_labels,omitempty"`
AccessLog *AccessLog `yaml:"access_log,omitempty"`
concurrencyLimitCh chan struct{}
concurrencyLimitReached *metrics.Counter
@@ -106,40 +101,11 @@ type UserInfo struct {
requestsDuration *metrics.Summary
}
// AccessLog represents configuration for access log settings.
type AccessLog struct {
Filters *AccessLogFilters `yaml:"filters"`
}
// AccessLogFilters represents the list of filters for access log printing
type AccessLogFilters struct {
// SkipStatusCodes is a list of HTTP status codes for which access logs will be skipped
SkipStatusCodes []int `yaml:"skip_status_codes"`
}
func (ui *UserInfo) logRequest(r *http.Request, userName string, statusCode int, duration time.Duration) {
if ui.AccessLog == nil {
return
}
filters := ui.AccessLog.Filters
if filters != nil && len(filters.SkipStatusCodes) > 0 {
if slices.Contains(filters.SkipStatusCodes, statusCode) {
return
}
}
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Infof("access_log request_host=%q request_uri=%q status_code=%d remote_addr=%s user_agent=%q referer=%q duration_ms=%d username=%q",
r.Host, requestURI, statusCode, remoteAddr, r.UserAgent(), r.Referer(), duration.Milliseconds(), userName)
}
// HeadersConf represents config for request and response headers.
type HeadersConf struct {
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
hasAnyPlaceHolders bool
RequestHeaders []*Header `yaml:"headers,omitempty"`
ResponseHeaders []*Header `yaml:"response_headers,omitempty"`
KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
}
func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
@@ -147,7 +113,7 @@ func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
case ui.concurrencyLimitCh <- struct{}{}:
return nil
default:
// The number of concurrently executed requests for the given user equals the limit.
// The number of concurrently executed requests for the given user equals the limt.
// Wait until some of the currently executed requests are finished, so the current request could be executed.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
select {
@@ -382,7 +348,6 @@ func (bus *backendURLs) add(u *url.URL) {
url: u,
healthCheckContext: bus.healthChecksContext,
healthCheckWG: &bus.healthChecksWG,
hasPlaceHolders: hasAnyPlaceholders(u),
})
}
@@ -400,8 +365,6 @@ type backendURL struct {
concurrentRequests atomic.Int32
url *url.URL
hasPlaceHolders bool
}
func (bu *backendURL) isBroken() bool {
@@ -625,7 +588,7 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
// Slow path - select other backend urls.
n := atomicCounter.Add(1) - 1
for i := range uint32(len(bus)) {
for i := uint32(0); i < uint32(len(bus)); i++ {
idx := (n + i) % uint32(len(bus))
bu := bus[idx]
if bu.isBroken() {
@@ -635,7 +598,7 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
// The Load() in front of CompareAndSwap() avoids CAS overhead for items with values bigger than 0.
if bu.concurrentRequests.Load() == 0 && bu.concurrentRequests.CompareAndSwap(0, 1) {
atomicCounter.CompareAndSwap(n+1, idx+1)
// There is no need in the call bu.get(), because we already incremented bu.concurrentRequests above.
// There is no need in the call bu.get(), because we already incremented bu.concrrentRequests above.
return bu
}
}
@@ -836,9 +799,6 @@ var (
// authUsers contains the currently loaded auth users
authUsers atomic.Pointer[map[string]*UserInfo]
// jwt authentication cache
jwtAuthCache atomic.Pointer[jwtCache]
authConfigWG sync.WaitGroup
stopCh chan struct{}
)
@@ -878,16 +838,6 @@ func reloadAuthConfigData(data []byte) (bool, error) {
return false, fmt.Errorf("failed to parse auth config: %w", err)
}
jui, oidcDP, err := parseJWTUsers(ac)
if err != nil {
return false, fmt.Errorf("failed to parse JWT users from auth config: %w", err)
}
oidcDP.startDiscovery()
jwtc := &jwtCache{
users: jui,
oidcDP: oidcDP,
}
m, err := parseAuthConfigUsers(ac)
if err != nil {
return false, fmt.Errorf("failed to parse users from auth config: %w", err)
@@ -904,15 +854,9 @@ func reloadAuthConfigData(data []byte) (bool, error) {
}
metrics.RegisterSet(ac.ms)
jwtcPrev := jwtAuthCache.Load()
if jwtcPrev != nil {
jwtcPrev.oidcDP.stopDiscovery()
}
authConfig.Store(ac)
authConfigData.Store(&data)
authUsers.Store(&m)
jwtAuthCache.Store(jwtc)
return true, nil
}
@@ -937,18 +881,12 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
if ui.BearerToken != "" {
return nil, fmt.Errorf("field bearer_token can't be specified for unauthorized_user section")
}
if ui.JWT != nil {
return nil, fmt.Errorf("field jwt can't be specified for unauthorized_user section")
}
if ui.AuthToken != "" {
return nil, fmt.Errorf("field auth_token can't be specified for unauthorized_user section")
}
if ui.Name != "" {
return nil, fmt.Errorf("field name can't be specified for unauthorized_user section")
}
if err := parseJWTPlaceholdersForUserInfo(ui, false); err != nil {
return nil, err
}
if err := ui.initURLs(); err != nil {
return nil, err
}
@@ -989,27 +927,16 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
}
for i := range uis {
ui := &uis[i]
// users with jwt tokens are parsed by the parseJWTUsers function.
// the function also checks that users with jwt tokens do not have auth tokens, bearer tokens, usernames and passwords.
if ui.JWT != nil {
continue
}
ats, err := getAuthTokens(ui.AuthToken, ui.BearerToken, ui.Username, ui.Password)
if err != nil {
return nil, err
}
for _, at := range ats {
if uiOld := byAuthToken[at]; uiOld != nil {
return nil, fmt.Errorf("duplicate auth token=%q found for username=%q, name=%q; the previous one is set for username=%q, name=%q",
at, ui.Username, ui.Name, uiOld.Username, uiOld.Name)
}
}
if err := parseJWTPlaceholdersForUserInfo(ui, false); err != nil {
return nil, err
}
if err := ui.initURLs(); err != nil {
return nil, err
}
@@ -1109,7 +1036,6 @@ func (ui *UserInfo) initURLs() error {
return err
}
}
for _, e := range ui.URLMaps {
if len(e.SrcPaths) == 0 && len(e.SrcHosts) == 0 && len(e.SrcQueryArgs) == 0 && len(e.SrcHeaders) == 0 {
return fmt.Errorf("missing `src_paths`, `src_hosts`, `src_query_args` and `src_headers` in `url_map`")
@@ -1169,9 +1095,6 @@ func (ui *UserInfo) name() string {
h := xxhash.Sum64([]byte(ui.AuthToken))
return fmt.Sprintf("auth_token:hash:%016X", h)
}
if ui.JWT != nil {
return `jwt`
}
return ""
}
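One hunk above touches beginConcurrencyLimit; the underlying pattern is a buffered-channel semaphore, sketched here in simplified form (the fast path and metrics bookkeeping of the real code are omitted):

package main

import (
	"context"
	"fmt"
	"time"
)

// limiter is a buffered-channel semaphore: a send acquires a slot, a receive
// releases it. When the buffer is full, acquire blocks until a slot frees up
// or the context is canceled — the same shape as beginConcurrencyLimit above.
type limiter chan struct{}

func (l limiter) acquire(ctx context.Context) error {
	select {
	case l <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (l limiter) release() { <-l }

func main() {
	l := make(limiter, 1) // at most one concurrent request
	_ = l.acquire(context.Background())

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(l.acquire(ctx)) // context deadline exceeded: no free slot
	l.release()
}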


@@ -4,11 +4,8 @@ import (
"bytes"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"testing"
"time"
"gopkg.in/yaml.v2"
@@ -279,50 +276,6 @@ users:
url_prefix: http://foo.bar
metric_labels:
not-prometheus-compatible: value
`)
// placeholder in url_prefix
f(`
users:
- username: foo
password: bar
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
// placeholder in a header
f(`
users:
- username: foo
password: bar
headers:
- 'X-Foo: {{a_placeholder}}'
url_prefix: 'http://ahost'
`)
// placeholder in url_prefix
f(`
users:
- username: foo
password: bar
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
// placeholder in a header in url_map
f(`
users:
- username: foo
password: bar
url_map:
- src_paths: ["/select/.*"]
headers:
- 'X-Foo: {{a_placeholder}}'
url_prefix: 'http://ahost'
`)
// placeholder in url_prefix in url_map
f(`
users:
- username: foo
password: bar
url_map:
- src_paths: ["/select/.*"]
url_prefix: 'http://ahost/{{a_placeholder}}/foobar'
`)
}
@@ -425,7 +378,7 @@ users:
RetryStatusCodes: []int{500, 501},
LoadBalancingPolicy: "first_available",
MergeQueryArgs: []string{"foo", "bar"},
DropSrcPathPrefixParts: new(1),
DropSrcPathPrefixParts: intp(1),
DiscoverBackendIPs: &discoverBackendIPsTrue,
},
}, nil)
@@ -668,47 +621,6 @@ unauthorized_user:
},
},
})
// skip user info with jwt, it is parsed by parseJWTUsers
f(`
users:
- username: foo
password: bar
url_prefix: http://aaa:343/bbb
- jwt: {skip_verify: true}
url_prefix: http://aaa:343/bbb
`, map[string]*UserInfo{
getHTTPAuthBasicToken("foo", "bar"): {
Username: "foo",
Password: "bar",
URLPrefix: mustParseURL("http://aaa:343/bbb"),
},
}, nil)
// Multiple users with access logs enabled
f(`
users:
- username: foo
url_prefix: http://foo
access_log: {}
- username: bar
url_prefix: https://bar/x/
access_log:
filters:
skip_status_codes: [404]
`, map[string]*UserInfo{
getHTTPAuthBasicToken("foo", ""): {
Username: "foo",
URLPrefix: mustParseURL("http://foo"),
AccessLog: &AccessLog{},
},
getHTTPAuthBasicToken("bar", ""): {
Username: "bar",
URLPrefix: mustParseURL("https://bar/x/"),
AccessLog: &AccessLog{Filters: &AccessLogFilters{SkipStatusCodes: []int{404}}},
},
}, nil)
}
func TestParseAuthConfigPassesTLSVerificationConfig(t *testing.T) {
@@ -919,7 +831,7 @@ func TestBrokenBackend(t *testing.T) {
bus[1].setBroken()
// broken backend should never return while there are healthy backends
for range int(1e3) {
for i := 0; i < 1e3; i++ {
b := up.getBackendURL()
if b.isBroken() {
t.Fatalf("unexpected broken backend %q", b.url)
@@ -996,41 +908,6 @@ func TestDiscoverBackendIPsWithIPV6(t *testing.T) {
}
func TestLogRequest(t *testing.T) {
ui := &UserInfo{AccessLog: &AccessLog{}}
testOutput := &bytes.Buffer{}
logger.SetOutputForTests(testOutput)
defer logger.ResetOutputForTest()
req, err := http.NewRequest("GET", "http://localhost:8080/select/0/prometheus", nil)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
f := func(user string, status int, duration time.Duration, expectedLog string) {
t.Helper()
testOutput.Reset()
ui.logRequest(req, user, status, duration)
got := testOutput.String()
if expectedLog == "" && got != "" {
t.Fatalf("expected empty log, got %q", got)
}
if !strings.Contains(got, expectedLog) {
t.Fatalf("output \n%q \nshould contain \n%q", testOutput.String(), expectedLog)
}
}
f("foo", 200, 10*time.Millisecond, `access_log request_host="localhost:8080" request_uri="" status_code=200 remote_addr="" user_agent="" referer="" duration_ms=10 username="foo"`)
f("foo", 404, time.Second, `access_log request_host="localhost:8080" request_uri="" status_code=404 remote_addr="" user_agent="" referer="" duration_ms=1000 username="foo"`)
ui.AccessLog.Filters = &AccessLogFilters{SkipStatusCodes: []int{200}}
f("foo", 200, 10*time.Millisecond, ``)
f("foo", 404, 10*time.Millisecond, `access_log request_host="localhost:8080" request_uri="" status_code=404 remote_addr="" user_agent="" referer="" duration_ms=10 username="foo"`)
}
func getRegexs(paths []string) []*Regex {
var sps []*Regex
for _, path := range paths {
@@ -1086,6 +963,10 @@ func mustParseURLs(us []string) *URLPrefix {
return up
}
func intp(n int) *int {
return &n
}
func mustNewRegex(s string) *Regex {
var re Regex
if err := yaml.Unmarshal([]byte(s), &re); err != nil {
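The intp helper returns here because Go cannot take the address of a literal directly; presumably the new(1) form it replaces (new with a value expression) requires a newer toolchain than these tests now target, so the explicit helper is the portable spelling:

package main

import "fmt"

// intp returns a pointer to n. &1 does not compile in Go, so a helper
// function (or new(1) on toolchains that support new-with-expression)
// is needed to build *int values from literals.
func intp(n int) *int {
	return &n
}

func main() {
	parts := intp(1)
	fmt.Println(*parts) // 1
}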


@@ -116,20 +116,6 @@ users:
- "http://default1:8888/unsupported_url_handler"
- "http://default2:8888/unsupported_url_handler"
# A JWT token based routing:
# - Requests with a JWT token that has the following structure:
# {"team": "ops", "security": {"read_access": "1"}, "vm_access": {"metrics_account_id": 1000,"metrics_project_id":5}}
# are routed to vmselect nodes, with the request URL placeholder replaced by the metrics tenant identifiers
- name: jwt-opts-team
jwt:
match_claims:
team: ops
security.read_access: "1"
skip_verify: true
url_prefix:
- "http://vmselect1:8481/select/{{.MetricsTenant}}/prometheus"
- "http://vmselect2:8481/select/{{.MetricsTenant}}/prometheus"
# Requests without Authorization header are proxied according to `unauthorized_user` section.
# Requests are proxied in round-robin fashion between `url_prefix` backends.
# The deny_partial_response query arg is added to all the proxied requests.
@@ -139,8 +125,3 @@ unauthorized_user:
- http://vmselect-az1/?deny_partial_response=1
- http://vmselect-az2/?deny_partial_response=1
retry_status_codes: [503, 500]
# log access for requests routed to this user
access_log:
filters:
# except requests with the status codes below
skip_status_codes: [200, 202]


@@ -1,486 +0,0 @@
package main
import (
"fmt"
"net/url"
"os"
"slices"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
const (
metricsTenantPlaceholder = `{{.MetricsTenant}}`
metricsExtraLabelsPlaceholder = `{{.MetricsExtraLabels}}`
metricsExtraFiltersPlaceholder = `{{.MetricsExtraFilters}}`
logsAccountIDPlaceholder = `{{.LogsAccountID}}`
logsProjectIDPlaceholder = `{{.LogsProjectID}}`
logsExtraFiltersPlaceholder = `{{.LogsExtraFilters}}`
logsExtraStreamFiltersPlaceholder = `{{.LogsExtraStreamFilters}}`
placeholderPrefix = `{{`
)
var allPlaceholders = []string{
metricsTenantPlaceholder,
metricsExtraLabelsPlaceholder,
metricsExtraFiltersPlaceholder,
logsAccountIDPlaceholder,
logsProjectIDPlaceholder,
logsExtraFiltersPlaceholder,
logsExtraStreamFiltersPlaceholder,
}
var urlPathPlaceHolders = []string{
metricsTenantPlaceholder,
logsAccountIDPlaceholder,
logsProjectIDPlaceholder,
}
type jwtCache struct {
// users contains the UserInfo entries from AuthConfig that have JWTConfig set
users []*UserInfo
oidcDP *oidcDiscovererPool
}
type JWTConfig struct {
PublicKeys []string `yaml:"public_keys,omitempty"`
PublicKeyFiles []string `yaml:"public_key_files,omitempty"`
SkipVerify bool `yaml:"skip_verify,omitempty"`
OIDC *oidcConfig `yaml:"oidc,omitempty"`
MatchClaims map[string]string `yaml:"match_claims,omitempty"`
parsedMatchClaims []*jwt.Claim
// verifierPool is used to verify JWT tokens.
// It is initialized from PublicKeys and/or PublicKeyFiles; in that case it is set once at config reload and never updated until the next reload.
// In case of OIDC, it is initialized on config reload and then periodically updated by the discovery process.
verifierPool atomic.Pointer[jwt.VerifierPool]
}
func parseJWTUsers(ac *AuthConfig) ([]*UserInfo, *oidcDiscovererPool, error) {
jui := make([]*UserInfo, 0, len(ac.Users))
oidcDP := &oidcDiscovererPool{}
uniqClaims := make(map[string]*UserInfo)
var sortedClaims []string
for idx, ui := range ac.Users {
jwtToken := ui.JWT
if jwtToken == nil {
continue
}
if ui.AuthToken != "" || ui.BearerToken != "" || ui.Username != "" || ui.Password != "" {
return nil, nil, fmt.Errorf("auth_token, bearer_token, username and password cannot be specified if jwt is set")
}
if len(jwtToken.PublicKeys) == 0 && len(jwtToken.PublicKeyFiles) == 0 && !jwtToken.SkipVerify && jwtToken.OIDC == nil {
return nil, nil, fmt.Errorf("jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true")
}
var claimsString string
sortedClaims = sortedClaims[:0]
parsedClaims := make([]*jwt.Claim, 0, len(jwtToken.MatchClaims))
for ck, cv := range jwtToken.MatchClaims {
sortedClaims = append(sortedClaims, fmt.Sprintf("%s=%s", ck, cv))
pc, err := jwt.NewClaim(ck, cv)
if err != nil {
return nil, nil, fmt.Errorf("incorrect match claim, key=%q, value regex=%q: %w", ck, cv, err)
}
parsedClaims = append(parsedClaims, pc)
}
ui.JWT.parsedMatchClaims = parsedClaims
sort.Strings(sortedClaims)
claimsString = strings.Join(sortedClaims, ",")
if oldUI, ok := uniqClaims[claimsString]; ok {
return nil, nil, fmt.Errorf("duplicate match claims=%q found for name=%q at idx=%d; the previous one is set for name=%q", claimsString, ui.Name, idx, oldUI.Name)
}
uniqClaims[claimsString] = &ui
if len(jwtToken.PublicKeys) > 0 || len(jwtToken.PublicKeyFiles) > 0 {
keys := make([]any, 0, len(jwtToken.PublicKeys)+len(jwtToken.PublicKeyFiles))
for i := range jwtToken.PublicKeys {
k, err := jwt.ParseKey([]byte(jwtToken.PublicKeys[i]))
if err != nil {
return nil, nil, err
}
keys = append(keys, k)
}
for _, filePath := range jwtToken.PublicKeyFiles {
keyData, err := os.ReadFile(filePath)
if err != nil {
return nil, nil, fmt.Errorf("cannot read public key from file %q: %w", filePath, err)
}
k, err := jwt.ParseKey(keyData)
if err != nil {
return nil, nil, fmt.Errorf("cannot parse public key from file %q: %w", filePath, err)
}
keys = append(keys, k)
}
vp, err := jwt.NewVerifierPool(keys)
if err != nil {
return nil, nil, err
}
jwtToken.verifierPool.Store(vp)
}
if jwtToken.OIDC != nil {
if len(jwtToken.PublicKeys) > 0 || len(jwtToken.PublicKeyFiles) > 0 || jwtToken.SkipVerify {
return nil, nil, fmt.Errorf("jwt with oidc cannot contain public keys or have skip_verify=true")
}
if jwtToken.OIDC.Issuer == "" {
return nil, nil, fmt.Errorf("oidc issuer cannot be empty")
}
issuerURL, err := url.Parse(jwtToken.OIDC.Issuer)
if err != nil {
return nil, nil, fmt.Errorf("oidc issuer %q must be a valid URL", jwtToken.OIDC.Issuer)
}
if issuerURL.Scheme != "https" && issuerURL.Scheme != "http" {
return nil, nil, fmt.Errorf("oidc issuer %q must have http or https scheme", jwtToken.OIDC.Issuer)
}
oidcDP.createOrAdd(ui.JWT.OIDC.Issuer, &ui.JWT.verifierPool)
}
if err := parseJWTPlaceholdersForUserInfo(&ui, true); err != nil {
return nil, nil, err
}
if err := ui.initURLs(); err != nil {
return nil, nil, err
}
metricLabels, err := ui.getMetricLabels()
if err != nil {
return nil, nil, fmt.Errorf("cannot parse metric_labels: %w", err)
}
ui.requests = ac.ms.GetOrCreateCounter(`vmauth_user_requests_total` + metricLabels)
ui.requestErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_errors_total` + metricLabels)
ui.backendRequests = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_requests_total` + metricLabels)
ui.backendErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_errors_total` + metricLabels)
ui.requestsDuration = ac.ms.GetOrCreateSummary(`vmauth_user_request_duration_seconds` + metricLabels)
mcr := ui.getMaxConcurrentRequests()
ui.concurrencyLimitCh = make(chan struct{}, mcr)
ui.concurrencyLimitReached = ac.ms.GetOrCreateCounter(`vmauth_user_concurrent_requests_limit_reached_total` + metricLabels)
_ = ac.ms.GetOrCreateGauge(`vmauth_user_concurrent_requests_capacity`+metricLabels, func() float64 {
return float64(cap(ui.concurrencyLimitCh))
})
_ = ac.ms.GetOrCreateGauge(`vmauth_user_concurrent_requests_current`+metricLabels, func() float64 {
return float64(len(ui.concurrencyLimitCh))
})
rt, err := newRoundTripper(ui.TLSCAFile, ui.TLSCertFile, ui.TLSKeyFile, ui.TLSServerName, ui.TLSInsecureSkipVerify)
if err != nil {
return nil, nil, fmt.Errorf("cannot initialize HTTP RoundTripper: %w", err)
}
ui.rt = rt
jui = append(jui, &ui)
}
// sort by the number of matching claims,
// so that the more specific claim wins in case of a clash
sort.SliceStable(jui, func(i, j int) bool {
return len(jui[i].JWT.MatchClaims) > len(jui[j].JWT.MatchClaims)
})
return jui, oidcDP, nil
}
var tokenPool sync.Pool
func getToken() *jwt.Token {
tkn := tokenPool.Get()
if tkn == nil {
return &jwt.Token{}
}
return tkn.(*jwt.Token)
}
func putToken(tkn *jwt.Token) {
tkn.Reset()
tokenPool.Put(tkn)
}
func getJWTUserInfo(ats []string) (*UserInfo, *jwt.Token) {
js := *jwtAuthCache.Load()
if len(js.users) == 0 {
return nil, nil
}
tkn := getToken()
for _, at := range ats {
if strings.Count(at, ".") != 2 {
continue
}
at, _ = strings.CutPrefix(at, `http_auth:`)
tkn.Reset()
if err := tkn.Parse(at, true); err != nil {
if *logInvalidAuthTokens {
logger.Infof("cannot parse jwt token: %s", err)
}
continue
}
if tkn.IsExpired(time.Now()) {
if *logInvalidAuthTokens {
// TODO: add more context:
// token claims with issuer
logger.Infof("jwt token is expired")
}
continue
}
if ui := getUserInfoByJWTToken(tkn, js.users); ui != nil {
return ui, tkn
}
}
putToken(tkn)
return nil, nil
}
func getUserInfoByJWTToken(tkn *jwt.Token, users []*UserInfo) *UserInfo {
for _, ui := range users {
if !tkn.MatchClaims(ui.JWT.parsedMatchClaims) {
continue
}
if ui.JWT.SkipVerify {
return ui
}
if ui.JWT.OIDC != nil {
// OIDC requires iss claim.
// It must match the discovery issuer URL set in OIDC config.
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
if tkn.Issuer() == "" {
if *logInvalidAuthTokens {
logger.Infof("jwt token must have issuer filed")
}
return nil
}
if tkn.Issuer() != ui.JWT.OIDC.Issuer {
if *logInvalidAuthTokens {
logger.Infof("jwt token issuer: %q does not match oidc issuer: %q", tkn.Issuer(), ui.JWT.OIDC.Issuer)
}
return nil
}
}
vp := ui.JWT.verifierPool.Load()
if vp == nil {
if *logInvalidAuthTokens {
logger.Infof("jwt verifier not initialed")
}
return nil
}
if err := vp.Verify(tkn); err != nil {
if *logInvalidAuthTokens {
logger.Infof("cannot verify jwt token: %s", err)
}
return nil
}
return ui
}
if *logInvalidAuthTokens {
logger.Infof("no user match jwt token")
}
return nil
}
func replaceJWTPlaceholders(bu *backendURL, hc HeadersConf, vma *jwt.VMAccessClaim) (*url.URL, HeadersConf) {
if !bu.hasPlaceHolders && !hc.hasAnyPlaceHolders {
return bu.url, hc
}
targetURL := bu.url
data := jwtClaimsData(vma)
if bu.hasPlaceHolders {
// template url params and request path
// make a copy of url
uCopy := *bu.url
for _, uph := range urlPathPlaceHolders {
replacement := data[uph]
uCopy.Path = strings.ReplaceAll(uCopy.Path, uph, replacement[0])
}
query := uCopy.Query()
var foundAnyQueryPlaceholder bool
var templatedValues []string
for param, values := range query {
templatedValues = templatedValues[:0]
// filter out placeholder values in-place and accumulate their replacements.
// This may reorder the param values; ordering is not guaranteed anyway,
// and it would change regardless once multiple arg templates are involved
var cnt int
for _, value := range values {
if dv, ok := data[value]; ok {
foundAnyQueryPlaceholder = true
templatedValues = append(templatedValues, dv...)
continue
}
values[cnt] = value
cnt++
}
values = values[:cnt]
values = append(values, templatedValues...)
query[param] = values
}
if foundAnyQueryPlaceholder {
uCopy.RawQuery = query.Encode()
}
targetURL = &uCopy
}
if hc.hasAnyPlaceHolders {
// make a copy of headers and update only values with placeholder
rhs := make([]*Header, 0, len(hc.RequestHeaders))
for _, rh := range hc.RequestHeaders {
if dv, ok := data[rh.Value]; ok {
rh := &Header{
Name: rh.Name,
Value: strings.Join(dv, ","),
}
rhs = append(rhs, rh)
continue
}
rhs = append(rhs, rh)
}
hc.RequestHeaders = rhs
}
return targetURL, hc
}
func jwtClaimsData(vma *jwt.VMAccessClaim) map[string][]string {
data := map[string][]string{
// TODO: optimize at parsing stage
metricsTenantPlaceholder: {fmt.Sprintf("%d:%d", vma.MetricsAccountID, vma.MetricsProjectID)},
metricsExtraLabelsPlaceholder: vma.MetricsExtraLabels,
metricsExtraFiltersPlaceholder: vma.MetricsExtraFilters,
// TODO: optimize at parsing stage
logsAccountIDPlaceholder: {fmt.Sprintf("%d", vma.LogsAccountID)},
logsProjectIDPlaceholder: {fmt.Sprintf("%d", vma.LogsProjectID)},
logsExtraFiltersPlaceholder: vma.LogsExtraFilters,
logsExtraStreamFiltersPlaceholder: vma.LogsExtraStreamFilters,
}
return data
}
func parseJWTPlaceholdersForUserInfo(ui *UserInfo, isAllowed bool) error {
if ui.URLPrefix != nil {
if err := validateJWTPlaceholdersForURL(ui.URLPrefix, isAllowed); err != nil {
return err
}
}
if err := parsePlaceholdersForHC(&ui.HeadersConf, isAllowed); err != nil {
return err
}
if ui.DefaultURL != nil {
if err := validateJWTPlaceholdersForURL(ui.DefaultURL, isAllowed); err != nil {
return fmt.Errorf("invalid `default_url` placeholders: %w", err)
}
}
for i := range ui.URLMaps {
e := &ui.URLMaps[i]
if e.URLPrefix != nil {
if err := validateJWTPlaceholdersForURL(e.URLPrefix, isAllowed); err != nil {
return fmt.Errorf("invalid `url_map` `url_prefix` placeholders: %w", err)
}
}
if err := parsePlaceholdersForHC(&e.HeadersConf, isAllowed); err != nil {
return fmt.Errorf("invalid `url_map` headers placeholders: %w", err)
}
}
return nil
}
func validateJWTPlaceholdersForURL(up *URLPrefix, isAllowed bool) error {
for _, bu := range up.busOriginal {
ok := strings.Contains(bu.Path, placeholderPrefix)
if ok && !isAllowed {
return fmt.Errorf("placeholder: %q is only allowed at JWT token context", bu.Path)
}
if ok {
p := bu.Path
for _, ph := range allPlaceholders {
p = strings.ReplaceAll(p, ph, ``)
}
if strings.Contains(p, placeholderPrefix) {
return fmt.Errorf("invalid placeholder found in URL request path: %q, supported values are: %s", bu.Path, strings.Join(allPlaceholders, ", "))
}
}
for param, values := range bu.Query() {
for _, value := range values {
ok := strings.Contains(value, placeholderPrefix)
if ok && !isAllowed {
return fmt.Errorf("query param: %q with placeholder: %q is only allowed at JWT token context", param, value)
}
if ok {
// possible placeholder
if !slices.Contains(allPlaceholders, value) {
return fmt.Errorf("query param: %q has unsupported placeholder string: %q, supported values are: %s", param, value, strings.Join(allPlaceholders, ", "))
}
}
}
}
}
return nil
}
func parsePlaceholdersForHC(hc *HeadersConf, isAllowed bool) error {
for _, rhs := range hc.RequestHeaders {
ok := strings.Contains(rhs.Value, placeholderPrefix)
if ok && !isAllowed {
return fmt.Errorf("request header: %q placeholder: %q is only supported at JWT context", rhs.Name, rhs.Value)
}
if ok {
if !slices.Contains(allPlaceholders, rhs.Value) {
return fmt.Errorf("request header: %q has unsupported placeholder: %q, supported values are: %s", rhs.Name, rhs.Value, strings.Join(allPlaceholders, ", "))
}
hc.hasAnyPlaceHolders = true
}
}
for _, rhs := range hc.ResponseHeaders {
if strings.Contains(rhs.Value, placeholderPrefix) {
return fmt.Errorf("response header placeholders are not supported; found placeholder prefix at header: %q with value: %q", rhs.Name, rhs.Value)
}
}
return nil
}
func hasAnyPlaceholders(u *url.URL) bool {
if strings.Contains(u.Path, placeholderPrefix) {
return true
}
if len(u.Query()) == 0 {
return false
}
for _, values := range u.Query() {
for _, value := range values {
if strings.HasPrefix(value, placeholderPrefix) {
return true
}
}
}
return false
}
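For context, a minimal sketch of how these placeholders are meant to be used in a vmauth config, assembled from the test configs later in this diff; the backend host is illustrative, and multi-value claims substituted into a header are joined with commas by replaceJWTPlaceholders above:
users:
- jwt:
    skip_verify: true
  headers:
  - "AccountID: {{.LogsAccountID}}"
  - "ProjectID: {{.LogsProjectID}}"
  url_prefix: http://victorialogs:9428/select/logsql/?extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}
Note that each templated query arg value must be exactly one supported placeholder, as enforced by validateJWTPlaceholdersForURL above.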

View File

@@ -1,503 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
)
func TestJWTParseAuthConfigFailure(t *testing.T) {
validRSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiX7oPWKOWRQsGFEWvwZO
mL2PYsdYUsu9nr0qtPCjxQHUJgLfT3rdKlvKpPFYv7ZmKnqTncg36Wz9uiYmWJ7e
IB5Z+fko8kVIMzarCqVvpAJDzYF/pUii68xvuYoK3L9TIOAeyCXv+prwnr2IH+Mw
9AONzWbRrYoO74XyTE9vMU5qmI/L1VPk+PR8lqPOSptLvzsfoaIk2ED4yK2nRB+6
st+k4nccPqbErqHc8aiXnXfugfnr6b+NPFYUzKsDqkymGOokVijrI8B3jNw6c6Do
zphk+D3wgLsXYHfMcZbXIMqffqm/aB8Qg88OpFOkQ3rd2p6R9+hacnZkfkn3Phiw
yQIDAQAB
-----END PUBLIC KEY-----
`
// ECDSA with the P-521 curve
validECDSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAU9RmtkCRuYTKCyvLlDn5DtBZOHSe
QTa5j9q/oQVpCKqcXVFrH5dgh0GL+P/ZhkeuowPzCZqntGf0+7wPt9OxSJcADVJm
dv92m540MXss8zdHf5qtE0gsu2Ved0R7Z8a8QwGZ/1mYZ+kFGGbdQTlSvRqDySTq
XOtclIk1uhc03oL9nOQ=
-----END PUBLIC KEY-----
`
f := func(s string, expErr string) {
t.Helper()
ac, err := parseAuthConfig([]byte(s))
if err != nil {
if expErr != err.Error() {
t.Fatalf("unexpected error; got\n%q\nwant\n%q", err.Error(), expErr)
}
return
}
users, oidcDP, err := parseJWTUsers(ac)
if err == nil {
t.Fatalf("expecting non-nil error; got %v", users)
}
if expErr != err.Error() {
t.Fatalf("unexpected error; got\n%q\nwant \n%q", err.Error(), expErr)
}
if oidcDP != nil {
t.Fatalf("expecting nil oidcDP; got %v", oidcDP)
}
}
// unauthorized_user cannot be used with jwt
f(`
unauthorized_user:
jwt: {skip_verify: true}
url_prefix: http://foo.bar
`, `field jwt can't be specified for unauthorized_user section`)
// username and jwt in a single config
f(`
users:
- username: foo
jwt: {skip_verify: true}
url_prefix: http://foo.bar
`, `auth_token, bearer_token, username and password cannot be specified if jwt is set`)
// bearer_token and jwt in a single config
f(`
users:
- bearer_token: foo
jwt: {skip_verify: true}
url_prefix: http://foo.bar
`, `auth_token, bearer_token, username and password cannot be specified if jwt is set`)
// auth_token and jwt in a single config
f(`
users:
- auth_token: "Foo token"
jwt: {skip_verify: true}
url_prefix: http://foo.bar
`, `auth_token, bearer_token, username and password cannot be specified if jwt is set`)
// jwt public_keys or skip_verify must be set, part 1
f(`
users:
- jwt: {}
url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)
// jwt public_keys or skip_verify must be set, part 2
f(`
users:
- jwt: {public_keys: null}
url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)
// jwt public_keys or skip_verify must be set, part 3
f(`
users:
- jwt: {public_keys: []}
url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)
// jwt public_keys, public_key_files or skip_verify must be set
f(`
users:
- jwt: {public_key_files: []}
url_prefix: http://foo.bar
`, `jwt must contain at least a single public key, public_key_files, oidc or have skip_verify=true`)
// invalid public key, part 1
f(`
users:
- jwt: {public_keys: [""]}
url_prefix: http://foo.bar
`, `failed to parse key "": failed to decode PEM block containing public key`)
// invalid public key, part 2
f(`
users:
- jwt: {public_keys: ["invalid"]}
url_prefix: http://foo.bar
`, `failed to parse key "invalid": failed to decode PEM block containing public key`)
// invalid public key, part 3
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
- %q
- "invalid"
url_prefix: http://foo.bar
`, validRSAPublicKey, validECDSAPublicKey), `failed to parse key "invalid": failed to decode PEM block containing public key`)
// several jwt users with duplicate (empty) match claims
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
url_prefix: http://foo.bar
- jwt:
public_keys:
- %q
url_prefix: http://foo.bar
`, validRSAPublicKey, validECDSAPublicKey), `duplicate match claims="" found for name="" at idx=1; the previous one is set for name=""`)
// public key file doesn't exist
f(`
users:
- jwt:
public_key_files:
- /path/to/nonexistent/file.pem
url_prefix: http://foo.bar
`, "cannot read public key from file \"/path/to/nonexistent/file.pem\": open /path/to/nonexistent/file.pem: no such file or directory")
// public key file contains invalid PEM data
publicKeyFile := filepath.Join(t.TempDir(), "a_public_key.pem")
if err := os.WriteFile(publicKeyFile, []byte(`invalidPEM`), 0o644); err != nil {
t.Fatalf("failed to write public key file: %s", err)
}
f(`
users:
- jwt:
public_key_files:
- `+publicKeyFile+`
url_prefix: http://foo.bar
`, "cannot parse public key from file \""+publicKeyFile+"\": failed to parse key \"invalidPEM\": failed to decode PEM block containing public key")
// unsupported placeholder in the URL path
f(`
users:
- jwt:
skip_verify: true
url_prefix: http://foo.bar/{{.UnsupportedPlaceholder}}/foo`,
"invalid placeholder found in URL request path: \"/{{.UnsupportedPlaceholder}}/foo\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
)
// unsupported placeholder in a header
f(`
users:
- jwt:
skip_verify: true
headers:
- "AccountID: {{.UnsupportedPlaceholder}}"
url_prefix: http://foo.bar
`,
"request header: \"AccountID\" has unsupported placeholder: \"{{.UnsupportedPlaceholder}}\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
)
// spaces in templating not allowed
f(`
users:
- jwt:
skip_verify: true
headers:
- "AccountID: {{ .LogsAccountID }}"
url_prefix: http://foo.bar
`,
"request header: \"AccountID\" has unsupported placeholder: \"{{ .LogsAccountID }}\", supported values are: {{.MetricsTenant}}, {{.MetricsExtraLabels}}, {{.MetricsExtraFilters}}, {{.LogsAccountID}}, {{.LogsProjectID}}, {{.LogsExtraFilters}}, {{.LogsExtraStreamFilters}}",
)
// oidc is not an object
f(`
users:
- jwt:
oidc: "not an object"
url_prefix: http://foo.bar
`,
"cannot unmarshal AuthConfig data: yaml: unmarshal errors:\n line 4: cannot unmarshal !!str `not an ...` into main.oidcConfig",
)
// oidc issuer empty
f(`
users:
- jwt:
oidc: {}
url_prefix: http://foo.bar
`,
"oidc issuer cannot be empty",
)
// oidc issuer invalid urls
f(`
users:
- jwt:
oidc:
issuer: "::invalid-url"
url_prefix: http://foo.bar
`,
"oidc issuer \"::invalid-url\" must be a valid URL",
)
// oidc issuer invalid urls
f(`
users:
- jwt:
oidc:
issuer: "invalid-url"
url_prefix: http://foo.bar
`,
"oidc issuer \"invalid-url\" must have http or https scheme",
)
// oidc and public_keys are not allowed
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
oidc:
issuer: https://example.com
url_prefix: http://foo.bar
`, validRSAPublicKey),
"jwt with oidc cannot contain public keys or have skip_verify=true",
)
// oidc and skip_verify are not allowed
f(`
users:
- jwt:
skip_verify: true
oidc:
issuer: https://example.com
url_prefix: http://foo.bar
`,
"jwt with oidc cannot contain public keys or have skip_verify=true",
)
// duplicate claims
f(`
users:
- jwt:
skip_verify: true
match_claims:
team: ops
name: user-1
url_prefix: http://foo.bar
- jwt:
skip_verify: true
match_claims:
team: ops
name: user-2
url_prefix: http://foo.bar`,
"duplicate match claims=\"team=ops\" found for name=\"user-2\" at idx=1; the previous one is set for name=\"user-1\"",
)
}
func TestJWTParseAuthConfigSuccess(t *testing.T) {
validRSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiX7oPWKOWRQsGFEWvwZO
mL2PYsdYUsu9nr0qtPCjxQHUJgLfT3rdKlvKpPFYv7ZmKnqTncg36Wz9uiYmWJ7e
IB5Z+fko8kVIMzarCqVvpAJDzYF/pUii68xvuYoK3L9TIOAeyCXv+prwnr2IH+Mw
9AONzWbRrYoO74XyTE9vMU5qmI/L1VPk+PR8lqPOSptLvzsfoaIk2ED4yK2nRB+6
st+k4nccPqbErqHc8aiXnXfugfnr6b+NPFYUzKsDqkymGOokVijrI8B3jNw6c6Do
zphk+D3wgLsXYHfMcZbXIMqffqm/aB8Qg88OpFOkQ3rd2p6R9+hacnZkfkn3Phiw
yQIDAQAB
-----END PUBLIC KEY-----
`
// ECDSA with the P-521 curve
validECDSAPublicKey := `-----BEGIN PUBLIC KEY-----
MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAU9RmtkCRuYTKCyvLlDn5DtBZOHSe
QTa5j9q/oQVpCKqcXVFrH5dgh0GL+P/ZhkeuowPzCZqntGf0+7wPt9OxSJcADVJm
dv92m540MXss8zdHf5qtE0gsu2Ved0R7Z8a8QwGZ/1mYZ+kFGGbdQTlSvRqDySTq
XOtclIk1uhc03oL9nOQ=
-----END PUBLIC KEY-----
`
f := func(s string) {
t.Helper()
ac, err := parseAuthConfig([]byte(s))
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
jui, oidcDP, err := parseJWTUsers(ac)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
oidcDP.startDiscovery()
defer oidcDP.stopDiscovery()
for _, ui := range jui {
if ui.JWT == nil {
t.Fatalf("unexpected nil JWTConfig")
}
if ui.JWT.SkipVerify {
if ui.JWT.verifierPool.Load() != nil {
t.Fatalf("unexpected non-nil verifier pool for skip_verify=true")
}
continue
}
if ui.JWT.verifierPool.Load() == nil {
t.Fatalf("unexpected nil verifier pool for non-empty public keys")
}
}
}
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
url_prefix: http://foo.bar
`, validRSAPublicKey))
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
url_prefix: http://foo.bar
`, validECDSAPublicKey))
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
- %q
url_prefix: http://foo.bar
`, validRSAPublicKey, validECDSAPublicKey))
f(`
users:
- jwt:
skip_verify: true
url_prefix: http://foo.bar
`)
// combined with other auth methods
f(`
users:
- username: foo
password: bar
url_prefix: http://foo.bar
- jwt:
skip_verify: true
url_prefix: http://foo.bar
- bearer_token: foo
url_prefix: http://foo.bar
`)
rsaKeyFile := filepath.Join(t.TempDir(), "rsa_public_key.pem")
if err := os.WriteFile(rsaKeyFile, []byte(validRSAPublicKey), 0o644); err != nil {
t.Fatalf("failed to write RSA key file: %s", err)
}
ecdsaKeyFile := filepath.Join(t.TempDir(), "ecdsa_public_key.pem")
if err := os.WriteFile(ecdsaKeyFile, []byte(validECDSAPublicKey), 0o644); err != nil {
t.Fatalf("failed to write ECDSA key file: %s", err)
}
// Test single public key file
f(fmt.Sprintf(`
users:
- jwt:
public_key_files:
- %q
url_prefix: http://foo.bar
`, rsaKeyFile))
// Test multiple public key files
f(fmt.Sprintf(`
users:
- jwt:
public_key_files:
- %q
- %q
url_prefix: http://foo.bar
`, rsaKeyFile, ecdsaKeyFile))
// Test combined inline keys and files
f(fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
public_key_files:
- %q
url_prefix: http://foo.bar
`, validECDSAPublicKey, rsaKeyFile))
// oidc stub server
var ipSrv *httptest.Server
ipSrv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/.well-known/openid-configuration" {
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(map[string]string{
"issuer": ipSrv.URL,
"jwks_uri": fmt.Sprintf("%s/jwks", ipSrv.URL),
})
return
}
if r.URL.Path == "/jwks" {
// resp generated by https://jwkset.com/generate
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`
{
"keys": [
{
"kty": "RSA",
"kid": "f13eee91-f566-4829-80fa-fca847c21f0e",
"d": "Ua1llEFz3LZ05CrK5a2JxKMUEWJGXhBPPF20hHQjzxd1w0IEJK_mhPZQG8dNtBROBNIi1FC9l6QRw-RTnVIVat5Xy4yDFNKXXL3ZLXejOHY8SXrNEIDqQ-cSwIpK9cK7Umib0PcPeEeeAED5mqDH75D8_YssWFF18kLbNB5Z9pZmn6Fshiht7l2Sh4GN-KcReOW6eiQQwckDte3OGmZCRbtEriLWJt5TUGUvfZVIlcclqNMycNB6jGa9E1pO5Up7Ki3ZbI_-6XmRgZPtqnR9oLJ1zn3fj3hYpCXo-zcqLuOu3qxcslsq5igsfBzgGtfIJHY9LfWmHUsaDEa5cAX1gQ",
"n": "xbLXXBTNREk70UCMiqZ53_mTzYh89W-UaPU61GZ-RZ5lYcLgyWOb5mdyRbvJpcgfZpsOeGAUWbk3GkQ4vqn8kUMnnWhUum2Qk9kGubOJGLW6yaURd00j3E-ilQ5xO2R_Hzz8bAojxV8GKdGTQ-iTf8z8nsSHH8kR2SERbNJCFFtwtFU7vyFWyoH4Lmvu2UpICTHFCR9RqwQVjyoKB1JjJ6Dh1L4zPTlsvQEnqoeFQHPYr0QcQSMYXdfPvlt_FiLOAOE89fX_9T2r9WbFAoda3uTRE5_aal0jxUU2cFyeVSIgauNtF07fp422XFb4XPkWQWrdNx0KX53laSIYQ9HOpw",
"e": "AQAB",
"p": "2JT57AD-Q2lamgjgyn0wL7DgYZ3OoCTTrDm5_NHg6h13uDvyIlXSukuUeWm4tzPSDedpstbS7dgXkLw5eQXBHwPYtByTcEZS8Z37CBnhMOOhfo_U1aNIPPanJACvWBgz47-TxHsxW1YhztZqghRoicBZPSSBAj49MgANJ4jF0zc",
"q": "6a4MkeSXJI-ZzQ-bgP8hwJqpLFr0AiNGQcjZMH4Nn4CPGdnGiqqe6flhfLimgbNhbb67B0-8fLIji8zGhGKDL_JSIpAAdmfs2vzeEsY2hScrqVbd1VbfRcRh0J6lsn7obxkbvQthp9sX2DQbeDcEeaFEvd9gDKQSATYEqWo7eBE",
"dp": "haL2yu6Z9RJuuxi7S3YPY33qFZF_y0St71j3L854zzw7gMxMTW9TRWwZQwk-1pv9AmNFzvnK0MNDVyUs-UXZsb932TrApshdqYRnPsppLvdl0GgDVYcYrbUr0IUzrFHSwraVAOlavRbaaXvX4EejcUvkRFvf1nh83fs2Iqy8E-U",
"dq": "Cnf5qC-Ndd3ZDg688LJ9WJuVKJ-Kfu4Fn7zXvgxnn9Wqk4XmFyA9rk21yFidXQIkQz5gMpun3g48-W5bFmMzbVp1w4af_q35NnZNnJm0p5Jxqkxx87TIm9-IYkg5NB3rW87MJ1PzNAnkr5LmCCSu1qQa6Eaxjt9qzxMUcmKH94E",
"qi": "saAeU11iaKHmye3cwCAYkegcyWbXV3xIXEVJtS9Af_yM19UhspwY2VhuwRaajcwYZwtvR9_ITmX9M-ea7uLdd7aDYO1fujC8NGbopeC4Hkr7yb5vTly3pfKf4h-3LwGGUucJUetdz1lmMIYiyuG4_gSf1yIEtPDLKzXiedgEMdI"
}
]
}
`))
return
}
http.NotFound(w, r)
}))
defer ipSrv.Close()
f(`
users:
- jwt:
oidc:
issuer: ` + ipSrv.URL + `
url_prefix: http://foo.bar
`)
// multiple match claims
f(fmt.Sprintf(`
users:
- jwt:
match_claims:
role: ro
team: dev
public_keys:
- %q
url_prefix: http://foo.bar
- jwt:
match_claims:
role: admin
team: dev
public_key_files:
- %q
- %q
url_prefix: http://foo.bar
- jwt:
match_claims:
role: viewer
team: dev
department: ceo
skip_verify: true
url_prefix: http://foo.bar
`, validRSAPublicKey, rsaKeyFile, ecdsaKeyFile))
}
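A hedged worked example of how match_claims selects a user entry in the last config above: a token whose payload carries, say, {"exp": 1767225600, "role": "admin", "team": "dev"} (the exp value is an illustrative future timestamp) matches the second entry and is verified against its public_key_files, while a payload with role=viewer, team=dev and department=ceo selects the third entry, which skips signature verification.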

View File

@@ -16,7 +16,6 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
@@ -48,7 +47,7 @@ var (
responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")
requestBufferSize = flagutil.NewBytes("requestBufferSize", 32*1024, "The size of the buffer for reading the request body before proxying the request to backends. "+
"This allows reducing the consumption of backend resources when processing requests from clients connected via slow networks. "+
"This allows reducing the comsumption of backend resources when processing requests from clients connected via slow networks. "+
"Set to 0 to disable request buffering. See https://docs.victoriametrics.com/victoriametrics/vmauth/#request-body-buffering")
maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size to buffer in memory for potential retries at other backends. "+
"Request bodies larger than this size cannot be retried if the backend fails. Zero or negative value disables request body buffering and retries. "+
@@ -174,7 +173,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
// Process requests for unauthorized users
ui := authConfig.Load().UnauthorizedUser
if ui != nil {
processUserRequest(w, r, ui, nil)
processUserRequest(w, r, ui)
return true
}
@@ -182,36 +181,29 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
}
if ui := getUserInfoByAuthTokens(ats); ui != nil {
processUserRequest(w, r, ui, nil)
return true
}
if ui, tkn := getJWTUserInfo(ats); ui != nil {
if tkn == nil {
logger.Panicf("BUG: unexpected nil jwt token for user %q", ui.name())
ui := getUserInfoByAuthTokens(ats)
if ui == nil {
uu := authConfig.Load().UnauthorizedUser
if uu != nil {
processUserRequest(w, r, uu)
return true
}
invalidAuthTokenRequests.Inc()
if *logInvalidAuthTokens {
err := fmt.Errorf("cannot authorize request with auth tokens %q", ats)
err = &httpserver.ErrorWithStatusCode{
Err: err,
StatusCode: http.StatusUnauthorized,
}
httpserver.Errorf(w, r, "%s", err)
} else {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
}
defer putToken(tkn)
processUserRequest(w, r, ui, tkn)
return true
}
uu := authConfig.Load().UnauthorizedUser
if uu != nil {
processUserRequest(w, r, uu, nil)
return true
}
invalidAuthTokenRequests.Inc()
if *logInvalidAuthTokens {
err := fmt.Errorf("cannot authorize request with auth tokens %q", ats)
err = &httpserver.ErrorWithStatusCode{
Err: err,
StatusCode: http.StatusUnauthorized,
}
httpserver.Errorf(w, r, "%s", err)
} else {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
}
processUserRequest(w, r, ui)
return true
}
@@ -226,37 +218,7 @@ func getUserInfoByAuthTokens(ats []string) *UserInfo {
return nil
}
// responseWriterWithStatus is a wrapper around http.ResponseWriter that captures the status code written to the response.
type responseWriterWithStatus struct {
http.ResponseWriter
status int
}
// WriteHeader records the status so it can be easily retrieved later
func (rws *responseWriterWithStatus) WriteHeader(status int) {
rws.status = status
rws.ResponseWriter.WriteHeader(status)
}
// Flush implements net/http.Flusher interface
//
// This is needed for the copyStreamToClient()
func (rws *responseWriterWithStatus) Flush() {
flusher, ok := rws.ResponseWriter.(http.Flusher)
if !ok {
logger.Panicf("BUG: it is expected http.ResponseWriter (%T) supports http.Flusher interface", rws.ResponseWriter)
}
flusher.Flush()
}
// Unwrap returns the original ResponseWriter wrapped by rws.
//
// This is needed for the net/http.ResponseController - see https://pkg.go.dev/net/http#NewResponseController
func (rws *responseWriterWithStatus) Unwrap() http.ResponseWriter {
return rws.ResponseWriter
}
func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *jwt.Token) {
func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
startTime := time.Now()
defer ui.requestsDuration.UpdateDuration(startTime)
@@ -265,20 +227,6 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tk
ctx, cancel := context.WithTimeout(r.Context(), *maxQueueDuration)
defer cancel()
userName := ui.name()
if userName == "" {
userName = "unauthorized"
}
if ui.AccessLog != nil {
w = &responseWriterWithStatus{ResponseWriter: w}
defer func() {
rws := w.(*responseWriterWithStatus)
duration := time.Since(startTime)
ui.logRequest(r, userName, rws.status, duration)
}()
}
// Acquire global concurrency limit.
if err := beginConcurrencyLimit(ctx); err != nil {
handleConcurrencyLimitError(w, r, err)
@@ -297,6 +245,10 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tk
}
// Read the initial chunk for the request body.
userName := ui.name()
if userName == "" {
userName = "unauthorized"
}
bb, err := bufferRequestBody(ctx, r.Body, userName)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
@@ -317,7 +269,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tk
defer ui.endConcurrencyLimit()
// Process the request.
processRequest(w, r, ui, tkn)
processRequest(w, r, ui)
}
func beginConcurrencyLimit(ctx context.Context) error {
@@ -390,7 +342,7 @@ func bufferRequestBody(ctx context.Context, r io.ReadCloser, userName string) (i
return bb, nil
}
func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *jwt.Token) {
func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
u := normalizeURL(r.URL)
up, hc := ui.getURLPrefixAndHeaders(u, r.Host, r.Header)
isDefault := false
@@ -416,27 +368,22 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *j
}
maxAttempts := up.getBackendsCount()
for range maxAttempts {
for i := 0; i < maxAttempts; i++ {
bu := up.getBackendURL()
if bu == nil {
break
}
targetURL := bu.url
if tkn != nil {
// for security reasons allow templating only for configured url values and headers
targetURL, hc = replaceJWTPlaceholders(bu, hc, tkn.VMAccess())
}
if isDefault {
// Don't change path and add request_path query param for default route.
targetURLCopy := *targetURL
query := targetURL.Query()
query.Set("request_path", u.String())
targetURLCopy.RawQuery = query.Encode()
targetURL = &targetURLCopy
targetURL.RawQuery = query.Encode()
} else {
// Update path for regular routes.
targetURL = mergeURLs(targetURL, u, up.dropSrcPathPrefixParts, up.mergeQueryArgs)
}
wasLocalRetry := false
again:
ok, needLocalRetry := tryProcessingRequest(w, r, targetURL, hc, up.retryStatusCodes, ui, bu)
@@ -454,7 +401,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo, tkn *j
ui.backendErrors.Inc()
}
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("all the %d backends for the user %q are unavailable for proxying the request - check previous WARN logs to see the exact error for each failed backend", up.getBackendsCount(), ui.name()),
Err: fmt.Errorf("all the %d backends for the user %q are unavailable", up.getBackendsCount(), ui.name()),
StatusCode: http.StatusBadGateway,
}
httpserver.Errorf(w, r, "%s", err)
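To make the default-route branch above concrete, a sketch assuming a user entry whose default_url is http://fallback:8428/ (an illustrative backend): a request that matches no configured route keeps the backend path unchanged and carries the original URL in the request_path query arg, roughly:
GET http://vmauth:8427/unknown?x=1  ->  http://fallback:8428/?request_path=%2Funknown%3Fx%3D1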

File diff suppressed because it is too large

View File

@@ -1,194 +0,0 @@
package main
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)
func BenchmarkJWTRequestHandler(b *testing.B) {
// Generate RSA key pair for testing
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
b.Fatalf("cannot generate RSA key: %s", err)
}
// Generate public key PEM
publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
if err != nil {
b.Fatalf("cannot marshal public key: %s", err)
}
publicKeyPEM := pem.EncodeToMemory(&pem.Block{
Type: "PUBLIC KEY",
Bytes: publicKeyBytes,
})
genToken := func(t *testing.B, body map[string]any, valid bool) string {
t.Helper()
headerJSON, err := json.Marshal(map[string]any{
"alg": "RS256",
"typ": "JWT",
})
if err != nil {
t.Fatalf("cannot marshal header: %s", err)
}
headerB64 := base64.RawURLEncoding.EncodeToString(headerJSON)
bodyJSON, err := json.Marshal(body)
if err != nil {
t.Fatalf("cannot marshal body: %s", err)
}
bodyB64 := base64.RawURLEncoding.EncodeToString(bodyJSON)
payload := headerB64 + "." + bodyB64
var signatureB64 string
if valid {
// Create real RSA signature
hash := crypto.SHA256
h := hash.New()
h.Write([]byte(payload))
digest := h.Sum(nil)
signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, digest)
if err != nil {
t.Fatalf("cannot sign token: %s", err)
}
signatureB64 = base64.RawURLEncoding.EncodeToString(signature)
} else {
signatureB64 = base64.RawURLEncoding.EncodeToString([]byte("invalid_signature"))
}
return payload + "." + signatureB64
}
f := func(name string, cfgStr string, r *http.Request, statusCodeExpected int) {
b.Helper()
b.ReportAllocs()
b.ResetTimer()
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
if _, err := w.Write([]byte("path: " + r.URL.Path + "\n")); err != nil {
panic(fmt.Errorf("cannot write response: %w", err))
}
}))
defer ts.Close()
cfgStr = strings.ReplaceAll(cfgStr, "{BACKEND}", ts.URL)
cfgOrigP := authConfigData.Load()
if _, err := reloadAuthConfigData([]byte(cfgStr)); err != nil {
b.Fatalf("cannot load config data: %s", err)
}
defer func() {
cfgOrig := []byte("unauthorized_user:\n url_prefix: http://foo/bar")
if cfgOrigP != nil {
cfgOrig = *cfgOrigP
}
_, err := reloadAuthConfigData(cfgOrig)
if err != nil {
b.Fatalf("cannot load the original config: %s", err)
}
}()
b.Run(name, func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
w := &fakeResponseWriter{}
for pb.Next() {
w.reset()
if !requestHandlerWithInternalRoutes(w, r) {
b.Fatalf("unexpected false is returned from requestHandler")
}
if w.statusCode != statusCodeExpected {
b.Fatalf("unexpected response code (-%d;+%d)", statusCodeExpected, w.statusCode)
}
}
})
})
}
simpleCfgStr := fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
url_prefix: {BACKEND}/foo`, string(publicKeyPEM))
noVMAccessClaimToken := genToken(b, nil, true)
expiredToken := genToken(b, map[string]any{
"exp": 10,
"vm_access": map[string]any{},
}, true)
fullToken := genToken(b, map[string]any{
"exp": time.Now().Add(10 * time.Minute).Unix(),
"scope": "email id",
"vm_access": map[string]any{
"extra_labels": map[string]string{
"label": "value1",
"label2": "value3",
},
"extra_filters": []string{"stream_filter1", "stream_filter2"},
"metrics_account_id": 123,
"metrics_project_id": 234,
"metrics_extra_labels": []string{
"label1=value1",
"label2=value2",
},
"metrics_extra_filters": []string{
`{label3="value3"}`,
`{label4="value4"}`,
},
"logs_account_id": 345,
"logs_project_id": 456,
"logs_extra_filters": []string{
`{"namespace":"my-app","env":"prod"}`,
},
"logs_extra_stream_filters": []string{
`{"team":"dev"}`,
},
},
}, true)
// tenant headers are overwritten when set via placeholders;
// extra_filters and extra_stream_filters from the vm_access claim are merged with the statically defined ones
request := httptest.NewRequest(`GET`, "http://some-host.com/query", nil)
request.Header.Set(`Authorization`, `Bearer `+fullToken)
f("full_template",
fmt.Sprintf(`
users:
- jwt:
public_keys:
- %q
headers:
- "AccountID: {{.LogsAccountID}}"
- "ProjectID: {{.LogsProjectID}}"
url_prefix: {BACKEND}/select/logsql/?extra_filters=aStaticFilter&extra_stream_filters=aStaticStreamFilter&extra_filters={{.LogsExtraFilters}}&extra_stream_filters={{.LogsExtraStreamFilters}}`, string(publicKeyPEM)),
request,
http.StatusOK,
)
// token without vm_access claim
request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
request.Header.Set(`Authorization`, `Bearer `+noVMAccessClaimToken)
f("token_without_claim", simpleCfgStr, request, http.StatusUnauthorized)
// expired token
request = httptest.NewRequest(`GET`, "http://some-host.com/abc", nil)
request.Header.Set(`Authorization`, `Bearer `+expiredToken)
f("expired_token", simpleCfgStr, request, http.StatusUnauthorized)
}
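For reference, the vm_access claim inside fullToken maps onto the placeholders via jwtClaimsData earlier in this diff; under that mapping the token above yields roughly:
{{.MetricsTenant}}          -> "123:234"
{{.LogsAccountID}}          -> "345"
{{.LogsProjectID}}          -> "456"
{{.LogsExtraFilters}}       -> {"namespace":"my-app","env":"prod"}
{{.LogsExtraStreamFilters}} -> {"team":"dev"}
so the configured extra_filters/extra_stream_filters query args and the AccountID/ProjectID headers in the full_template benchmark are filled from the token.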

View File

@@ -1,195 +0,0 @@
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/jwt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)
type oidcConfig struct {
Issuer string `yaml:"issuer"`
}
type oidcDiscovererPool struct {
ds map[string]*oidcDiscoverer
context context.Context
cancel func()
wg *sync.WaitGroup
}
func (dp *oidcDiscovererPool) createOrAdd(issuer string, vp *atomic.Pointer[jwt.VerifierPool]) {
if dp.ds == nil {
dp.ds = make(map[string]*oidcDiscoverer)
dp.context, dp.cancel = context.WithCancel(context.Background())
dp.wg = &sync.WaitGroup{}
}
ds, found := dp.ds[issuer]
if !found {
ds = &oidcDiscoverer{
issuer: issuer,
}
dp.ds[issuer] = ds
}
ds.vps = append(ds.vps, vp)
}
func (dp *oidcDiscovererPool) startDiscovery() {
if len(dp.ds) == 0 {
return
}
for _, d := range dp.ds {
dp.wg.Go(func() {
if err := d.refreshVerifierPools(dp.context); err != nil {
logger.Errorf("failed to initialize OIDC verifier pool at start for issuer %q: %s", d.issuer, err)
}
})
}
dp.wg.Wait()
for _, d := range dp.ds {
dp.wg.Go(func() {
d.run(dp.context)
})
}
}
func (dp *oidcDiscovererPool) stopDiscovery() {
if len(dp.ds) == 0 {
return
}
dp.cancel()
dp.wg.Wait()
}
type oidcDiscoverer struct {
issuer string
vps []*atomic.Pointer[jwt.VerifierPool]
}
func (d *oidcDiscoverer) run(ctx context.Context) {
t := time.NewTimer(timeutil.AddJitterToDuration(time.Second * 10))
defer t.Stop()
for {
select {
case <-t.C:
if err := d.refreshVerifierPools(ctx); errors.Is(err, context.Canceled) {
return
} else if err != nil {
t.Reset(timeutil.AddJitterToDuration(time.Second * 10))
logger.Errorf("failed to refresh OIDC verifier pool for issuer %q: %v", d.issuer, err)
continue
}
// OIDC providers may return a Cache-Control header with a max-age directive.
// It could be used as the interval until the next refresh.
// https://openid.net/specs/openid-connect-core-1_0.html#RotateEncKeys
t.Reset(timeutil.AddJitterToDuration(time.Minute * 5))
case <-ctx.Done():
return
}
}
}
func (d *oidcDiscoverer) refreshVerifierPools(ctx context.Context) error {
cfg, err := getOpenIDConfiguration(ctx, d.issuer)
if err != nil {
return err
}
// The issuer in the OIDC configuration must match the expected issuer.
// https://openid.net/specs/openid-connect-core-1_0.html#RotateEncKeys
if cfg.Issuer != d.issuer {
return fmt.Errorf("openid configuration issuer %q does not match expected issuer %q", cfg.Issuer, d.issuer)
}
verifierPool, err := fetchAndParseJWKs(ctx, cfg.JWKsURI)
if err != nil {
return err
}
for _, vp := range d.vps {
vp.Store(verifierPool)
}
return nil
}
// See https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata for details.
type openidConfig struct {
Issuer string `json:"issuer"`
JWKsURI string `json:"jwks_uri"`
}
var oidcHTTPClient = &http.Client{
Timeout: time.Second * 5,
}
func fetchAndParseJWKs(ctx context.Context, jwksURI string) (*jwt.VerifierPool, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, jwksURI, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request for fetching jwks keys from %q: %w", jwksURI, err)
}
resp, err := oidcHTTPClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to fetch jwks keys from %q: %w", jwksURI, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code %d when fetching jwks keys from %q", resp.StatusCode, jwksURI)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body from %q: %w", jwksURI, err)
}
vp, err := jwt.ParseJWKs(b)
if err != nil {
return nil, fmt.Errorf("failed to parse jwks keys from %q: %v", jwksURI, err)
}
return vp, nil
}
func getOpenIDConfiguration(ctx context.Context, issuer string) (openidConfig, error) {
issuer, _ = strings.CutSuffix(issuer, "/")
configURL := fmt.Sprintf("%s/.well-known/openid-configuration", issuer)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, configURL, nil)
if err != nil {
return openidConfig{}, fmt.Errorf("failed to create request for fetching openid config from %q: %w", configURL, err)
}
resp, err := oidcHTTPClient.Do(req)
if err != nil {
return openidConfig{}, fmt.Errorf("failed to fetch openid config from %q: %w", configURL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return openidConfig{}, fmt.Errorf("unexpected status code %d when fetching openid config from %q", resp.StatusCode, configURL)
}
var cfg openidConfig
if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
return openidConfig{}, fmt.Errorf("failed to decode openid config from %q: %s", configURL, err)
}
return cfg, nil
}
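A minimal sketch of the discovery round-trip implemented above, wiring getOpenIDConfiguration and fetchAndParseJWKs together outside of the pool machinery; it assumes the imports of this file, the issuer URL is illustrative, and error handling is reduced to logging:
func discoverOnce(issuer string, pool *atomic.Pointer[jwt.VerifierPool]) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cfg, err := getOpenIDConfiguration(ctx, issuer)
	if err != nil {
		logger.Errorf("discovery failed for %q: %s", issuer, err)
		return
	}
	// note: refreshVerifierPools above additionally checks cfg.Issuer == issuer
	vp, err := fetchAndParseJWKs(ctx, cfg.JWKsURI)
	if err != nil {
		logger.Errorf("jwks fetch failed for %q: %s", issuer, err)
		return
	}
	pool.Store(vp) // the stored pool is what token verification loads later
}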

View File

@@ -174,7 +174,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
RetryStatusCodes: []int{503, 501},
LoadBalancingPolicy: "first_available",
DropSrcPathPrefixParts: new(2),
DropSrcPathPrefixParts: intp(2),
}, "/a/b/c", "http://foo.bar/c", `bb: aaa`, `x: y`, []int{503, 501}, "first_available", 2)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/federate"),
@@ -219,13 +219,13 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
RetryStatusCodes: []int{503, 500, 501},
LoadBalancingPolicy: "first_available",
DropSrcPathPrefixParts: new(1),
DropSrcPathPrefixParts: intp(1),
},
{
SrcPaths: getRegexs([]string{"/api/v1/write"}),
URLPrefix: mustParseURL("http://vminsert/0/prometheus"),
RetryStatusCodes: []int{},
DropSrcPathPrefixParts: new(0),
DropSrcPathPrefixParts: intp(0),
},
{
SrcPaths: getRegexs([]string{"/metrics"}),
@@ -242,7 +242,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
},
RetryStatusCodes: []int{502},
DropSrcPathPrefixParts: new(2),
DropSrcPathPrefixParts: intp(2),
}
f(ui, "http://host42/vmsingle/api/v1/query?query=up&db=foo", "http://vmselect/0/prometheus/api/v1/query?db=foo&query=up",
"xx: aa\nyy: asdf", "qwe: rty", []int{503, 500, 501}, "first_available", 1)
@@ -259,7 +259,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
SrcPaths: getRegexs([]string{"/api/v1/write"}),
URLPrefix: mustParseURL("http://vminsert/0/prometheus"),
RetryStatusCodes: []int{},
DropSrcPathPrefixParts: new(0),
DropSrcPathPrefixParts: intp(0),
},
{
SrcPaths: getRegexs([]string{"/metrics/a/b"}),
@@ -275,7 +275,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
},
RetryStatusCodes: []int{502},
DropSrcPathPrefixParts: new(2),
DropSrcPathPrefixParts: intp(2),
}
f(ui, "https://foo-host/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "", "", []int{}, "least_loaded", 0)
f(ui, "https://foo-host/metrics/a/b", "http://metrics-server/b", "", "", []int{502}, "least_loaded", 2)

View File

@@ -7,8 +7,6 @@ import (
"math"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
@@ -47,7 +45,7 @@ func New(retries int, factor float64, minDuration time.Duration) (*Backoff, erro
// Retry process retries until all attempts are completed
func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
var attempt uint64
for i := range b.retries {
for i := 0; i < b.retries; i++ {
err := cb()
if err == nil {
return attempt, nil
@@ -57,7 +55,6 @@ func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
return attempt, err // fail fast if not recoverable
}
attempt++
retriesTotal.Inc()
backoff := float64(b.minDuration) * math.Pow(b.factor, float64(i))
dur := time.Duration(backoff)
logger.Errorf("got error: %s on attempt: %d; will retry in %v", err, attempt, dur)
@@ -77,7 +74,3 @@ func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
}
return attempt, fmt.Errorf("execution failed after %d retry attempts", b.retries)
}
var (
retriesTotal = metrics.NewCounter(`vmctl_backoff_retries_total`)
)
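To make the removed schedule concrete: Retry waits backoff = minDuration * factor^i after the i-th failed attempt, so with illustrative values retries=4, factor=2 and minDuration=time.Second the waits grow as 1s, 2s, 4s. A hedged usage sketch, where doRequest is a hypothetical retryable operation (retryableFunc is func() error, as implied by the cb() call above):
b, err := backoff.New(4, 2, time.Second) // illustrative parameters
if err != nil {
	log.Fatal(err)
}
attempts, err := b.Retry(ctx, func() error {
	return doRequest()
})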

View File

@@ -14,12 +14,6 @@ const (
globalSilent = "s"
globalVerbose = "verbose"
globalDisableProgressBar = "disable-progress-bar"
globalPushMetricsURL = "pushmetrics.url"
globalPushMetricsInterval = "pushmetrics.interval"
globalPushExtraLabels = "pushmetrics.extraLabel"
globalPushHeaders = "pushmetrics.header"
globalPushDisableCompression = "pushmetrics.disableCompression"
)
var (
@@ -39,29 +33,6 @@ var (
Value: false,
Usage: "Whether to disable progress bar during the import.",
},
&cli.StringSliceFlag{
Name: globalPushMetricsURL,
Usage: "Optional URL to push metrics. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#push-metrics",
},
&cli.DurationFlag{
Name: globalPushMetricsInterval,
Value: 10 * time.Second,
Usage: "Interval for pushing metrics to every -pushmetrics.url",
},
&cli.StringSliceFlag{
Name: globalPushExtraLabels,
Usage: "Extra labels to add to pushed metrics. In case of collision, label value defined by flag will have priority. " +
"Flag can be set multiple times, to add few additional labels. " +
"For example, -pushmetrics.extraLabel='instance=\"foo\"' adds instance=\"foo\" label to all the metrics pushed to every -pushmetrics.url",
},
&cli.StringSliceFlag{
Name: globalPushHeaders,
Usage: "Optional HTTP headers to add to pushed metrics. Flag can be set multiple times, to add few additional headers.",
},
&cli.BoolFlag{
Name: globalPushDisableCompression,
Usage: "Whether to disable compression when pushing metrics.",
},
}
)

View File

@@ -7,8 +7,6 @@ import (
"log"
"sync"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
@@ -54,7 +52,6 @@ func (ip *influxProcessor) run(ctx context.Context) error {
return nil
}
influxSeriesTotal.Add(len(series))
bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing series"), len(series))
if err := barpool.Start(); err != nil {
return err
@@ -70,11 +67,9 @@ func (ip *influxProcessor) run(ctx context.Context) error {
wg.Go(func() {
for s := range seriesCh {
if err := ip.do(s); err != nil {
influxErrorsTotal.Inc()
errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err)
return
}
influxSeriesProcessed.Inc()
bar.Increment()
}
})
@@ -86,7 +81,6 @@ func (ip *influxProcessor) run(ctx context.Context) error {
case infErr := <-errCh:
return fmt.Errorf("influx error: %s", infErr)
case vmErr := <-ip.im.Errors():
influxErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, ip.isVerbose))
case seriesCh <- s:
}
@@ -99,7 +93,6 @@ func (ip *influxProcessor) run(ctx context.Context) error {
// drain import errors channel
for vmErr := range ip.im.Errors() {
if vmErr.Err != nil {
influxErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, ip.isVerbose))
}
}
@@ -174,9 +167,3 @@ func (ip *influxProcessor) do(s *influx.Series) error {
}
}
}
var (
influxSeriesTotal = metrics.NewCounter(`vmctl_influx_migration_series_total`)
influxSeriesProcessed = metrics.NewCounter(`vmctl_influx_migration_series_processed`)
influxErrorsTotal = metrics.NewCounter(`vmctl_influx_migration_errors_total`)
)

View File

@@ -4,8 +4,6 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
)
@@ -47,16 +45,9 @@ func (l *Limiter) Register(dataLen int) {
t := timerpool.Get(d)
<-t.C
timerpool.Put(t)
limiterThrottleEventsTotal.Inc()
}
l.budget += limit
l.deadline = time.Now().Add(time.Second)
}
l.budget -= int64(dataLen)
limiterBytesProcessed.Add(dataLen)
}
var (
limiterBytesProcessed = metrics.NewCounter(`vmctl_limiter_bytes_processed_total`)
limiterThrottleEventsTotal = metrics.NewCounter(`vmctl_limiter_throttle_events_total`)
)

View File

@@ -2,7 +2,6 @@ package main
import (
"context"
"flag"
"fmt"
"log"
"net/http"
@@ -20,9 +19,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
@@ -44,20 +41,11 @@ func main() {
ctx, cancelCtx := context.WithCancel(context.Background())
start := time.Now()
beforeFn := func(c *cli.Context) error {
flag.Parse()
logger.Init()
isSilent = c.Bool(globalSilent)
if c.Bool(globalDisableProgressBar) {
barpool.Disable(true)
}
netutil.EnableIPv6()
pushmetrics.InitWith(&pushmetrics.Config{
URLs: c.StringSlice(globalPushMetricsURL),
Interval: c.Duration(globalPushMetricsInterval),
ExtraLabels: c.StringSlice(globalPushExtraLabels),
DisableCompression: c.Bool(globalPushDisableCompression),
Headers: c.StringSlice(globalPushHeaders),
})
return nil
}
app := &cli.App{
@@ -463,7 +451,6 @@ func main() {
log.Fatalln(err)
}
log.Printf("Total time: %v", time.Since(start))
pushmetrics.StopAndPush()
}
func initConfigVM(c *cli.Context) (vm.Config, error) {

View File

@@ -8,8 +8,6 @@ import (
"net/http"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
)
@@ -38,15 +36,12 @@ type Response struct {
// Explore finds metric names by provided filter from api/v1/label/__name__/values
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string, start, end time.Time) ([]string, error) {
startTime := time.Now()
exploreRequestsTotal.Inc()
url := fmt.Sprintf("%s/%s", c.Addr, nativeMetricNamesAddr)
if tenantID != "" {
url = fmt.Sprintf("%s/select/%s/prometheus/%s", c.Addr, tenantID, nativeMetricNamesAddr)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
exploreRequestsErrorsTotal.Inc()
return nil, fmt.Errorf("cannot create request to %q: %s", url, err)
}
@@ -58,53 +53,37 @@ func (c *Client) Explore(ctx context.Context, f Filter, tenantID string, start,
resp, err := c.do(req, http.StatusOK)
if err != nil {
exploreRequestsErrorsTotal.Inc()
exploreDuration.UpdateDuration(startTime)
return nil, fmt.Errorf("series request failed: %s", err)
}
var response Response
if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
exploreRequestsErrorsTotal.Inc()
exploreDuration.UpdateDuration(startTime)
return nil, fmt.Errorf("cannot decode series response: %s", err)
}
exploreDuration.UpdateDuration(startTime)
return response.MetricNames, resp.Body.Close()
}
// ImportPipe uses pipe reader in request to process data
func (c *Client) ImportPipe(ctx context.Context, dstURL string, pr *io.PipeReader) error {
startTime := time.Now()
importRequestsTotal.Inc()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, dstURL, pr)
if err != nil {
importRequestsErrorsTotal.Inc()
return fmt.Errorf("cannot create import request to %q: %s", c.Addr, err)
}
importResp, err := c.do(req, http.StatusNoContent)
if err != nil {
importRequestsErrorsTotal.Inc()
importDuration.UpdateDuration(startTime)
return fmt.Errorf("import request failed: %s", err)
}
if err := importResp.Body.Close(); err != nil {
importRequestsErrorsTotal.Inc()
importDuration.UpdateDuration(startTime)
return fmt.Errorf("cannot close import response body: %s", err)
}
importDuration.UpdateDuration(startTime)
return nil
}
// ExportPipe makes request by provided filter and return io.ReadCloser which can be used to get data
func (c *Client) ExportPipe(ctx context.Context, url string, f Filter) (io.ReadCloser, error) {
startTime := time.Now()
exportRequestsTotal.Inc()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
exportRequestsErrorsTotal.Inc()
return nil, fmt.Errorf("cannot create request to %q: %s", c.Addr, err)
}
@@ -123,11 +102,8 @@ func (c *Client) ExportPipe(ctx context.Context, url string, f Filter) (io.ReadC
resp, err := c.do(req, http.StatusOK)
if err != nil {
exportRequestsErrorsTotal.Inc()
exportDuration.UpdateDuration(startTime)
return nil, fmt.Errorf("export request failed: %w", err)
}
exportDuration.UpdateDuration(startTime)
return resp.Body, nil
}
@@ -186,16 +162,3 @@ func (c *Client) do(req *http.Request, expSC int) (*http.Response, error) {
}
return resp, err
}
var (
importRequestsTotal = metrics.NewCounter(`vmctl_vm_native_requests_total{type="import"}`)
exportRequestsTotal = metrics.NewCounter(`vmctl_vm_native_requests_total{type="export"}`)
exploreRequestsTotal = metrics.NewCounter(`vmctl_vm_native_requests_total{type="explore"}`)
importRequestsErrorsTotal = metrics.NewCounter(`vmctl_vm_native_request_errors_total{type="import"}`)
exportRequestsErrorsTotal = metrics.NewCounter(`vmctl_vm_native_request_errors_total{type="export"}`)
exploreRequestsErrorsTotal = metrics.NewCounter(`vmctl_vm_native_request_errors_total{type="explore"}`)
importDuration = metrics.NewHistogram(`vmctl_vm_native_import_duration_seconds`)
exportDuration = metrics.NewHistogram(`vmctl_vm_native_export_duration_seconds`)
exploreDuration = metrics.NewHistogram(`vmctl_vm_native_explore_duration_seconds`)
)

View File

@@ -7,8 +7,6 @@ import (
"sync"
"time"
vmetrics "github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/cheggaaa/pb/v3"
@@ -59,7 +57,6 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
if !prompt(ctx, question) {
return nil
}
op.im.ResetStats()
var startTime int64
if op.oc.HardTS != 0 {
@@ -87,7 +84,6 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
seriesCh := make(chan queryObj, op.otsdbcc)
errCh := make(chan error)
// we're going to make serieslist * queryRanges queries, so we should represent that in the progress bar
otsdbSeriesTotal.Add(len(serieslist) * queryRanges)
bar := pb.StartNew(len(serieslist) * queryRanges)
defer func(bar *pb.ProgressBar) {
bar.Finish()
@@ -97,11 +93,9 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
wg.Go(func() {
for s := range seriesCh {
if err := op.do(s); err != nil {
otsdbErrorsTotal.Inc()
errCh <- fmt.Errorf("couldn't retrieve series for %s : %s", metric, err)
return
}
otsdbSeriesProcessed.Inc()
bar.Increment()
}
})
@@ -121,7 +115,6 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
case otsdbErr := <-errCh:
return fmt.Errorf("opentsdb error: %s", otsdbErr)
case vmErr := <-op.im.Errors():
otsdbErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
case seriesCh <- queryObj{
Tr: tr, StartTime: startTime,
@@ -146,7 +139,6 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
op.im.Close()
for vmErr := range op.im.Errors() {
if vmErr.Err != nil {
otsdbErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
}
}
@@ -177,9 +169,3 @@ func (op *otsdbProcessor) do(s queryObj) error {
}
return op.im.Input(&ts)
}
var (
otsdbSeriesTotal = vmetrics.NewCounter(`vmctl_opentsdb_migration_series_total`)
otsdbSeriesProcessed = vmetrics.NewCounter(`vmctl_opentsdb_migration_series_processed`)
otsdbErrorsTotal = vmetrics.NewCounter(`vmctl_opentsdb_migration_errors_total`)
)

View File

@@ -109,7 +109,7 @@ func (c Client) FindMetrics(q string) ([]string, error) {
return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("bad return from OpenTSDB: %d: %v", resp.StatusCode, resp)
return nil, fmt.Errorf("bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
}
defer func() { _ = resp.Body.Close() }()
body, err := io.ReadAll(resp.Body)
@@ -133,7 +133,7 @@ func (c Client) FindSeries(metric string) ([]Meta, error) {
return nil, fmt.Errorf("failed to set GET request to %q: %s", q, err)
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("bad return from OpenTSDB: %d: %v", resp.StatusCode, resp)
return nil, fmt.Errorf("bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
}
defer func() { _ = resp.Body.Close() }()
body, err := io.ReadAll(resp.Body)

View File

@@ -11,8 +11,6 @@ import (
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
@@ -115,7 +113,6 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
}
func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
promBlocksTotal.Add(len(blocks))
bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), len(blocks))
if err := barpool.Start(); err != nil {
return err
@@ -131,11 +128,9 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
wg.Go(func() {
for br := range blockReadersCh {
if err := pp.do(br); err != nil {
promErrorsTotal.Inc()
errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
return
}
promBlocksProcessed.Inc()
bar.Increment()
}
})
@@ -148,7 +143,6 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
return fmt.Errorf("prometheus error: %s", promErr)
case vmErr := <-pp.im.Errors():
close(blockReadersCh)
promErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
case blockReadersCh <- br:
}
@@ -162,7 +156,6 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
// drain import errors channel
for vmErr := range pp.im.Errors() {
if vmErr.Err != nil {
promErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
}
}
@@ -172,9 +165,3 @@ func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
return nil
}
var (
promBlocksTotal = metrics.NewCounter(`vmctl_prometheus_migration_blocks_total`)
promBlocksProcessed = metrics.NewCounter(`vmctl_prometheus_migration_blocks_processed`)
promErrorsTotal = metrics.NewCounter(`vmctl_prometheus_migration_errors_total`)
)

View File

@@ -7,8 +7,6 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
@@ -53,7 +51,6 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
return nil
}
remoteReadRangesTotal.Add(len(ranges))
bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing ranges"), len(ranges))
if err := barpool.Start(); err != nil {
return err
@@ -73,11 +70,9 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
wg.Go(func() {
for r := range rangeC {
if err := rrp.do(ctx, r); err != nil {
remoteReadErrorsTotal.Inc()
errCh <- fmt.Errorf("request failed for: %s", err)
return
}
remoteReadRangesProcessed.Inc()
bar.Increment()
}
})
@@ -88,7 +83,6 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
case infErr := <-errCh:
return fmt.Errorf("remote read error: %s", infErr)
case vmErr := <-rrp.dst.Errors():
remoteReadErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, rrp.isVerbose))
case rangeC <- &remoteread.Filter{
StartTimestampMs: r[0].UnixMilli(),
@@ -104,7 +98,6 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
// drain import errors channel
for vmErr := range rrp.dst.Errors() {
if vmErr.Err != nil {
remoteReadErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, rrp.isVerbose))
}
}
@@ -125,9 +118,3 @@ func (rrp *remoteReadProcessor) do(ctx context.Context, filter *remoteread.Filte
return nil
})
}
var (
remoteReadRangesTotal = metrics.NewCounter(`vmctl_remote_read_migration_ranges_total`)
remoteReadRangesProcessed = metrics.NewCounter(`vmctl_remote_read_migration_ranges_processed`)
remoteReadErrorsTotal = metrics.NewCounter(`vmctl_remote_read_migration_errors_total`)
)

View File

@@ -76,11 +76,11 @@ func (ts *TimeSeries) write(w io.Writer) (int, error) {
pointsCount := len(timestampsBatch)
cw.printf(`},"timestamps":[`)
for i := range pointsCount - 1 {
for i := 0; i < pointsCount-1; i++ {
cw.printf(`%d,`, timestampsBatch[i])
}
cw.printf(`%d],"values":[`, timestampsBatch[pointsCount-1])
for i := range pointsCount - 1 {
for i := 0; i < pointsCount-1; i++ {
cw.printf(`%v,`, valuesBatch[i])
}
cw.printf("%v]}\n", valuesBatch[pointsCount-1])

View File

@@ -12,8 +12,6 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
@@ -82,12 +80,6 @@ type Importer struct {
s *stats
backoff *backoff.Backoff
importRequestsTotal *metrics.Counter
importRequestsErrorsTotal *metrics.Counter
importSamplesTotal *metrics.Counter
importBytesTotal *metrics.Counter
importDuration *metrics.Histogram
}
// ResetStats resets im stats.
@@ -155,12 +147,6 @@ func NewImporter(ctx context.Context, cfg Config) (*Importer, error) {
input: make(chan *TimeSeries, cfg.Concurrency*4),
errors: make(chan *ImportError, cfg.Concurrency),
backoff: cfg.Backoff,
importRequestsTotal: metrics.GetOrCreateCounter(`vmctl_importer_requests_total`),
importRequestsErrorsTotal: metrics.GetOrCreateCounter(`vmctl_importer_request_errors_total`),
importSamplesTotal: metrics.GetOrCreateCounter(`vmctl_importer_samples_total`),
importBytesTotal: metrics.GetOrCreateCounter(`vmctl_importer_bytes_total`),
importDuration: metrics.GetOrCreateHistogram(`vmctl_importer_request_duration_seconds`),
}
if err := im.Ping(); err != nil {
return nil, fmt.Errorf("ping to %q failed: %s", addr, err)
@@ -325,13 +311,9 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
return nil
}
startTime := time.Now()
im.importRequestsTotal.Inc()
pr, pw := io.Pipe()
req, err := http.NewRequest(http.MethodPost, im.importPath, pr)
if err != nil {
im.importRequestsErrorsTotal.Inc()
return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
}
if im.user != "" {
@@ -351,7 +333,6 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
if im.compress {
zw, err := gzip.NewWriterLevel(w, 1)
if err != nil {
im.importRequestsErrorsTotal.Inc()
return fmt.Errorf("unexpected error when creating gzip writer: %s", err)
}
w = zw
@@ -363,39 +344,29 @@ func (im *Importer) Import(tsBatch []*TimeSeries) error {
for _, ts := range tsBatch {
n, err := ts.write(bw)
if err != nil {
im.importRequestsErrorsTotal.Inc()
return fmt.Errorf("write err: %w", err)
}
totalBytes += n
totalSamples += len(ts.Values)
}
if err := bw.Flush(); err != nil {
im.importRequestsErrorsTotal.Inc()
return err
}
if closer, ok := w.(io.Closer); ok {
err := closer.Close()
if err != nil {
im.importRequestsErrorsTotal.Inc()
return err
}
}
if err := pw.Close(); err != nil {
im.importRequestsErrorsTotal.Inc()
return err
}
requestErr := <-errCh
if requestErr != nil {
im.importRequestsErrorsTotal.Inc()
im.importDuration.UpdateDuration(startTime)
return fmt.Errorf("import request error for %q: %w", im.addr, requestErr)
}
im.importSamplesTotal.Add(totalSamples)
im.importBytesTotal.Add(totalBytes)
im.importDuration.UpdateDuration(startTime)
im.s.Lock()
im.s.bytes += uint64(totalBytes)
im.s.samples += uint64(totalSamples)

View File

@@ -9,8 +9,6 @@ import (
"sync"
"time"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
@@ -84,19 +82,13 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
if !prompt(ctx, question) {
return nil
}
migrationTenantsTotal.Set(uint64(len(tenants)))
}
for _, tenantID := range tenants {
err := p.runBackfilling(ctx, tenantID, ranges)
if err != nil {
migrationErrorsTotal.Inc()
return fmt.Errorf("migration failed: %s", err)
}
if p.interCluster {
migrationTenantsProcessed.Inc()
}
}
log.Println("Import finished!")
@@ -164,7 +156,6 @@ func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcU
p.s.bytes += uint64(written)
p.s.requests++
p.s.Unlock()
migrationBytesTransferredTotal.AddInt64(written)
if err := pw.Close(); err != nil {
return err
@@ -208,7 +199,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
var foundSeriesMsg string
var requestsToMake int
var metricsMap = map[string][][]time.Time{
var metrics = map[string][][]time.Time{
"": ranges,
}
@@ -220,11 +211,11 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
if !p.disablePerMetricRequests {
format = fmt.Sprintf(nativeWithBackoffTpl, barPrefix)
metricsMap, err = p.explore(ctx, p.src, tenantID, ranges)
metrics, err = p.explore(ctx, p.src, tenantID, ranges)
if err != nil {
return fmt.Errorf("failed to explore metric names: %s", err)
}
if len(metricsMap) == 0 {
if len(metrics) == 0 {
errMsg := "no metrics found"
if tenantID != "" {
errMsg = fmt.Sprintf("%s for tenant id: %s", errMsg, tenantID)
@@ -232,14 +223,10 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
log.Println(errMsg)
return nil
}
for _, m := range metricsMap {
for _, m := range metrics {
requestsToMake += len(m)
}
foundSeriesMsg = fmt.Sprintf("Found %d unique metric names to import. Total import/export requests to make %d", len(metricsMap), requestsToMake)
migrationMetricsTotal.Add(len(metricsMap))
} else {
requestsToMake = len(ranges)
foundSeriesMsg = fmt.Sprintf("Found %d unique metric names to import. Total import/export requests to make %d", len(metrics), requestsToMake)
}
if !p.interCluster {
@@ -253,7 +240,6 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
log.Print(foundSeriesMsg)
}
migrationRequestsPlanned.Add(requestsToMake)
bar := barpool.NewSingleProgress(format, requestsToMake)
bar.Start()
defer bar.Finish()
@@ -262,7 +248,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
errCh := make(chan error, p.cc)
var wg sync.WaitGroup
for range p.cc {
for i := 0; i < p.cc; i++ {
wg.Go(func() {
for f := range filterCh {
if !p.disablePerMetricRequests {
@@ -277,13 +263,12 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
return
}
}
migrationRequestsCompleted.Inc()
}
})
}
// any error breaks the import
for mName, mRanges := range metricsMap {
for mName, mRanges := range metrics {
match, err := buildMatchWithFilter(p.filter.Match, mName)
if err != nil {
logger.Errorf("failed to build filter %q for metric name %q: %s", p.filter.Match, mName, err)
@@ -303,9 +288,6 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
}:
}
}
if !p.disablePerMetricRequests {
migrationMetricsProcessed.Inc()
}
}
close(filterCh)
@@ -414,18 +396,3 @@ func buildMatchWithFilter(filter string, metricName string) (string, error) {
match := "{" + strings.Join(filters, " or ") + "}"
return match, nil
}
var (
migrationMetricsTotal = metrics.NewCounter(`vmctl_vm_native_migration_metrics_total`)
migrationMetricsProcessed = metrics.NewCounter(`vmctl_vm_native_migration_metrics_processed`)
migrationRequestsPlanned = metrics.NewCounter(`vmctl_vm_native_migration_requests_planned`)
migrationRequestsCompleted = metrics.NewCounter(`vmctl_vm_native_migration_requests_completed`)
migrationErrorsTotal = metrics.NewCounter(`vmctl_vm_native_migration_errors_total`)
migrationTenantsTotal = metrics.NewCounter(`vmctl_vm_native_migration_tenants_total`)
migrationTenantsProcessed = metrics.NewCounter(`vmctl_vm_native_migration_tenants_processed`)
migrationBytesTransferredTotal = metrics.NewCounter(`vmctl_vm_native_migration_bytes_transferred_total`)
)
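The runBackfilling code above fans export/import filters out to a fixed pool of workers over a channel and collects failures on a buffered error channel. A minimal standalone sketch of that pattern (worker count and payloads invented):

package main

import (
	"fmt"
	"sync"
)

func main() {
	filterCh := make(chan string)
	errCh := make(chan error, 4)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for f := range filterCh {
				// the first error stops this worker; remaining filters
				// are still drained by the other workers
				if err := processFilter(f); err != nil {
					errCh <- err
					return
				}
			}
		}()
	}
	for _, f := range []string{`{__name__="a"}`, `{__name__="b"}`} {
		filterCh <- f
	}
	close(filterCh)
	wg.Wait()
	close(errCh)
	for err := range errCh {
		fmt.Println("migration error:", err)
	}
}

func processFilter(f string) error {
	fmt.Println("migrating", f)
	return nil
}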

View File

@@ -182,7 +182,6 @@ func (ctx *InsertCtx) WriteMetadata(mmpbs []prompb.MetricMetadata) error {
mm.Type = mmpb.Type
mm.Unit = bytesutil.ToUnsafeBytes(mmpb.Unit)
}
ctx.mms = mms
err := vmstorage.AddMetadataRows(mms)
if err != nil {
@@ -207,7 +206,6 @@ func (ctx *InsertCtx) WritePromMetadata(mmps []prometheus.Metadata) error {
mm.Help = bytesutil.ToUnsafeBytes(mmpb.Help)
mm.Type = mmpb.Type
}
ctx.mms = mms
err := vmstorage.AddMetadataRows(mms)
if err != nil {

View File

@@ -55,7 +55,7 @@ var (
deduplicator *streamaggr.Deduplicator
)
// CheckStreamAggrConfig checks config pointed by -streamaggr.config
// CheckStreamAggrConfig checks config pointed by -streamaggr.config
func CheckStreamAggrConfig() error {
if *streamAggrConfig == "" {
return nil

View File

@@ -45,14 +45,15 @@ func insertRows(sketches []*datadogsketches.Sketch, extraLabels []prompb.Label)
ms := sketch.ToSummary()
for _, m := range ms {
ctx.Labels = ctx.Labels[:0]
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10557
ctx.AddLabel("host", sketch.Host) // newly added
ctx.AddLabel("", m.Name)
for _, label := range m.Labels {
ctx.AddLabel(label.Name, label.Value)
}
for _, tag := range sketch.Tags {
name, value := datadogutil.SplitTag(tag)
if name == "host" {
name = "exported_host"
}
ctx.AddLabel(name, value)
}
for j := range extraLabels {
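The hunk above first attaches the sketch-level host as a label, then renames any host tag coming from the tag list to exported_host, so the two can never collide. A toy illustration (splitTag approximates datadogutil.SplitTag; inputs invented):

package main

import (
	"fmt"
	"strings"
)

// splitTag splits a DataDog-style "name:value" tag.
func splitTag(tag string) (string, string) {
	name, value, _ := strings.Cut(tag, ":")
	return name, value
}

func main() {
	labels := map[string]string{"host": "agent-reported-host"}
	for _, tag := range []string{"host:tag-host", "env:prod"} {
		name, value := splitTag(tag)
		if name == "host" {
			// avoid clobbering the sketch-level host label
			name = "exported_host"
		}
		labels[name] = value
	}
	fmt.Println(labels) // map[env:prod exported_host:tag-host host:agent-reported-host]
}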

View File

@@ -77,7 +77,7 @@ func push(ctx *common.InsertCtx, tss []prompb.TimeSeries) {
r := &ts.Samples[i]
metricNameRaw, err = ctx.WriteDataPointExt(metricNameRaw, ctx.Labels, r.Timestamp, r.Value)
if err != nil {
logger.Errorf("cannot write promscrape data to storage: %s", err)
logger.Errorf("cannot write promscape data to storage: %s", err)
return
}
}

View File

@@ -30,7 +30,6 @@ var (
concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce restore duration")
maxBytesPerSecond = flagutil.NewBytes("maxBytesPerSecond", 0, "The maximum download speed. There is no limit if it is set to 0")
skipBackupCompleteCheck = flag.Bool("skipBackupCompleteCheck", false, "Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file")
SkipPreallocation = flag.Bool("skipFilePreallocation", false, "Whether to skip pre-allocated files. This will likely be slower in most cases, but allows restores to resume mid file on failure")
)
func main() {
@@ -64,7 +63,6 @@ func main() {
Src: srcFS,
Dst: dstFS,
SkipBackupCompleteCheck: *skipBackupCompleteCheck,
SkipPreallocation: *SkipPreallocation,
}
pushmetrics.Init()
if err := a.Run(ctx); err != nil {

View File

@@ -142,7 +142,7 @@ type aggrStatePercentile struct {
func newAggrStatePercentile(pointsLen int, n float64) aggrState {
hs := make([]*histogram.Fast, pointsLen)
for i := range pointsLen {
for i := 0; i < pointsLen; i++ {
hs[i] = histogram.NewFast()
}
return &aggrStatePercentile{

View File

@@ -9,7 +9,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
@@ -50,7 +49,7 @@ func (ec *evalConfig) newTimestamps(step int64) []int64 {
pointsLen := ec.pointsLen(step)
timestamps := make([]int64, pointsLen)
ts := ec.startTime
for i := range pointsLen {
for i := 0; i < pointsLen; i++ {
timestamps[i] = ts
ts += step
}
@@ -197,17 +196,12 @@ func newNextSeriesForSearchQuery(ec *evalConfig, sq *storage.SearchQuery, expr g
pathExpression: safePathExpression(expr),
}
s.summarize(aggrAvg, ec.startTime, ec.endTime, ec.storageStep, 0)
// A negative or zero duration will cause timer.C to return immediately
remainingTimeout := ec.deadline.Deadline() - fasttime.UnixTimestamp()
t := timerpool.Get(time.Duration(remainingTimeout) * time.Second)
t := timerpool.Get(30 * time.Second)
defer timerpool.Put(t)
select {
case seriesCh <- s:
case <-t.C:
logger.Errorf("reached timeout when processing the %s (full query: %s), it can be due to the amount of storageNodes configured in vmselect is more than vmselects available CPU count "+
"or vmselect is heavy loaded. Consider adding resources or increasing `-search.maxQueryDuration` or `timeout` parameter in the query.",
logger.Errorf("resource leak when processing the %s (full query: %s); please report this error to VictoriaMetrics developers",
expr.AppendString(nil), ec.originalQuery)
}
return nil
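The removed lines derive the timer from the query's absolute deadline instead of a fixed 30 seconds; as the comment notes, a zero or negative remainder makes the timer fire immediately, which is exactly right for an already-expired deadline. A toy standalone version using the standard library timer (timerpool is repo-internal):

package main

import (
	"fmt"
	"time"
)

func main() {
	// remaining may be <= 0 if the deadline has already passed,
	// in which case the timer fires at once.
	deadline := time.Now().Add(100 * time.Millisecond)
	remaining := time.Until(deadline)

	t := time.NewTimer(remaining)
	defer t.Stop()

	results := make(chan string) // nobody sends: simulates a stuck consumer
	select {
	case r := <-results:
		fmt.Println("delivered:", r)
	case <-t.C:
		fmt.Println("timed out after", remaining.Round(time.Millisecond))
	}
}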

View File

@@ -25,7 +25,7 @@ func naturalLess(a, b string) bool {
}
func getNonNumPrefix(s string) (prefix string, tail string) {
for i := range len(s) {
for i := 0; i < len(s); i++ {
ch := s[i]
if ch >= '0' && ch <= '9' {
return s[:i], s[i:]

View File

@@ -82,7 +82,7 @@ func RenderHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
if s := r.FormValue("maxDataPoints"); len(s) > 0 {
n, err := strconv.ParseFloat(s, 64)
if err != nil {
return fmt.Errorf("cannot parse maxDataPoints=%d: %w", maxDataPoints, err)
return fmt.Errorf("cannot parse maxDataPoints=%q: %w", maxDataPoints, err)
}
if n <= 0 {
return fmt.Errorf("maxDataPoints must be greater than 0; got %f", n)
@@ -209,7 +209,7 @@ func parseInterval(s string) (int64, error) {
s = strings.TrimSpace(s)
prefix := s
var suffix string
for i := range len(s) {
for i := 0; i < len(s); i++ {
ch := s[i]
if ch != '-' && ch != '+' && ch != '.' && (ch < '0' || ch > '9') {
prefix = s[:i]

View File

@@ -1228,7 +1228,7 @@ func transformDelay(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, er
stepsLocal = len(values)
}
copy(values[stepsLocal:], values[:len(values)-stepsLocal])
for i := range stepsLocal {
for i := 0; i < stepsLocal; i++ {
values[i] = nan
}
}
@@ -1740,7 +1740,7 @@ func transformGroup(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFunc, er
func groupSeriesLists(ec *evalConfig, args []*graphiteql.ArgExpr, expr graphiteql.Expr) (nextSeriesFunc, error) {
var nextSeriess []nextSeriesFunc
for i := range args {
for i := 0; i < len(args); i++ {
nextSeries, err := evalSeriesList(ec, args, "seriesList", i)
if err != nil {
for _, f := range nextSeriess {
@@ -3233,7 +3233,7 @@ func transformSeriesByTag(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSeriesFu
return nil, fmt.Errorf("at least one tagExpression must be passed to seriesByTag")
}
var tagExpressions []string
for i := range args {
for i := 0; i < len(args); i++ {
te, err := getString(args, "tagExpressions", i)
if err != nil {
return nil, err
@@ -3633,7 +3633,7 @@ var graphiteToGolangRe = regexp.MustCompile(`\\(\d+)`)
func getNodes(args []*graphiteql.ArgExpr) ([]graphiteql.Expr, error) {
var nodes []graphiteql.Expr
for i := range args {
for i := 0; i < len(args); i++ {
expr := args[i].Expr
switch expr.(type) {
case *graphiteql.NumberExpr, *graphiteql.StringExpr:
@@ -4052,7 +4052,7 @@ func formatPathsFromSeriesExpressions(seriesExpressions []string, sortPaths bool
func newNaNSeries(ec *evalConfig, step int64) *series {
values := make([]float64, ec.pointsLen(step))
for i := range values {
for i := 0; i < len(values); i++ {
values[i] = nan
}
return &series{
@@ -5244,7 +5244,7 @@ func transformLinearRegression(ec *evalConfig, fe *graphiteql.FuncExpr) (nextSer
func linearRegressionForSeries(ec *evalConfig, fe *graphiteql.FuncExpr, ss, sourceSeries []*series) (nextSeriesFunc, error) {
var resp []*series
for i := range ss {
for i := 0; i < len(ss); i++ {
source := sourceSeries[i]
s := ss[i]
s.Tags["linearRegressions"] = fmt.Sprintf("%d, %d", ec.startTime/1e3, ec.endTime/1e3)
@@ -5258,7 +5258,7 @@ func linearRegressionForSeries(ec *evalConfig, fe *graphiteql.FuncExpr, ss, sour
continue
}
values := s.Values
for j := range values {
for j := 0; j < len(values); j++ {
values[j] = offset + (float64(int(s.Timestamps[0])+j*int(s.step)))*factor
}
resp = append(resp, s)
@@ -5370,7 +5370,7 @@ func holtWinterConfidenceBands(ec *evalConfig, fe *graphiteql.FuncExpr, args []*
valuesLen := len(forecastValues)
upperBand := make([]float64, 0, valuesLen)
lowerBand := make([]float64, 0, valuesLen)
for i := range valuesLen {
for i := 0; i < valuesLen; i++ {
forecastItem := forecastValues[i]
deviationItem := deviationValues[i]
if math.IsNaN(forecastItem) || math.IsNaN(deviationItem) {
@@ -5464,7 +5464,7 @@ func transformHoltWintersAberration(ec *evalConfig, fe *graphiteql.FuncExpr) (ne
return nil, fmt.Errorf("bug, len mismatch for series: %d and upperBand values: %d or lowerBand values: %d", len(values), len(upperBand), len(lowerBand))
}
aberration := make([]float64, 0, len(values))
for i := range values {
for i := 0; i < len(values); i++ {
v := values[i]
upperValue := upperBand[i]
lowerValue := lowerBand[i]

View File

@@ -280,7 +280,7 @@ func isMetricExprChar(ch byte) bool {
}
func appendEscapedIdent(dst []byte, s string) []byte {
for i := range len(s) {
for i := 0; i < len(s); i++ {
ch := s[i]
if isIdentChar(ch) || isMetricExprChar(ch) {
if i == 0 && !isFirstIdentChar(ch) {

View File

@@ -321,23 +321,19 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
case "/tags/tagSeries":
graphiteTagsTagSeriesRequests.Inc()
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("graphite tag registration has been disabled and is planned to be removed in future. " +
"See: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10544"),
StatusCode: http.StatusNotImplemented,
if err := graphite.TagsTagSeriesHandler(startTime, w, r); err != nil {
graphiteTagsTagSeriesErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
graphiteTagsTagSeriesErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
case "/tags/tagMultiSeries":
graphiteTagsTagMultiSeriesRequests.Inc()
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("graphite tag registration has been disabled and is planned to be removed in future. " +
"See: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10544"),
StatusCode: http.StatusNotImplemented,
if err := graphite.TagsTagMultiSeriesHandler(startTime, w, r); err != nil {
graphiteTagsTagMultiSeriesErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
}
graphiteTagsTagMultiSeriesErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
return true
case "/tags":
graphiteTagsRequests.Inc()
@@ -743,26 +739,6 @@ func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request, path string) {
req := r.Clone(r.Context())
req.URL.Path = strings.TrimPrefix(path, "prometheus")
req.Host = vmalertProxyHost
if strings.HasPrefix(r.Header.Get(`User-Agent`), `Grafana`) {
// Grafana currently supports only Prometheus-style alerts. If other alert types
// (e.g. logs or traces) are returned, it may fail with "Error loading alerts".
//
// Grafana queries the vmalert API directly, bypassing the VictoriaMetrics datasource,
// so query params (such as datasource_type) cannot be enforced on the Grafana side.
//
// To ensure compatibility, we detect Grafana requests via the User-Agent and enforce
// `datasource_type=prometheus`.
//
// See:
// - https://github.com/VictoriaMetrics/victoriametrics-datasource/issues/329#issuecomment-3847585443
// - https://github.com/VictoriaMetrics/victoriametrics-datasource/issues/59
q := req.URL.Query()
q.Set("datasource_type", "prometheus")
req.URL.RawQuery = q.Encode()
req.RequestURI = ""
}
vmalertProxy.ServeHTTP(w, req)
}
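The removed Grafana handling boils down to one request mutation before proxying; a compact sketch of the same idea (the enforcePrometheusType helper name is ours):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// enforcePrometheusType forces datasource_type=prometheus onto requests
// whose User-Agent marks them as coming from Grafana, mirroring the
// removed proxy logic above.
func enforcePrometheusType(req *http.Request) {
	if !strings.HasPrefix(req.Header.Get("User-Agent"), "Grafana") {
		return
	}
	q := req.URL.Query()
	q.Set("datasource_type", "prometheus")
	req.URL.RawQuery = q.Encode()
	req.RequestURI = "" // client-side requests must carry an empty RequestURI
}

func main() {
	req := &http.Request{URL: &url.URL{Path: "/api/v1/rules"}, Header: http.Header{}}
	req.Header.Set("User-Agent", "Grafana/10.0")
	enforcePrometheusType(req)
	fmt.Println(req.URL.String()) // /api/v1/rules?datasource_type=prometheus
}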

View File

@@ -5,8 +5,6 @@ import (
"errors"
"flag"
"fmt"
"math"
"slices"
"sort"
"sync"
"sync/atomic"
@@ -492,7 +490,10 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
}
// Prepare worker channels.
workers := max(min(len(upws), gomaxprocs), 1)
workers := min(len(upws), gomaxprocs)
if workers < 1 {
workers = 1
}
itemsPerWorker := (len(upws) + workers - 1) / workers
workChs := make([]chan *unpackWork, workers)
for i := range workChs {
@@ -577,7 +578,6 @@ func mergeSortBlocks(dst *Result, sbh *sortBlocksHeap, dedupInterval int64) {
return
}
heap.Init(sbh)
var dedupSamples int
for {
sbs := sbh.sbs
top := sbs[0]
@@ -593,7 +593,6 @@ func mergeSortBlocks(dst *Result, sbh *sortBlocksHeap, dedupInterval int64) {
if n := equalSamplesPrefix(top, sbNext); n > 0 && dedupInterval > 0 {
// Skip n replicated samples at top if deduplication is enabled.
top.NextIdx = topNextIdx + n
dedupSamples += n
} else {
// Copy samples from top to dst with timestamps not exceeding tsNext.
top.NextIdx = topNextIdx + binarySearchTimestamps(top.Timestamps[topNextIdx:], tsNext)
@@ -608,8 +607,8 @@ func mergeSortBlocks(dst *Result, sbh *sortBlocksHeap, dedupInterval int64) {
}
}
timestamps, values := storage.DeduplicateSamples(dst.Timestamps, dst.Values, dedupInterval)
dedupSamples += len(dst.Timestamps) - len(timestamps)
dedupsDuringSelect.Add(dedupSamples)
dedups := len(dst.Timestamps) - len(timestamps)
dedupsDuringSelect.Add(dedups)
dst.Timestamps = timestamps
dst.Values = values
}
@@ -635,7 +634,7 @@ func equalTimestampsPrefix(a, b []int64) int {
func equalValuesPrefix(a, b []float64) int {
for i, v := range a {
if i >= len(b) || math.Float64bits(v) != math.Float64bits(b[i]) {
if i >= len(b) || v != b[i] {
return i
}
}
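The removed math.Float64bits comparison matters because plain == mishandles NaN values (such as Prometheus staleness markers) and conflates +0 with -0. A quick demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	// With ==, NaN never equals itself, so a NaN-bearing prefix would
	// never be treated as a duplicate; bitwise comparison fixes that.
	fmt.Println(nan == nan)                                     // false
	fmt.Println(math.Float64bits(nan) == math.Float64bits(nan)) // true
	// Bitwise comparison also distinguishes +0 from -0.
	negZero := math.Copysign(0, -1)
	fmt.Println(negZero == 0)                                     // true
	fmt.Println(math.Float64bits(negZero) == math.Float64bits(0)) // false
}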
@@ -830,7 +829,12 @@ func GraphiteTags(qt *querytracer.Tracer, filter string, limit int, deadline sea
}
func hasString(a []string, s string) bool {
return slices.Contains(a, s)
for _, x := range a {
if x == s {
return true
}
}
return false
}
// LabelValues returns label values matching the given labelName and sq until the given deadline.

View File

@@ -1,11 +1,8 @@
package netstorage
import (
"math"
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
)
func TestMergeSortBlocks(t *testing.T) {
@@ -197,111 +194,3 @@ func TestMergeSortBlocks(t *testing.T) {
Values: []float64{7, 24, 26},
})
}
func TestEqualSamplesPrefix(t *testing.T) {
f := func(a, b *sortBlock, expected int) {
t.Helper()
actual := equalSamplesPrefix(a, b)
if actual != expected {
t.Fatalf("unexpected result: got %d, want %d", actual, expected)
}
}
// Empty blocks
f(&sortBlock{}, &sortBlock{}, 0)
// Identical blocks
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
}, &sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
}, 4)
// Non-zero NextIdx
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
NextIdx: 2,
}, &sortBlock{
Timestamps: []int64{10, 20, 3, 4},
Values: []float64{50, 60, 7, 8},
NextIdx: 2,
}, 2)
// Non-zero NextIdx with mismatch
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
NextIdx: 1,
}, &sortBlock{
Timestamps: []int64{10, 2, 3, 4},
Values: []float64{50, 6, 7, 80},
NextIdx: 1,
}, 2)
// Different lengths
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
}, &sortBlock{
Timestamps: []int64{1, 2, 3},
Values: []float64{5, 6, 7},
}, 3)
// Timestamps diverge
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
}, &sortBlock{
Timestamps: []int64{1, 2, 30, 4},
Values: []float64{5, 6, 7, 8},
}, 2)
// Values diverge
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
}, &sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 60, 7, 8},
}, 1)
// Zero matches
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, 6, 7, 8},
}, &sortBlock{
Timestamps: []int64{5, 6, 7, 8},
Values: []float64{1, 2, 3, 4},
}, 0)
// Compare staleness markers, matching
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, decimal.StaleNaN, 7, 8},
}, &sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{5, decimal.StaleNaN, 7, 8},
}, 4)
// Special float values: +Inf, -Inf, 0, -0
f(&sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{math.Inf(1), math.Inf(-1), math.Copysign(0, +1), math.Copysign(0, -1)},
}, &sortBlock{
Timestamps: []int64{1, 2, 3, 4},
Values: []float64{math.Inf(1), math.Inf(-1), math.Copysign(0, +1), math.Copysign(0, -1)},
}, 4)
// Positive zero vs negative zero (bitwise different)
f(&sortBlock{
Timestamps: []int64{1, 2},
Values: []float64{5, math.Copysign(0, +1)},
}, &sortBlock{
Timestamps: []int64{1, 2},
Values: []float64{5, math.Copysign(0, -1)},
}, 1)
}

View File

@@ -10,14 +10,14 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
b.Run(fmt.Sprintf("replicationFactor-%d", replicationFactor), func(b *testing.B) {
const samplesPerBlock = 8192
var blocks []*sortBlock
for j := range 10 {
for j := 0; j < 10; j++ {
timestamps := make([]int64, samplesPerBlock)
values := make([]float64, samplesPerBlock)
for i := range timestamps {
timestamps[i] = int64(j*samplesPerBlock + i)
values[i] = float64(j*samplesPerBlock + i)
}
for range replicationFactor {
for i := 0; i < replicationFactor; i++ {
blocks = append(blocks, &sortBlock{
Timestamps: timestamps,
Values: values,
@@ -30,7 +30,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
b.Run("overlapped-blocks-bestcase", func(b *testing.B) {
const samplesPerBlock = 8192
var blocks []*sortBlock
for j := range 10 {
for j := 0; j < 10; j++ {
timestamps := make([]int64, samplesPerBlock)
values := make([]float64, samplesPerBlock)
for i := range timestamps {
@@ -45,7 +45,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
for j := 1; j < len(blocks); j++ {
prev := blocks[j-1].Timestamps
curr := blocks[j].Timestamps
for i := range samplesPerBlock / 2 {
for i := 0; i < samplesPerBlock/2; i++ {
prev[i+samplesPerBlock/2], curr[i] = curr[i], prev[i+samplesPerBlock/2]
}
}
@@ -54,7 +54,7 @@ func BenchmarkMergeSortBlocks(b *testing.B) {
b.Run("overlapped-blocks-worstcase", func(b *testing.B) {
const samplesPerBlock = 8192
var blocks []*sortBlock
for j := range 5 {
for j := 0; j < 5; j++ {
timestamps := make([]int64, samplesPerBlock)
values := make([]float64, samplesPerBlock)
for i := range timestamps {

View File

@@ -6,13 +6,11 @@ import (
"math"
"net/http"
"runtime"
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/metricsql"
@@ -529,14 +527,6 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName s
return err
}
sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxLabelsAPISeries)
if strings.HasPrefix(labelName, "U__") {
// This label seems to be Unicode-encoded according to the Prometheus spec.
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
// Spec: https://github.com/prometheus/proposals/blob/main/proposals/0028-utf8.md
labelName = unescapePrometheusLabelName(labelName)
}
labelValues, err := netstorage.LabelValues(qt, labelName, sq, limit, cp.deadline)
if err != nil {
return fmt.Errorf("cannot obtain values for label %q: %w", labelName, err)
@@ -1014,7 +1004,14 @@ func removeEmptyValuesAndTimeseries(tss []netstorage.Result) []netstorage.Result
dst := tss[:0]
for i := range tss {
ts := &tss[i]
if !slices.ContainsFunc(ts.Values, math.IsNaN) {
hasNaNs := false
for _, v := range ts.Values {
if math.IsNaN(v) {
hasNaNs = true
break
}
}
if !hasNaNs {
// Fast path: nothing to remove.
if len(ts.Values) > 0 {
dst = append(dst, *ts)
@@ -1339,70 +1336,3 @@ func calculateMaxUniqueTimeSeriesForResource(maxConcurrentRequests, remainingMem
func GetMaxUniqueTimeSeries() int {
return maxUniqueTimeseriesValue
}
// copied from https://github.com/prometheus/common/blob/adea6285c1c7447fcb7bfdeb6abfc6eff893e0a7/model/metric.go#L483
// importing it directly is avoided, since that would increase the binary size
func unescapePrometheusLabelName(name string) string {
// lower function taken from strconv.atoi.
lower := func(c byte) byte {
return c | ('x' - 'X')
}
if len(name) == 0 {
return name
}
escapedName, found := strings.CutPrefix(name, "U__")
if !found {
return name
}
var unescaped strings.Builder
TOP:
for i := 0; i < len(escapedName); i++ {
// All non-underscores are treated normally.
if escapedName[i] != '_' {
unescaped.WriteByte(escapedName[i])
continue
}
i++
if i >= len(escapedName) {
return name
}
// A double underscore is a single underscore.
if escapedName[i] == '_' {
unescaped.WriteByte('_')
continue
}
// We think we are in a UTF-8 code, process it.
var utf8Val uint
for j := 0; i < len(escapedName); j++ {
// This is too many characters for a utf8 value based on the MaxRune
// value of '\U0010FFFF'.
if j >= 6 {
return name
}
// Found a closing underscore, convert to a rune, check validity, and append.
if escapedName[i] == '_' {
utf8Rune := rune(utf8Val)
if !utf8.ValidRune(utf8Rune) {
return name
}
unescaped.WriteRune(utf8Rune)
continue TOP
}
r := lower(escapedName[i])
utf8Val *= 16
switch {
case r >= '0' && r <= '9':
utf8Val += uint(r) - '0'
case r >= 'a' && r <= 'f':
utf8Val += uint(r) - 'a' + 10
default:
return name
}
i++
}
// Didn't find closing underscore, invalid.
return name
}
return unescaped.String()
}
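Assuming the decoder above is in scope (same package, fmt imported), a few illustrative inputs and their expected results (examples are ours, not from the test suite):

func exampleUnescape() {
	// 0x2e is the hex code for '.'
	fmt.Println(unescapePrometheusLabelName("U__foo_2e_bar")) // foo.bar
	// a double underscore decodes to a literal underscore
	fmt.Println(unescapePrometheusLabelName("U__a__b")) // a_b
	// malformed escapes fall back to returning the input unchanged
	fmt.Println(unescapePrometheusLabelName("U__a_zz_b")) // U__a_zz_b
}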

View File

@@ -742,7 +742,7 @@ func getRangeTopKTimeseries(tss []*timeseries, modifier *metricsql.ModifierExpr,
func reverseSeries(tss []*timeseries) {
j := len(tss)
for i := range len(tss) / 2 {
for i := 0; i < len(tss)/2; i++ {
j--
tss[i], tss[j] = tss[j], tss[i]
}
@@ -983,7 +983,7 @@ func getPerPointIQRBounds(tss []*timeseries) ([]float64, []float64) {
var qs []float64
lower := make([]float64, pointsLen)
upper := make([]float64, pointsLen)
for i := range pointsLen {
for i := 0; i < pointsLen; i++ {
values = values[:0]
for _, ts := range tss {
v := ts.Values[i]

View File

@@ -53,7 +53,7 @@ func TestIncrementalAggr(t *testing.T) {
Values: valuesExpected,
}}
// run the test multiple times to make sure there are no side effects on concurrency
for i := range 10 {
for i := 0; i < 10; i++ {
iafc := newIncrementalAggrFuncContext(ae, callbacks)
tssSrcCopy := copyTimeseries(tssSrc)
if err := testIncrementalParallelAggr(iafc, tssSrcCopy, tssExpected); err != nil {

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"math"
"regexp"
"slices"
"sort"
"strings"
"sync"
@@ -1166,61 +1165,6 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
},
}
return evalExpr(qt, ec, be)
// the cached rate result could be inaccurate in edge cases, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10098
case "rate":
if iafc != nil {
if !strings.EqualFold(iafc.ae.Name, "sum") {
qt.Printf("do not apply instant rollup optimization for incremental aggregate %s()", iafc.ae.Name)
return evalAt(qt, timestamp, window)
}
qt.Printf("optimized calculation for sum(rate(m[d])) as (sum(increase(m[d])) / d)")
afe := expr.(*metricsql.AggrFuncExpr)
fe := afe.Args[0].(*metricsql.FuncExpr)
feIncrease := *fe
feIncrease.Name = "increase"
// copy RollupExpr to drop possible offset,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
newArg.Offset = nil
feIncrease.Args = []metricsql.Expr{newArg}
d := newArg.Window.Duration(ec.Step)
if d == 0 {
d = ec.Step
}
afeIncrease := *afe
afeIncrease.Args = []metricsql.Expr{&feIncrease}
be := &metricsql.BinaryOpExpr{
Op: "/",
KeepMetricNames: true,
Left: &afeIncrease,
Right: &metricsql.NumberExpr{
N: float64(d) / 1000,
},
}
return evalExpr(qt, ec, be)
}
qt.Printf("optimized calculation for instant rollup rate(m[d]) as (increase(m[d]) / d)")
fe := expr.(*metricsql.FuncExpr)
feIncrease := *fe
feIncrease.Name = "increase"
// copy RollupExpr to drop possible offset,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
newArg.Offset = nil
feIncrease.Args = []metricsql.Expr{newArg}
d := newArg.Window.Duration(ec.Step)
if d == 0 {
d = ec.Step
}
be := &metricsql.BinaryOpExpr{
Op: "/",
KeepMetricNames: fe.KeepMetricNames,
Left: &feIncrease,
Right: &metricsql.NumberExpr{
N: float64(d) / 1000,
},
}
return evalExpr(qt, ec, be)
case "max_over_time":
if iafc != nil {
if !strings.EqualFold(iafc.ae.Name, "max") {
@@ -1769,7 +1713,6 @@ func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName stri
return nil, err
}
defer rml.Put(uint64(rollupMemorySize))
qs.addMemoryUsage(rollupMemorySize)
qt.Printf("the rollup evaluation needs an estimated %d bytes of RAM for %d series and %d points per series (summary %d points)",
rollupMemorySize, timeseriesLen, pointsPerSeries, rollupPoints)
@@ -1991,7 +1934,14 @@ func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]flo
return values, timestamps
}
// Remove Prometheus staleness marks, so non-default rollup functions don't hit NaN values.
if !slices.ContainsFunc(values, decimal.IsStaleNaN) {
hasStaleSamples := false
for _, v := range values {
if decimal.IsStaleNaN(v) {
hasStaleSamples = true
break
}
}
if !hasStaleSamples {
// Fast path: values have no Prometheus staleness marks.
return values, timestamps
}
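For context, the rate() optimization removed earlier in this file relies on the identity rate(m[d]) = increase(m[d]) / d, with d in seconds, so sum(rate(m[5m])) can be evaluated as sum(increase(m[5m])) / 300. A toy numeric check of that identity:

package main

import "fmt"

func main() {
	const windowSeconds = 5 * 60 // the [5m] lookbehind window
	increase := 1200.0           // hypothetical counter increase over the window
	fmt.Println(increase / windowSeconds) // 4, i.e. the per-second rate
}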

View File

@@ -37,7 +37,7 @@ func Exec(qt *querytracer.Tracer, ec *EvalConfig, q string, isFirstPointOnly boo
if querystats.Enabled() {
startTime := time.Now()
defer func() {
querystats.RegisterQuery(q, ec.End-ec.Start, startTime, ec.QueryStats.memoryUsage())
querystats.RegisterQuery(q, ec.End-ec.Start, startTime)
ec.QueryStats.addExecutionTimeMsec(startTime)
}()
}
@@ -313,7 +313,7 @@ func escapeDots(s string) string {
return s
}
result := make([]byte, 0, len(s)+2*dotsCount)
for i := range len(s) {
for i := 0; i < len(s); i++ {
if s[i] == '.' && (i == 0 || s[i-1] != '\\') && (i+1 == len(s) || i+1 < len(s) && s[i+1] != '*' && s[i+1] != '+' && s[i+1] != '{') {
// Escape a dot if the following conditions are met:
// - if it isn't escaped already, i.e. if there is no `\` char before the dot.
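Given those conditions, escapeDots should behave roughly as the following table-driven test sketches (cases inferred from the comment above; the test itself is hypothetical):

package promql // hypothetical placement next to escapeDots

import "testing"

func TestEscapeDotsSketch(t *testing.T) {
	cases := map[string]string{
		`foo.bar`:  `foo\.bar`, // plain dot gets escaped
		`foo.*bar`: `foo.*bar`, // dot starting a regexp wildcard is kept
		`foo\.bar`: `foo\.bar`, // already-escaped dot is kept
	}
	for in, want := range cases {
		if got := escapeDots(in); got != want {
			t.Fatalf("escapeDots(%q)=%q; want %q", in, got, want)
		}
	}
}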

View File

@@ -67,7 +67,7 @@ func TestExecSuccess(t *testing.T) {
Deadline: searchutil.NewDeadline(time.Now(), time.Minute, ""),
RoundDigits: 100,
}
for range 5 {
for i := 0; i < 5; i++ {
result, err := Exec(nil, ec, q, false)
if err != nil {
t.Fatalf(`unexpected error when executing %q: %s`, q, err)
@@ -4018,12 +4018,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`histogram_fraction(scalar)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(123, 456, time())`
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`histogram_quantile(single-value-no-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_quantile(0.6, label_set(100, "foo", "bar"))`
@@ -4036,12 +4030,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-no-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(123,456, label_set(100, "foo", "bar"))`
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`histogram_quantile(single-value-invalid-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_quantile(0.6, label_set(100, "le", "foobar"))`
@@ -4054,12 +4042,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-invalid-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(50, 60, label_set(100, "le", "foobar"))`
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`histogram_quantile(single-value-inf-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_quantile(0.6, label_set(100, "le", "+Inf"))`
@@ -4201,28 +4183,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-valid-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(0, 100, label_set(100, "le", "200"))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.5, 0.5, 0.5, 0.5, 0.5, 0.5},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-valid-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(200, 300, label_set(100, "le", "200"))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0, 0, 0, 0, 0, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_quantile(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
t.Parallel()
q := `sort(histogram_quantile(0.6, label_set(100, "le", "200"), "foobar"))`
@@ -4252,7 +4212,7 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r1, r2, r3}
f(q, resultExpected)
})
t.Run(`histogram_share(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
t.Run(`histogram_share(single-value-valid-le, boundsLabel)`, func(t *testing.T) {
t.Parallel()
q := `sort(histogram_share(120, label_set(100, "le", "200"), "foobar"))`
r1 := netstorage.Result{
@@ -4351,37 +4311,7 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-valid-le-max-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(0,100, (
label_set(100, "le", "100"),
label_set(40, "le", "50"),
label_set(0, "le", "10"),
))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1, 1, 1, 1, 1, 1},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-valid-le-min-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(0,10, (
label_set(100, "le", "100"),
label_set(40, "le", "50"),
label_set(0, "le", "10"),
))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0, 0, 0, 0, 0, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_share(single-value-valid-le-mid-le-1)`, func(t *testing.T) {
t.Run(`histogram_share(single-value-valid-le-mid-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_share(105, (
label_set(100, "le", "200"),
@@ -4395,34 +4325,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_share(single-value-valid-le-mid-le-2)`, func(t *testing.T) {
t.Parallel()
q := `histogram_share(55, (
label_set(100, "le", "200"),
label_set(0, "le", "55"),
))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0, 0, 0, 0, 0, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(single-value-valid-le-mid-le)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(55,105, (
label_set(100, "le", "200"),
label_set(0, "le", "55"),
))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966, 0.3448275862068966},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_quantile(single-value-valid-le-min-phi-no-zero-bucket)`, func(t *testing.T) {
t.Parallel()
q := `histogram_quantile(0, label_set(100, "le", "200"))`
@@ -4456,17 +4358,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(scalar-phi)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(25, time() / 8, label_set(100, "le", "200"))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.5, 0.625, 0.75, 0.875, 0.875, 0.875},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_quantile(duplicate-le)`, func(t *testing.T) {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3225
t.Parallel()
@@ -4548,36 +4439,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r1, r2}
f(q, resultExpected)
})
t.Run(`histogram_fraction(valid)`, func(t *testing.T) {
t.Parallel()
q := `sort(histogram_fraction(0, 25,
label_set(90, "foo", "bar", "le", "10")
or label_set(100, "foo", "bar", "le", "30")
or label_set(300, "foo", "bar", "le", "+Inf")
or label_set(200, "tag", "xx", "le", "10")
or label_set(300, "tag", "xx", "le", "30")
))`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.325, 0.325, 0.325, 0.325, 0.325, 0.325},
Timestamps: timestampsExpected,
}
r1.MetricName.Tags = []storage.Tag{{
Key: []byte("foo"),
Value: []byte("bar"),
}}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666, 0.9166666666666666},
Timestamps: timestampsExpected,
}
r2.MetricName.Tags = []storage.Tag{{
Key: []byte("tag"),
Value: []byte("xx"),
}}
resultExpected := []netstorage.Result{r1, r2}
f(q, resultExpected)
})
t.Run(`histogram_quantile(negative-bucket-count)`, func(t *testing.T) {
t.Parallel()
q := `histogram_quantile(0.6,
@@ -4694,25 +4555,6 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_fraction(normal-bucket-count)`, func(t *testing.T) {
t.Parallel()
q := `histogram_fraction(22,35,
label_set(0, "foo", "bar", "le", "10")
or label_set(100, "foo", "bar", "le", "30")
or label_set(300, "foo", "bar", "le", "+Inf")
)`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333, 0.1333333333333333},
Timestamps: timestampsExpected,
}
r.MetricName.Tags = []storage.Tag{{
Key: []byte("foo"),
Value: []byte("bar"),
}}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_quantile(normal-bucket-count, boundsLabel)`, func(t *testing.T) {
t.Parallel()
q := `sort(histogram_quantile(0.2,
@@ -9985,7 +9827,7 @@ func TestExecError(t *testing.T) {
Deadline: searchutil.NewDeadline(time.Now(), time.Minute, ""),
RoundDigits: 100,
}
for range 4 {
for i := 0; i < 4; i++ {
rv, err := Exec(nil, ec, q, false)
if err == nil {
t.Fatalf(`expecting non-nil error on %q`, q)

View File

@@ -55,7 +55,7 @@ type parseCache struct {
func newParseCache() *parseCache {
pc := new(parseCache)
for i := range parseBucketCount {
for i := 0; i < parseBucketCount; i++ {
pc.buckets[i] = newParseBucket()
}
return pc
@@ -75,7 +75,7 @@ func (pc *parseCache) get(q string) *parseCacheValue {
func (pc *parseCache) requests() uint64 {
var n uint64
for i := range parseBucketCount {
for i := 0; i < parseBucketCount; i++ {
n += pc.buckets[i].requests.Load()
}
return n
@@ -83,7 +83,7 @@ func (pc *parseCache) requests() uint64 {
func (pc *parseCache) misses() uint64 {
var n uint64
for i := range parseBucketCount {
for i := 0; i < parseBucketCount; i++ {
n += pc.buckets[i].misses.Load()
}
return n
@@ -91,7 +91,7 @@ func (pc *parseCache) misses() uint64 {
func (pc *parseCache) len() uint64 {
var n uint64
for i := range parseBucketCount {
for i := 0; i < parseBucketCount; i++ {
n += pc.buckets[i].len()
}
return n

View File

@@ -17,7 +17,7 @@ func testGetParseCacheValue(q string) *parseCacheValue {
func testGenerateQueries(items int) []string {
queries := make([]string, items)
for i := range items {
for i := 0; i < items; i++ {
queries[i] = fmt.Sprintf(`node_time_seconds{instance="node%d", job="job%d"}`, i, i)
}
return queries
@@ -102,7 +102,7 @@ func TestParseCacheBucketOverflow(t *testing.T) {
v := testGetParseCacheValue(queries[0])
// Fill bucket
for i := range parseBucketMaxLen {
for i := 0; i < parseBucketMaxLen; i++ {
b.put(queries[i], v)
}
expectedLen = uint64(parseBucketMaxLen)

View File

@@ -15,7 +15,7 @@ func BenchmarkCachePutNoOverFlow(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := range items {
for i := 0; i < items; i++ {
pc.put(queries[i], v)
}
}
@@ -32,14 +32,14 @@ func BenchmarkCacheGetNoOverflow(b *testing.B) {
queries := testGenerateQueries(items)
v := testGetParseCacheValue(queries[0])
for i := range queries {
for i := 0; i < len(queries); i++ {
pc.put(queries[i], v)
}
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := range items {
for i := 0; i < items; i++ {
if v := pc.get(queries[i]); v == nil {
b.Errorf("unexpected nil value obtained from cache for query: %s ", queries[i])
}
@@ -59,7 +59,7 @@ func BenchmarkCachePutGetNoOverflow(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := range items {
for i := 0; i < items; i++ {
pc.put(queries[i], v)
if res := pc.get(queries[i]); res == nil {
b.Errorf("unexpected nil value obtained from cache for query: %s ", queries[i])
@@ -79,7 +79,7 @@ func BenchmarkCachePutOverflow(b *testing.B) {
queries := testGenerateQueries(items)
v := testGetParseCacheValue(queries[0])
for i := range parseCacheMaxLen {
for i := 0; i < parseCacheMaxLen; i++ {
c.put(queries[i], v)
}
@@ -105,7 +105,7 @@ func BenchmarkCachePutGetOverflow(b *testing.B) {
queries := testGenerateQueries(items)
v := testGetParseCacheValue(queries[0])
for i := range parseCacheMaxLen {
for i := 0; i < parseCacheMaxLen; i++ {
c.put(queries[i], v)
}
@@ -141,8 +141,8 @@ var testSimpleQueries = []string{
func BenchmarkParsePromQLWithCacheSimple(b *testing.B) {
b.ReportAllocs()
for range b.N {
for j := range testSimpleQueries {
for i := 0; i < b.N; i++ {
for j := 0; j < len(testSimpleQueries); j++ {
_, err := parsePromQLWithCache(testSimpleQueries[j])
if err != nil {
b.Errorf("unexpected error: %s", err)
@@ -155,7 +155,7 @@ func BenchmarkParsePromQLWithCacheSimpleParallel(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := range testSimpleQueries {
for i := 0; i < len(testSimpleQueries); i++ {
_, err := parsePromQLWithCache(testSimpleQueries[i])
if err != nil {
b.Errorf("unexpected error: %s", err)
@@ -210,8 +210,8 @@ var testComplexQueries = []string{
func BenchmarkParsePromQLWithCacheComplex(b *testing.B) {
b.ReportAllocs()
for range b.N {
for j := range testComplexQueries {
for i := 0; i < b.N; i++ {
for j := 0; j < len(testComplexQueries); j++ {
_, err := parsePromQLWithCache(testComplexQueries[j])
if err != nil {
b.Errorf("unexpected error: %s", err)
@@ -224,7 +224,7 @@ func BenchmarkParsePromQLWithCacheComplexParallel(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for i := range testComplexQueries {
for i := 0; i < len(testComplexQueries); i++ {
_, err := parsePromQLWithCache(testComplexQueries[i])
if err != nil {
b.Errorf("unexpected error: %s", err)

View File

@@ -13,8 +13,6 @@ type QueryStats struct {
ExecutionDuration atomic.Pointer[time.Duration]
// SeriesFetched contains the number of series fetched from storage or cache.
SeriesFetched atomic.Int64
// MemoryUsage contains the estimated memory consumption of the query
MemoryUsage atomic.Int64
at *auth.Token
@@ -55,17 +53,3 @@ func (qs *QueryStats) addExecutionTimeMsec(startTime time.Time) {
d := time.Since(startTime)
qs.ExecutionDuration.Store(&d)
}
func (qs *QueryStats) addMemoryUsage(memoryUsage int64) {
if qs == nil {
return
}
qs.MemoryUsage.Store(memoryUsage)
}
func (qs *QueryStats) memoryUsage() int64 {
if qs == nil {
return 0
}
return qs.MemoryUsage.Load()
}
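The removed accessors follow a nil-receiver-safe pattern: callers may hold a nil *QueryStats when stats collection is disabled, and the guard turns updates into no-ops. A minimal standalone sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

type QueryStats struct {
	MemoryUsage atomic.Int64
}

// addMemoryUsage is safe to call on a nil receiver: stats collection may
// be disabled, in which case the update silently becomes a no-op.
func (qs *QueryStats) addMemoryUsage(n int64) {
	if qs == nil {
		return
	}
	qs.MemoryUsage.Store(n)
}

func main() {
	var qs *QueryStats      // stats disabled
	qs.addMemoryUsage(1024) // safe no-op
	qs = &QueryStats{}
	qs.addMemoryUsage(2048)
	fmt.Println(qs.MemoryUsage.Load()) // 2048
}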

Some files were not shown because too many files have changed in this diff.