mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
Compare commits
29 Commits
flaky-vmau
...
weakpointe
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e9261be945 | ||
|
|
16a75129be | ||
|
|
68bdb5e4d3 | ||
|
|
4360d10962 | ||
|
|
ce9c868f59 | ||
|
|
212ce1baf0 | ||
|
|
1a091e5831 | ||
|
|
bac186fc65 | ||
|
|
15ce9e5e49 | ||
|
|
2c1596ea84 | ||
|
|
21d4f844ab | ||
|
|
da0002ce66 | ||
|
|
f35b9ed36d | ||
|
|
b4dc67cba6 | ||
|
|
70afdd0285 | ||
|
|
51efd2c32b | ||
|
|
1e208a8c79 | ||
|
|
e49027df8f | ||
|
|
a518a4a904 | ||
|
|
ad46fce7d4 | ||
|
|
7cc13ee1cc | ||
|
|
74fcd10d2e | ||
|
|
59007cda51 | ||
|
|
5869a39e7b | ||
|
|
c3c802a61c | ||
|
|
8b92af9d45 | ||
|
|
e313874d01 | ||
|
|
58a4e48901 | ||
|
|
16d75ab0bd |
23
.github/copilot-instructions.md
vendored
Normal file
23
.github/copilot-instructions.md
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# Project Overview
|
||||
|
||||
VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
|
||||
|
||||
## Folder Structure
|
||||
|
||||
- `/app`: Contains the compilable binaries.
|
||||
- `/lib`: Contains the golang reusable libraries
|
||||
- `/docs/victoriametrics`: Contains documentation for the project.
|
||||
- `/apptest/tests`: Contains integration tests.
|
||||
|
||||
## Libraries and Frameworks
|
||||
|
||||
- Backend: Golang, no framework. Use third-party libraries sparingly.
|
||||
- Frontend: React.
|
||||
|
||||
## Code review guidelines
|
||||
|
||||
Ensure the feature or bugfix includes a changelog entry in /docs/victoriametrics/changelog/CHANGELOG.md.
|
||||
Verify the entry is under the ## tip section and matches the structure and style of existing entries.
|
||||
Chore-only changes may be omitted from the changelog.
|
||||
|
||||
|
||||
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -59,7 +59,7 @@ jobs:
|
||||
arch: amd64
|
||||
steps:
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Go
|
||||
id: go
|
||||
|
||||
2
.github/workflows/codeql-analysis-go.yml
vendored
2
.github/workflows/codeql-analysis-go.yml
vendored
@@ -29,7 +29,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Go
|
||||
id: go
|
||||
|
||||
4
.github/workflows/docs.yaml
vendored
4
.github/workflows/docs.yaml
vendored
@@ -16,12 +16,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
path: __vm
|
||||
|
||||
- name: Checkout private code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
repository: VictoriaMetrics/vmdocs
|
||||
token: ${{ secrets.VM_BOT_GH_TOKEN }}
|
||||
|
||||
6
.github/workflows/test.yml
vendored
6
.github/workflows/test.yml
vendored
@@ -32,7 +32,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Go
|
||||
id: go
|
||||
@@ -71,7 +71,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Go
|
||||
id: go
|
||||
@@ -97,7 +97,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Go
|
||||
id: go
|
||||
|
||||
2
.github/workflows/vmui.yml
vendored
2
.github/workflows/vmui.yml
vendored
@@ -32,7 +32,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Code checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
|
||||
11
Makefile
11
Makefile
@@ -12,11 +12,12 @@ PKG_TAG := $(BUILDINFO_TAG)
|
||||
endif
|
||||
|
||||
EXTRA_DOCKER_TAG_SUFFIX ?=
|
||||
EXTRA_GO_BUILD_TAGS ?=
|
||||
|
||||
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
|
||||
TAR_OWNERSHIP ?= --owner=1000 --group=1000
|
||||
|
||||
GOLANGCI_LINT_VERSION := 2.2.1
|
||||
GOLANGCI_LINT_VERSION := 2.4.0
|
||||
|
||||
.PHONY: $(MAKECMDGOALS)
|
||||
|
||||
@@ -470,16 +471,16 @@ vendor-update:
|
||||
go mod vendor
|
||||
|
||||
app-local:
|
||||
CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-pure:
|
||||
CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-goos-goarch:
|
||||
CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-windows-goarch:
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
quicktemplate-gen: install-qtc
|
||||
qtc
|
||||
|
||||
@@ -209,7 +209,7 @@ func Init() {
|
||||
// In this case it is impossible to prevent from sending many duplicates of samples passed to TryPush() to all the configured -remoteWrite.url
|
||||
// if these samples couldn't be sent to the -remoteWrite.url with the disabled persistent queue. So it is better sending samples
|
||||
// to the remaining -remoteWrite.url and dropping them on the blocked queue.
|
||||
dropSamplesOnFailureGlobal = *dropSamplesOnOverload || disableOnDiskQueueAny && len(disableOnDiskQueues) > 1
|
||||
dropSamplesOnFailureGlobal = *dropSamplesOnOverload || disableOnDiskQueueAny && len(*remoteWriteURLs) > 1
|
||||
|
||||
dropDanglingQueues()
|
||||
|
||||
|
||||
@@ -28,8 +28,8 @@ var (
|
||||
"Defines how many retries to make before giving up on rule if request for it returns an error.")
|
||||
disableProgressBar = flag.Bool("replay.disableProgressBar", false, "Whether to disable rendering progress bars during the replay. "+
|
||||
"Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.")
|
||||
ruleEvaluationConcurrency = flag.Int("replay.ruleEvaluationConcurrency", 1, "The maximum number of concurrent `/query_range` requests for a single rule. "+
|
||||
"Increasing this value when replaying for a long time and a single request range is limited by `-replay.maxDatapointsPerQuery`.")
|
||||
ruleEvaluationConcurrency = flag.Int("replay.ruleEvaluationConcurrency", 1, "The maximum number of concurrent '/query_range' requests when replay recording rule or alerting rule with for=0. "+
|
||||
"Increasing this value when replaying for a long time, since each request is limited by -replay.maxDatapointsPerQuery.")
|
||||
)
|
||||
|
||||
func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewrite.RWClient) (totalRows, droppedRows int, err error) {
|
||||
|
||||
@@ -246,24 +246,33 @@ func TestReplay(t *testing.T) {
|
||||
|
||||
// multiple rules + rule concurrency + group concurrency
|
||||
f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, 3, 0, []config.Group{
|
||||
{Rules: []config.Rule{{Alert: "foo-group-single-concurrent", Expr: "sum(up) > 1"}, {Alert: "bar-group-single-concurrent", Expr: "max(up) < 1"}}, Concurrency: 2}}, &fakeReplayQuerier{
|
||||
{Rules: []config.Rule{{Alert: "foo-group-single-concurrent", For: promutil.NewDuration(30 * time.Second), Expr: "sum(up) > 1"}, {Alert: "bar-group-single-concurrent", Expr: "max(up) < 1"}}, Concurrency: 2}}, &fakeReplayQuerier{
|
||||
registry: map[string]map[string][]datasource.Metric{
|
||||
"sum(up) > 1": {
|
||||
"12:00:00+12:01:00": {},
|
||||
"12:01:00+12:02:00": {{
|
||||
Timestamps: []int64{1},
|
||||
"12:00:00+12:01:00": {{
|
||||
Timestamps: []int64{1609502460},
|
||||
Values: []float64{1},
|
||||
}},
|
||||
"12:01:00+12:02:00": {{
|
||||
Timestamps: []int64{1609502520},
|
||||
Values: []float64{1},
|
||||
}},
|
||||
"12:02:00+12:02:30": {{
|
||||
Timestamps: []int64{1609502580},
|
||||
Values: []float64{1},
|
||||
}},
|
||||
"12:02:00+12:02:30": {},
|
||||
},
|
||||
"max(up) < 1": {
|
||||
"12:00:00+12:01:00": {},
|
||||
"12:00:00+12:01:00": {{
|
||||
Timestamps: []int64{1609502460},
|
||||
Values: []float64{1},
|
||||
}},
|
||||
"12:01:00+12:02:00": {{
|
||||
Timestamps: []int64{1},
|
||||
Timestamps: []int64{1609502520},
|
||||
Values: []float64{1},
|
||||
}},
|
||||
"12:02:00+12:02:30": {},
|
||||
},
|
||||
},
|
||||
}, 4)
|
||||
}, 10)
|
||||
}
|
||||
|
||||
@@ -341,11 +341,15 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
|
||||
return []datasource.Metric{{Timestamps: []int64{0}, Values: []float64{math.NaN()}}}, nil
|
||||
}
|
||||
for _, s := range res.Data {
|
||||
ls, as, err := ar.expandTemplates(s, qFn, time.Time{})
|
||||
ls, err := ar.expandLabelTemplates(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to expand templates: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
alertID := hash(ls.processed)
|
||||
as, err := ar.expandAnnotationTemplates(s, qFn, time.Time{}, ls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert
|
||||
|
||||
prevT := time.Time{}
|
||||
@@ -363,7 +367,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
|
||||
a.State = notifier.StatePending
|
||||
a.ActiveAt = at
|
||||
// re-template the annotations as active timestamp is changed
|
||||
_, a.Annotations, _ = ar.expandTemplates(s, qFn, at)
|
||||
a.Annotations, _ = ar.expandAnnotationTemplates(s, qFn, at, ls)
|
||||
a.Start = time.Time{}
|
||||
} else if at.Sub(a.ActiveAt) >= ar.For && a.State != notifier.StateFiring {
|
||||
a.State = notifier.StateFiring
|
||||
@@ -376,13 +380,15 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
|
||||
}
|
||||
result = append(result, ar.alertToTimeSeries(a, s.Timestamps[i])...)
|
||||
|
||||
// save alert's state on last iteration, so it can be used on the next execRange call
|
||||
if at.Equal(end) {
|
||||
// if for>0, save alert's state on last iteration, so it can be used on the next execRange call
|
||||
if ar.For > 0 && at.Equal(end) {
|
||||
holdAlertState[alertID] = a
|
||||
}
|
||||
}
|
||||
}
|
||||
ar.alerts = holdAlertState
|
||||
if len(holdAlertState) > 0 {
|
||||
ar.alerts = holdAlertState
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -428,9 +434,22 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
expandedLabels := make([]*labelSet, len(res.Data))
|
||||
expandedAnnotations := make([]map[string]string, len(res.Data))
|
||||
for i, m := range res.Data {
|
||||
ls, as, err := ar.expandTemplates(m, qFn, ts)
|
||||
ls, err := ar.expandLabelTemplates(m)
|
||||
if err != nil {
|
||||
curState.Err = fmt.Errorf("failed to expand templates: %w", err)
|
||||
curState.Err = err
|
||||
return nil, curState.Err
|
||||
}
|
||||
at := ts
|
||||
alertID := hash(ls.processed)
|
||||
if a, ok := ar.alerts[alertID]; ok {
|
||||
// modify activeAt for annotation templating if the alert has already triggered(in state Pending or Firing)
|
||||
if a.State != notifier.StateInactive {
|
||||
at = a.ActiveAt
|
||||
}
|
||||
}
|
||||
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls)
|
||||
if err != nil {
|
||||
curState.Err = err
|
||||
return nil, curState.Err
|
||||
}
|
||||
expandedLabels[i] = ls
|
||||
@@ -473,6 +492,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
a.KeepFiringSince = time.Time{}
|
||||
continue
|
||||
}
|
||||
|
||||
a := ar.newAlert(m, ts, labels.processed, annotations)
|
||||
a.ID = alertID
|
||||
a.State = notifier.StatePending
|
||||
@@ -536,12 +556,18 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
|
||||
return append(tss, ar.toTimeSeries(ts.Unix())...), nil
|
||||
}
|
||||
|
||||
func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.QueryFn, ts time.Time) (*labelSet, map[string]string, error) {
|
||||
func (ar *AlertingRule) expandLabelTemplates(m datasource.Metric) (*labelSet, error) {
|
||||
qFn := func(_ string) ([]datasource.Metric, error) {
|
||||
return nil, fmt.Errorf("`query` template isn't supported in rule label")
|
||||
}
|
||||
ls, err := ar.toLabels(m, qFn)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to expand labels: %w", err)
|
||||
return nil, fmt.Errorf("failed to expand label templates: %s", err)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet) (map[string]string, error) {
|
||||
tplData := notifier.AlertTplData{
|
||||
Value: m.Values[0],
|
||||
Type: ar.Type.String(),
|
||||
@@ -549,14 +575,14 @@ func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.Query
|
||||
Expr: ar.Expr,
|
||||
AlertID: hash(ls.processed),
|
||||
GroupID: ar.GroupID,
|
||||
ActiveAt: ts,
|
||||
ActiveAt: activeAt,
|
||||
For: ar.For,
|
||||
}
|
||||
as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to template annotations: %w", err)
|
||||
return nil, fmt.Errorf("failed to expand annotation templates: %s", err)
|
||||
}
|
||||
return ls, as, nil
|
||||
return as, nil
|
||||
}
|
||||
|
||||
// toTimeSeries creates `ALERTS` and `ALERTS_FOR_STATE` for active alerts
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -267,8 +268,15 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
if got.State != exp.State {
|
||||
t.Fatalf("evalIndex %d: expected state %d; got %d", i, exp.State, got.State)
|
||||
}
|
||||
if rule.Annotations != nil && exp.Annotations != nil {
|
||||
if !reflect.DeepEqual(got.Annotations, exp.Annotations) {
|
||||
t.Fatalf("evalIndex %d: expected annotations %v; got %v", i, exp.Annotations, got.Annotations)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// reset ts for next test
|
||||
ts, _ = time.Parse(time.RFC3339, "2024-10-29T00:00:00Z")
|
||||
}
|
||||
|
||||
f(newTestAlertingRule("empty", 0), [][]datasource.Metric{}, nil, nil)
|
||||
@@ -522,7 +530,7 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("for-pending=>firing=>inactive=>pending=>firing", defaultStep), [][]datasource.Metric{
|
||||
f(newTestAlertingRuleWithCustomFields("for-pending=>firing=>inactive=>pending=>firing", defaultStep, 0, 0, map[string]string{"activeAt": "{{ $activeAt.UnixMilli }}"}), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
// empty step to set alert inactive
|
||||
@@ -530,11 +538,11 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
}, map[int][]testAlert{
|
||||
0: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
2: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
3: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
0: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending, Annotations: map[string]string{"activeAt": strconv.FormatInt(ts.UnixMilli(), 10)}}}},
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring, Annotations: map[string]string{"activeAt": strconv.FormatInt(ts.UnixMilli(), 10)}}}},
|
||||
2: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive, Annotations: map[string]string{"activeAt": strconv.FormatInt(ts.UnixMilli(), 10)}}}},
|
||||
3: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending, Annotations: map[string]string{"activeAt": strconv.FormatInt(ts.Add(defaultStep*3).UnixMilli(), 10)}}}},
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring, Annotations: map[string]string{"activeAt": strconv.FormatInt(ts.Add(defaultStep*3).UnixMilli(), 10)}}}},
|
||||
}, nil)
|
||||
|
||||
f(newTestAlertingRuleWithCustomFields("for-pending=>firing=>keepfiring=>firing", defaultStep, 0, defaultStep, nil), [][]datasource.Metric{
|
||||
|
||||
@@ -587,6 +587,11 @@ func (g *Group) Replay(start, end time.Time, rw remotewrite.RWClient, maxDataPoi
|
||||
|
||||
func replayRuleRange(r Rule, ri rangeIterator, bar *pb.ProgressBar, rw remotewrite.RWClient, replayRuleRetryAttempts, ruleEvaluationConcurrency int) int {
|
||||
fmt.Printf("> Rule %q (ID: %d)\n", r, r.ID())
|
||||
// alerting rule with for>0 can't be replayed concurrently, since the status change might depend on the previous evaluation
|
||||
// see https://github.com/VictoriaMetrics/VictoriaMetrics/commit/abcb21aa5ee918ba9a4e9cde495dba06e1e9564c
|
||||
if r, ok := r.(*AlertingRule); ok && r.For > 0 {
|
||||
ruleEvaluationConcurrency = 1
|
||||
}
|
||||
sem := make(chan struct{}, ruleEvaluationConcurrency)
|
||||
wg := sync.WaitGroup{}
|
||||
res := make(chan int, int(ri.end.Sub(ri.start)/ri.step)+1)
|
||||
|
||||
@@ -437,7 +437,7 @@ func TestRecordingRuleExec_Negative(t *testing.T) {
|
||||
|
||||
_, err = rr.exec(context.TODO(), time.Now(), 0)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot execute recroding rule: %s", err)
|
||||
t.Fatalf("cannot execute recording rule: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,106 +1,110 @@
|
||||
# All these commands must run from repository root.
|
||||
|
||||
# special tag to reduce resulting binary size
|
||||
# See this issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
VMBACKUP_GO_BUILD_TAGS=disable_grpc_modules
|
||||
|
||||
vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) app-local
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local
|
||||
|
||||
vmbackup-race:
|
||||
APP_NAME=vmbackup RACE=-race $(MAKE) app-local
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) RACE=-race $(MAKE) app-local
|
||||
|
||||
vmbackup-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker
|
||||
|
||||
vmbackup-pure-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-pure
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-pure
|
||||
|
||||
vmbackup-linux-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-amd64
|
||||
|
||||
vmbackup-linux-arm-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm
|
||||
|
||||
vmbackup-linux-arm64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm64
|
||||
|
||||
vmbackup-linux-ppc64le-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-ppc64le
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-ppc64le
|
||||
|
||||
vmbackup-linux-386-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-386
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386
|
||||
|
||||
vmbackup-darwin-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64
|
||||
|
||||
vmbackup-darwin-arm64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-arm64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-arm64
|
||||
|
||||
vmbackup-freebsd-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-freebsd-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-freebsd-amd64
|
||||
|
||||
vmbackup-openbsd-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-openbsd-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-openbsd-amd64
|
||||
|
||||
vmbackup-windows-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-windows-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-windows-amd64
|
||||
|
||||
package-vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker
|
||||
|
||||
package-vmbackup-pure:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-pure
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-pure
|
||||
|
||||
package-vmbackup-amd64:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-vmbackup-arm:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-arm
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm
|
||||
|
||||
package-vmbackup-arm64:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-arm64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-vmbackup-ppc64le:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-ppc64le
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-vmbackup-386:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-386
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-386
|
||||
|
||||
publish-vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) publish-via-docker
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) publish-via-docker
|
||||
|
||||
vmbackup-linux-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-arm:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-arm64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-ppc64le:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-s390x:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-loong64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-386:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-darwin-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-darwin-arm64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-freebsd-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-openbsd-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-windows-amd64:
|
||||
GOARCH=amd64 APP_NAME=vmbackup $(MAKE) app-local-windows-goarch
|
||||
GOARCH=amd64 APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local-windows-goarch
|
||||
|
||||
vmbackup-pure:
|
||||
APP_NAME=vmbackup $(MAKE) app-local-pure
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local-pure
|
||||
|
||||
@@ -1,106 +1,110 @@
|
||||
# All these commands must run from repository root.
|
||||
|
||||
# special tag to reduce resulting binary size
|
||||
# See this issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
VMRESTORE_GO_BUILD_TAGS=disable_grpc_modules
|
||||
|
||||
vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) app-local
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local
|
||||
|
||||
vmrestore-race:
|
||||
APP_NAME=vmrestore RACE=-race $(MAKE) app-local
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) RACE=-race $(MAKE) app-local
|
||||
|
||||
vmrestore-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker
|
||||
|
||||
vmrestore-pure-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-pure
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-pure
|
||||
|
||||
vmrestore-linux-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-amd64
|
||||
|
||||
vmrestore-linux-arm-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm
|
||||
|
||||
vmrestore-linux-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm64
|
||||
|
||||
vmrestore-linux-ppc64le-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-ppc64le
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-ppc64le
|
||||
|
||||
vmrestore-linux-386-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-386
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386
|
||||
|
||||
vmrestore-darwin-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64
|
||||
|
||||
vmrestore-darwin-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-arm64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-arm64
|
||||
|
||||
vmrestore-freebsd-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-freebsd-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-freebsd-amd64
|
||||
|
||||
vmrestore-openbsd-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-openbsd-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-openbsd-amd64
|
||||
|
||||
vmrestore-windows-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-windows-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-windows-amd64
|
||||
|
||||
package-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker
|
||||
|
||||
package-vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-pure
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-pure
|
||||
|
||||
package-vmrestore-amd64:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-vmrestore-arm:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-arm
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm
|
||||
|
||||
package-vmrestore-arm64:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-arm64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-vmrestore-ppc64le:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-ppc64le
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-vmrestore-386:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-386
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-386
|
||||
|
||||
publish-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) publish-via-docker
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) publish-via-docker
|
||||
|
||||
vmrestore-linux-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-arm:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-arm64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-ppc64le:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-s390x:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-loong64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-386:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-darwin-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-darwin-arm64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-freebsd-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-openbsd-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-windows-amd64:
|
||||
GOARCH=amd64 APP_NAME=vmrestore $(MAKE) app-local-windows-goarch
|
||||
GOARCH=amd64 APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local-windows-goarch
|
||||
|
||||
vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) app-local-pure
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local-pure
|
||||
|
||||
@@ -10,7 +10,7 @@ func TestParseIntervalSuccess(t *testing.T) {
|
||||
t.Helper()
|
||||
interval, err := parseInterval(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in parseInterva(%q): %s", s, err)
|
||||
t.Fatalf("unexpected error in parseInterval(%q): %s", s, err)
|
||||
}
|
||||
if interval != intervalExpected {
|
||||
t.Fatalf("unexpected result for parseInterval(%q); got %d; want %d", s, interval, intervalExpected)
|
||||
|
||||
@@ -17,7 +17,7 @@ func TestScanStringSuccess(t *testing.T) {
|
||||
t.Fatalf("unexpected string scanned from %s; got %s; want %s", s, result, sExpected)
|
||||
}
|
||||
if !strings.HasPrefix(s, result) {
|
||||
t.Fatalf("invalid prefix for scanne string %s: %s", s, result)
|
||||
t.Fatalf("invalid prefix for scanned string %s: %s", s, result)
|
||||
}
|
||||
}
|
||||
f(`""`, `""`)
|
||||
|
||||
@@ -210,7 +210,7 @@ func (p *parser) parseMetricExprOrFuncCall() (Expr, error) {
|
||||
}
|
||||
return fe, nil
|
||||
default:
|
||||
// Metric epxression or bool expression or None.
|
||||
// Metric expression or bool expression or None.
|
||||
if isBool(ident) {
|
||||
be := &BoolExpr{
|
||||
B: strings.EqualFold(ident, "true"),
|
||||
|
||||
@@ -269,7 +269,7 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
|
||||
}
|
||||
|
||||
// Slow path - spin up multiple local workers for parallel data processing.
|
||||
// Do not use global workers pool, since it increases inter-CPU memory ping-poing,
|
||||
// Do not use global workers pool, since it increases inter-CPU memory ping-pong,
|
||||
// which reduces the scalability on systems with many CPU cores.
|
||||
|
||||
// Prepare the work for workers.
|
||||
@@ -485,7 +485,7 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
|
||||
}
|
||||
|
||||
// Slow path - spin up multiple local workers for parallel data unpacking.
|
||||
// Do not use global workers pool, since it increases inter-CPU memory ping-poing,
|
||||
// Do not use global workers pool, since it increases inter-CPU memory ping-pong,
|
||||
// which reduces the scalability on systems with many CPU cores.
|
||||
|
||||
// Prepare the work for workers.
|
||||
|
||||
@@ -135,7 +135,7 @@ func (tbf *tmpBlocksFile) WriteBlockRefData(b []byte) (tmpBlockAddr, error) {
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// Len() returnt tbf size in bytes.
|
||||
// Len() return tbf size in bytes.
|
||||
func (tbf *tmpBlocksFile) Len() uint64 {
|
||||
return tbf.offset
|
||||
}
|
||||
|
||||
@@ -188,7 +188,7 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp
|
||||
rightValues := right[i].Values
|
||||
dstValues := dst[i].Values
|
||||
if len(leftValues) != len(rightValues) || len(leftValues) != len(dstValues) {
|
||||
logger.Panicf("BUG: len(leftVaues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
|
||||
logger.Panicf("BUG: len(leftValues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
|
||||
len(leftValues), len(rightValues), len(dstValues))
|
||||
}
|
||||
for j, a := range leftValues {
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestValidateMaxPointsPerSeriesFailure(t *testing.T) {
|
||||
f := func(start, end, step int64, maxPoints int) {
|
||||
t.Helper()
|
||||
if err := ValidateMaxPointsPerSeries(start, end, step, maxPoints); err == nil {
|
||||
t.Fatalf("expecint non-nil error for ValidateMaxPointsPerSeries(start=%d, end=%d, step=%d, maxPoints=%d)", start, end, step, maxPoints)
|
||||
t.Fatalf("expecting non-nil error for ValidateMaxPointsPerSeries(start=%d, end=%d, step=%d, maxPoints=%d)", start, end, step, maxPoints)
|
||||
}
|
||||
}
|
||||
// zero step
|
||||
|
||||
@@ -2443,13 +2443,14 @@ func rollupFake(_ *rollupFuncArg) float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// getScalar expects result from a [scalar](https://prometheus.io/docs/prometheus/latest/querying/basics/#expression-language-data-types).
|
||||
func getScalar(arg any, argNum int) ([]float64, error) {
|
||||
ts, ok := arg.([]*timeseries)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(`unexpected type for arg #%d; got %T; want %T`, argNum+1, arg, ts)
|
||||
return nil, fmt.Errorf(`arg #%d must be a scalar`, argNum+1)
|
||||
}
|
||||
if len(ts) != 1 {
|
||||
return nil, fmt.Errorf(`arg #%d must contain a single timeseries; got %d timeseries`, argNum+1, len(ts))
|
||||
return nil, fmt.Errorf(`arg #%d must be a scalar`, argNum+1)
|
||||
}
|
||||
return ts[0].Values, nil
|
||||
}
|
||||
@@ -2466,14 +2467,15 @@ func getIntNumber(arg any, argNum int) (int, error) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// getString expects result from a string expression, which contains a single timeseries with only NaN values.
|
||||
func getString(tss []*timeseries, argNum int) (string, error) {
|
||||
if len(tss) != 1 {
|
||||
return "", fmt.Errorf(`arg #%d must contain a single timeseries; got %d timeseries`, argNum+1, len(tss))
|
||||
return "", fmt.Errorf(`arg #%d must be a string`, argNum+1)
|
||||
}
|
||||
ts := tss[0]
|
||||
for _, v := range ts.Values {
|
||||
if !math.IsNaN(v) {
|
||||
return "", fmt.Errorf(`arg #%d contains non-string timeseries`, argNum+1)
|
||||
return "", fmt.Errorf(`arg #%d must be a string`, argNum+1)
|
||||
}
|
||||
}
|
||||
return string(ts.MetricName.MetricGroup), nil
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -36,7 +36,7 @@
|
||||
<meta property="og:title" content="UI for VictoriaMetrics">
|
||||
<meta property="og:url" content="https://victoriametrics.com/">
|
||||
<meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data">
|
||||
<script type="module" crossorigin src="./assets/index-BT5pWGkz.js"></script>
|
||||
<script type="module" crossorigin src="./assets/index-Ck5nH8JI.js"></script>
|
||||
<link rel="modulepreload" crossorigin href="./assets/vendor-BVRvRxZ2.js">
|
||||
<link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
|
||||
<link rel="stylesheet" crossorigin href="./assets/index-BHg4iVVe.css">
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.24.6 AS build-web-stage
|
||||
FROM golang:1.25.0 AS build-web-stage
|
||||
COPY build /build
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -183,3 +184,32 @@ func (app *ServesMetrics) GetMetricsByPrefix(t *testing.T, prefix string) []floa
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
func (app *ServesMetrics) GetMetricsByRegexp(t *testing.T, re *regexp.Regexp) []float64 {
|
||||
t.Helper()
|
||||
|
||||
values := []float64{}
|
||||
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
for _, metric := range strings.Split(metrics, "\n") {
|
||||
if !re.MatchString(metric) {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Split(metric, " ")
|
||||
if len(parts) < 2 {
|
||||
t.Fatalf("unexpected record format: got %q, want metric name and value separated by a space", metric)
|
||||
}
|
||||
|
||||
value, err := strconv.ParseFloat(parts[len(parts)-1], 64)
|
||||
if err != nil {
|
||||
t.Fatalf("could not parse metric value %s: %v", metric, err)
|
||||
}
|
||||
|
||||
values = append(values, value)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
@@ -173,7 +173,7 @@ func (tc *TestCase) MustStartVmagent(instance string, flags []string, promScrape
|
||||
// vminsert, and one vmselect.
|
||||
//
|
||||
// Both Vmsingle and Vmcluster implement the PrometheusWriteQuerier used in
|
||||
// business logic tests to abstract out the infrasture.
|
||||
// business logic tests to abstract out the infrastructure.
|
||||
//
|
||||
// This type is not suitable for infrastructure tests where custom cluster
|
||||
// setups are often required.
|
||||
|
||||
@@ -17,7 +17,7 @@ func TestClusterMultilevelSelect(t *testing.T) {
|
||||
//
|
||||
// vmselect (L2) -> vmselect (L1) -> vmstorage <- vminsert
|
||||
//
|
||||
// vmisert writes data into vmstorage.
|
||||
// vminsert writes data into vmstorage.
|
||||
// vmselect (L2) reads that data via vmselect (L1).
|
||||
|
||||
vmstorage := tc.MustStartVmstorage("vmstorage", []string{
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
@@ -248,3 +249,88 @@ func TestSingleVMAgentDowngradeRemoteWriteProtocol(t *testing.T) {
|
||||
t.Fatalf("unexpected number of dropped packets; got %d, want %d", actualPacketsDroppedCount, expectedPacketsDroppedTotal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSingleVMAgentDropOnOverload(t *testing.T) {
|
||||
tc := apptest.NewTestCase(t)
|
||||
defer tc.Stop()
|
||||
|
||||
remoteWriteSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer remoteWriteSrv.Close()
|
||||
|
||||
remoteWriteSrv2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
}))
|
||||
defer remoteWriteSrv2.Close()
|
||||
|
||||
vmagent := tc.MustStartVmagent("vmagent", []string{
|
||||
`-remoteWrite.flushInterval=50ms`,
|
||||
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
|
||||
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv2.URL),
|
||||
"-remoteWrite.disableOnDiskQueue=true",
|
||||
// use only 1 worker to get a full queue faster
|
||||
"-remoteWrite.queues=1",
|
||||
// fastqueue size is roughly memory.Allowed() / len(urls) / *maxRowsPerBlock / 100
|
||||
// Use very large maxRowsPerBlock to get fastqueue of minimal length(2).
|
||||
// See initRemoteWriteCtxs function in remotewrite.go for details.
|
||||
"-remoteWrite.maxRowsPerBlock=1000000000",
|
||||
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent",
|
||||
}, ``)
|
||||
|
||||
const (
|
||||
retries = 20
|
||||
period = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
waitFor := func(f func() bool) {
|
||||
t.Helper()
|
||||
for i := 0; i < retries; i++ {
|
||||
if f() {
|
||||
return
|
||||
}
|
||||
time.Sleep(period)
|
||||
}
|
||||
t.Fatalf("timed out waiting for retry #%d", retries)
|
||||
}
|
||||
|
||||
// Real remote write URLs are hidden in metrics
|
||||
url1 := "1:secret-url"
|
||||
url2 := "2:secret-url"
|
||||
|
||||
// Wait until first request got flushed to remote write server
|
||||
vmagent.APIV1ImportPrometheusNoWaitFlush(t, []string{
|
||||
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
|
||||
}, apptest.QueryOpts{})
|
||||
|
||||
waitFor(
|
||||
func() bool {
|
||||
return vmagent.RemoteWriteRequests(t, url1) == 1 && vmagent.RemoteWriteRequests(t, url2) == 1
|
||||
},
|
||||
)
|
||||
|
||||
// Send 2 more requests, the first RW endpoint should receive everything, the second should add them to the queue
|
||||
// since worker is busy with the first request.
|
||||
for i := 0; i < 2; i++ {
|
||||
vmagent.APIV1ImportPrometheusNoWaitFlush(t, []string{
|
||||
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
|
||||
}, apptest.QueryOpts{})
|
||||
|
||||
waitFor(
|
||||
func() bool {
|
||||
return vmagent.RemoteWriteRequests(t, url1) == 2+i && vmagent.RemoteWritePendingInmemoryBlocks(t, url2) == 1+i
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Send one more request.
|
||||
vmagent.APIV1ImportPrometheusNoWaitFlush(t, []string{
|
||||
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
|
||||
}, apptest.QueryOpts{})
|
||||
|
||||
waitFor(
|
||||
func() bool {
|
||||
return vmagent.RemoteWriteRequests(t, url1) == 4 && vmagent.RemoteWriteSamplesDropped(t, url2) > 0
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -104,6 +104,36 @@ func (app *Vmagent) RemoteWritePacketsDroppedTotal(t *testing.T) int {
|
||||
return int(total)
|
||||
}
|
||||
|
||||
// RemoteWriteSamplesDropped sums up the total number of dropped remote write samples for given remote write URL.
|
||||
func (app *Vmagent) RemoteWriteSamplesDropped(t *testing.T, url string) int {
|
||||
re := regexp.MustCompile(fmt.Sprintf("vmagent_remotewrite_samples_dropped_total{.*url=%q.*}", url))
|
||||
total := 0.0
|
||||
for _, v := range app.GetMetricsByRegexp(t, re) {
|
||||
total += v
|
||||
}
|
||||
return int(total)
|
||||
}
|
||||
|
||||
// RemoteWritePendingInmemoryBlocks sums up the total number of pending in-memory blocks for given remote write URL.
|
||||
func (app *Vmagent) RemoteWritePendingInmemoryBlocks(t *testing.T, url string) int {
|
||||
re := regexp.MustCompile(fmt.Sprintf("vmagent_remotewrite_pending_inmemory_blocks{.*url=%q.*}", url))
|
||||
total := 0.0
|
||||
for _, v := range app.GetMetricsByRegexp(t, re) {
|
||||
total += v
|
||||
}
|
||||
return int(total)
|
||||
}
|
||||
|
||||
// RemoteWriteRequests sums up the total number of sending requests for given remote write URL.
|
||||
func (app *Vmagent) RemoteWriteRequests(t *testing.T, url string) int {
|
||||
re := regexp.MustCompile(fmt.Sprintf("vmagent_remotewrite_requests_total{.*url=%q.*}", url))
|
||||
total := 0.0
|
||||
for _, v := range app.GetMetricsByRegexp(t, re) {
|
||||
total += v
|
||||
}
|
||||
return int(total)
|
||||
}
|
||||
|
||||
// ReloadRelabelConfigs sends SIGHUP to trigger relabel config reload
|
||||
// and waits until vmagent_relabel_config_reloads_total increases.
|
||||
// Fails the test if no reload is detected within 3 seconds.
|
||||
@@ -123,9 +153,7 @@ func (app *Vmagent) ReloadRelabelConfigs(t *testing.T) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
if currTotal <= prevTotal {
|
||||
t.Fatalf("relabel configs were not reloaded after SIGHUP signal; previous total: %f, current total: %f", prevTotal, currTotal)
|
||||
}
|
||||
t.Fatalf("relabel configs were not reloaded after SIGHUP signal; previous total: %f, current total: %f", prevTotal, currTotal)
|
||||
}
|
||||
|
||||
// sendBlocking sends the data to vmstorage by executing `send` function and
|
||||
|
||||
@@ -56,31 +56,32 @@ func StartVmauth(instance string, flags []string, cli *Client, configFilePath st
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UpdateConfiguration performs configuration file reload for app and waits for configuration apply
|
||||
// UpdateConfiguration updates the vmauth configuration file with the provided YAML content,
|
||||
// sends SIGHUP to trigger config reload
|
||||
// and waits until vmauth_config_last_reload_total increases.
|
||||
// Fails the test if no reload is detected within 2 seconds.
|
||||
func (app *Vmauth) UpdateConfiguration(t *testing.T, configFileYAML string) {
|
||||
t.Helper()
|
||||
|
||||
// Since the metric vmauth_config_last_reload_success_timestamp_seconds has second precision,
|
||||
// we need to wait for at least 1 second before reloading the config to see the change.
|
||||
time.Sleep(time.Millisecond * 1100)
|
||||
|
||||
ct := app.GetIntMetric(t, "vmauth_config_last_reload_success_timestamp_seconds")
|
||||
fs.MustWriteSync(app.configFilePath, []byte(configFileYAML))
|
||||
|
||||
prevTotal := app.GetIntMetric(t, "vmauth_config_last_reload_total")
|
||||
|
||||
if err := app.process.Signal(syscall.SIGHUP); err != nil {
|
||||
t.Fatalf("unexpected signal error: %s", err)
|
||||
}
|
||||
|
||||
// Since the metric vmauth_config_last_reload_success_timestamp_seconds has second precision,
|
||||
// we have to wait longer than 1 second to account for the worst case scenario.
|
||||
for range 15 {
|
||||
ts := app.GetIntMetric(t, "vmauth_config_last_reload_success_timestamp_seconds")
|
||||
if ts > ct {
|
||||
var currTotal int
|
||||
for range 20 {
|
||||
currTotal = app.GetIntMetric(t, "vmauth_config_last_reload_total")
|
||||
if currTotal > prevTotal {
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
t.Fatalf("timeout waiting for config reload success")
|
||||
|
||||
t.Fatalf("config were not reloaded after SIGHUP signal; previous total: %d, current total: %d", prevTotal, currTotal)
|
||||
}
|
||||
|
||||
// GetHTTPListenAddr returns listen http addr
|
||||
|
||||
@@ -49,7 +49,7 @@ func StartVminsert(instance string, flags []string, cli *Client, output io.Write
|
||||
graphiteListenAddrRE,
|
||||
openTSDBListenAddrRE,
|
||||
}
|
||||
// Add storateNode REs to block until vminsert establishes connections with
|
||||
// Add storageNode REs to block until vminsert establishes connections with
|
||||
// all storage nodes. The extracted values are unused.
|
||||
for _, sn := range storageNodes(flags) {
|
||||
logRecord := fmt.Sprintf("successfully dialed -storageNode=\"%s\"", sn)
|
||||
|
||||
@@ -34,28 +34,28 @@
|
||||
# for details
|
||||
tsbs: tsbs-build tsbs-generate-data tsbs-load-data tsbs-generate-queries tsbs-run-queries
|
||||
|
||||
TSBS_SCALE := 100000
|
||||
TSBS_END := $(shell date -u +%Y-%m-%dT00:00:00Z)
|
||||
TSBS_START := $(shell \
|
||||
TSBS_SCALE ?= 100000
|
||||
TSBS_END ?= $(shell date -u +%Y-%m-%dT00:00:00Z)
|
||||
TSBS_START ?= $(shell \
|
||||
NOW=$$(date -u +%s); \
|
||||
START=$$((NOW - 86400)); \
|
||||
date -u -d "@$$START" +%Y-%m-%dT00:00:00Z 2>/dev/null || \
|
||||
date -u -r $$START +%Y-%m-%dT00:00:00Z 2>/dev/null \
|
||||
)
|
||||
TSBS_STEP := 80s
|
||||
TSBS_QUERIES := 1000
|
||||
TSBS_WORKERS := 4
|
||||
TSBS_STEP ?= 80s
|
||||
TSBS_QUERIES ?= 1000
|
||||
TSBS_WORKERS ?= 4
|
||||
TSBS_DATA_FILE := /tmp/tsbs-data-$(TSBS_SCALE)-$(TSBS_START)-$(TSBS_END)-$(TSBS_STEP).gz
|
||||
TSBS_QUERY_FILE := /tmp/tsbs-queries-$(TSBS_SCALE)-$(TSBS_START)-$(TSBS_END)-$(TSBS_QUERIES).gz
|
||||
# For cluster setup use http://vminsert:8480/insert/0/influx/write
|
||||
TSBS_WRITE_URLS := http://localhost:8428/write
|
||||
TSBS_WRITE_URLS ?= http://localhost:8428/write
|
||||
# For cluster setup use http://vmselect:8481/select/0/prometheus
|
||||
TSBS_READ_URLS := http://localhost:8428
|
||||
TSBS_METRICS_URL := http://localhost:8428/metrics
|
||||
TSBS_READ_URLS ?= http://localhost:8428
|
||||
TSBS_METRICS_URL ?= http://localhost:8428/metrics
|
||||
|
||||
# Build TSBS tools
|
||||
tsbs-build:
|
||||
test -d /tmp/tsbs || (git clone https://github.com/timescale/tsbs.git /tmp/tsbs && \
|
||||
test -d /tmp/tsbs/cmd/tsbs_run_queries_victoriametrics || (git clone https://github.com/timescale/tsbs.git /tmp/tsbs && \
|
||||
cd /tmp/tsbs/cmd/tsbs_generate_data && GOBIN=/tmp/tsbs/bin go install && \
|
||||
cd /tmp/tsbs/cmd/tsbs_generate_queries && GOBIN=/tmp/tsbs/bin go install && \
|
||||
cd /tmp/tsbs/cmd/tsbs_load_victoriametrics && GOBIN=/tmp/tsbs/bin go install && \
|
||||
|
||||
@@ -7,7 +7,7 @@ ROOT_IMAGE ?= alpine:3.22.1
|
||||
ROOT_IMAGE_SCRATCH ?= scratch
|
||||
CERTS_IMAGE := alpine:3.22.1
|
||||
|
||||
GO_BUILDER_IMAGE := golang:1.24.6-alpine
|
||||
GO_BUILDER_IMAGE := golang:1.25.0-alpine
|
||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
|
||||
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
||||
DOCKER ?= docker
|
||||
@@ -43,7 +43,7 @@ app-via-docker: package-builder
|
||||
$(BUILDER_IMAGE) \
|
||||
go build $(RACE) -trimpath -buildvcs=false \
|
||||
-ldflags "-extldflags '-static' $(GO_BUILDINFO)" \
|
||||
-tags 'netgo osusergo musl' \
|
||||
-tags 'netgo osusergo musl $(EXTRA_GO_BUILD_TAGS)' \
|
||||
-o bin/$(APP_NAME)$(APP_SUFFIX)-prod $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-via-docker-windows: package-builder
|
||||
@@ -58,7 +58,7 @@ app-via-docker-windows: package-builder
|
||||
$(BUILDER_IMAGE) \
|
||||
go build $(RACE) -trimpath -buildvcs=false \
|
||||
-ldflags "-s -w -extldflags '-static' $(GO_BUILDINFO)" \
|
||||
-tags 'netgo osusergo' \
|
||||
-tags 'netgo osusergo $(EXTRA_GO_BUILD_TAGS)' \
|
||||
-o bin/$(APP_NAME)-windows$(APP_SUFFIX)-prod.exe $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
package-via-docker: package-base
|
||||
|
||||
@@ -3,7 +3,7 @@ services:
|
||||
# It scrapes targets defined in --promscrape.config
|
||||
# And forward them to --remoteWrite.url
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
depends_on:
|
||||
- "vmauth"
|
||||
ports:
|
||||
@@ -35,14 +35,14 @@ services:
|
||||
# vmstorage shards. Each shard receives 1/N of all metrics sent to vminserts,
|
||||
# where N is number of vmstorages (2 in this case).
|
||||
vmstorage-1:
|
||||
image: victoriametrics/vmstorage:v1.123.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.124.0-cluster
|
||||
volumes:
|
||||
- strgdata-1:/storage
|
||||
command:
|
||||
- "--storageDataPath=/storage"
|
||||
restart: always
|
||||
vmstorage-2:
|
||||
image: victoriametrics/vmstorage:v1.123.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.124.0-cluster
|
||||
volumes:
|
||||
- strgdata-2:/storage
|
||||
command:
|
||||
@@ -52,7 +52,7 @@ services:
|
||||
# vminsert is ingestion frontend. It receives metrics pushed by vmagent,
|
||||
# pre-process them and distributes across configured vmstorage shards.
|
||||
vminsert-1:
|
||||
image: victoriametrics/vminsert:v1.123.0-cluster
|
||||
image: victoriametrics/vminsert:v1.124.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -61,7 +61,7 @@ services:
|
||||
- "--storageNode=vmstorage-2:8400"
|
||||
restart: always
|
||||
vminsert-2:
|
||||
image: victoriametrics/vminsert:v1.123.0-cluster
|
||||
image: victoriametrics/vminsert:v1.124.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -73,7 +73,7 @@ services:
|
||||
# vmselect is a query fronted. It serves read queries in MetricsQL or PromQL.
|
||||
# vmselect collects results from configured `--storageNode` shards.
|
||||
vmselect-1:
|
||||
image: victoriametrics/vmselect:v1.123.0-cluster
|
||||
image: victoriametrics/vmselect:v1.124.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -83,7 +83,7 @@ services:
|
||||
- "--vmalert.proxyURL=http://vmalert:8880"
|
||||
restart: always
|
||||
vmselect-2:
|
||||
image: victoriametrics/vmselect:v1.123.0-cluster
|
||||
image: victoriametrics/vmselect:v1.124.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -98,7 +98,7 @@ services:
|
||||
# read requests from Grafana, vmui, vmalert among vmselects.
|
||||
# It can be used as an authentication proxy.
|
||||
vmauth:
|
||||
image: victoriametrics/vmauth:v1.123.0
|
||||
image: victoriametrics/vmauth:v1.124.0
|
||||
depends_on:
|
||||
- "vmselect-1"
|
||||
- "vmselect-2"
|
||||
@@ -112,7 +112,7 @@ services:
|
||||
|
||||
# vmalert executes alerting and recording rules
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.123.0
|
||||
image: victoriametrics/vmalert:v1.124.0
|
||||
depends_on:
|
||||
- "vmauth"
|
||||
ports:
|
||||
|
||||
@@ -3,7 +3,7 @@ services:
|
||||
# It scrapes targets defined in --promscrape.config
|
||||
# And forward them to --remoteWrite.url
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -18,7 +18,7 @@ services:
|
||||
# VictoriaMetrics instance, a single process responsible for
|
||||
# storing metrics and serve read requests.
|
||||
victoriametrics:
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
- 8089:8089
|
||||
@@ -54,7 +54,7 @@ services:
|
||||
|
||||
# vmalert executes alerting and recording rules
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.123.0
|
||||
image: victoriametrics/vmalert:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
- "alertmanager"
|
||||
|
||||
@@ -100,7 +100,7 @@ groups:
|
||||
summary: "Churn rate is more than 10% on \"{{ $labels.instance }}\" for the last 15m"
|
||||
description: "VM constantly creates new time series on \"{{ $labels.instance }}\".\n
|
||||
This effect is known as Churn Rate.\n
|
||||
High Churn Rate tightly connected with database performance and may
|
||||
High Churn Rate is tightly connected with database performance and may
|
||||
result in unexpected OOM's or slow queries."
|
||||
|
||||
- alert: TooHighChurnRate24h
|
||||
@@ -117,7 +117,7 @@ groups:
|
||||
description: "The number of created new time series over last 24h is 3x times higher than
|
||||
current number of active series on \"{{ $labels.instance }}\".\n
|
||||
This effect is known as Churn Rate.\n
|
||||
High Churn Rate tightly connected with database performance and may
|
||||
High Churn Rate is tightly connected with database performance and may
|
||||
result in unexpected OOM's or slow queries."
|
||||
|
||||
- alert: TooHighSlowInsertsRate
|
||||
@@ -135,4 +135,4 @@ groups:
|
||||
summary: "Percentage of slow inserts is more than 5% on \"{{ $labels.instance }}\" for the last 15m"
|
||||
description: "High rate of slow inserts on \"{{ $labels.instance }}\" may be a sign of resource exhaustion
|
||||
for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series.
|
||||
See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183"
|
||||
See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
services:
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -14,7 +14,7 @@ services:
|
||||
restart: always
|
||||
|
||||
victoriametrics:
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
@@ -40,7 +40,7 @@ services:
|
||||
restart: always
|
||||
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.123.0
|
||||
image: victoriametrics/vmalert:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -59,7 +59,7 @@ services:
|
||||
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr": },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
|
||||
restart: always
|
||||
vmanomaly:
|
||||
image: victoriametrics/vmanomaly:v1.25.2
|
||||
image: victoriametrics/vmanomaly:v1.25.3
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
||||
@@ -1005,7 +1005,7 @@
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Anoamlies: Read Latency",
|
||||
"title": "Anomalies: Read Latency",
|
||||
"type": "state-timeline"
|
||||
},
|
||||
{
|
||||
|
||||
@@ -18,7 +18,7 @@ services:
|
||||
- vlogs
|
||||
|
||||
generator:
|
||||
image: golang:1.24.6-alpine
|
||||
image: golang:1.25.0-alpine
|
||||
restart: always
|
||||
working_dir: /go/src/app
|
||||
volumes:
|
||||
|
||||
@@ -2,7 +2,7 @@ version: "3"
|
||||
|
||||
services:
|
||||
generator:
|
||||
image: golang:1.24.6-alpine
|
||||
image: golang:1.25.0-alpine
|
||||
restart: always
|
||||
working_dir: /go/src/app
|
||||
volumes:
|
||||
|
||||
@@ -4,12 +4,12 @@ Benchmark compares VictoriaLogs with ELK stack and Grafana Loki.
|
||||
|
||||
Benchmark is based on:
|
||||
|
||||
- Logs from this repository - https://github.com/logpai/loghub
|
||||
- Logs from this repository - [https://github.com/logpai/loghub](https://github.com/logpai/loghub)
|
||||
- [logs generator](./generator)
|
||||
|
||||
For ELK suite it uses:
|
||||
|
||||
- filebeat - https://www.elastic.co/beats/filebeat
|
||||
- filebeat - [https://www.elastic.co/beats/filebeat](https://www.elastic.co/beats/filebeat)
|
||||
- elastic + kibana
|
||||
|
||||
For Grafana Loki suite it uses:
|
||||
@@ -24,7 +24,7 @@ For Grafana Loki suite it uses:
|
||||
|
||||
- VictoriaLogs instance
|
||||
- vmsingle - port forwarded to `localhost:8428` to see UI
|
||||
- exporters for system metris
|
||||
- exporters for system metrics
|
||||
|
||||
ELK suite uses [docker-compose-elk.yml](./docker-compose-elk.yml) with the following services:
|
||||
|
||||
@@ -54,7 +54,7 @@ Each filebeat than writes logs to elastic and VictoriaLogs via elasticsearch-com
|
||||
1. Download and unarchive logs by running:
|
||||
|
||||
```shell
|
||||
cd source_logs
|
||||
cd source_logs
|
||||
bash download.sh
|
||||
```
|
||||
|
||||
@@ -74,11 +74,11 @@ Unarchived logs size per file for reference:
|
||||
13G hadoop-*.log
|
||||
```
|
||||
|
||||
2. (optional) If needed, adjust amount of logs sent by generator by modifying `-outputRateLimitItems` and
|
||||
1. (optional) If needed, adjust amount of logs sent by generator by modifying `-outputRateLimitItems` and
|
||||
`outputRateLimitPeriod` parameters in [docker-compose.yml](./docker-compose.yml). By default, it is configured to
|
||||
send 10000 logs per second.
|
||||
|
||||
3. (optional) Build victoria-logs image and adjust `image` parameter in [docker-compose.yml](./docker-compose.yml):
|
||||
1. (optional) Build victoria-logs image and adjust `image` parameter in [docker-compose.yml](./docker-compose.yml):
|
||||
|
||||
```shell
|
||||
make package-victoria-logs
|
||||
@@ -95,26 +95,27 @@ output.elasticsearch:
|
||||
hosts: [ "http://vlogs:9428/insert/elasticsearch/" ]
|
||||
```
|
||||
|
||||
4. Choose a suite to run.
|
||||
1. Choose a suite to run.
|
||||
|
||||
In order to run ELK suite use the following command:
|
||||
```
|
||||
|
||||
```sh
|
||||
make docker-up-elk
|
||||
```
|
||||
|
||||
In order to run Loki suite use the following command:
|
||||
```
|
||||
|
||||
```sh
|
||||
make docker-up-loki
|
||||
```
|
||||
|
||||
|
||||
5. Navigate to `http://localhost:3000/` to see Grafana dashboards with resource usage
|
||||
1. Navigate to `http://localhost:3000/` to see Grafana dashboards with resource usage
|
||||
comparison.
|
||||
|
||||
Navigate to `http://localhost:3000/d/hkm6P6_4z/elastic-vs-vlogs` to see ELK suite results.
|
||||
|
||||
Navigate to `http://localhost:3000/d/hkm6P6_4y/loki-vs-vlogs` to see Loki suite results.
|
||||
|
||||
|
||||
Example results vs ELK:
|
||||
|
||||

|
||||
|
||||
@@ -14,6 +14,19 @@ aliases:
|
||||
---
|
||||
Please find the changelog for VictoriaMetrics Anomaly Detection below.
|
||||
|
||||
## v1.25.3
|
||||
Released: 2025-08-19
|
||||
|
||||
- FEATURE: Added forecasting capabilities to the [`ProphetModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) this allows users to generate *future* (point-wise and interval) predictions with offsets defined by `forecast_at` argument (e.g. `['1d', '1w']`) at *current* timestamp and store these in respective series, e.g. `yhat_1d`, `yhat_lower_1d`, `yhat_upper_1d`, etc. This feature is particularly useful for scenarios where future predictions are needed, such as capacity planning or trend analysis. See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#forecasting) for more details.
|
||||
|
||||
- IMPROVEMENT: Added `logger_levels` argument to `settings` [config section](https://docs.victoriametrics.com/anomaly-detection/components/settings/#logger-levels) to allow setting specific log levels for individual components. Useful for debugging specific components. For example, `logger_levels: { "reader.vm": "DEBUG" }` will set the log level for the `VmReader` component to `DEBUG`, while leaving other components at their default log levels. Also is supported in [hot reload](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) mode, allowing for dynamic log level changes without service restarts.
|
||||
|
||||
- IMPROVEMENT: Added logging of URLs used for querying VictoriaMetrics TSDB in [`VmReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) to ease the debugging of incomplete data retrieval, incorrect endpoints, or misconfigured tenant IDs. The URLs are logged at the `DEBUG` level, so you can control their verbosity using the `--loggerLevelComponents` argument with `reader.vm=DEBUG` or `reader=DEBUG` to see the URLs in the logs.
|
||||
|
||||
- IMPROVEMENT: Added `offset` [argument](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) to `VmReader` on reader and query levels to allow for flexible time offset adjustments in the reader. Useful for correcting for data collection delays. The `offset` can be specified as a string (e.g., "15s", "-20s") and will be applied to all queries processed by the reader. See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#using-offsets) for more details.
|
||||
|
||||
- BUGFIX: Resolved the issue where symlink-ed configuration files were not properly processed by [hot reload](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) mechanism, leading to the service not picking up changes made to the original files. Now it properly resolves symlinks and reloads the configuration when the original file is modified.
|
||||
|
||||
## v1.25.2
|
||||
Released: 2025-07-30
|
||||
|
||||
|
||||
@@ -54,6 +54,25 @@ Respective config is defined in a [`reader`](https://docs.victoriametrics.com/an
|
||||
## Handling noisy input data
|
||||
`vmanomaly` operates on data fetched from VictoriaMetrics using [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) queries, so the initial data quality can be fine-tuned with aggregation, grouping, and filtering to reduce noise and improve anomaly detection accuracy.
|
||||
|
||||
## Using offsets
|
||||
`vmanomaly` supports {{% available_from "v1.25.3" anomaly %}} the use of offsets in the [`reader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) section to adjust the time range of the data being queried. This can be particularly useful for correcting for data collection delays or other timing issues. It can be also defined or overridden on [per-query basis](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters).
|
||||
|
||||
For example, if you want to query data with a 60-second delay (e.g. data collection happened 1 sec ago, however, timestamps written to VictoriaMetrics are 60 seconds in the past), you can set the `offset` argument to `-60s` in the reader section:
|
||||
|
||||
```yaml
|
||||
reader:
|
||||
class: 'vm'
|
||||
datasource_url: 'http://localhost:8428'
|
||||
sampling_period: '10s'
|
||||
offset: '-60s'
|
||||
queries:
|
||||
vmb:
|
||||
expr: 'avg(vm_blocks)'
|
||||
cpu_custom_offset:
|
||||
expr: 'avg(rate(vm_cpu_usage[5m]))'
|
||||
offset: '-30s' # this will override the global offset for this query only
|
||||
```
|
||||
|
||||
## Handling timezones
|
||||
|
||||
`vmanomaly` supports timezone-aware anomaly detection {{% available_from "v1.18.0" anomaly %}} through a `tz` argument, available both at the [reader level](https://docs.victoriametrics.com/anomaly-detection/components/reader#vm-reader) and at the [query level](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters).
|
||||
@@ -179,6 +198,22 @@ While `vmanomaly` detects anomalies and produces scores, it *does not directly g
|
||||
|
||||
<img src="https://docs.victoriametrics.com/anomaly-detection/guides/guide-vmanomaly-vmalert/guide-vmanomaly-vmalert_overview.webp" alt="node_exporter_example_diagram" style="width:60%"/>
|
||||
|
||||
Once anomaly scores are written back to VictoriaMetrics, you can use a subset of [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions in `vmalert` to define alerting rules based on these scores. A reasonable default is `anomaly_score > 1`:
|
||||
|
||||
```yaml
|
||||
groups:
|
||||
- name: vmanomaly_alerts
|
||||
rules:
|
||||
- alert: HighAnomalyScore
|
||||
expr: anomaly_score > 1 # or similar expressions, like `min(anomaly_score{...}) by (...) > 1`
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Anomaly score > 1 for {{ $labels.for }} query"
|
||||
description: "Anomaly score is {{ $value }} for query {{ $labels.for }}. Value: {{ $value }}."
|
||||
```
|
||||
|
||||
## Preventing alert fatigue
|
||||
Produced anomaly scores are designed in such a way that values from 0.0 to 1.0 indicate non-anomalous data, while a value greater than 1.0 is generally classified as an anomaly. However, there are no perfect models for anomaly detection, which is why reasonable default expressions like `anomaly_score > 1` may not work 100% of the time. However, anomaly scores, produced by `vmanomaly` are written back as metrics to VictoriaMetrics, where tools like [`vmalert`](https://docs.victoriametrics.com/victoriametrics/vmalert/) can use [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions to fine-tune alerting thresholds and conditions, balancing between avoiding [false negatives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-negative) and reducing [false positives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-positive).
|
||||
|
||||
@@ -228,6 +263,117 @@ writer:
|
||||
|
||||
Configuration above will produce N intervals of full length (`fit_window`=14d + `fit_every`=1h) until `to_iso` timestamp is reached to run N consecutive `fit` calls to train models; Then these models will be used to produce `M = [fit_every / sampling_frequency]` infer datapoints for `fit_every` range at the end of each such interval, imitating M consecutive calls of `infer_every` in `PeriodicScheduler` [config](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#periodic-scheduler). These datapoints then will be written back to VictoriaMetrics TSDB, defined in `writer` [section](https://docs.victoriametrics.com/anomaly-detection/components/writer#vm-writer) for further visualization (i.e. in VMUI or Grafana)
|
||||
|
||||
## Forecasting
|
||||
|
||||
Not intended for forecasting in its core, `vmanomaly` can still be used to produce forecasts using [ProphetModel](https://docs.victoriametrics.com/anomaly-detection/components/models#prophet) {{% available_from "v1.25.3" anomaly %}}, which can be helpful in scenarios like capacity planning, resource allocation, or trend analysis, if the underlying data is complex and can't be handled by inline MetricsQL queries, including [predict_linear](https://docs.victoriametrics.com/victoriametrics/metricsql/#predict_linear).
|
||||
|
||||
> However, please note that this mode should be used with care, as the model will produce `yhat_{h}` (and probably `yhat_lower_{h}`, and `yhat_upper_{h}`) time series **for each timeseries returned by input queries and for each forecasting horizon specified in `forecast_at` argument, which can lead to a significant increase in the number of active timeseries in VictoriaMetrics TSDB**.
|
||||
|
||||
Here's an example of how to produce forecasts using `vmanomaly` and combine it with the regular model, e.g. to estimate daily outcomes for a disk usage metric:
|
||||
|
||||
```yaml
|
||||
# https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler
|
||||
schedulers:
|
||||
periodic_5m: # this scheduler will be used to produce anomaly scores each 5 minutes using "regular" simple model
|
||||
class: 'periodic'
|
||||
fit_every: '30d'
|
||||
fit_window: '3d'
|
||||
infer_every: '5m'
|
||||
periodic_forecast: # this scheduler will be used to produce forecasts each 24h using "daily" model
|
||||
class: 'periodic'
|
||||
fit_every: '7d'
|
||||
fit_window: '730d' # to fit the model on 2 years of data to account for seasonality and holidays
|
||||
infer_every: '24h'
|
||||
# https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader
|
||||
reader:
|
||||
class: 'vm'
|
||||
datasource_url: 'http://play.victoriametrics.com'
|
||||
tenant_id: '0:0'
|
||||
sampling_period: '5m'
|
||||
# other reader params ...
|
||||
queries:
|
||||
disk_usage_perc_5m:
|
||||
expr: |
|
||||
max_over_time(
|
||||
1 - (node_filesystem_avail_bytes{mountpoint="/",fstype!="rootfs"}
|
||||
/
|
||||
node_filesystem_size_bytes{mountpoint="/",fstype!="rootfs"}),
|
||||
1h
|
||||
)
|
||||
data_range: [0, 1]
|
||||
# step: '1m' # default will be inherited from sampling_period
|
||||
disk_usage_perc_1d:
|
||||
expr: |
|
||||
max_over_time(
|
||||
1 - (node_filesystem_avail_bytes{mountpoint="/",fstype!="rootfs"}
|
||||
/
|
||||
node_filesystem_size_bytes{mountpoint="/",fstype!="rootfs"}),
|
||||
24h
|
||||
)
|
||||
step: '1d' # override default step to 1d, as we want to produce daily forecasts
|
||||
data_range: [0, 1]
|
||||
# https://docs.victoriametrics.com/anomaly-detection/components/models/
|
||||
models:
|
||||
quantile_5m:
|
||||
class: 'quantile_online' # online model, which updates itself each infer call
|
||||
queries: ['disk_usage_perc_5m']
|
||||
schedulers: ['periodic_5m']
|
||||
clip_predictions: True
|
||||
detection_direction: 'above_expected' # as we are interested in spikes in capacity planning
|
||||
quantiles: [0.25, 0.5, 0.75] # to produce median and upper quartiles
|
||||
iqr_threshold: 2.0
|
||||
|
||||
prophet_1d:
|
||||
class: 'prophet'
|
||||
queries: ['disk_usage_perc_1d']
|
||||
schedulers: ['periodic_forecast']
|
||||
clip_predictions: True
|
||||
detection_direction: 'above_expected' # as we are interested in spikes in capacity planning
|
||||
forecast_at: ['3d', '7d'] # this will produce forecasts for 3 and 7 days ahead
|
||||
provide_series: ['yhat', 'yhat_upper'] # to write forecasts back to VictoriaMetrics, omitting `yhat_lower` as it is not needed in this example
|
||||
# other model params, yearly_seasonality may stay
|
||||
# https://facebook.github.io/prophet/docs/quick_start#python-api
|
||||
args:
|
||||
interval_width: 0.98 # see https://facebook.github.io/prophet/docs/uncertainty_intervals
|
||||
country_holidays: 'US'
|
||||
# https://docs.victoriametrics.com/anomaly-detection/components/writer/#vm-writer
|
||||
writer:
|
||||
class: 'vm'
|
||||
datasource_url: '{your_victoriametrics_url_for_writing}'
|
||||
# tenant_id: '0:0' # or your tenant ID if using clustered VictoriaMetrics
|
||||
# other writer params ...
|
||||
# https://docs.victoriametrics.com/anomaly-detection/components/writer/#metrics-formatting
|
||||
metric_format:
|
||||
__name__: $VAR
|
||||
for: $QUERY_KEY
|
||||
```
|
||||
|
||||
Then, respective alerts can be configured in [`vmalert`](https://docs.victoriametrics.com/victoriametrics/vmalert/) to notify disk exhaustion risks, e.g. if the forecasted disk usage exceeds 90% in the next 3 days:
|
||||
|
||||
```yaml
|
||||
groups:
|
||||
- name: disk_usage_alerts
|
||||
rules:
|
||||
- alert: DiskUsageHigh
|
||||
expr: |
|
||||
yhat_7d{for="disk_usage_perc_1d"} > 0.9
|
||||
for: 24h
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Disk usage is forecasted to exceed 90% in the next 3 days"
|
||||
description: "Disk usage is forecasted to exceed 90% in the next 3 days for instance {{ $labels.instance }}. Forecasted value: {{ $value }}."
|
||||
- alert: DiskUsageCritical
|
||||
expr: |
|
||||
yhat_3d{for="disk_usage_perc_1d"} > 0.95
|
||||
for: 24h
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Disk usage is forecasted to exceed 95% in the next 3 days"
|
||||
description: "Disk usage is forecasted to exceed 95% in the next 3 days for instance {{ $labels.instance }}. Forecasted value: {{ $value }}."
|
||||
```
|
||||
|
||||
## Resource consumption of vmanomaly
|
||||
`vmanomaly` itself is a lightweight service, resource usage is primarily dependent on [scheduling](https://docs.victoriametrics.com/anomaly-detection/components/scheduler) (how often and on what data to fit/infer your models), [# and size of timeseries returned by your queries](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader), and the complexity of the employed [models](https://docs.victoriametrics.com/anomaly-detection/components/models). Its resource usage is directly related to these factors, making it adaptable to various operational scales. Various optimizations are available to balance between RAM usage, processing speed, and model capacity. These options are described in the sections below.
|
||||
|
||||
@@ -243,7 +389,7 @@ services:
|
||||
# ...
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.25.2
|
||||
image: victoriametrics/vmanomaly:v1.25.3
|
||||
# ...
|
||||
ports:
|
||||
- "8490:8490"
|
||||
@@ -456,7 +602,7 @@ options:
|
||||
Here’s an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.25.2 && docker image tag victoriametrics/vmanomaly:v1.25.2 vmanomaly
|
||||
docker pull victoriametrics/vmanomaly:v1.25.3 && docker image tag victoriametrics/vmanomaly:v1.25.3 vmanomaly
|
||||
```
|
||||
|
||||
```sh
|
||||
|
||||
@@ -121,13 +121,13 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
|
||||
1. Pull Docker image:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.25.2
|
||||
docker pull victoriametrics/vmanomaly:v1.25.3
|
||||
```
|
||||
|
||||
2. (Optional step) tag the `vmanomaly` Docker image:
|
||||
|
||||
```sh
|
||||
docker image tag victoriametrics/vmanomaly:v1.25.2 vmanomaly
|
||||
docker image tag victoriametrics/vmanomaly:v1.25.3 vmanomaly
|
||||
```
|
||||
|
||||
3. Start the `vmanomaly` Docker container with a *license file*, use the command below.
|
||||
@@ -163,7 +163,7 @@ docker run -it --user 1000:1000 \
|
||||
services:
|
||||
# ...
|
||||
vmanomaly:
|
||||
image: victoriametrics/vmanomaly:v1.25.2
|
||||
image: victoriametrics/vmanomaly:v1.25.3
|
||||
volumes:
|
||||
$YOUR_LICENSE_FILE_PATH:/license
|
||||
$YOUR_CONFIG_FILE_PATH:/config.yml
|
||||
@@ -220,6 +220,14 @@ settings:
|
||||
n_workers: 4 # number of workers to run workload in parallel, set to 0 or negative number to use all available CPU cores
|
||||
anomaly_score_outside_data_range: 5.0 # default anomaly score for anomalies outside expected data range
|
||||
restore_state: True # restore state from previous run, available since v1.24.0
|
||||
# https://docs.victoriametrics.com/anomaly-detection/components/settings/#logger-levels
|
||||
# to override service-global logger levels, use the `logger_levels` section
|
||||
logger_levels:
|
||||
# vmanomaly: info
|
||||
# scheduler: info
|
||||
# reader: info
|
||||
# writer: info
|
||||
model.prophet: warning
|
||||
|
||||
schedulers:
|
||||
1d_1m:
|
||||
@@ -299,6 +307,9 @@ For optimal service behavior, consider the following tweaks when configuring `vm
|
||||
- Set up [anomaly score dashboard](https://docs.victoriametrics.com/anomaly-detection/presets/#grafana-dashboard) to visualize the results of anomaly detection.
|
||||
- Set up [self-monitoring dashboard](https://docs.victoriametrics.com/anomaly-detection/self-monitoring/) to monitor the health of `vmanomaly` service and its components.
|
||||
|
||||
**Logging**:
|
||||
- Tune logging levels in the `settings.logger_levels` [section](https://docs.victoriametrics.com/anomaly-detection/components/settings/#logger-levels) to control the verbosity of logs. This can help in debugging and monitoring the service behavior, as well as in disabling excessive logging for production environments.
|
||||
|
||||
## Check also
|
||||
|
||||
Please refer to the following links for a deeper understanding of Anomaly Detection and `vmanomaly`:
|
||||
|
||||
@@ -652,7 +652,7 @@ models:
|
||||
|
||||
> `ProphetModel` is a [univariate](#univariate-models), [non-rolling](#non-rolling-models), [offline](#offline-models) model.
|
||||
|
||||
> {{% available_from "v1.18.2" anomaly %}} the format for `tz_seasonalities` has been updated to enhance flexibility. Previously, it accepted a list of strings (e.g., `['hod', 'minute']`). Now, it follows the same structure as custom seasonalities defined in the `seasonalities` argument (e.g., `{"name": "hod", "fourier_order": 5, "mode": "additive"}`). This change is backward-compatible, so older configurations will be automatically converted to the new format using default values.
|
||||
> {{% available_from "v1.25.3" anomaly %}} Producing forecasts for future timestamps is now supported. To enable this, set the `forecast_at` argument to a list of relative future offsets (e.g., `['1h', '1d']`). The model will then generate forecasts for these future timestamps, which can be useful for planning and resource allocation. Output series are affected by the [provide_series](#provide-series) argument, which needs to include at least `yhat` for point-wise forecasts (and `yhat_lower` and/or `yhat_upper` for respective confidence intervals). See the example below for more details.
|
||||
|
||||
*Parameters specific for vmanomaly*:
|
||||
|
||||
@@ -661,7 +661,11 @@ models:
|
||||
- `scale`{{% available_from "v1.18.0" anomaly %}} (float): Is used to adjust the margins between `yhat` and [`yhat_lower`, `yhat_upper`]. New margin = `|yhat_* - yhat_lower| * scale`. Defaults to 1 (no scaling is applied). See `scale`[common arg](https://docs.victoriametrics.com/anomaly-detection/components/models/#scale) section for detailed instructions and 2-sided option.
|
||||
- `tz_aware`{{% available_from "v1.18.0" anomaly %}} (bool): Enables handling of timezone-aware timestamps. Default is `False`. Should be used with `tz_seasonalities` and `tz_use_cyclical_encoding` parameters.
|
||||
- `tz_seasonalities`{{% available_from "v1.18.0" anomaly %}} (list[dict]): Specifies timezone-aware seasonal components. Requires `tz_aware=True`. Supported options include `minute`, `hod` (hour of day), `dow` (day of week), and `month` (month of year). {{% available_from "v1.18.2" anomaly %}} users can configure additional parameters for each seasonality, such as `fourier_order`, `prior_scale`, and `mode`. For more details, please refer to the **Timezone-unaware** configuration example below.
|
||||
> {{% available_from "v1.18.2" anomaly %}} the format for `tz_seasonalities` has been updated to enhance flexibility. Previously, it accepted a list of strings (e.g., `['hod', 'minute']`). Now, it follows the same structure as custom seasonalities defined in the `seasonalities` argument (e.g., `{"name": "hod", "fourier_order": 5, "mode": "additive"}`). This change is backward-compatible, so older configurations will be automatically converted to the new format using default values.
|
||||
- `tz_use_cyclical_encoding`{{% available_from "v1.18.0" anomaly %}} (bool): If set to `True`, applies [cyclical encoding technique](https://www.kaggle.com/code/avanwyk/encoding-cyclical-features-for-deep-learning) to timezone-aware seasonalities. Should be used with `tz_aware=True` and `tz_seasonalities`.
|
||||
- `forecast_at`{{% available_from "v1.25.3" anomaly %}} (list[str]): Specifies future relative offsets for which forecasts should be generated (e.g., `['1h', '1d']`). Works similarly to [predict_linear](https://docs.victoriametrics.com/victoriametrics/metricsql/#predict_linear) in MetricsQL, but with more flexibility and seasonality support - produced series will have *the same timestamp* as the other [output](#vmanomaly-output) series, but with the forecasted value for the *future timestamp*. Defaults to `[]` (empty list, meaning no future forecasts are produced). If set, `provide_series` must include at least `yhat` for point-wise forecasts (and `yhat_lower` and/or `yhat_upper` for respective confidence intervals). For example, if `forecast_at` is set to `['1h', '1d']`, the model will produce forecasts for both the next hour and the next day, and these series can be accessed by `yhat_1h`, `yhat_lower_1h`, `yhat_upper_1h`, `yhat_1d`, `yhat_lower_1d`, and `yhat_upper_1d` in the output, respectively. See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#forecasting) for more details.
|
||||
|
||||
> `forecast_at` parameter can lead to **significant increase in active timeseries** if you have a lot of time series returned by your queries, as it will produce additional series for each of the future timestamps specified in `forecast_at` (optionally multiplied by 1-3 if interval forecasts are included). For example, if you have 1000 time series returned by your query and set `forecast_at` to `[1h, 1d, 1w]`, and `provide_series` includes `yhat_lower` and `yhat_upper`, it will produce 1000 (series) * 3 (intervals) * 3 (predictions, point + interval) = 9000 additional timeseries. Consider using it only on a small subset of metrics (e.g. grouped by `host` or `region`) to avoid this issue, as it also **proportionally (to the number of `forecast_at` elements) increases the timings of inference calls**.
|
||||
|
||||
> Apart from standard [`vmanomaly` output](#vmanomaly-output), Prophet model can provide additional metrics.
|
||||
|
||||
@@ -1308,7 +1312,7 @@ monitoring:
|
||||
Let's pull the docker image for `vmanomaly`:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.25.2
|
||||
docker pull victoriametrics/vmanomaly:v1.25.3
|
||||
```
|
||||
|
||||
Now we can run the docker container putting as volumes both config and model file:
|
||||
@@ -1322,7 +1326,7 @@ docker run -it \
|
||||
-v $(PWD)/license:/license \
|
||||
-v $(PWD)/custom_model.py:/vmanomaly/model/custom.py \
|
||||
-v $(PWD)/custom.yaml:/config.yaml \
|
||||
victoriametrics/vmanomaly:v1.25.2 /config.yaml \
|
||||
victoriametrics/vmanomaly:v1.25.3 /config.yaml \
|
||||
--licenseFile=/license
|
||||
```
|
||||
|
||||
|
||||
@@ -85,14 +85,18 @@ There is change{{% available_from "v1.13.0" anomaly %}} of [`queries`](https://d
|
||||
|
||||
> The recommended approach for using per-query `tenant_id`s is to set both `reader.tenant_id` and `writer.tenant_id` to `multitenant`. See [this section](https://docs.victoriametrics.com/anomaly-detection/components/writer/#multitenancy-support) for more details. Configurations where `reader.tenant_id` equals `writer.tenant_id` and is not `multitenant` are also considered safe, provided there is a single, DISTINCT `tenant_id` defined in the reader (either at the reader level or the query level, if set).
|
||||
|
||||
- `offset` {{% available_from "v1.25.3" anomaly %}} (string): this optional argument allows specifying a time offset for the query, which can be useful for adjusting the query time range to account for data collection delays or other timing issues. The offset is specified as a string (e.g., "15s", "-20s") and will be applied to the query time range. Valid resolutions are `ms`, `s`, `m`, `h`, `d` (milliseconds, seconds, minutes, hours, days). If not set, defaults to `0s` (0). See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#using-offsets) for more details.
|
||||
|
||||
### Per-query config example
|
||||
```yaml
|
||||
reader:
|
||||
class: 'vm'
|
||||
sampling_period: '1m'
|
||||
datasource_url: 'https://play.victoriametrics.com/' # source victoriametrics/prometheus
|
||||
max_points_per_query: 10000
|
||||
data_range: [0, 'inf']
|
||||
tenant_id: 'multitenant'
|
||||
offset: '0s' # optional, defaults to 0s if not set
|
||||
# other reader params ...
|
||||
queries:
|
||||
ingestion_rate_t1:
|
||||
@@ -109,6 +113,7 @@ reader:
|
||||
max_points_per_query: 5000 # overrides reader-level value of 10000 for `ingestion_rate` query
|
||||
tz: 'America/New_York' # to override reader-wise `tz`
|
||||
tenant_id: '2:0' # overriding tenant_id to isolate data
|
||||
offset: '-15s' # to override reader-wise `offset` and query data 15 seconds earlier to account for data collection delays
|
||||
```
|
||||
|
||||
### Config parameters
|
||||
@@ -395,10 +400,24 @@ Optional argument{{% available_from "v1.18.0" anomaly %}} specifies the [IANA](h
|
||||
Optional argument{{% available_from "v1.18.1" anomaly %}} allows defining **valid** data ranges for input of all the queries in `queries`. Defaults to `["-inf", "inf"]` if not set and can be overridden on a [per-query basis](#per-query-parameters).
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
|
||||
<span style="white-space: nowrap;">`offset`</span>
|
||||
</td>
|
||||
<td>
|
||||
|
||||
`0s`
|
||||
</td>
|
||||
<td>
|
||||
Optional argument{{% available_from "v1.25.3" anomaly %}} allows specifying a time offset for all queries in `queries`. Defaults to `0s` (0) if not set and can be overridden on a [per-query basis](#per-query-parameters).
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
Config file example:
|
||||
<br>
|
||||
Config section example:
|
||||
|
||||
```yaml
|
||||
reader:
|
||||
@@ -407,6 +426,7 @@ reader:
|
||||
tenant_id: '0:0'
|
||||
tz: 'America/New_York'
|
||||
data_range: [1, 'inf'] # reader-level
|
||||
offset: '0s' # reader-level
|
||||
queries:
|
||||
ingestion_rate:
|
||||
expr: 'sum(rate(vm_rows_inserted_total[5m])) by (type) > 0'
|
||||
@@ -414,6 +434,7 @@ reader:
|
||||
data_range: [0, 'inf'] # if set, overrides reader-level data_range
|
||||
tz: 'Australia/Sydney' # if set, overrides reader-level tz
|
||||
# tenant_id: '1:0' # if set, overrides reader-level tenant_id
|
||||
# offset: '-15s' # if set, overrides reader-level offset
|
||||
sampling_period: '1m'
|
||||
query_from_last_seen_timestamp: True # false by default
|
||||
latency_offset: '1ms'
|
||||
|
||||
@@ -305,3 +305,27 @@ reader: # can be partially reused, because its class and datasource URL are unc
|
||||
This means that the service upon restart:
|
||||
1. Won't restore the state of `zscore_online` model, because its `z_threshold` argument **has changed**, retraining from scratch is needed on the last `fit_window` = 24 hours of data for `q1`, `q2` and `q3` (as model's `queries` arg is not set so it defaults to all queries found in the reader).
|
||||
2. Will **partially** restore the state of `prophet` model, because its class and schedulers are unchanged, but **only instances trained on timeseries returned by `q1` query**. New fit/infer jobs will be set for new query `q3`. The old query `q2` artifacts will be dropped upon restart - all respective models and data for (`prophet`, `q2`) combination will be removed from the database file and from the disk.
|
||||
|
||||
|
||||
### Logger Levels
|
||||
|
||||
{{% available_from "v1.25.3" anomaly %}} `vmanomaly` service supports per-component logger levels, allowing to control the verbosity of logs for each component independently. This can be useful for debugging or monitoring specific components without overwhelming the logs with information from other components. Prefixes are also supported, allowing to set the logger level for all components with a specific prefix.
|
||||
|
||||
The logger levels can be set in the `settings` section of the config file under `logger_levels` key, where the key is the component name or prefix and the value is the desired logger level. The available logger levels are: `debug`, `info`, `warning`, `error`, and `critical`.
|
||||
|
||||
> Best used in combination with [hot-reload](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) to change the logger levels *on-the-fly* without restarting the service through a short-circuit config check that doesn't even trigger the state restoration logic.
|
||||
|
||||
Here's an example configuration that sets the logger level for the `reader` component to `debug` and for the `writer` component to `critical`, while `--loggerLevel` [command line argument](https://docs.victoriametrics.com/anomaly-detection/quickstart/#command-line-arguments) sets the default logger level to `INFO` for all (the other) components, unless overridden by the config:
|
||||
|
||||
> If commented out in hot-reload mode during hot-reload event, the logger level for the component will be set back to what `--loggerLevel` command line argument is set to, which defaults to `info` if not specified.
|
||||
|
||||
```yaml
|
||||
settings:
|
||||
n_workers: 4
|
||||
restore_state: True # enables state restoration
|
||||
logger_levels:
|
||||
reader.vm: debug # affects only VmReader logs
|
||||
model: warning # applies to all components with 'model' prefix, such as 'model.zscore_online', 'model.prophet', etc.
|
||||
# once commented out in hot-reload mode, will use the default logger level set by --loggerLevel command line argument
|
||||
# monitoring.push: critical
|
||||
```
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
- To use *vmanomaly*, part of the enterprise package, a license key is required. Obtain your key [here](https://victoriametrics.com/products/enterprise/trial/) for this tutorial or for enterprise use.
|
||||
- In the tutorial, we'll be using the following VictoriaMetrics components:
|
||||
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) (v1.123.0)
|
||||
- [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/) (v1.123.0)
|
||||
- [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) (v1.123.0)
|
||||
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) (v1.124.0)
|
||||
- [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/) (v1.124.0)
|
||||
- [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) (v1.124.0)
|
||||
- [Grafana](https://grafana.com/) (v.10.2.1)
|
||||
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/)
|
||||
- [Node exporter](https://github.com/prometheus/node_exporter#node-exporter) (v1.7.0) and [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (v0.27.0)
|
||||
@@ -315,7 +315,7 @@ Let's wrap it all up together into the `docker-compose.yml` file.
|
||||
services:
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -332,7 +332,7 @@ services:
|
||||
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
@@ -365,7 +365,7 @@ services:
|
||||
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.123.0
|
||||
image: victoriametrics/vmalert:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -387,7 +387,7 @@ services:
|
||||
restart: always
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.25.2
|
||||
image: victoriametrics/vmanomaly:v1.25.3
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
||||
@@ -241,27 +241,27 @@ services:
|
||||
- grafana_data:/var/lib/grafana/
|
||||
|
||||
vmsingle:
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
command:
|
||||
- -httpListenAddr=0.0.0.0:8429
|
||||
|
||||
vmstorage:
|
||||
image: victoriametrics/vmstorage:v1.123.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.124.0-cluster
|
||||
|
||||
vminsert:
|
||||
image: victoriametrics/vminsert:v1.123.0-cluster
|
||||
image: victoriametrics/vminsert:v1.124.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8400
|
||||
- -httpListenAddr=0.0.0.0:8480
|
||||
|
||||
vmselect:
|
||||
image: victoriametrics/vmselect:v1.123.0-cluster
|
||||
image: victoriametrics/vmselect:v1.124.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8401
|
||||
- -httpListenAddr=0.0.0.0:8481
|
||||
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
command:
|
||||
@@ -270,7 +270,7 @@ services:
|
||||
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
|
||||
|
||||
vmgateway-cluster:
|
||||
image: victoriametrics/vmgateway:v1.123.0-enterprise
|
||||
image: victoriametrics/vmgateway:v1.124.0-enterprise
|
||||
ports:
|
||||
- 8431:8431
|
||||
volumes:
|
||||
@@ -286,7 +286,7 @@ services:
|
||||
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
|
||||
|
||||
vmgateway-single:
|
||||
image: victoriametrics/vmgateway:v1.123.0-enterprise
|
||||
image: victoriametrics/vmgateway:v1.124.0-enterprise
|
||||
ports:
|
||||
- 8432:8431
|
||||
volumes:
|
||||
@@ -397,7 +397,7 @@ Once iDP configuration is done, vmagent configuration needs to be updated to use
|
||||
|
||||
```yaml
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
- ./vmagent-client-secret:/etc/vmagent/oauth2-client-secret
|
||||
|
||||
@@ -27,5 +27,5 @@ to [the latest available releases](https://docs.victoriametrics.com/victoriametr
|
||||
|
||||
## Currently supported LTS release lines
|
||||
|
||||
- v1.122.x - the latest one is [v1.122.1 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.1)
|
||||
- v1.110.x - the latest one is [v1.110.15 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.15)
|
||||
- v1.122.x - the latest one is [v1.122.2 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.2)
|
||||
- v1.110.x - the latest one is [v1.110.16 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.16)
|
||||
|
||||
@@ -57,9 +57,9 @@ and performing [regular upgrades](https://docs.victoriametrics.com/victoriametri
|
||||
Download the newest available [VictoriaMetrics release](https://docs.victoriametrics.com/victoriametrics/changelog/)
|
||||
from [DockerHub](https://hub.docker.com/r/victoriametrics/victoria-metrics) or [Quay](https://quay.io/repository/victoriametrics/victoria-metrics?tab=tags):
|
||||
```sh
|
||||
docker pull victoriametrics/victoria-metrics:v1.123.0
|
||||
docker pull victoriametrics/victoria-metrics:v1.124.0
|
||||
docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 \
|
||||
victoriametrics/victoria-metrics:v1.123.0 --selfScrapeInterval=5s -storageDataPath=victoria-metrics-data
|
||||
victoriametrics/victoria-metrics:v1.124.0 --selfScrapeInterval=5s -storageDataPath=victoria-metrics-data
|
||||
```
|
||||
_For Enterprise images see [this link](https://docs.victoriametrics.com/victoriametrics/enterprise/#docker-images)._
|
||||
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
build:
|
||||
list: never
|
||||
publishResources: false
|
||||
render: never
|
||||
---
|
||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
See [case studies for VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/casestudies/).
|
||||
|
||||
|
||||
@@ -442,12 +442,12 @@ The most common sources of cluster instability are:
|
||||
If the graphs show high CPU usage, then the cluster is likely overloaded and requires more resources.
|
||||
Note that short-lived 100% CPU spikes may not be visible in metrics with typical 10–30s scrape intervals,
|
||||
but can still cause transient network failures. In such cases, check CPU usage at the OS level with higher-resolution tools.
|
||||
Consider increasing `-vmstorageDialTimeout` and `-rpc.handshakeTimeout`{{% available_from "#" %}} to mitigate the effects of CPU spikes.
|
||||
Consider increasing `-vmstorageDialTimeout` and `-rpc.handshakeTimeout`{{% available_from "v1.124.0" %}} to mitigate the effects of CPU spikes.
|
||||
|
||||
If resource usage looks normal but networking issues still occur, then the root cause is likely outside VictoriaMetrics.
|
||||
This may be caused by unreliable or congested network links, especially across availability zones or regions.
|
||||
In multi-AZ setups, consider [a multi-level cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#multi-level-cluster-setup) with region-local load balancers to reduce cross-zone connections.
|
||||
If the network cannot be improved, increasing timeouts such as `-vmstorageDialTimeout`, `-rpc.handshakeTimeout`{{% available_from "#" %}}, or `-search.maxQueueDuration` may help, but should be done cautiously, as higher timeouts can impact cluster stability in other ways.
|
||||
If the network cannot be improved, increasing timeouts such as `-vmstorageDialTimeout`, `-rpc.handshakeTimeout`{{% available_from "v1.124.0" %}}, or `-search.maxQueueDuration` may help, but should be done cautiously, as higher timeouts can impact cluster stability in other ways.
|
||||
Keep in mind that VictoriaMetrics assumes reliable networking between components. If the network is unstable, the overall cluster stability may degrade regardless of resource availability.
|
||||
|
||||
The obvious solution against VictoriaMetrics cluster instability is to make sure cluster components
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
build:
|
||||
list: never
|
||||
publishResources: false
|
||||
render: never
|
||||
---
|
||||
The following `tip` changes can be tested by building VictoriaMetrics components from the latest commits according to the following docs:
|
||||
|
||||
* [How to build single-node VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-build-from-sources)
|
||||
@@ -18,20 +24,32 @@ See also [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-rel
|
||||
|
||||
## tip
|
||||
|
||||
* FEATURE: upgrade Go builder from Go1.24.6 to Go1.25. See [Go1.25 release notes](https://tip.golang.org/doc/go1.25).
|
||||
|
||||
## [v1.124.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.124.0)
|
||||
|
||||
Released at 2025-08-15
|
||||
|
||||
* SECURITY: upgrade Go builder from Go1.24.5 to Go1.24.6. See [the list of issues addressed in Go1.24.6](https://github.com/golang/go/issues?q=milestone%3AGo1.24.6+label%3ACherryPickApproved).
|
||||
|
||||
* FEATURE: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): protect graphite `/render` API endpoint with new flag `-search.maxGraphitePathExpressionLen`. See this PR [#9534](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9534) for details.
|
||||
* FEATURE: expose `vm_total_disk_space_bytes` metric at the [`/metrics` page](https://docs.victoriametrics.com/#monitoring), which shows the total disk space for the data directory specified via [`-storageDataPath`](https://docs.victoriametrics.com/#storage). This metric can be useful for building alerts and graphs for the percentage of free disk space via `vm_free_disk_space_bytes / vm_total_disk_space_bytes`. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9523#issuecomment-3149459926).
|
||||
* FEATURE: all: leave non-existing environment variables as is in config files instead of failure. For example, if the file referred by `-promscrape.config` contains `%{NON_EXISTING_ENV_VAR}` placeholder, then it is left as is instead of failing to load the file. This simplifies the usage of environment variables in config files and in command-line flags according to [these docs](https://docs.victoriametrics.com/victoriametrics/#environment-variables). Users can easily notice non-existing env vars in config files and in command-line flags by looking at their values - they will literally contain `%{NON_EXISTING_ENV_VAR}` strings.
|
||||
* FEATURE: `vmselect`, `vminsert` and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): add rpc handshake timeout configuration via `-rpc.handshakeTimeout` flag (default 5s). Set deadline for the entire handshake process instead of per-operation timeout. See [#9345](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9345) for more details.
|
||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): add `-enableMetadata` command-line flag to allow sending metadata to the configured `-remoteWrite.url`, metadata can be scraped from targets, received via VictoriaMetrics remote write, Prometheus remote write v1 or OpenTelemetry protocol. See [#2974](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2974).
|
||||
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent/): treat single `remoteWrite.disableOnDiskQueue` flag the same way as if it is explicitly specified for each remote write url. See [#9565](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9565) for details.
|
||||
* BUGFIX: [vmalert-tool](https://docs.victoriametrics.com/victoriametrics/vmalert-tool/): print a proper error message when templating function fails during execution. Previously, vmalert-tool could throw a misleading panic message instead.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/victoriametrics/vmauth/): properly read proxy-protocol header. See this PR [#9546](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9546) for details.
|
||||
* BUGFIX: [dashboards/vmagent](https://grafana.com/grafana/dashboards/12683): fix samples rate panel not showing data in case vmagent is not scraping metrics. Previously, the panel would not display "samples in" results if vmagent only accepts metrics via push protocols.
|
||||
* BUGFIX: [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): prevent resource leak on partial responses for `/federate` API requests. See this PR [#9536](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9536) for details. Thanks to @fxrlv.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): prevent performance degradation on hitting daily or hourly series cardinality limits. See this issue [#9554](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9554) for details.
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): gracefully handle `histogram_quantile` calculation for histograms where buckets are created on demand. This change isn't needed for users who use native instrumentation SDKs for metrics from [VictoriaMetrics](https://github.com/VictoriaMetrics/metrics), [Prometheus](https://prometheus.io/docs/instrumenting/clientlibs/) or [OpenTelemetry](https://opentelemetry.io/docs/languages/).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): return a proper error message when the function argument is expected to be a string or scalar.
|
||||
* BUGFIX: [dashboards/victoriametrics-cluster](https://grafana.com/grafana/dashboards/11176): fix panels showing 99th percentile of series or samples read per query or per series. Before, panels were showing the summarized value across all vmselect instances, which didn't make much sense. Now, panels show the max value across the vmselect instances, making it easier to understand complexity of the heaviest queries served.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): fix potential data race and missing firing states when replaying alerting rule with `-replay.ruleEvaluationConcurrency>1`.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): fix the `{{ $activeAt }}` variable value in annotation templating when the alert has already triggered. See this issue [#9543](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543) for details.
|
||||
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup/), [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager/): allow enabling checksum calculation for PUT requests by using `-s3ChecksumAlgorithm` command-line flag. This is required for S3 configurations with WORM being enabled. See [#9532](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9532).
|
||||
|
||||
## [v1.123.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.123.0)
|
||||
|
||||
@@ -45,11 +63,31 @@ Released at 2025-08-01
|
||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/) and [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/): improve Kubernetes service discovery performance with a large amount of configured role selectors. See this [#9354](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9354) issue for details. Thanks to @fxrlv.
|
||||
* FEATURE: [vmctl](https://docs.victoriametrics.com/victoriametrics/vmctl/): add an option to change path for temporary files storage when migrating from Prometheus snapshots. See [#9505](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9505) for the details.
|
||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/) and [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/): improve Kubernetes service discovery performance with a large amount of configured role selectors. See this [#9354](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9354) issue for details. Thanks to @fxrlv.
|
||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): add `-enableMetadata` command-line flag to allow sending metadata to the configured `-remoteWrite.url`, metadata can be scraped from targets, received via VictoriaMetrics remote write, Prometheus remote write v1 or OpenTelemetry protocol. See [#2974](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2974).
|
||||
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/victoriametrics/vmauth/): do not configure `-httpListenAddr.useProxyProtocol` for `-httpInternalListenAddr`. See this issue [#9515](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9515) for details.
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#vmui): always display the tenant selector if the list of tenants is not empty. See [#9396](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9396).
|
||||
|
||||
## [v1.122.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.2)
|
||||
|
||||
Released at 2025-08-15
|
||||
|
||||
**v1.122.x is a line of [LTS releases](https://docs.victoriametrics.com/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html).
|
||||
All these fixes are also included in [the latest community release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
The v1.122.x line will be supported for at least 12 months since [v1.122.0](https://docs.victoriametrics.com/changelog/#v11220) release**
|
||||
|
||||
* SECURITY: upgrade Go builder from Go1.24.5 to Go1.24.6. See [the list of issues addressed in Go1.24.6](https://github.com/golang/go/issues?q=milestone%3AGo1.24.6+label%3ACherryPickApproved).
|
||||
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent/): treat single `remoteWrite.disableOnDiskQueue` flag the same way as if it is explicitly specified for each remote write url. See [#9565](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9565) for details.
|
||||
* BUGFIX: [vmalert-tool](https://docs.victoriametrics.com/victoriametrics/vmalert-tool/): print a proper error message when templating function fails during execution. Previously, vmalert-tool could throw a misleading panic message instead.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/victoriametrics/vmauth/): properly read proxy-protocol header. See this PR [#9546](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9546) for details.
|
||||
* BUGFIX: [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): prevent resource leak on partial responses for `/federate` API requests. See this PR [#9536](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9536) for details. Thanks to @fxrlv.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): prevent performance degradation on hitting daily or hourly series cardinality limits. See this issue [#9554](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9554) for details.
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): gracefully handle `histogram_quantile` calculation for histograms where buckets are created on demand. This change isn't needed for users who use native instrumentation SDKs for metrics from [VictoriaMetrics](https://github.com/VictoriaMetrics/metrics), [Prometheus](https://prometheus.io/docs/instrumenting/clientlibs/) or [OpenTelemetry](https://opentelemetry.io/docs/languages/).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): return a proper error message when the function argument is expected to be a string or scalar.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): fix potential data race and missing firing states when replaying alerting rule with `-replay.ruleEvaluationConcurrency>1`.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): fix the `{{ $activeAt }}` variable value in annotation templating when the alert has already triggered. See this issue [#9543](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543) for details.
|
||||
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup/), [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager/): allow enabling checksum calculation for PUT requests by using `-s3ChecksumAlgorithm` command-line flag. This is required for S3 configurations with WORM being enabled. See [#9532](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9532).
|
||||
|
||||
## [v1.122.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.1)
|
||||
|
||||
Released at 2025-08-01
|
||||
@@ -406,6 +444,25 @@ Released at 2025-02-10
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): fix discrepancies when using `or` binary operator. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7759) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7640) issues for details.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): properly update number of unique series for [cardinality limiter](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#cardinality-limiter) on ingestion. Previously, limit could undercount the real number of the ingested unique series.
|
||||
|
||||
## [v1.110.16](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.16)
|
||||
|
||||
Released at 2025-08-15
|
||||
|
||||
**v1.110.x is a line of [LTS releases](https://docs.victoriametrics.com/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html).
|
||||
All these fixes are also included in [the latest community release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
The v1.110.x line will be supported for at least 12 months since [v1.110.0](https://docs.victoriametrics.com/changelog/#v11100) release**
|
||||
|
||||
* SECURITY: upgrade Go builder from Go1.24.5 to Go1.24.6. See [the list of issues addressed in Go1.24.6](https://github.com/golang/go/issues?q=milestone%3AGo1.24.6+label%3ACherryPickApproved).
|
||||
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent/): treat single `remoteWrite.disableOnDiskQueue` flag the same way as if it is explicitly specified for each remote write url. See [#9565](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9565) for details.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/victoriametrics/vmauth/): properly read proxy-protocol header. See this PR [#9546](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9546) for details.
|
||||
* BUGFIX: [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): prevent resource leak on partial responses for `/federate` API requests. See this PR [#9536](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9536) for details. Thanks to @fxrlv.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): prevent performance degradation on hitting daily or hourly series cardinality limits. See this issue [#9554](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9554) for details.
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): gracefully handle `histogram_quantile` calculation for histograms where buckets are created on demand. This change isn't needed for users who use native instrumentation SDKs for metrics from [VictoriaMetrics](https://github.com/VictoriaMetrics/metrics), [Prometheus](https://prometheus.io/docs/instrumenting/clientlibs/) or [OpenTelemetry](https://opentelemetry.io/docs/languages/).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): return a proper error message when the function argument is expected to be a string or scalar.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): fix the `{{ $activeAt }}` variable value in annotation templating when the alert has already triggered. See this issue [#9543](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9543) for details.
|
||||
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup/), [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager/): allow enabling checksum calculation for PUT requests by using `-s3ChecksumAlgorithm` command-line flag. This is required for S3 configurations with WORM being enabled. See [#9532](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9532).
|
||||
|
||||
## [v1.110.15](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.15)
|
||||
|
||||
Released at 2025-08-01
|
||||
|
||||
@@ -89,7 +89,7 @@ VictoriaMetrics Enterprise components are available in the following forms:
|
||||
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
|
||||
|
||||
Binary releases of VictoriaMetrics Enterprise are available [at the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz`.
|
||||
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz`.
|
||||
|
||||
In order to run binary release of VictoriaMetrics Enterprise component, please download the `*-enterprise.tar.gz` archive for your OS and architecture
|
||||
from the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) and unpack it. Then run the unpacked binary.
|
||||
@@ -107,8 +107,8 @@ For example, the following command runs VictoriaMetrics Enterprise binary with t
|
||||
obtained at [this page](https://victoriametrics.com/products/enterprise/trial/):
|
||||
|
||||
```sh
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz
|
||||
tar -xzf victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz
|
||||
tar -xzf victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz
|
||||
./victoria-metrics-prod -license=BASE64_ENCODED_LICENSE_KEY
|
||||
```
|
||||
|
||||
@@ -123,7 +123,7 @@ Alternatively, VictoriaMetrics Enterprise license can be stored in the file and
|
||||
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
|
||||
|
||||
Docker images for VictoriaMetrics Enterprise are available at VictoriaMetrics [Docker Hub](https://hub.docker.com/u/victoriametrics) and [Quay](https://quay.io/organization/victoriametrics).
|
||||
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.123.0-enterprise`.
|
||||
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.124.0-enterprise`.
|
||||
|
||||
In order to run Docker image of VictoriaMetrics Enterprise component, it is required to provide the license key via command-line
|
||||
flag as described [here](#binary-releases).
|
||||
@@ -133,13 +133,13 @@ Enterprise license key can be obtained at [this page](https://victoriametrics.co
|
||||
For example, the following command runs VictoriaMetrics Enterprise Docker image with the specified license key:
|
||||
|
||||
```sh
|
||||
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.123.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
|
||||
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.124.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
|
||||
```
|
||||
|
||||
Alternatively, the license code can be stored in the file and then referred via `-licenseFile` command-line flag:
|
||||
|
||||
```sh
|
||||
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.123.0-enterprise -licenseFile=/path/to/vm-license
|
||||
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.124.0-enterprise -licenseFile=/path/to/vm-license
|
||||
```
|
||||
|
||||
Example docker-compose configuration:
|
||||
@@ -148,7 +148,7 @@ version: "3.5"
|
||||
services:
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
@@ -180,7 +180,7 @@ is used to provide key in plain-text:
|
||||
```yaml
|
||||
server:
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
|
||||
license:
|
||||
key: {BASE64_ENCODED_LICENSE_KEY}
|
||||
@@ -191,7 +191,7 @@ In order to provide key via existing secret, the following values file is used:
|
||||
```yaml
|
||||
server:
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
|
||||
license:
|
||||
secret:
|
||||
@@ -240,7 +240,7 @@ spec:
|
||||
license:
|
||||
key: {BASE64_ENCODED_LICENSE_KEY}
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
```
|
||||
|
||||
In order to provide key via existing secret, the following custom resource is used:
|
||||
@@ -257,7 +257,7 @@ spec:
|
||||
name: vm-license
|
||||
key: license
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
```
|
||||
|
||||
Example secret with license key:
|
||||
@@ -290,7 +290,7 @@ Builds are available for amd64 and arm64
|
||||
|
||||
Example archive:
|
||||
|
||||
`victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz`
|
||||
`victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz`
|
||||
|
||||
Includes:
|
||||
|
||||
@@ -299,7 +299,7 @@ Includes:
|
||||
|
||||
Example Docker image:
|
||||
|
||||
`victoriametrics/victoria-metrics:v1.123.0-enterprise-fips` – uses the FIPS-compatible binary and based on `scratch` image.
|
||||
`victoriametrics/victoria-metrics:v1.124.0-enterprise-fips` – uses the FIPS-compatible binary and based on `scratch` image.
|
||||
|
||||
## Monitoring license expiration
|
||||
|
||||
|
||||
@@ -36,8 +36,8 @@ scrape_configs:
|
||||
After you created the `scrape.yaml` file, download and unpack [single-node VictoriaMetrics](https://docs.victoriametrics.com/) to the same directory:
|
||||
|
||||
```
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
```
|
||||
|
||||
Then start VictoriaMetrics and instruct it to scrape targets defined in `scrape.yaml` and save scraped metrics
|
||||
@@ -152,8 +152,8 @@ Then start [single-node VictoriaMetrics](https://docs.victoriametrics.com/) acco
|
||||
|
||||
```yaml
|
||||
# Download and unpack single-node VictoriaMetrics
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
|
||||
# Run single-node VictoriaMetrics with the given scrape.yaml
|
||||
./victoria-metrics-prod -promscrape.config=scrape.yaml
|
||||
|
||||
@@ -598,7 +598,7 @@ e.g. it sets `scrape_series_added` metric to zero. See [these docs](#automatical
|
||||
|
||||
## Metric metadata
|
||||
|
||||
By default, `vmagent` ignores metric metadata exposed by scrape targets in [Prometheus exposition format](https://github.com/prometheus/docs/blob/main/docs/instrumenting/exposition_formats.md), received via [Prometheus remote write v1](https://prometheus.io/docs/specs/prw/remote_write_spec/) or [OpenTelemetry protocol](https://github.com/open-telemetry/opentelemetry-proto/blob/v1.7.0/opentelemetry/proto/metrics/v1/metrics.proto). Set `-enableMetadata=true` to enable metadata processing{{% available_from "#" %}}.
|
||||
By default, `vmagent` ignores metric metadata exposed by scrape targets in [Prometheus exposition format](https://github.com/prometheus/docs/blob/main/docs/instrumenting/exposition_formats.md), received via [Prometheus remote write v1](https://prometheus.io/docs/specs/prw/remote_write_spec/) or [OpenTelemetry protocol](https://github.com/open-telemetry/opentelemetry-proto/blob/v1.7.0/opentelemetry/proto/metrics/v1/metrics.proto). Set `-enableMetadata=true` to enable metadata processing{{% available_from "v1.124.0" %}}.
|
||||
During processing, metadata won't be dropped or modified by [relabeling](https://docs.victoriametrics.com/victoriametrics/relabeling/) or [streaming aggregation](https://docs.victoriametrics.com/victoriametrics/stream-aggregation/).
|
||||
|
||||
When `-enableMultitenantHandlers` is enabled, vmagent adds tenant info to metadata received via the [multitenant endpoints](https://docs.victoriametrics.com/victoriametrics/vmagent/#multitenancy) (/insert/<accountID>/<suffix>). However, if `vm_account_id` or `vm_project_id` labels are added directly to metrics before reaching vmagent, and vmagent writes to the [vminsert multitenant endpoints](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#multitenancy-via-labels), the tenant info won't be attached and the metadata will be stored under the default tenant of VictoriaMetrics cluster.
|
||||
@@ -805,8 +805,8 @@ In this case `-remoteWrite.disableOnDiskQueue` command-line flag can be passed t
|
||||
and the `-remoteWrite.disableOnDiskQueue` command-line flag is set:
|
||||
|
||||
- It returns `429 Too Many Requests` HTTP error to clients, which send data to `vmagent` via [supported HTTP endpoints](#how-to-push-data-to-vmagent).
|
||||
If `-remoteWrite.dropSamplesOnOverload` command-line flag is set or if multiple `-remoteWrite.disableOnDiskQueue` command-line flags are set
|
||||
for different `-remoteWrite.url` options, then the ingested samples are silently dropped instead of returning the error to clients.
|
||||
If `-remoteWrite.dropSamplesOnOverload` command-line flag is set or if multiple `-remoteWrite.url` command-line flags are set,
|
||||
then the ingested samples are silently dropped instead of returning the error to clients.
|
||||
- It suspends consuming data from [Kafka side](#reading-metrics-from-kafka) or [Google PubSub side](#google-pubsub-integration) until the remote storage becomes available.
|
||||
If `-remoteWrite.dropSamplesOnOverload` command-line flag is set or if multiple `-remoteWrite.disableOnDiskQueue` command-line flags are set
|
||||
for different `-remoteWrite.url` options, then the fetched samples are silently dropped instead of suspending data consumption from Kafka or Google PubSub.
|
||||
|
||||
@@ -865,8 +865,8 @@ There are following non-required `replay` flags:
|
||||
* `-replay.disableProgressBar` - whether to disable progress bar which shows progress work.
|
||||
Progress bar may generate a lot of log records, which is not formatted as standard VictoriaMetrics logger.
|
||||
It could break logs parsing by external system and generate additional load on it.
|
||||
* `-replay.ruleEvaluationConcurrency` - The maximum number of concurrent `/query_range` requests for a single rule.
|
||||
Increasing this value when replaying for a long time and a single request range is limited by `-replay.maxDatapointsPerQuery`.
|
||||
* `-replay.ruleEvaluationConcurrency` - The maximum number of concurrent `/query_range` requests when replay recording rule or alerting rule with for=0.
|
||||
Increasing this value when replaying for a long time, since each request is limited by `-replay.maxDatapointsPerQuery`.
|
||||
The default value is `1`.
|
||||
|
||||
See full description for these flags in `./vmalert -help`.
|
||||
|
||||
@@ -110,7 +110,7 @@ Backup manager launched with the following configuration:
|
||||
```sh
|
||||
export NODE_IP=192.168.0.10
|
||||
export VMSTORAGE_ENDPOINT=http://127.0.0.1:8428
|
||||
./vmbackupmanager -dst=gs://vmstorage-data/$NODE_IP -credsFilePath=credentials.json -storageDataPath=/vmstorage-data -snapshot.createURL=$VMSTORAGE_ENDPOINT/snapshot/create -eula
|
||||
./vmbackupmanager -dst=gs://vmstorage-data/$NODE_IP -credsFilePath=credentials.json -storageDataPath=/vmstorage-data -snapshot.createURL=$VMSTORAGE_ENDPOINT/snapshot/create -licenseFile=/path/to/vm-license
|
||||
```
|
||||
|
||||
Expected logs in vmbackupmanager:
|
||||
@@ -170,7 +170,7 @@ We enable backup retention policy for backup manager by using following configur
|
||||
export NODE_IP=192.168.0.10
|
||||
export VMSTORAGE_ENDPOINT=http://127.0.0.1:8428
|
||||
./vmbackupmanager -dst=gs://vmstorage-data/$NODE_IP -credsFilePath=credentials.json -storageDataPath=/vmstorage-data -snapshot.createURL=$VMSTORAGE_ENDPOINT/snapshot/create
|
||||
-keepLastDaily=3 -eula
|
||||
-keepLastDaily=3 -licenseFile=/path/to/vm-license
|
||||
```
|
||||
|
||||
Expected logs in backup manager on start:
|
||||
@@ -196,14 +196,14 @@ You can protect any backup against deletion by retention policy with the `vmback
|
||||
For instance:
|
||||
|
||||
```sh
|
||||
./vmbackupmanager backup lock daily/2021-02-13 -dst=<DST_PATH> -storageDataPath=/vmstorage-data -eula
|
||||
./vmbackupmanager backup lock daily/2021-02-13 -dst=<DST_PATH> -storageDataPath=/vmstorage-data
|
||||
```
|
||||
|
||||
After that the backup won't be deleted by retention policy.
|
||||
You can view the `locked` attribute in backup list:
|
||||
|
||||
```sh
|
||||
./vmbackupmanager backup list -dst=<DST_PATH> -storageDataPath=/vmstorage-data -eula
|
||||
./vmbackupmanager backup list -dst=<DST_PATH> -storageDataPath=/vmstorage-data
|
||||
```
|
||||
|
||||
To remove protection, you can use the command `vmbackupmanager backups unlock`.
|
||||
@@ -211,7 +211,7 @@ To remove protection, you can use the command `vmbackupmanager backups unlock`.
|
||||
For example:
|
||||
|
||||
```sh
|
||||
./vmbackupmanager backup unlock daily/2021-02-13 -dst=<DST_PATH> -storageDataPath=/vmstorage-data -eula
|
||||
./vmbackupmanager backup unlock daily/2021-02-13 -dst=<DST_PATH> -storageDataPath=/vmstorage-data
|
||||
```
|
||||
|
||||
## API methods
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
build:
|
||||
list: never
|
||||
publishResources: false
|
||||
render: never
|
||||
---
|
||||
VictoriaMetrics command-line tool (**vmctl**) provides the following migration modes:
|
||||
- [Prometheus](https://docs.victoriametrics.com/victoriametrics/vmctl/prometheus/) to VictoriaMetrics via [snapshot](https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot)
|
||||
- [InfluxDB](https://docs.victoriametrics.com/victoriametrics/vmctl/influxdb/) to VictoriaMetrics
|
||||
@@ -26,9 +32,9 @@ vmctl command-line tool is available as:
|
||||
|
||||
Download and unpack vmctl:
|
||||
```sh
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/vmutils-darwin-arm64-v1.123.0.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/vmutils-darwin-arm64-v1.124.0.tar.gz
|
||||
|
||||
tar xzf vmutils-darwin-arm64-v1.123.0.tar.gz
|
||||
tar xzf vmutils-darwin-arm64-v1.124.0.tar.gz
|
||||
```
|
||||
|
||||
Once binary is unpacked, see the full list of supported modes by running the following command:
|
||||
@@ -352,4 +358,4 @@ Moved to [vmctl/victoriametrics](https://docs.victoriametrics.com/victoriametric
|
||||
|
||||
###### Tuning
|
||||
|
||||
Moved to [vmctl#migration-tips](https://docs.victoriametrics.com/victoriametrics/vmctl#migration-tips).
|
||||
Moved to [vmctl#migration-tips](https://docs.victoriametrics.com/victoriametrics/vmctl#migration-tips).
|
||||
|
||||
@@ -88,7 +88,7 @@ Start the single version of VictoriaMetrics
|
||||
Start vmgateway
|
||||
|
||||
```sh
|
||||
./bin/vmgateway -eula -enable.auth -read.url http://localhost:8428 --write.url http://localhost:8428
|
||||
./bin/vmgateway -licenseFile=/path/to/vm-license -enable.auth -read.url http://localhost:8428 --write.url http://localhost:8428
|
||||
```
|
||||
|
||||
Retrieve data from the database
|
||||
@@ -163,9 +163,9 @@ EOF
|
||||
# start cluster
|
||||
|
||||
# start vmstorage, vmselect and vminsert
|
||||
./bin/vmstorage -eula
|
||||
./bin/vmselect -eula -storageNode 127.0.0.1:8401
|
||||
./bin/vminsert -eula -storageNode 127.0.0.1:8400
|
||||
./bin/vmstorage -licenseFile=/path/to/vm-license
|
||||
./bin/vmselect -licenseFile=/path/to/vm-license -storageNode 127.0.0.1:8401
|
||||
./bin/vminsert -licenseFile=/path/to/vm-license -storageNode 127.0.0.1:8400
|
||||
|
||||
# create base rate limiting config:
|
||||
cat << EOF > limit.yaml
|
||||
@@ -184,7 +184,7 @@ limits:
|
||||
EOF
|
||||
|
||||
# start gateway with `-clusterMode`
|
||||
./bin/vmgateway -eula -enable.rateLimit -ratelimit.config limit.yaml -datasource.url http://localhost:8428 -enable.auth -clusterMode -write.url=http://localhost:8480 --read.url=http://localhost:8481
|
||||
./bin/vmgateway -licenseFile=/path/to/vm-license -enable.rateLimit -ratelimit.config limit.yaml -datasource.url http://localhost:8428 -enable.auth -clusterMode -write.url=http://localhost:8480 --read.url=http://localhost:8481
|
||||
|
||||
# ingest simple metric to tenant 1:5
|
||||
curl 'http://localhost:8431/api/v1/import/prometheus' -X POST -d 'foo{bar="baz1"} 123' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MjAxNjIwMDAwMDAsInZtX2FjY2VzcyI6eyJ0ZW5hbnRfaWQiOnsiYWNjb3VudF9pZCI6MTV9fX0.PB1_KXDKPUp-40pxOGk6lt_jt9Yq80PIMpWVJqSForQ'
|
||||
@@ -210,7 +210,7 @@ Note that both flags support passing multiple keys and also can be used together
|
||||
|
||||
Example usage:
|
||||
```sh
|
||||
./bin/vmgateway -eula \
|
||||
./bin/vmgateway -licenseFile=/path/to/vm-license \
|
||||
-enable.auth \
|
||||
-write.url=http://localhost:8480 \
|
||||
-read.url=http://localhost:8481 \
|
||||
@@ -238,7 +238,7 @@ When `auth.oidcDiscoveryEndpoints` is specified `vmgateway` will fetch JWKS keys
|
||||
|
||||
Example usage for tokens issued by Azure Active Directory:
|
||||
```sh
|
||||
/bin/vmgateway -eula \
|
||||
/bin/vmgateway -licenseFile=/path/to/vm-license \
|
||||
-enable.auth \
|
||||
-write.url=http://localhost:8480 \
|
||||
-read.url=http://localhost:8481 \
|
||||
@@ -247,7 +247,7 @@ Example usage for tokens issued by Azure Active Directory:
|
||||
|
||||
Example usage for tokens issued by Google:
|
||||
```sh
|
||||
/bin/vmgateway -eula \
|
||||
/bin/vmgateway -licenseFile=/path/to/vm-license \
|
||||
-enable.auth \
|
||||
-write.url=http://localhost:8480 \
|
||||
-read.url=http://localhost:8481 \
|
||||
@@ -263,7 +263,7 @@ When `auth.jwksEndpoints` is specified `vmgateway` will fetch public keys from t
|
||||
|
||||
Example usage for tokens issued by Azure Active Directory:
|
||||
```sh
|
||||
/bin/vmgateway -eula \
|
||||
/bin/vmgateway -licenseFile=/path/to/vm-license \
|
||||
-enable.auth \
|
||||
-write.url=http://localhost:8480 \
|
||||
-read.url=http://localhost:8481 \
|
||||
@@ -272,7 +272,7 @@ Example usage for tokens issued by Azure Active Directory:
|
||||
|
||||
Example usage for tokens issued by Google:
|
||||
```sh
|
||||
/bin/vmgateway -eula \
|
||||
/bin/vmgateway -licenseFile=/path/to/vm-license \
|
||||
-enable.auth \
|
||||
-write.url=http://localhost:8480 \
|
||||
-read.url=http://localhost:8481 \
|
||||
|
||||
22
go.mod
22
go.mod
@@ -1,12 +1,6 @@
|
||||
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
go 1.24.6
|
||||
|
||||
// This is needed in order to avoid vmbackup and vmrestore binary size increase by 20MB
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
//
|
||||
// TODO: remove this entry after https://github.com/googleapis/google-cloud-go/issues/11448 is fixed
|
||||
replace cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0
|
||||
go 1.25.0
|
||||
|
||||
// Pin AWS libraries to version before 2025-01-15
|
||||
// Release notes: https://github.com/aws/aws-sdk-go-v2/releases/tag/release-2025-01-15
|
||||
@@ -66,13 +60,18 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.4 // indirect
|
||||
cloud.google.com/go/auth v0.16.3 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
|
||||
@@ -92,11 +91,15 @@ require (
|
||||
github.com/aws/smithy-go v1.22.5 // indirect
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dennwc/varint v1.0.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
@@ -127,6 +130,7 @@ require (
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.131.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.131.0 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
@@ -136,9 +140,11 @@ require (
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/component v1.37.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.37.0 // indirect
|
||||
@@ -151,6 +157,7 @@ require (
|
||||
go.opentelemetry.io/collector/processor v1.37.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
|
||||
@@ -158,6 +165,7 @@ require (
|
||||
go.opentelemetry.io/otel/log v0.13.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/goleak v1.3.0 // indirect
|
||||
|
||||
37
go.sum
37
go.sum
@@ -1,3 +1,5 @@
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.121.4 h1:cVvUiY0sX0xwyxPwdSU2KsF9knOVmtRyAMt8xou0iTs=
|
||||
cloud.google.com/go v0.121.4/go.mod h1:XEBchUiHFJbz4lKBZwYBDHV/rSyfFktk737TLDU089s=
|
||||
cloud.google.com/go/auth v0.16.3 h1:kabzoQ9/bobUmnseYnBO6qQG7q4a/CffFRlJSxv2wCc=
|
||||
@@ -8,10 +10,16 @@ cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeO
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
|
||||
cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
|
||||
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
|
||||
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
|
||||
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
|
||||
cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI=
|
||||
cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 h1:Hr5FTipp7SL07o2FvoVOX9HRiRH3CR3Mj8pxqCcdD5A=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
@@ -34,8 +42,16 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJ
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20250728123024-98593029b5aa h1:qTB0QsUpBe/WzXQKcALj3Ossizb2daUHXmaVoWFdVlE=
|
||||
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20250728123024-98593029b5aa/go.mod h1:jeov7Un2x4Dpxw2Qn2MWa0kbwNn1Gc2Iw+8gvPqGsZk=
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
|
||||
@@ -131,8 +147,11 @@ github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
||||
@@ -145,6 +164,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -340,6 +361,8 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBw
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -376,6 +399,8 @@ github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAz
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/component v1.37.0 h1:yc5X0WhZwlpJ+W8Sg1fpRRjiUu3nByLe1wVOKWWRWRQ=
|
||||
@@ -416,6 +441,8 @@ go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCu
|
||||
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 h1:FGre0nZh5BSw7G73VpT3xs38HchsfPsa2aZtMp0NPOs=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0/go.mod h1:X2PYPViI2wTPIMIOBjG17KNybTzsrATnvPJ02kkz7LM=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 h1:rbRJ8BBoVMsQShESYZ0FkvcITu8X8QNwJogcLUmDNNw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0/go.mod h1:ru6KHrNtNHxM4nD/vd6QrLVWgKhxPYgblq4VAtNawTQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0 h1:wCeciVlAfb5DC8MQl/DlmAv/FVPNpQgFvI/71+hatuc=
|
||||
@@ -424,6 +451,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/X
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls=
|
||||
go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E=
|
||||
go.opentelemetry.io/otel/log/logtest v0.13.0 h1:xxaIcgoEEtnwdgj6D6Uo9K/Dynz9jqIxSDu2YObJ69Q=
|
||||
|
||||
@@ -31,6 +31,8 @@ var (
|
||||
s3StorageClass = flag.String("s3StorageClass", "", "The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, "+
|
||||
"DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.\n"+
|
||||
"See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html")
|
||||
s3ChecksumAlgorithm = flag.String("s3ChecksumAlgorithm", "", "Objects integrity checksum algorithm which is applied while uploading objects to AWS S3. "+
|
||||
"Supported values are: SHA256, SHA1, CRC32C, CRC32")
|
||||
s3TLSInsecureSkipVerify = flag.Bool("s3TLSInsecureSkipVerify", false, "Whether to skip TLS verification when connecting to the S3 endpoint.")
|
||||
s3Tags = flag.String("s3ObjectTags", "", `S3 tags to be set for uploaded objects. Must be set in JSON format: {"param1":"value1",...,"paramN":"valueN"}.`)
|
||||
)
|
||||
@@ -262,7 +264,8 @@ func NewRemoteFS(ctx context.Context, path string) (common.RemoteFS, error) {
|
||||
ConfigFilePath: *configFilePath,
|
||||
CustomEndpoint: *customS3Endpoint,
|
||||
TLSInsecureSkipVerify: *s3TLSInsecureSkipVerify,
|
||||
StorageClass: s3remote.StringToS3StorageClass(*s3StorageClass),
|
||||
StorageClass: s3remote.StringToStorageClass(*s3StorageClass),
|
||||
ChecksumAlgorithm: s3remote.StringToChecksumAlgorithm(*s3ChecksumAlgorithm),
|
||||
S3ForcePathStyle: *s3ForcePathStyle,
|
||||
ProfileName: *configProfile,
|
||||
Bucket: bucket,
|
||||
|
||||
@@ -44,7 +44,7 @@ type FS struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// envLoookupFunc is used for looking up environment variables in tests.
|
||||
// envLookupFunc is used for looking up environment variables in tests.
|
||||
envLookupFunc func(name string) (string, bool)
|
||||
}
|
||||
|
||||
|
||||
@@ -177,7 +177,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||
pathReal, err := filepath.EvalSymlinks(pathOrig)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) || strings.Contains(err.Error(), "no such file or directory") {
|
||||
// Remove symlink that points to nowere.
|
||||
// Remove symlink that points to nowhere.
|
||||
logger.Infof("removing broken symlink %q", pathOrig)
|
||||
if err := os.Remove(pathOrig); err != nil {
|
||||
return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
|
||||
|
||||
@@ -45,11 +45,16 @@ func validateStorageClass(storageClass s3types.StorageClass) error {
|
||||
return fmt.Errorf("unsupported S3 storage class: %s. Supported values: %v", storageClass, supportedStorageClasses)
|
||||
}
|
||||
|
||||
// StringToS3StorageClass converts string types to AWS S3 StorageClass type for value comparison
|
||||
func StringToS3StorageClass(sc string) s3types.StorageClass {
|
||||
// StringToStorageClass converts string types to AWS S3 StorageClass type for value comparison
|
||||
func StringToStorageClass(sc string) s3types.StorageClass {
|
||||
return s3types.StorageClass(sc)
|
||||
}
|
||||
|
||||
// StringToChecksumAlgorithm converts string types to AWS S3 ChecksumAlgorithm type for value comparison
|
||||
func StringToChecksumAlgorithm(alg string) s3types.ChecksumAlgorithm {
|
||||
return s3types.ChecksumAlgorithm(alg)
|
||||
}
|
||||
|
||||
// FS represents filesystem for backups in S3.
|
||||
//
|
||||
// Init must be called before calling other FS methods.
|
||||
@@ -75,6 +80,9 @@ type FS struct {
|
||||
// Object Storage Class: https://aws.amazon.com/s3/storage-classes/
|
||||
StorageClass s3types.StorageClass
|
||||
|
||||
// Checksum algorithm
|
||||
ChecksumAlgorithm s3types.ChecksumAlgorithm
|
||||
|
||||
// The name of S3 config profile to use.
|
||||
ProfileName string
|
||||
|
||||
@@ -340,12 +348,13 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
|
||||
r: r,
|
||||
}
|
||||
input := &s3.PutObjectInput{
|
||||
Bucket: aws.String(fs.Bucket),
|
||||
Key: aws.String(path),
|
||||
Body: sr,
|
||||
StorageClass: fs.StorageClass,
|
||||
Metadata: fs.Metadata,
|
||||
Tagging: fs.tags,
|
||||
Bucket: aws.String(fs.Bucket),
|
||||
Key: aws.String(path),
|
||||
Body: sr,
|
||||
StorageClass: fs.StorageClass,
|
||||
Metadata: fs.Metadata,
|
||||
ChecksumAlgorithm: fs.ChecksumAlgorithm,
|
||||
Tagging: fs.tags,
|
||||
}
|
||||
|
||||
_, err := fs.uploader.Upload(fs.ctx, input)
|
||||
@@ -432,12 +441,13 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
|
||||
r: bytes.NewReader(data),
|
||||
}
|
||||
input := &s3.PutObjectInput{
|
||||
Bucket: aws.String(fs.Bucket),
|
||||
Key: aws.String(path),
|
||||
Body: sr,
|
||||
StorageClass: fs.StorageClass,
|
||||
Metadata: fs.Metadata,
|
||||
Tagging: fs.tags,
|
||||
Bucket: aws.String(fs.Bucket),
|
||||
Key: aws.String(path),
|
||||
Body: sr,
|
||||
StorageClass: fs.StorageClass,
|
||||
Metadata: fs.Metadata,
|
||||
ChecksumAlgorithm: fs.ChecksumAlgorithm,
|
||||
Tagging: fs.tags,
|
||||
}
|
||||
_, err := fs.uploader.Upload(fs.ctx, input)
|
||||
if err != nil {
|
||||
|
||||
@@ -84,7 +84,7 @@ func TestByteBufferReadFrom(t *testing.T) {
|
||||
t.Fatalf("unexpected number of bytes read; got %d; want %d", n, 0)
|
||||
}
|
||||
if len(bb.B) != 0 {
|
||||
t.Fatalf("unexpejcted len(bb.B); got %d; want %d", len(bb.B), 0)
|
||||
t.Fatalf("unexpected len(bb.B); got %d; want %d", len(bb.B), 0)
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ func TestBuffer(t *testing.T) {
|
||||
|
||||
cbLen := cb.Len()
|
||||
if cbLen != totalSize {
|
||||
t.Fatalf("nexpected Buffer.Len value; got %d; want %d", cbLen, totalSize)
|
||||
t.Fatalf("unexpected Buffer.Len value; got %d; want %d", cbLen, totalSize)
|
||||
}
|
||||
|
||||
size := cb.SizeBytes()
|
||||
|
||||
@@ -33,7 +33,7 @@ func (rh *ConsistentHash) GetNodeIdx(h uint64, excludeIdxs []int) int {
|
||||
|
||||
if len(excludeIdxs) == len(rh.nodeHashes) {
|
||||
// All the nodes are excluded. Treat this case as no nodes are excluded.
|
||||
// This is better from load-balacning PoV than selecting some static node.
|
||||
// This is better from load-balancing PoV than selecting some static node.
|
||||
excludeIdxs = nil
|
||||
}
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestConsistentHash(t *testing.T) {
|
||||
}
|
||||
maxIndexMismatches := float64(len(keys)) / float64(len(nodes))
|
||||
if float64(indexMismatches) > maxIndexMismatches {
|
||||
t.Fatalf("too many index mismtaches after excluding a node; got %d; want no more than %f", indexMismatches, maxIndexMismatches)
|
||||
t.Fatalf("too many index mismatches after excluding a node; got %d; want no more than %f", indexMismatches, maxIndexMismatches)
|
||||
}
|
||||
expectedPerIdxCount = float64(len(keys)) / float64(len(nodes)-1)
|
||||
for i, perIdxCount := range perIdxCounts {
|
||||
|
||||
@@ -83,7 +83,7 @@ func UnmarshalInt64(src []byte) int64 {
|
||||
return v
|
||||
}
|
||||
|
||||
// MarshalVarInt64 appends marshalsed v to dst and returns the result.
|
||||
// MarshalVarInt64 appends marshaled v to dst and returns the result.
|
||||
func MarshalVarInt64(dst []byte, v int64) []byte {
|
||||
u := uint64((v << 1) ^ (v >> 63))
|
||||
|
||||
@@ -578,7 +578,7 @@ type Uint64s struct {
|
||||
var uint64sPool sync.Pool
|
||||
|
||||
// GetUint32s returns an uint32 slice with the given size.
|
||||
// The slize contents isn't initialized - it may contain garbage.
|
||||
// The slice contents isn't initialized - it may contain garbage.
|
||||
func GetUint32s(size int) *Uint32s {
|
||||
v := uint32sPool.Get()
|
||||
if v == nil {
|
||||
|
||||
@@ -94,7 +94,7 @@ func IsPartiallyRemovedDir(dirPath string) bool {
|
||||
des := MustReadDir(dirPath)
|
||||
if len(des) == 0 {
|
||||
// Delete empty dirs too, since they may appear when the unclean shutdown happens after the deleteDirFilename is deleted,
|
||||
// but before the directory is deleted istelf.
|
||||
// but before the directory is deleted itself.
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ type server struct {
|
||||
// In such cases the caller must serve the request.
|
||||
type RequestHandler func(w http.ResponseWriter, r *http.Request) bool
|
||||
|
||||
// ServeOptions defiens optional parameters for http server
|
||||
// ServeOptions defines optional parameters for http server
|
||||
type ServeOptions struct {
|
||||
// UseProxyProtocol if is set to true for the corresponding addr, then the incoming connections are accepted via proxy protocol.
|
||||
// See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
|
||||
|
||||
@@ -104,7 +104,7 @@ func readProxyProto(r io.Reader) (net.Addr, error) {
|
||||
// Read the protocol block itself
|
||||
bb.B = bytesutil.ResizeNoCopyMayOverallocate(bb.B, blockLen)
|
||||
if _, err := io.ReadFull(r, bb.B); err != nil {
|
||||
return nil, fmt.Errorf("cannot read proxy protocol block with the lehgth %d bytes: %w", blockLen, err)
|
||||
return nil, fmt.Errorf("cannot read proxy protocol block with the length %d bytes: %w", blockLen, err)
|
||||
}
|
||||
switch command {
|
||||
case 0:
|
||||
|
||||
@@ -72,7 +72,7 @@ func MustOpenFastQueue(path, name string, maxInmemoryBlocks int, maxPendingBytes
|
||||
return fq
|
||||
}
|
||||
|
||||
// IsPersistentQueueDisabled returns true if persistend queue at fq is disabled.
|
||||
// IsPersistentQueueDisabled returns true if persistent queue at fq is disabled.
|
||||
func (fq *FastQueue) IsPersistentQueueDisabled() bool {
|
||||
return fq.isPQDisabled
|
||||
}
|
||||
|
||||
@@ -663,7 +663,7 @@ func (opts *Options) NewConfig() (*Config, error) {
|
||||
}
|
||||
if opts.OAuth2 != nil {
|
||||
if actx.getAuthHeader != nil {
|
||||
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth, `bearer_token` and `ouath2`")
|
||||
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth, `bearer_token` and `oauth2`")
|
||||
}
|
||||
if err := actx.initFromOAuth2Config(baseDir, opts.OAuth2); err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize oauth2: %w", err)
|
||||
|
||||
@@ -231,7 +231,7 @@ scrape_configs:
|
||||
`
|
||||
var cfg Config
|
||||
if err := cfg.parseData([]byte(data), "sss"); err != nil {
|
||||
t.Fatalf("cannot parase data: %s", err)
|
||||
t.Fatalf("cannot parse data: %s", err)
|
||||
}
|
||||
sws := cfg.getStaticScrapeWork()
|
||||
swsExpected := []*ScrapeWork{
|
||||
@@ -304,7 +304,7 @@ scrape_configs:
|
||||
`
|
||||
var cfg Config
|
||||
if err := cfg.parseData([]byte(data), "sss"); err != nil {
|
||||
t.Fatalf("cannot parase data: %s", err)
|
||||
t.Fatalf("cannot parse data: %s", err)
|
||||
}
|
||||
sws := cfg.getStaticScrapeWork()
|
||||
swsExpected := []*ScrapeWork{{
|
||||
@@ -330,7 +330,7 @@ scrape_configs:
|
||||
`
|
||||
var cfg Config
|
||||
if err := cfg.parseData([]byte(data), "sss"); err != nil {
|
||||
t.Fatalf("cannot parase data: %s", err)
|
||||
t.Fatalf("cannot parse data: %s", err)
|
||||
}
|
||||
sws := cfg.getFileSDScrapeWork(nil)
|
||||
if !equalStaticConfigForScrapeWorks(sws, sws) {
|
||||
@@ -1193,7 +1193,7 @@ scrape_configs:
|
||||
scrape_configs:
|
||||
- job_name: path wo slash
|
||||
enable_compression: false
|
||||
static_configs:
|
||||
static_configs:
|
||||
- targets: ["foo.bar:1234"]
|
||||
relabel_configs:
|
||||
- replacement: metricspath
|
||||
|
||||
@@ -179,7 +179,7 @@ func getRefreshTokenFunc(sdc *SDConfig, ac, proxyAC *promauth.Config, env *cloud
|
||||
return nil, fmt.Errorf("missing `client_id` config option for `authentication_method: OAuth`")
|
||||
}
|
||||
if sdc.ClientSecret.String() == "" {
|
||||
return nil, fmt.Errorf("missing `client_secrect` config option for `authentication_method: OAuth`")
|
||||
return nil, fmt.Errorf("missing `client_secret` config option for `authentication_method: OAuth`")
|
||||
}
|
||||
q := url.Values{
|
||||
"grant_type": []string{"client_credentials"},
|
||||
|
||||
@@ -55,7 +55,7 @@ type InstancesResponse struct {
|
||||
NextPageToken string `xml:"nextToken"`
|
||||
}
|
||||
|
||||
// ReservationSet represetns ReservationSet from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html
|
||||
// ReservationSet represents ReservationSet from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html
|
||||
type ReservationSet struct {
|
||||
Items []Reservation `xml:"item"`
|
||||
}
|
||||
|
||||
@@ -76,7 +76,7 @@ type Tag struct {
|
||||
Content string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
// DataCenterInfo -eureka datacentre metadata
|
||||
// DataCenterInfo -eureka datacenter metadata
|
||||
type DataCenterInfo struct {
|
||||
Name string `xml:"name"`
|
||||
Metadata MetaData `xml:"metadata"`
|
||||
|
||||
@@ -963,7 +963,7 @@ func (uw *urlWatcher) maybeUpdateDependedScrapeWorksLocked() {
|
||||
continue
|
||||
}
|
||||
if attachNodeMetadata && role == "node" && (uwx.role == "pod" || uwx.role == "endpoints" || uwx.role == "endpointslice") {
|
||||
// pod, endpoints and enpointslices objects depend on node objects if attachNodeMetadata is set
|
||||
// pod, endpoints and endpointslices objects depend on node objects if attachNodeMetadata is set
|
||||
uwx.needRecreateScrapeWorks = true
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ func appendThreeStrings(dst []byte, a, b, c string) []byte {
|
||||
return dst
|
||||
}
|
||||
|
||||
// OwnerReference represents OwnerReferense from k8s API.
|
||||
// OwnerReference represents OwnerReference from k8s API.
|
||||
//
|
||||
// See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#ownerreference-v1-meta
|
||||
type OwnerReference struct {
|
||||
|
||||
@@ -46,7 +46,7 @@ func (sdc *SDConfig) role() string {
|
||||
return sdc.Role
|
||||
}
|
||||
|
||||
// AttachMetadata represents `attach_metadata` option at `kuberentes_sd_config`.
|
||||
// AttachMetadata represents `attach_metadata` option at `kubernetes_sd_config`.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
|
||||
type AttachMetadata struct {
|
||||
|
||||
@@ -105,7 +105,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||
|
||||
func getAPIServerPath(serverURL string) (string, string, error) {
|
||||
if serverURL == "" {
|
||||
return "", "", fmt.Errorf("missing servier url")
|
||||
return "", "", fmt.Errorf("missing server url")
|
||||
}
|
||||
if !strings.Contains(serverURL, "://") {
|
||||
serverURL = "http://" + serverURL
|
||||
|
||||
57
lib/promutil/labelscompressorv2_test.go
Normal file
57
lib/promutil/labelscompressorv2_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package promutil
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
)
|
||||
|
||||
func TestLabelsCompressorV2(t *testing.T) {
|
||||
lc := NewLabelsCompressorV2()
|
||||
|
||||
labels1 := []prompb.Label{
|
||||
{Name: "label1", Value: "value1"},
|
||||
{Name: "label2", Value: "value2"},
|
||||
{Name: "label3", Value: "value3"},
|
||||
}
|
||||
labels2 := []prompb.Label{
|
||||
{Name: "label3", Value: "value3"},
|
||||
{Name: "label4", Value: "value4"},
|
||||
{Name: "label5", Value: "value5"},
|
||||
}
|
||||
|
||||
compressed1 := lc.Compress(labels1)
|
||||
compressed2 := lc.Compress(labels2)
|
||||
|
||||
runtime.GC()
|
||||
cleaned := lc.Cleanup()
|
||||
if cleaned != 0 {
|
||||
t.Fatalf("lc.Cleanup() should've cleaned zero unused labels, got %d", cleaned)
|
||||
}
|
||||
|
||||
decompressed1 := compressed1.Decompress()
|
||||
if !reflect.DeepEqual(labels1, decompressed1) {
|
||||
t.Fatalf("decompressed labels1 do not match original: got %+v, want %+v", decompressed1, labels1)
|
||||
}
|
||||
|
||||
compressed1 = Key{}
|
||||
runtime.GC()
|
||||
cleaned = lc.Cleanup()
|
||||
if cleaned != 2 {
|
||||
t.Fatalf("lc.Cleanup() should've cleaned two unused labels, got %d", cleaned)
|
||||
}
|
||||
|
||||
decompressed2 := compressed2.Decompress()
|
||||
if !reflect.DeepEqual(labels2, decompressed2) {
|
||||
t.Fatalf("decompressed labels2 do not match original: got %+v, want %+v", decompressed2, labels2)
|
||||
}
|
||||
|
||||
compressed2 = Key{}
|
||||
runtime.GC()
|
||||
cleaned = lc.Cleanup()
|
||||
if cleaned != 3 {
|
||||
t.Fatalf("lc.Cleanup() should've cleaned two unused labels, got %d", cleaned)
|
||||
}
|
||||
}
|
||||
102
lib/promutil/labelscomressorv2.go
Normal file
102
lib/promutil/labelscomressorv2.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package promutil
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
"weak"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
)
|
||||
|
||||
// Key is an opaque handle for a compressed (interned) label set produced by
// LabelsCompressorV2.Compress.
//
// A Key holds strong pointers to the canonical label copies, so the labels
// stay alive (and their weak map entries remain valid) for as long as the Key
// itself is reachable.
type Key struct {
	// labelRefs points at the canonical per-label copies, in original order.
	labelRefs []labelRef
}
|
||||
|
||||
func (k Key) Decompress() []prompb.Label {
|
||||
res := make([]prompb.Label, 0, len(k.labelRefs))
|
||||
for i := range k.labelRefs {
|
||||
res = append(res, cloneLabel(*k.labelRefs[i].label))
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// labelRef holds a strong pointer to a canonical label copy, which keeps the
// corresponding weak map entry inside LabelsCompressorV2 from being collected.
type labelRef struct {
	label *prompb.Label
}
|
||||
|
||||
// LabelsCompressorV2 interns labels, deduplicating identical labels across
// compressed label sets.
//
// Map entries are held via weak pointers: once every Key referencing a label
// has been dropped, the canonical copy becomes collectable and the stale map
// entry is evicted by Cleanup.
type LabelsCompressorV2 struct {
	// mux protects labels from concurrent access.
	mux sync.Mutex
	// labels maps a label value to a weak pointer to its canonical heap copy.
	labels map[prompb.Label]weak.Pointer[prompb.Label]
}
|
||||
|
||||
// NewLabelsCompressorV2 returns an initialized LabelsCompressorV2 and starts a
// background goroutine that periodically evicts collected entries.
//
// NOTE(review): the cleanup goroutine has no stop mechanism, so every
// compressor leaks one goroutine for the process lifetime — confirm this is
// acceptable (i.e. compressors are long-lived singletons) or add a Close/stop
// channel.
func NewLabelsCompressorV2() *LabelsCompressorV2 {
	lc := &LabelsCompressorV2{
		labels: make(map[prompb.Label]weak.Pointer[prompb.Label]),
	}

	go lc.cleanup()

	return lc
}
|
||||
|
||||
func (lc *LabelsCompressorV2) Compress(labels []prompb.Label) Key {
|
||||
lc.mux.Lock()
|
||||
defer lc.mux.Unlock()
|
||||
|
||||
labelRefs := make([]labelRef, 0, len(labels))
|
||||
for i := range labels {
|
||||
wl := lc.labels[labels[i]]
|
||||
l := wl.Value()
|
||||
if l == nil {
|
||||
labelKey := cloneLabel(labels[i])
|
||||
labelVal := cloneLabel(labels[i])
|
||||
|
||||
wl = weak.Make(&labelVal)
|
||||
lc.labels[labelKey] = wl
|
||||
|
||||
l = wl.Value()
|
||||
}
|
||||
|
||||
labelRefs = append(labelRefs, labelRef{
|
||||
label: l,
|
||||
})
|
||||
}
|
||||
|
||||
return Key{
|
||||
labelRefs: labelRefs,
|
||||
}
|
||||
}
|
||||
|
||||
func (lc *LabelsCompressorV2) cleanup() {
|
||||
t := time.NewTicker(5 * time.Minute)
|
||||
defer t.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
lc.Cleanup()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup removes map entries whose weakly-referenced canonical label has
// been garbage-collected (i.e. no Key references it anymore) and returns the
// number of removed entries.
func (lc *LabelsCompressorV2) Cleanup() int {
	lc.mux.Lock()
	defer lc.mux.Unlock()

	count := 0

	for l, wl := range lc.labels {
		if wl.Value() != nil {
			// The canonical copy is still strongly referenced by some Key.
			continue
		}

		// NOTE(review): this prints every evicted label and looks like
		// leftover debug output; it is also the only use of the log import.
		// Confirm whether it should be removed (together with the import).
		log.Println(l)

		count++
		// Deleting entries during range is safe for Go maps.
		delete(lc.labels, l)
	}

	return count
}
|
||||
@@ -165,7 +165,7 @@ func (uw *unmarshalWork) runCallback(rows []csvimport.Row) {
|
||||
ctx.wg.Done()
|
||||
}
|
||||
|
||||
// Unmarshal implements prototparserutil.UnmarshalWork
|
||||
// Unmarshal implements protoparserutil.UnmarshalWork
|
||||
func (uw *unmarshalWork) Unmarshal() {
|
||||
uw.rows.Unmarshal(bytesutil.ToUnsafeString(uw.reqBuf), uw.cds)
|
||||
rows := uw.rows.Rows
|
||||
|
||||
@@ -43,7 +43,7 @@ func (rs *Rows) Reset() {
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals grahite plaintext protocol rows from s.
|
||||
// Unmarshal unmarshals graphite plaintext protocol rows from s.
|
||||
//
|
||||
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
|
||||
//
|
||||
|
||||
@@ -10,7 +10,7 @@ func TestNextUnquotedChar(t *testing.T) {
|
||||
t.Helper()
|
||||
n := nextUnquotedChar(s, ch, noUnescape, true)
|
||||
if n != nExpected {
|
||||
t.Fatalf("unexpected n for nextUnqotedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
|
||||
t.Fatalf("unexpected n for nextUnquotedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ func ProcessRequestBody(b []byte) ([]byte, error) {
|
||||
}
|
||||
totalLength := varIntLength + int(messageLength)
|
||||
if totalLength > len(r.Data) {
|
||||
return nil, fmt.Errorf("failed to parse OpenTelementry message: insufficient length of buffer")
|
||||
return nil, fmt.Errorf("failed to parse OpenTelemetry message: insufficient length of buffer")
|
||||
}
|
||||
dst = append(dst, r.Data[varIntLength:totalLength]...)
|
||||
r.Data = r.Data[totalLength:]
|
||||
|
||||
@@ -273,7 +273,7 @@ func compareValues(values, valuesExpected []float64) error {
|
||||
return fmt.Errorf("expecting NaN at position #%d; got %v", i, v)
|
||||
}
|
||||
} else if v != vExpected {
|
||||
return fmt.Errorf("unepxected value at position #%d; got %v; want %v", i, v, vExpected)
|
||||
return fmt.Errorf("unexpected value at position #%d; got %v; want %v", i, v, vExpected)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -15,7 +15,7 @@ type RateLimiter struct {
|
||||
// perSecondLimit is the per-second limit of resources.
|
||||
perSecondLimit int64
|
||||
|
||||
// stopCh is used for unbloking rate limiting.
|
||||
// stopCh is used for unblocking rate limiting.
|
||||
stopCh <-chan struct{}
|
||||
|
||||
// mu protects budget and deadline from concurrent access.
|
||||
|
||||
@@ -123,7 +123,7 @@ func (r *Regex) GetLiterals() []string {
|
||||
return a
|
||||
}
|
||||
|
||||
// String returns string represetnation for r
|
||||
// String returns string representation for r
|
||||
func (r *Regex) String() string {
|
||||
return r.exprStr
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func simplifyRegexpExt(sre *syntax.Regexp, keepBeginOp, keepEndOp bool) *syntax.
|
||||
}
|
||||
}
|
||||
sre.Sub = subs
|
||||
// Remove anchros from the beginning and the end of regexp, since they
|
||||
// Remove anchors from the beginning and the end of regexp, since they
|
||||
// will be added later.
|
||||
if !keepBeginOp {
|
||||
for len(sre.Sub) > 0 && sre.Sub[0].Op == syntax.OpBeginText {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -24,11 +24,11 @@ func TestIndexDB_MetricIDsNotMappedToTSIDsAreDeleted(t *testing.T) {
|
||||
return keys
|
||||
}
|
||||
|
||||
synctest.Run(func() {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
s := MustOpenStorage(t.Name(), OpenOptions{})
|
||||
defer s.MustClose()
|
||||
idb, putIndexDB := s.getCurrIndexDB()
|
||||
defer putIndexDB()
|
||||
idbPrev, idbCurr := s.getPrevAndCurrIndexDBs()
|
||||
defer s.putPrevAndCurrIndexDBs(idbPrev, idbCurr)
|
||||
|
||||
type want struct {
|
||||
missingMetricIDs []uint64
|
||||
@@ -37,7 +37,7 @@ func TestIndexDB_MetricIDsNotMappedToTSIDsAreDeleted(t *testing.T) {
|
||||
}
|
||||
assertGetTSIDsFromMetricIDs := func(metricIDs []uint64, want want) {
|
||||
t.Helper()
|
||||
tsids, err := idb.getTSIDsFromMetricIDs(nil, metricIDs, noDeadline)
|
||||
tsids, err := idbCurr.getTSIDsFromMetricIDs(nil, metricIDs, noDeadline)
|
||||
if err != nil {
|
||||
t.Fatalf("getTSIDsFromMetricIDs() failed unexpectedly: %v", err)
|
||||
}
|
||||
@@ -48,13 +48,13 @@ func TestIndexDB_MetricIDsNotMappedToTSIDsAreDeleted(t *testing.T) {
|
||||
if diff := cmp.Diff(want.missingMetricIDs, missingMetricIDs); diff != "" {
|
||||
t.Fatalf("unexpected tsids (-want, +got):\n%s", diff)
|
||||
}
|
||||
if got, want := idb.extDB.missingTSIDsForMetricID.Load(), want.missingTSIDsForMetricID; got != want {
|
||||
if got, want := idbCurr.missingTSIDsForMetricID.Load(), want.missingTSIDsForMetricID; got != want {
|
||||
t.Fatalf("unexpected missingTSIDsForMetricID metric value: got %d, want %d", got, want)
|
||||
}
|
||||
wantDeletedMetricIDs := &uint64set.Set{}
|
||||
wantDeletedMetricIDs.AddMulti(want.deletedMetricIDs)
|
||||
if !s.getDeletedMetricIDs().Equal(wantDeletedMetricIDs) {
|
||||
t.Fatalf("deleted metricIDs set is different from %v", want.deletedMetricIDs)
|
||||
t.Fatalf("deleted metricIDs set is different from %v: %v", want.deletedMetricIDs, s.getDeletedMetricIDs().AppendTo(nil))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user