Compare commits
64 Commits
articles-v
...
issue-1050
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c248236526 | ||
|
|
095db27338 | ||
|
|
a3df0f890b | ||
|
|
0785d16711 | ||
|
|
dc94aa9339 | ||
|
|
032f70e262 | ||
|
|
7029283f7d | ||
|
|
6c1534c7b1 | ||
|
|
0c05b0b15b | ||
|
|
a2b1d1eb62 | ||
|
|
e3cd3329d6 | ||
|
|
6c57246940 | ||
|
|
05112e54e2 | ||
|
|
ce227fe7d9 | ||
|
|
e4524eb2fb | ||
|
|
b9ba5dacc3 | ||
|
|
1a8fe4f2f8 | ||
|
|
2dcfbd8e19 | ||
|
|
728269a5af | ||
|
|
eaf24ec631 | ||
|
|
e47f7a9d4e | ||
|
|
02279b8594 | ||
|
|
65a44bd9e5 | ||
|
|
431dda673e | ||
|
|
d66b7a2283 | ||
|
|
fd45463b5f | ||
|
|
153c5bb803 | ||
|
|
a29229a877 | ||
|
|
aa94652ec3 | ||
|
|
ad85524fb1 | ||
|
|
3fe606770f | ||
|
|
b3054bbadd | ||
|
|
443ea9cbc6 | ||
|
|
a36395500b | ||
|
|
a6ff705771 | ||
|
|
cc3a14b16b | ||
|
|
7ef08b1781 | ||
|
|
969cb5b4ae | ||
|
|
b9f0e614bd | ||
|
|
ed44c08f5f | ||
|
|
3ae44e734b | ||
|
|
0c5886012d | ||
|
|
d3264bd78f | ||
|
|
1f87faafec | ||
|
|
521b73dfc5 | ||
|
|
61db79c10a | ||
|
|
460ac6468c | ||
|
|
c42023c586 | ||
|
|
9c5fbe1a30 | ||
|
|
653576d8b1 | ||
|
|
8a20ccf21d | ||
|
|
1a01dbbec7 | ||
|
|
630e413812 | ||
|
|
b639e7e641 | ||
|
|
858c318e1f | ||
|
|
b8327ce09c | ||
|
|
7514511c68 | ||
|
|
33d524bf13 | ||
|
|
d07c1c73d1 | ||
|
|
a896673c42 | ||
|
|
c60ab2d57a | ||
|
|
49e51611d7 | ||
|
|
902ca83177 | ||
|
|
66e3f8736b |
11
.github/pull_request_template.md
vendored
@@ -1,10 +1 @@
|
||||
### Describe Your Changes
|
||||
|
||||
Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
|
||||
|
||||
### Checklist
|
||||
|
||||
The following checks are **mandatory**:
|
||||
|
||||
- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
|
||||
- [ ] My change adheres to [VictoriaMetrics development goals](https://docs.victoriametrics.com/victoriametrics/goals/).
|
||||
Before creating the PR, please read [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist) and remove this line after confirming you understand and follow them.
|
||||
|
||||
2
.github/workflows/check-licenses.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
||||
- run: go version
|
||||
|
||||
- name: Cache Go artifacts
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
|
||||
8
.github/workflows/codeql-analysis-go.yml
vendored
@@ -40,7 +40,7 @@ jobs:
|
||||
- run: go version
|
||||
|
||||
- name: Cache Go artifacts
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
@@ -50,14 +50,14 @@ jobs:
|
||||
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v4
|
||||
uses: github/codeql-action/init@v4.35.1
|
||||
with:
|
||||
languages: go
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v4
|
||||
uses: github/codeql-action/autobuild@v4.35.1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v4
|
||||
uses: github/codeql-action/analyze@v4.35.1
|
||||
with:
|
||||
category: 'language:go'
|
||||
|
||||
11
.github/workflows/test.yml
vendored
@@ -47,7 +47,7 @@ jobs:
|
||||
- run: go version
|
||||
|
||||
- name: Cache golangci-lint
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: |
|
||||
~/.cache/golangci-lint
|
||||
@@ -66,8 +66,8 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
scenario:
|
||||
- 'test-full'
|
||||
- 'test-full-386'
|
||||
- 'test'
|
||||
- 'test-386'
|
||||
- 'test-pure'
|
||||
|
||||
steps:
|
||||
@@ -88,11 +88,6 @@ jobs:
|
||||
- name: Run tests
|
||||
run: make ${{ matrix.scenario}}
|
||||
|
||||
- name: Publish coverage
|
||||
uses: codecov/codecov-action@v6
|
||||
with:
|
||||
files: ./coverage.txt
|
||||
|
||||
apptest:
|
||||
name: apptest
|
||||
runs-on: apptest
|
||||
|
||||
3
Makefile
@@ -457,6 +457,9 @@ test:
|
||||
test-race:
|
||||
go test -tags 'synctest' -race ./lib/... ./app/...
|
||||
|
||||
test-386:
|
||||
GOARCH=386 go test -tags 'synctest' ./lib/... ./app/...
|
||||
|
||||
test-pure:
|
||||
CGO_ENABLED=0 go test -tags 'synctest' ./lib/... ./app/...
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
[](https://hub.docker.com/u/victoriametrics)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions/workflows/build.yml)
|
||||
[](https://app.codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
|
||||
[](https://slack.victoriametrics.com)
|
||||
[](https://x.com/VictoriaMetrics/)
|
||||
|
||||
@@ -118,8 +118,8 @@ func main() {
|
||||
logger.Fatalf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
|
||||
vminsert.Stop()
|
||||
vminsertcommon.StopIngestionRateLimiter()
|
||||
vminsert.Stop()
|
||||
|
||||
vmstorage.Stop()
|
||||
vmselect.Stop()
|
||||
|
||||
@@ -102,6 +102,8 @@ var (
|
||||
"cannot be pushed into the configured -remoteWrite.url systems in a timely manner. See https://docs.victoriametrics.com/victoriametrics/vmagent/#disabling-on-disk-persistence")
|
||||
disableMetadataPerURL = flagutil.NewArrayBool("remoteWrite.disableMetadata", "Whether to disable sending metadata to the corresponding -remoteWrite.url. "+
|
||||
"By default, metadata sending is controlled by the global -enableMetadata flag")
|
||||
enableRerouting = flag.Bool("remoteWrite.enableRerouting", false, "Whether to reroute samples to available remote storage systems when there's any remote storage system and its persistent queue can not "+
|
||||
"keep up with the data ingestion rate. If this flag is not set, then it will be calculated automatically based on -remoteWrite.disableOnDiskQueue. See https://docs.victoriametrics.com/victoriametrics/vmagent/#disabling-on-disk-persistence")
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -215,6 +217,10 @@ func Init() {
|
||||
// to the remaining -remoteWrite.url and dropping them on the blocked queue.
|
||||
dropSamplesOnFailureGlobal = *dropSamplesOnOverload || disableOnDiskQueueAny && len(*remoteWriteURLs) > 1
|
||||
|
||||
if *shardByURL && !flagutil.IsSet("remoteWrite.enableRerouting") {
|
||||
*enableRerouting = disableOnDiskQueueAny
|
||||
}
|
||||
|
||||
dropDanglingQueues()
|
||||
|
||||
// Start config reloader.
|
||||
@@ -498,11 +504,13 @@ func tryPush(at *auth.Token, wr *prompb.WriteRequest, forceDropSamplesOnFailure
|
||||
//
|
||||
// calculateHealthyRwctxIdx will rely on the order of rwctx to be in ascending order.
|
||||
func getEligibleRemoteWriteCtxs(tss []prompb.TimeSeries, forceDropSamplesOnFailure bool) ([]*remoteWriteCtx, bool) {
|
||||
if !disableOnDiskQueueAny {
|
||||
if (*shardByURL && !*enableRerouting) || !disableOnDiskQueueAny {
|
||||
return rwctxsGlobal, true
|
||||
}
|
||||
|
||||
// This code is applicable if at least a single remote storage has -disableOnDiskQueue
|
||||
// This code is applicable when:
|
||||
// 1. remoteWrite.shardByUrl is disabled and at least a single remote storage has -disableOnDiskQueue.
|
||||
// 2. remoteWrite.shardByUrl is enabled and remoteWrite.enableRerouting is set to true.
|
||||
rwctxs := make([]*remoteWriteCtx, 0, len(rwctxsGlobal))
|
||||
for _, rwctx := range rwctxsGlobal {
|
||||
if !rwctx.fq.IsWriteBlocked() {
|
||||
|
||||
@@ -222,6 +222,9 @@ func (r *Rule) Validate() error {
|
||||
if r.Expr == "" {
|
||||
return fmt.Errorf("expression can't be empty")
|
||||
}
|
||||
if _, ok := r.Labels["__name__"]; ok {
|
||||
return fmt.Errorf("invalid rule label __name__")
|
||||
}
|
||||
return checkOverflow(r.XXX, "rule")
|
||||
}
|
||||
|
||||
|
||||
@@ -136,6 +136,9 @@ func TestRuleValidate(t *testing.T) {
|
||||
if err := (&Rule{Alert: "alert"}).Validate(); err == nil {
|
||||
t.Fatalf("expected empty expr error")
|
||||
}
|
||||
if err := (&Rule{Record: "record", Expr: "sum(test)", Labels: map[string]string{"__name__": "test"}}).Validate(); err == nil {
|
||||
t.Fatalf("invalid rule label; got %s", err)
|
||||
}
|
||||
if err := (&Rule{Alert: "alert", Expr: "test>0"}).Validate(); err != nil {
|
||||
t.Fatalf("expected valid rule; got %s", err)
|
||||
}
|
||||
|
||||
@@ -87,6 +87,7 @@ func (m *Metric) DelLabel(key string) {
|
||||
for i, l := range m.Labels {
|
||||
if l.Name == key {
|
||||
m.Labels = append(m.Labels[:i], m.Labels[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -312,9 +312,11 @@ type labelSet struct {
|
||||
// On k conflicts in origin set, the original value is preferred and copied
|
||||
// to processed with `exported_%k` key. The copy happens only if passed v isn't equal to origin[k] value.
|
||||
func (ls *labelSet) add(k, v string) {
|
||||
// do not add label with empty value, since it has no meaning.
|
||||
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
|
||||
// do not add label with empty value to the result, as it has no meaning:
|
||||
// if the label already exists in the original query result, remove it to preserve compatibility with relabeling, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766.
|
||||
// otherwise, ignore the label, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984.
|
||||
if v == "" {
|
||||
delete(ls.processed, k)
|
||||
return
|
||||
}
|
||||
ls.processed[k] = v
|
||||
|
||||
@@ -1363,6 +1363,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
|
||||
{Name: "instance", Value: "0.0.0.0:8800"},
|
||||
{Name: "group", Value: "vmalert"},
|
||||
{Name: "alertname", Value: "ConfigurationReloadFailure"},
|
||||
{Name: "pod", Value: "vmalert-0"},
|
||||
},
|
||||
Values: []float64{1},
|
||||
Timestamps: []int64{time.Now().UnixNano()},
|
||||
@@ -1374,6 +1375,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
|
||||
"group": "vmalert", // this shouldn't have effect since value in metric is equal
|
||||
"invalid_label": "{{ .Values.mustRuntimeFail }}",
|
||||
"empty_label": "", // this should be dropped
|
||||
"pod": "", // this should remove the pod label from query result
|
||||
},
|
||||
Expr: "sum(vmalert_alerting_rules_error) by(instance, group, alertname) > 0",
|
||||
Name: "AlertingRulesError",
|
||||
@@ -1385,6 +1387,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
|
||||
"group": "vmalert",
|
||||
"alertname": "ConfigurationReloadFailure",
|
||||
"alertgroup": "vmalert",
|
||||
"pod": "vmalert-0",
|
||||
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
|
||||
}
|
||||
|
||||
|
||||
@@ -409,6 +409,9 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
|
||||
g.mu.Unlock()
|
||||
defer g.evalCancel()
|
||||
|
||||
// start the interval ticker before the first evaluation,
|
||||
// so that the evaluation timestamps of groups with the `eval_offset` option are also aligned,
|
||||
// see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10773
|
||||
t := time.NewTicker(g.Interval)
|
||||
defer t.Stop()
|
||||
|
||||
|
||||
@@ -293,9 +293,11 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompb.TimeSeries {
|
||||
}
|
||||
// add extra labels configured by user
|
||||
for k := range rr.Labels {
|
||||
// do not add label with empty value, since it has no meaning.
|
||||
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
|
||||
// do not add label with empty value to the result, as it has no meaning:
|
||||
// if the label already exists in the original query result, remove it to preserve compatibility with relabeling, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766.
|
||||
// otherwise, ignore the label, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984.
|
||||
if rr.Labels[k] == "" {
|
||||
m.DelLabel(k)
|
||||
continue
|
||||
}
|
||||
existingLabel := promrelabel.GetLabelByName(m.Labels, k)
|
||||
|
||||
@@ -163,11 +163,13 @@ func TestRecordingRule_Exec(t *testing.T) {
|
||||
f(&RecordingRule{
|
||||
Name: "job:foo",
|
||||
Labels: map[string]string{
|
||||
"source": "test",
|
||||
"source": "test",
|
||||
"empty_label": "", // this should be dropped
|
||||
"pod": "", // this should remove the pod label from query result
|
||||
},
|
||||
}, [][]datasource.Metric{{
|
||||
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo"),
|
||||
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin"),
|
||||
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo", "pod", "vmalert-0"),
|
||||
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin", "pod", "vmalert-1"),
|
||||
metricWithValueAndLabels(t, 1, "__name__", "baz", "job", "baz", "source", "test"),
|
||||
}}, [][]prompb.TimeSeries{{
|
||||
newTimeSeries([]float64{2}, []int64{ts.UnixNano()}, []prompb.Label{
|
||||
|
||||
@@ -52,7 +52,13 @@ var (
|
||||
"alert": rule.TypeAlerting,
|
||||
"record": rule.TypeRecording,
|
||||
}
|
||||
ruleStates = []string{"ok", "nomatch", "inactive", "firing", "pending", "unhealthy"}
|
||||
|
||||
// The "recovering", "noData", "normal", "error" states are used by Grafana.
|
||||
// Ignore "recovering" since it is not currently acknowledged by vmalert,
|
||||
// treat "noData" as an alias for "nomatch",
|
||||
// treat "normal" as an alias for "inactive",
|
||||
// treat "error" as an alias for "unhealthy"
|
||||
ruleStates = []string{"ok", "nomatch", "inactive", "firing", "pending", "unhealthy", "recovering", "noData", "normal", "error"}
|
||||
)
|
||||
|
||||
type requestHandler struct {
|
||||
@@ -363,6 +369,15 @@ func newRulesFilter(r *http.Request) (*rulesFilter, *httpserver.ErrorWithStatusC
|
||||
if !slices.Contains(ruleStates, v) {
|
||||
return nil, errResponse(fmt.Errorf(`invalid parameter "state": contains not supported value %q`, v), http.StatusBadRequest)
|
||||
}
|
||||
// Replace grafana states with supported internal states
|
||||
switch v {
|
||||
case "noData":
|
||||
v = "nomatch"
|
||||
case "normal":
|
||||
v = "inactive"
|
||||
case "error":
|
||||
v = "unhealthy"
|
||||
}
|
||||
rf.states = append(rf.states, v)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -357,6 +357,7 @@ func bufferRequestBody(ctx context.Context, r io.ReadCloser, userName string) (i
|
||||
|
||||
maxBufSize := max(requestBufferSize.IntN(), maxRequestBodySizeToRetry.IntN())
|
||||
if maxBufSize <= 0 {
|
||||
// Request buffering is disabled.
|
||||
return r, nil
|
||||
}
|
||||
|
||||
@@ -480,6 +481,9 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
|
||||
canRetry := !bbOK || bb.canRetry()
|
||||
|
||||
res, err := ui.rt.RoundTrip(req)
|
||||
if err == nil {
|
||||
defer func() { _ = res.Body.Close() }()
|
||||
}
|
||||
|
||||
if errors.Is(r.Context().Err(), context.Canceled) {
|
||||
// Do not retry canceled requests.
|
||||
@@ -549,7 +553,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
|
||||
w.WriteHeader(res.StatusCode)
|
||||
|
||||
err = copyStreamToClient(w, res.Body)
|
||||
_ = res.Body.Close()
|
||||
|
||||
if errors.Is(r.Context().Err(), context.Canceled) {
|
||||
// Do not retry canceled requests.
|
||||
@@ -792,10 +795,11 @@ func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err err
|
||||
}
|
||||
|
||||
// bufferedBody serves two purposes:
|
||||
// 1. Enables request retries when the body size does not exceed maxBodySize
|
||||
// by fully buffering the body in memory.
|
||||
// 2. Prevents slow clients from reducing effective server capacity by
|
||||
// buffering the request body before acquiring a per-user concurrency slot.
|
||||
//
|
||||
// 1. It enables request retries when the request body size does not exceed maxBufSize
|
||||
// by fully buffering the request body in memory.
|
||||
// 2. It prevents slow clients from reducing effective server capacity
|
||||
// by buffering the request body before acquiring a per-user concurrency slot.
|
||||
//
|
||||
// See bufferRequestBody for details on how bufferedBody is used.
|
||||
type bufferedBody struct {
|
||||
@@ -819,7 +823,7 @@ func newBufferedBody(r io.ReadCloser, buf []byte, maxBufSize int) *bufferedBody
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051
|
||||
|
||||
if len(buf) < maxBufSize {
|
||||
// Read the full request body into buf.
|
||||
// The full request body has been already read into buf.
|
||||
r = nil
|
||||
}
|
||||
|
||||
@@ -832,7 +836,7 @@ func newBufferedBody(r io.ReadCloser, buf []byte, maxBufSize int) *bufferedBody
|
||||
// Read implements io.Reader interface.
|
||||
func (bb *bufferedBody) Read(p []byte) (int, error) {
|
||||
if bb.cannotRetry {
|
||||
return 0, fmt.Errorf("cannot read already closed body")
|
||||
return 0, fmt.Errorf("cannot read already closed request body")
|
||||
}
|
||||
if bb.bufOffset < len(bb.buf) {
|
||||
n := copy(p, bb.buf[bb.bufOffset:])
|
||||
|
||||
197
app/vmselect/vmui/assets/index-C24BPpD_.js
Normal file
66
app/vmselect/vmui/assets/vendor-BWBgVCcr.js
Normal file
1118
app/vmui/packages/vmui/package-lock.json
generated
@@ -23,14 +23,14 @@
|
||||
"classnames": "^2.5.1",
|
||||
"dayjs": "^1.11.20",
|
||||
"lodash.debounce": "^4.0.8",
|
||||
"marked": "^17.0.5",
|
||||
"preact": "^10.29.0",
|
||||
"qs": "^6.15.0",
|
||||
"marked": "^18.0.0",
|
||||
"preact": "^10.29.1",
|
||||
"qs": "^6.15.1",
|
||||
"react-input-mask": "^2.0.4",
|
||||
"react-router-dom": "^7.13.2",
|
||||
"react-router-dom": "^7.14.1",
|
||||
"uplot": "^1.6.32",
|
||||
"vite": "^8.0.7",
|
||||
"web-vitals": "^5.1.0"
|
||||
"vite": "^8.0.8",
|
||||
"web-vitals": "^5.2.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/eslintrc": "^3.3.5",
|
||||
@@ -39,24 +39,24 @@
|
||||
"@testing-library/jest-dom": "^6.9.1",
|
||||
"@testing-library/preact": "^3.2.4",
|
||||
"@types/lodash.debounce": "^4.0.9",
|
||||
"@types/node": "^25.5.0",
|
||||
"@types/node": "^25.6.0",
|
||||
"@types/qs": "^6.15.0",
|
||||
"@types/react": "^19.2.14",
|
||||
"@types/react-input-mask": "^3.0.6",
|
||||
"@types/react-router-dom": "^5.3.3",
|
||||
"@typescript-eslint/eslint-plugin": "^8.57.2",
|
||||
"@typescript-eslint/parser": "^8.57.2",
|
||||
"@typescript-eslint/eslint-plugin": "^8.58.2",
|
||||
"@typescript-eslint/parser": "^8.58.2",
|
||||
"cross-env": "^10.1.0",
|
||||
"eslint": "^9.39.2",
|
||||
"eslint-plugin-react": "^7.37.5",
|
||||
"eslint-plugin-unused-imports": "^4.4.1",
|
||||
"globals": "^17.4.0",
|
||||
"globals": "^17.5.0",
|
||||
"http-proxy-middleware": "^3.0.5",
|
||||
"jsdom": "^29.0.1",
|
||||
"postcss": "^8.5.8",
|
||||
"sass-embedded": "^1.98.0",
|
||||
"typescript": "^5.9.3",
|
||||
"vitest": "^4.1.1"
|
||||
"jsdom": "^29.0.2",
|
||||
"postcss": "^8.5.10",
|
||||
"sass-embedded": "^1.99.0",
|
||||
"typescript": "^6.0.2",
|
||||
"vitest": "^4.1.4"
|
||||
},
|
||||
"browserslist": {
|
||||
"production": [
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { useMemo } from "preact/compat";
|
||||
import "./style.scss";
|
||||
import { Alert as APIAlert } from "../../../types";
|
||||
import { createSearchParams } from "react-router-dom";
|
||||
import { Alert as APIAlert, Group } from "../../../types";
|
||||
import { Link } from "react-router-dom";
|
||||
import Button from "../../Main/Button/Button";
|
||||
import Badges, { BadgeColor } from "../Badges";
|
||||
import { formatEventTime } from "../helpers";
|
||||
@@ -9,12 +9,14 @@ import {
|
||||
SearchIcon,
|
||||
} from "../../Main/Icons";
|
||||
import CodeExample from "../../Main/CodeExample/CodeExample";
|
||||
import router from "../../../router";
|
||||
|
||||
interface BaseAlertProps {
|
||||
item: APIAlert;
|
||||
group?: Group;
|
||||
}
|
||||
|
||||
const BaseAlert = ({ item }: BaseAlertProps) => {
|
||||
const BaseAlert = ({ item, group }: BaseAlertProps) => {
|
||||
const query = item?.expression;
|
||||
const alertLabels = item?.labels || {};
|
||||
const alertLabelsItems = useMemo(() => {
|
||||
@@ -24,13 +26,19 @@ const BaseAlert = ({ item }: BaseAlertProps) => {
|
||||
}]));
|
||||
}, [alertLabels]);
|
||||
|
||||
const openQueryLink = () => {
|
||||
const params = {
|
||||
const queryLink = useMemo(() => {
|
||||
if (!group?.interval) return;
|
||||
|
||||
const params = new URLSearchParams({
|
||||
"g0.expr": query,
|
||||
"g0.end_time": ""
|
||||
};
|
||||
window.open(`#/?${createSearchParams(params).toString()}`, "_blank", "noopener noreferrer");
|
||||
};
|
||||
"g0.end_time": item.activeAt,
|
||||
// Interval is the Group's evaluation interval in float seconds as present in the file. See: /app/vmalert/rule/web.go
|
||||
"g0.step_input": `${group.interval}s`,
|
||||
"g0.relative_time": "none",
|
||||
});
|
||||
|
||||
return `${router.home}?${params.toString()}`;
|
||||
}, [query, item.activeAt, group?.interval]);
|
||||
|
||||
return (
|
||||
<div className="vm-explore-alerts-alert-item">
|
||||
@@ -45,15 +53,22 @@ const BaseAlert = ({ item }: BaseAlertProps) => {
|
||||
style={{ "text-align": "end" }}
|
||||
colSpan={2}
|
||||
>
|
||||
<Button
|
||||
size="small"
|
||||
variant="outlined"
|
||||
color="gray"
|
||||
startIcon={<SearchIcon />}
|
||||
onClick={openQueryLink}
|
||||
>
|
||||
<span className="vm-button-text">Run query</span>
|
||||
</Button>
|
||||
{queryLink && (
|
||||
<Link
|
||||
to={queryLink}
|
||||
target={"_blank"}
|
||||
rel="noreferrer"
|
||||
>
|
||||
<Button
|
||||
size="small"
|
||||
variant="outlined"
|
||||
color="gray"
|
||||
startIcon={<SearchIcon />}
|
||||
>
|
||||
<span className="vm-button-text">Run query</span>
|
||||
</Button>
|
||||
</Link>
|
||||
)}
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
|
||||
@@ -1,19 +1,21 @@
|
||||
import { useMemo } from "preact/compat";
|
||||
import "./style.scss";
|
||||
import { Rule as APIRule } from "../../../types";
|
||||
import { useNavigate, createSearchParams } from "react-router-dom";
|
||||
import { Group, Rule as APIRule } from "../../../types";
|
||||
import { useNavigate, Link } from "react-router-dom";
|
||||
import { SearchIcon, DetailsIcon } from "../../Main/Icons";
|
||||
import Button from "../../Main/Button/Button";
|
||||
import Alert from "../../Main/Alert/Alert";
|
||||
import Badges, { BadgeColor } from "../Badges";
|
||||
import { formatDuration, formatEventTime } from "../helpers";
|
||||
import CodeExample from "../../Main/CodeExample/CodeExample";
|
||||
import router from "../../../router";
|
||||
|
||||
interface BaseRuleProps {
|
||||
item: APIRule;
|
||||
group?: Group;
|
||||
}
|
||||
|
||||
const BaseRule = ({ item }: BaseRuleProps) => {
|
||||
const BaseRule = ({ item, group }: BaseRuleProps) => {
|
||||
const query = item?.query;
|
||||
const navigate = useNavigate();
|
||||
const openAlertLink = (id: string) => {
|
||||
@@ -33,13 +35,19 @@ const BaseRule = ({ item }: BaseRuleProps) => {
|
||||
}]));
|
||||
}, [ruleLabels]);
|
||||
|
||||
const openQueryLink = () => {
|
||||
const params = {
|
||||
const queryLink = useMemo(() => {
|
||||
if (!group?.interval) return;
|
||||
|
||||
const params = new URLSearchParams({
|
||||
"g0.expr": query,
|
||||
"g0.end_time": ""
|
||||
};
|
||||
window.open(`#/?${createSearchParams(params).toString()}`, "_blank", "noopener noreferrer");
|
||||
};
|
||||
"g0.end_time": item.lastEvaluation,
|
||||
// Interval is the Group's evaluation interval in float seconds as present in the file. See: /app/vmalert/rule/web.go
|
||||
"g0.step_input": `${group.interval}s`,
|
||||
"g0.relative_time": "none",
|
||||
});
|
||||
|
||||
return `${router.home}?${params.toString()}`;
|
||||
}, [query, item.lastEvaluation, group?.interval]);
|
||||
|
||||
return (
|
||||
<div className="vm-explore-alerts-rule-item">
|
||||
@@ -54,15 +62,22 @@ const BaseRule = ({ item }: BaseRuleProps) => {
|
||||
style={{ "text-align": "end" }}
|
||||
colSpan={2}
|
||||
>
|
||||
<Button
|
||||
size="small"
|
||||
variant="outlined"
|
||||
color="gray"
|
||||
startIcon={<SearchIcon />}
|
||||
onClick={openQueryLink}
|
||||
>
|
||||
<span className="vm-button-text">Run query</span>
|
||||
</Button>
|
||||
{queryLink && (
|
||||
<Link
|
||||
to={queryLink}
|
||||
target={"_blank"}
|
||||
rel="noreferrer"
|
||||
>
|
||||
<Button
|
||||
size="small"
|
||||
variant="outlined"
|
||||
color="gray"
|
||||
startIcon={<SearchIcon />}
|
||||
>
|
||||
<span className="vm-button-text">Run query</span>
|
||||
</Button>
|
||||
</Link>
|
||||
)}
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
|
||||
@@ -2,15 +2,16 @@ import { FC } from "preact/compat";
|
||||
import ItemHeader from "../ItemHeader";
|
||||
import Accordion from "../../Main/Accordion/Accordion";
|
||||
import "./style.scss";
|
||||
import { Rule as APIRule } from "../../../types";
|
||||
import { Group, Rule as APIRule } from "../../../types";
|
||||
import BaseRule from "../BaseRule";
|
||||
|
||||
interface RuleProps {
|
||||
states: Record<string, number>;
|
||||
rule: APIRule;
|
||||
group: Group;
|
||||
}
|
||||
|
||||
const Rule: FC<RuleProps> = ({ states, rule }) => {
|
||||
const Rule: FC<RuleProps> = ({ states, rule, group }) => {
|
||||
const state = Object.keys(states).length > 0 ? Object.keys(states)[0] : "ok";
|
||||
return (
|
||||
<div className={`vm-explore-alerts-rule vm-badge-item ${state.replace(" ", "-")}`}>
|
||||
@@ -25,7 +26,10 @@ const Rule: FC<RuleProps> = ({ states, rule }) => {
|
||||
name={rule.name}
|
||||
/>}
|
||||
>
|
||||
<BaseRule item={rule} />
|
||||
<BaseRule
|
||||
item={rule}
|
||||
group={group}
|
||||
/>
|
||||
</Accordion>
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -50,7 +50,6 @@ const RulesHeader = ({
|
||||
label="Rule type"
|
||||
placeholder="Please select rule type"
|
||||
onChange={onChangeRuleType}
|
||||
autofocus={!!types.length && !isMobile}
|
||||
includeAll
|
||||
searchable
|
||||
/>
|
||||
|
||||
@@ -17,7 +17,7 @@ export const formatDuration = (raw: number) => {
|
||||
|
||||
export const formatEventTime = (raw: string) => {
|
||||
const t = dayjs(raw);
|
||||
return t.year() <= 1 ? "Never" : t.format("DD MMM YYYY HH:mm:ss");
|
||||
return t.year() <= 1 ? "Never" : t.tz().format("DD MMM YYYY HH:mm:ss");
|
||||
};
|
||||
|
||||
export const getStates = (rule: Rule) => {
|
||||
|
||||
@@ -2,10 +2,11 @@ import Spinner from "../../components/Main/Spinner/Spinner";
|
||||
import Alert from "../../components/Main/Alert/Alert";
|
||||
import { useFetchItem } from "./hooks/useFetchItem";
|
||||
import "./style.scss";
|
||||
import { Alert as APIAlert } from "../../types";
|
||||
import { Alert as APIAlert, Group as APIGroup } from "../../types";
|
||||
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
|
||||
import BaseAlert from "../../components/ExploreAlerts/BaseAlert";
|
||||
import Modal from "../../components/Main/Modal/Modal";
|
||||
import { useFetchGroup } from "./hooks/useFetchGroup";
|
||||
|
||||
interface ExploreAlertProps {
|
||||
groupId: string;
|
||||
@@ -17,10 +18,19 @@ interface ExploreAlertProps {
|
||||
const ExploreAlert = ({ groupId, id, mode, onClose }: ExploreAlertProps) => {
|
||||
const {
|
||||
item,
|
||||
isLoading,
|
||||
error,
|
||||
isLoading: isLoadingItem,
|
||||
error: errorItem,
|
||||
} = useFetchItem<APIAlert>({ groupId, id, mode });
|
||||
|
||||
const {
|
||||
group,
|
||||
isLoading: isLoadingGroup,
|
||||
error: errorGroup,
|
||||
} = useFetchGroup<APIGroup>({ id: groupId });
|
||||
|
||||
const error = errorItem || errorGroup;
|
||||
const isLoading = isLoadingItem || isLoadingGroup;
|
||||
|
||||
if (isLoading) return (
|
||||
<Spinner />
|
||||
);
|
||||
@@ -51,7 +61,12 @@ const ExploreAlert = ({ groupId, id, mode, onClose }: ExploreAlertProps) => {
|
||||
onClose={onClose}
|
||||
>
|
||||
<div className="vm-explore-alerts">
|
||||
{item && (<BaseAlert item={item} />) || (
|
||||
{item ? (
|
||||
<BaseAlert
|
||||
item={item}
|
||||
group={group}
|
||||
/>
|
||||
) : (
|
||||
<Alert variant="info">{noItemFound}</Alert>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -2,11 +2,12 @@ import Spinner from "../../components/Main/Spinner/Spinner";
|
||||
import Alert from "../../components/Main/Alert/Alert";
|
||||
import { useFetchItem } from "./hooks/useFetchItem";
|
||||
import "./style.scss";
|
||||
import { Rule as APIRule } from "../../types";
|
||||
import { Group as APIGroup, Rule as APIRule } from "../../types";
|
||||
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
|
||||
import BaseRule from "../../components/ExploreAlerts/BaseRule";
|
||||
import Modal from "../../components/Main/Modal/Modal";
|
||||
import { getStates } from "../../components/ExploreAlerts/helpers";
|
||||
import { useFetchGroup } from "./hooks/useFetchGroup";
|
||||
|
||||
interface ExploreRuleProps {
|
||||
groupId: string;
|
||||
@@ -18,10 +19,19 @@ interface ExploreRuleProps {
|
||||
const ExploreRule = ({ groupId, id, mode, onClose }: ExploreRuleProps) => {
|
||||
const {
|
||||
item,
|
||||
isLoading,
|
||||
error,
|
||||
isLoading: isLoadingItem,
|
||||
error: errorItem,
|
||||
} = useFetchItem<APIRule>({ groupId, id, mode });
|
||||
|
||||
const {
|
||||
group,
|
||||
isLoading: isLoadingGroup,
|
||||
error: errorGroup,
|
||||
} = useFetchGroup<APIGroup>({ id: groupId });
|
||||
|
||||
const error = errorItem || errorGroup;
|
||||
const isLoading = isLoadingItem || isLoadingGroup;
|
||||
|
||||
if (isLoading) return (
|
||||
<Spinner />
|
||||
);
|
||||
@@ -49,7 +59,12 @@ const ExploreRule = ({ groupId, id, mode, onClose }: ExploreRuleProps) => {
|
||||
onClose={onClose}
|
||||
>
|
||||
<div className="vm-explore-alerts">
|
||||
{item && (<BaseRule item={item} />) || (
|
||||
{item ? (
|
||||
<BaseRule
|
||||
item={item}
|
||||
group={group}
|
||||
/>
|
||||
) : (
|
||||
<Alert variant="info">{noItemFound}</Alert>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -132,7 +132,7 @@ const ExploreRules: FC = () => {
|
||||
newParams.set("page_num", "1");
|
||||
setSearchParams(newParams);
|
||||
const changes = getChanges(title, states);
|
||||
setStates(changes.length == allStates.length ? [] : changes);
|
||||
setStates(changes.length === allStates.length ? [] : changes);
|
||||
}, [states, searchParams]);
|
||||
|
||||
const handleChangeRuleType = useCallback((title: string) => {
|
||||
@@ -186,6 +186,7 @@ const ExploreRules: FC = () => {
|
||||
<Rule
|
||||
key={`rule-${rule.id}`}
|
||||
rule={rule}
|
||||
group={group}
|
||||
states={getStates(rule)}
|
||||
/>
|
||||
))}
|
||||
|
||||
@@ -15,13 +15,12 @@
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"module": "esnext",
|
||||
"moduleResolution": "node",
|
||||
"moduleResolution": "bundler",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"noEmit": true,
|
||||
"jsx": "react-jsx",
|
||||
"jsxImportSource": "preact",
|
||||
"downlevelIteration": true,
|
||||
"noUnusedLocals": true,
|
||||
"paths": {
|
||||
"react": ["./node_modules/preact/compat/"],
|
||||
@@ -32,5 +31,8 @@
|
||||
},
|
||||
"include": [
|
||||
"src"
|
||||
],
|
||||
"exclude": [
|
||||
"scripts/**/*.ts"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -33,37 +33,41 @@ func (c *Client) CloseConnections() {
|
||||
c.httpCli.CloseIdleConnections()
|
||||
}
|
||||
|
||||
// Get sends a HTTP GET request, returns
|
||||
// Get sends an HTTP GET request, returns
|
||||
// the response body and status code to the caller.
|
||||
func (c *Client) Get(t *testing.T, url string) (string, int) {
|
||||
func (c *Client) Get(t *testing.T, url string, headers http.Header) (string, int) {
|
||||
t.Helper()
|
||||
return c.do(t, http.MethodGet, url, "", nil)
|
||||
return c.do(t, http.MethodGet, url, nil, headers)
|
||||
}
|
||||
|
||||
// Post sends a HTTP POST request, returns
|
||||
// Post sends an HTTP POST request, returns
|
||||
// the response body and status code to the caller.
|
||||
func (c *Client) Post(t *testing.T, url, contentType string, data []byte) (string, int) {
|
||||
func (c *Client) Post(t *testing.T, url string, data []byte, headers http.Header) (string, int) {
|
||||
t.Helper()
|
||||
return c.do(t, http.MethodPost, url, contentType, data)
|
||||
return c.do(t, http.MethodPost, url, data, headers)
|
||||
}
|
||||
|
||||
// PostForm sends a HTTP POST request containing the POST-form data, returns
|
||||
// PostForm sends an HTTP POST request containing the POST-form data with attached getHeaders, returns
|
||||
// the response body and status code to the caller.
|
||||
func (c *Client) PostForm(t *testing.T, url string, data url.Values) (string, int) {
|
||||
func (c *Client) PostForm(t *testing.T, url string, data url.Values, headers http.Header) (string, int) {
|
||||
t.Helper()
|
||||
return c.Post(t, url, "application/x-www-form-urlencoded", []byte(data.Encode()))
|
||||
if headers == nil {
|
||||
headers = make(http.Header)
|
||||
}
|
||||
headers.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
return c.Post(t, url, []byte(data.Encode()), headers)
|
||||
}
|
||||
|
||||
// Delete sends a HTTP DELETE request and returns the response body and status code
|
||||
// Delete sends an HTTP DELETE request and returns the response body and status code
|
||||
// to the caller.
|
||||
func (c *Client) Delete(t *testing.T, url string) (string, int) {
|
||||
t.Helper()
|
||||
return c.do(t, http.MethodDelete, url, "", nil)
|
||||
return c.do(t, http.MethodDelete, url, nil, nil)
|
||||
}
|
||||
|
||||
// do prepares a HTTP request, sends it to the server, receives the response
|
||||
// do prepares an HTTP request, sends it to the server, receives the response
|
||||
// from the server, returns the response body and status code to the caller.
|
||||
func (c *Client) do(t *testing.T, method, url, contentType string, data []byte) (string, int) {
|
||||
func (c *Client) do(t *testing.T, method, url string, data []byte, headers http.Header) (string, int) {
|
||||
t.Helper()
|
||||
|
||||
req, err := http.NewRequest(method, url, bytes.NewReader(data))
|
||||
@@ -71,9 +75,7 @@ func (c *Client) do(t *testing.T, method, url, contentType string, data []byte)
|
||||
t.Fatalf("could not create a HTTP request: %v", err)
|
||||
}
|
||||
|
||||
if len(contentType) > 0 {
|
||||
req.Header.Add("Content-Type", contentType)
|
||||
}
|
||||
req.Header = headers
|
||||
res, err := c.httpCli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("could not send HTTP request: %v", err)
|
||||
@@ -135,7 +137,7 @@ func (app *ServesMetrics) GetIntMetric(t *testing.T, metricName string) int {
|
||||
func (app *ServesMetrics) GetMetric(t *testing.T, metricName string) float64 {
|
||||
t.Helper()
|
||||
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL)
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
@@ -161,7 +163,7 @@ func (app *ServesMetrics) GetMetricsByPrefix(t *testing.T, prefix string) []floa
|
||||
|
||||
values := []float64{}
|
||||
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL)
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
@@ -190,7 +192,7 @@ func (app *ServesMetrics) GetMetricsByRegexp(t *testing.T, re *regexp.Regexp) []
|
||||
|
||||
values := []float64{}
|
||||
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL)
|
||||
metrics, statusCode := app.cli.Get(t, app.metricsURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"sort"
|
||||
@@ -88,6 +89,23 @@ type QueryOpts struct {
|
||||
MaxLookback string
|
||||
LatencyOffset string
|
||||
Format string
|
||||
NoCache string
|
||||
Headers http.Header
|
||||
}
|
||||
|
||||
// getTenant returns tenant with optional default value
|
||||
func (qos *QueryOpts) getTenant() string {
|
||||
if qos.Tenant == "" {
|
||||
return "0"
|
||||
}
|
||||
return qos.Tenant
|
||||
}
|
||||
|
||||
func (qos *QueryOpts) getHeaders() http.Header {
|
||||
if qos.Headers == nil {
|
||||
qos.Headers = make(http.Header)
|
||||
}
|
||||
return qos.Headers
|
||||
}
|
||||
|
||||
func (qos *QueryOpts) asURLValues() url.Values {
|
||||
@@ -112,18 +130,11 @@ func (qos *QueryOpts) asURLValues() url.Values {
|
||||
addNonEmpty("max_lookback", qos.MaxLookback)
|
||||
addNonEmpty("latency_offset", qos.LatencyOffset)
|
||||
addNonEmpty("format", qos.Format)
|
||||
addNonEmpty("nocache", qos.NoCache)
|
||||
|
||||
return uv
|
||||
}
|
||||
|
||||
// getTenant returns tenant with optional default value
|
||||
func (qos *QueryOpts) getTenant() string {
|
||||
if qos.Tenant == "" {
|
||||
return "0"
|
||||
}
|
||||
return qos.Tenant
|
||||
}
|
||||
|
||||
// PrometheusAPIV1QueryResponse is an inmemory representation of the
|
||||
// /prometheus/api/v1/query or /prometheus/api/v1/query_range response.
|
||||
type PrometheusAPIV1QueryResponse struct {
|
||||
|
||||
@@ -28,7 +28,6 @@ func TestSingleBackupRestore(t *testing.T) {
|
||||
return tc.MustStartVmsingle("vmsingle", []string{
|
||||
"-storageDataPath=" + storageDataPath,
|
||||
"-retentionPeriod=100y",
|
||||
"-search.maxStalenessInterval=1m",
|
||||
})
|
||||
},
|
||||
stopSUT: func() {
|
||||
@@ -70,9 +69,7 @@ func TestClusterBackupRestore(t *testing.T) {
|
||||
VminsertInstance: "vminsert",
|
||||
VminsertFlags: []string{},
|
||||
VmselectInstance: "vmselect",
|
||||
VmselectFlags: []string{
|
||||
"-search.maxStalenessInterval=1m",
|
||||
},
|
||||
VmselectFlags: []string{},
|
||||
})
|
||||
},
|
||||
stopSUT: func() {
|
||||
@@ -100,15 +97,14 @@ func TestClusterBackupRestore(t *testing.T) {
|
||||
func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
t := tc.T()
|
||||
|
||||
const msecPerMinute = 60 * 1000
|
||||
genData := func(count int, prefix string, start int64) (recs []string, wantSeries []map[string]string, wantQueryResults []*apptest.QueryResult) {
|
||||
genData := func(count int, prefix string, start, step int64) (recs []string, wantSeries []map[string]string, wantQueryResults []*apptest.QueryResult) {
|
||||
recs = make([]string, count)
|
||||
wantSeries = make([]map[string]string, count)
|
||||
wantQueryResults = make([]*apptest.QueryResult, count)
|
||||
for i := range count {
|
||||
name := fmt.Sprintf("%s_%03d", prefix, i)
|
||||
value := float64(i)
|
||||
timestamp := start + int64(i)*msecPerMinute
|
||||
timestamp := start + int64(i)*step
|
||||
|
||||
recs[i] = fmt.Sprintf("%s %f %d", name, value, timestamp)
|
||||
wantSeries[i] = map[string]string{"__name__": name}
|
||||
@@ -148,15 +144,17 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
|
||||
// assertSeries retrieves all data from the storage and compares it with the
|
||||
// expected result.
|
||||
assertQueryResults := func(app apptest.PrometheusQuerier, query string, start, end int64, want []*apptest.QueryResult) {
|
||||
assertQueryResults := func(app apptest.PrometheusQuerier, query string, start, end, step int64, want []*apptest.QueryResult) {
|
||||
t.Helper()
|
||||
tc.Assert(&apptest.AssertOptions{
|
||||
Msg: "unexpected /api/v1/query_range response",
|
||||
Got: func() any {
|
||||
return app.PrometheusAPIV1QueryRange(t, query, apptest.QueryOpts{
|
||||
Start: fmt.Sprintf("%d", start),
|
||||
End: fmt.Sprintf("%d", end),
|
||||
Step: "60s",
|
||||
Start: fmt.Sprintf("%d", start),
|
||||
End: fmt.Sprintf("%d", end),
|
||||
Step: fmt.Sprintf("%dms", step),
|
||||
MaxLookback: fmt.Sprintf("%dms", step-1),
|
||||
NoCache: "1",
|
||||
})
|
||||
},
|
||||
Want: &apptest.PrometheusAPIV1QueryResponse{
|
||||
@@ -167,7 +165,6 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
},
|
||||
},
|
||||
FailNow: true,
|
||||
Retries: 300,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -194,8 +191,9 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
// below.
|
||||
const numMetrics = 1000
|
||||
// With 1000 metrics (one per minute), the time range spans 2 months.
|
||||
end := time.Date(2025, 3, 1, 10, 0, 0, 0, time.UTC).UnixMilli()
|
||||
start := end - numMetrics*msecPerMinute
|
||||
start := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC).UnixMilli()
|
||||
end := time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC).UnixMilli()
|
||||
step := (end - start) / numMetrics
|
||||
|
||||
// Verify backup/restore:
|
||||
//
|
||||
@@ -209,8 +207,8 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
// - Start vmsingle
|
||||
// - Ensure that the queries return batch1 data only.
|
||||
|
||||
batch1Data, wantBatch1Series, wantBatch1QueryResults := genData(numMetrics, "batch1", start)
|
||||
batch2Data, wantBatch2Series, wantBatch2QueryResults := genData(numMetrics, "batch2", start)
|
||||
batch1Data, wantBatch1Series, wantBatch1QueryResults := genData(numMetrics, "batch1", start, step)
|
||||
batch2Data, wantBatch2Series, wantBatch2QueryResults := genData(numMetrics, "batch2", start, step)
|
||||
wantBatch12Series := slices.Concat(wantBatch1Series, wantBatch2Series)
|
||||
wantBatch12QueryResults := slices.Concat(wantBatch1QueryResults, wantBatch2QueryResults)
|
||||
|
||||
@@ -219,13 +217,14 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
sut.PrometheusAPIV1ImportPrometheus(t, batch1Data, apptest.QueryOpts{})
|
||||
sut.ForceFlush(t)
|
||||
assertSeries(sut, `{__name__=~"batch1.*"}`, start, end, wantBatch1Series)
|
||||
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, wantBatch1QueryResults)
|
||||
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, step, wantBatch1QueryResults)
|
||||
|
||||
createBackup(sut, "batch1")
|
||||
|
||||
sut.PrometheusAPIV1ImportPrometheus(t, batch2Data, apptest.QueryOpts{})
|
||||
sut.ForceFlush(t)
|
||||
assertSeries(sut, `{__name__=~"batch(1|2).*"}`, start, end, wantBatch12Series)
|
||||
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, start, end, wantBatch12QueryResults)
|
||||
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, start, end, step, wantBatch12QueryResults)
|
||||
createBackup(sut, "batch12")
|
||||
|
||||
opts.stopSUT()
|
||||
@@ -235,5 +234,5 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
|
||||
sut = opts.startSUT()
|
||||
|
||||
assertSeries(sut, `{__name__=~"batch1.*"}`, start, end, wantBatch1Series)
|
||||
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, wantBatch1QueryResults)
|
||||
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, step, wantBatch1QueryResults)
|
||||
}
|
||||
|
||||
@@ -76,11 +76,13 @@ func (app *Vmagent) APIV1ImportPrometheus(t *testing.T, records []string, opts Q
|
||||
// Flushing may still be in progress on the function return.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1importprometheus
|
||||
func (app *Vmagent) APIV1ImportPrometheusNoWaitFlush(t *testing.T, records []string, _ QueryOpts) {
|
||||
func (app *Vmagent) APIV1ImportPrometheusNoWaitFlush(t *testing.T, records []string, opts QueryOpts) {
|
||||
t.Helper()
|
||||
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
_, statusCode := app.cli.Post(t, app.apiV1ImportPrometheusURL, "text/plain", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
_, statusCode := app.cli.Post(t, app.apiV1ImportPrometheusURL, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func StartVminsert(instance string, flags []string, cli *Client, output io.Write
|
||||
extractREs = append(extractREs, regexp.MustCompile(logRecord))
|
||||
}
|
||||
|
||||
app, stderrExtracts, err := startApp(instance, "../../bin/vminsert", flags, &appOptions{
|
||||
app, stderrExtracts, err := startApp(instance, "../../bin/vminsert-race", flags, &appOptions{
|
||||
defaultFlags: map[string]string{
|
||||
"-httpListenAddr": "127.0.0.1:0",
|
||||
"-clusternativeListenAddr": "127.0.0.1:0",
|
||||
@@ -114,8 +114,10 @@ func (app *Vminsert) InfluxWrite(t *testing.T, records []string, opts QueryOpts)
|
||||
}
|
||||
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
app.sendBlocking(t, len(records), func() {
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -146,8 +148,10 @@ func (app *Vminsert) PrometheusAPIV1ImportCSV(t *testing.T, records []string, op
|
||||
url += "?" + uvs
|
||||
}
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
app.sendBlocking(t, len(records), func() {
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -168,8 +172,10 @@ func (app *Vminsert) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts
|
||||
if len(uvs) > 0 {
|
||||
url += "?" + uvs
|
||||
}
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
app.sendBlocking(t, 1, func() {
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -191,8 +197,10 @@ func (app *Vminsert) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOp
|
||||
url += "?" + uvs
|
||||
}
|
||||
data := []byte("[" + strings.Join(records, ",") + "]")
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "application/json")
|
||||
app.sendBlocking(t, len(records), func() {
|
||||
_, statusCode := app.cli.Post(t, url, "application/json", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -211,8 +219,10 @@ func (app *Vminsert) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest,
|
||||
if prommetadata.IsEnabled() {
|
||||
recordsCount += len(wr.Metadata)
|
||||
}
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "application/x-protobuf")
|
||||
app.sendBlocking(t, recordsCount, func() {
|
||||
_, statusCode := app.cli.Post(t, url, "application/x-protobuf", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -237,8 +247,22 @@ func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
var recordsCount int
|
||||
var metadataRecords int
|
||||
uniqueMetadataMetricNames := make(map[string]struct{})
|
||||
for _, record := range records {
|
||||
if strings.HasPrefix(record, "#") {
|
||||
// metric metadata has the following format:
|
||||
//# HELP importprometheus_series
|
||||
//# TYPE importprometheus_series
|
||||
// it results into single metadata record
|
||||
if strings.HasPrefix(record, "# ") {
|
||||
metadataItems := strings.Split(record, " ")
|
||||
if len(metadataItems) < 2 {
|
||||
t.Fatalf("BUG: unexpected metadata format=%q", record)
|
||||
}
|
||||
metricName := metadataItems[2]
|
||||
if _, ok := uniqueMetadataMetricNames[metricName]; ok {
|
||||
continue
|
||||
}
|
||||
uniqueMetadataMetricNames[metricName] = struct{}{}
|
||||
metadataRecords++
|
||||
continue
|
||||
}
|
||||
@@ -247,8 +271,10 @@ func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
|
||||
if prommetadata.IsEnabled() {
|
||||
recordsCount += metadataRecords
|
||||
}
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
app.sendBlocking(t, recordsCount, func() {
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -268,8 +294,10 @@ func (app *Vminsert) ZabbixConnectorHistory(t *testing.T, records []string, opts
|
||||
url += "?" + uvs
|
||||
}
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "application/json")
|
||||
app.sendBlocking(t, len(records), func() {
|
||||
_, statusCode := app.cli.Post(t, url, "application/json", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ type Vmselect struct {
|
||||
// sets the default flags and populates the app instance state with runtime
|
||||
// values extracted from the application log (such as httpListenAddr)
|
||||
func StartVmselect(instance string, flags []string, cli *Client, output io.Writer) (*Vmselect, error) {
|
||||
app, stderrExtracts, err := startApp(instance, "../../bin/vmselect", flags, &appOptions{
|
||||
app, stderrExtracts, err := startApp(instance, "../../bin/vmselect-race", flags, &appOptions{
|
||||
defaultFlags: map[string]string{
|
||||
"-httpListenAddr": "127.0.0.1:0",
|
||||
"-clusternativeListenAddr": "127.0.0.1:0",
|
||||
@@ -76,7 +76,7 @@ func (app *Vmselect) PrometheusAPIV1Export(t *testing.T, query string, opts Quer
|
||||
values := opts.asURLValues()
|
||||
values.Add("match[]", query)
|
||||
values.Add("format", "promapi")
|
||||
res, _ := app.cli.PostForm(t, exportURL, values)
|
||||
res, _ := app.cli.PostForm(t, exportURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1QueryResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -92,7 +92,7 @@ func (app *Vmselect) PrometheusAPIV1ExportNative(t *testing.T, query string, opt
|
||||
values := opts.asURLValues()
|
||||
values.Add("match[]", query)
|
||||
values.Add("format", "promapi")
|
||||
res, _ := app.cli.PostForm(t, exportURL, values)
|
||||
res, _ := app.cli.PostForm(t, exportURL, values, opts.Headers)
|
||||
return []byte(res)
|
||||
}
|
||||
|
||||
@@ -108,7 +108,7 @@ func (app *Vmselect) PrometheusAPIV1Query(t *testing.T, query string, opts Query
|
||||
values := opts.asURLValues()
|
||||
values.Add("query", query)
|
||||
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1QueryResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -124,7 +124,7 @@ func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query string, opts
|
||||
values := opts.asURLValues()
|
||||
values.Add("query", query)
|
||||
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1QueryResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -139,7 +139,7 @@ func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts
|
||||
values := opts.asURLValues()
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
res, _ := app.cli.PostForm(t, seriesURL, values)
|
||||
res, _ := app.cli.PostForm(t, seriesURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1SeriesResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -153,7 +153,7 @@ func (app *Vmselect) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *P
|
||||
seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series/count", app.httpListenAddr, opts.getTenant())
|
||||
values := opts.asURLValues()
|
||||
|
||||
res, _ := app.cli.PostForm(t, seriesURL, values)
|
||||
res, _ := app.cli.PostForm(t, seriesURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1SeriesCountResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -168,7 +168,7 @@ func (app *Vmselect) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/labels", app.httpListenAddr, opts.getTenant())
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1LabelsResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ func (app *Vmselect) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQu
|
||||
values.Add("match[]", matchQuery)
|
||||
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/label/%s/values", app.httpListenAddr, opts.getTenant(), labelName)
|
||||
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1LabelValuesResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -197,7 +197,7 @@ func (app *Vmselect) PrometheusAPIV1Metadata(t *testing.T, metric string, limit
|
||||
values.Add("limit", strconv.Itoa(limit))
|
||||
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/metadata", app.httpListenAddr, opts.getTenant())
|
||||
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1Metadata(t, res)
|
||||
}
|
||||
|
||||
@@ -212,7 +212,7 @@ func (app *Vmselect) APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string,
|
||||
values := opts.asURLValues()
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
|
||||
}
|
||||
@@ -231,7 +231,7 @@ func (app *Vmselect) MetricNamesStats(t *testing.T, limit, le, matchPattern stri
|
||||
values.Add("match_pattern", matchPattern)
|
||||
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/status/metric_names_stats", app.httpListenAddr, opts.getTenant())
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
@@ -251,7 +251,7 @@ func (app *Vmselect) MetricNamesStatsReset(t *testing.T, opts QueryOpts) {
|
||||
values := opts.asURLValues()
|
||||
queryURL := fmt.Sprintf("http://%s/admin/api/v1/admin/status/metric_names_stats/reset", app.httpListenAddr)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
|
||||
}
|
||||
@@ -275,7 +275,7 @@ func (app *Vmselect) APIV1StatusTSDB(t *testing.T, matchQuery string, date strin
|
||||
addNonEmpty("topN", topN)
|
||||
addNonEmpty("date", date)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, seriesURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, seriesURL, values, opts.Headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
@@ -295,7 +295,7 @@ func (app *Vmselect) GraphiteMetricsIndex(t *testing.T, opts QueryOpts) Graphite
|
||||
t.Helper()
|
||||
|
||||
seriesURL := fmt.Sprintf("http://%s/select/%s/graphite/metrics/index.json", app.httpListenAddr, opts.getTenant())
|
||||
res, statusCode := app.cli.Get(t, seriesURL)
|
||||
res, statusCode := app.cli.Get(t, seriesURL, opts.Headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
@@ -317,7 +317,7 @@ func (app *Vmselect) GraphiteTagsTagSeries(t *testing.T, record string, opts Que
|
||||
values := opts.asURLValues()
|
||||
values.Add("path", record)
|
||||
|
||||
_, statusCode := app.cli.PostForm(t, url, values)
|
||||
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
|
||||
if got, want := statusCode, http.StatusNotImplemented; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", got, want)
|
||||
}
|
||||
@@ -332,7 +332,7 @@ func (app *Vmselect) GraphiteTagsTagMultiSeries(t *testing.T, records []string,
|
||||
values.Add("path", rec)
|
||||
}
|
||||
|
||||
_, statusCode := app.cli.PostForm(t, url, values)
|
||||
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
|
||||
if got, want := statusCode, http.StatusNotImplemented; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", got, want)
|
||||
}
|
||||
@@ -343,7 +343,7 @@ func (app *Vmselect) APIV1AdminTenants(t *testing.T) *AdminTenantsResponse {
|
||||
t.Helper()
|
||||
|
||||
tenantsURL := fmt.Sprintf("http://%s/admin/tenants", app.httpListenAddr)
|
||||
res, statusCode := app.cli.Get(t, tenantsURL)
|
||||
res, statusCode := app.cli.Get(t, tenantsURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func StartVmsingleAt(instance, binary string, flags []string, cli *Client, outpu
|
||||
func (app *Vmsingle) ForceFlush(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
_, statusCode := app.cli.Get(t, app.forceFlushURL)
|
||||
_, statusCode := app.cli.Get(t, app.forceFlushURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
@@ -108,7 +108,7 @@ func (app *Vmsingle) ForceFlush(t *testing.T) {
|
||||
func (app *Vmsingle) ForceMerge(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
_, statusCode := app.cli.Get(t, app.forceMergeURL)
|
||||
_, statusCode := app.cli.Get(t, app.forceMergeURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
@@ -130,8 +130,9 @@ func (app *Vmsingle) InfluxWrite(t *testing.T, records []string, opts QueryOpts)
|
||||
if len(uvs) > 0 {
|
||||
url += "?" + uvs
|
||||
}
|
||||
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -161,7 +162,9 @@ func (app *Vmsingle) PrometheusAPIV1ImportCSV(t *testing.T, records []string, op
|
||||
url += "?" + uvs
|
||||
}
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -181,7 +184,9 @@ func (app *Vmsingle) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts
|
||||
if len(uvs) > 0 {
|
||||
url += "?" + uvs
|
||||
}
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -203,7 +208,9 @@ func (app *Vmsingle) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOp
|
||||
url += "?" + uvs
|
||||
}
|
||||
data := []byte("[" + strings.Join(records, ",") + "]")
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -212,11 +219,13 @@ func (app *Vmsingle) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOp
|
||||
// PrometheusAPIV1Write is a test helper function that inserts a
|
||||
// collection of records in Prometheus remote-write format by sending a HTTP
|
||||
// POST request to /prometheus/api/v1/write vmsingle endpoint.
|
||||
func (app *Vmsingle) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, _ QueryOpts) {
|
||||
func (app *Vmsingle) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, opts QueryOpts) {
|
||||
t.Helper()
|
||||
|
||||
data := snappy.Encode(nil, wr.MarshalProtobuf(nil))
|
||||
_, statusCode := app.cli.Post(t, app.prometheusAPIV1WriteURL, "application/x-protobuf", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "application/x-protobuf")
|
||||
_, statusCode := app.cli.Post(t, app.prometheusAPIV1WriteURL, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -237,9 +246,10 @@ func (app *Vmsingle) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
|
||||
if len(uvs) > 0 {
|
||||
url += "?" + uvs
|
||||
}
|
||||
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "text/plain")
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
_, statusCode := app.cli.Post(t, url, "text/plain", data)
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
|
||||
}
|
||||
@@ -256,7 +266,7 @@ func (app *Vmsingle) PrometheusAPIV1Export(t *testing.T, query string, opts Quer
|
||||
values.Add("match[]", query)
|
||||
values.Add("format", "promapi")
|
||||
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportURL, values)
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1QueryResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -273,7 +283,7 @@ func (app *Vmsingle) PrometheusAPIV1ExportNative(t *testing.T, query string, opt
|
||||
values.Add("match[]", query)
|
||||
values.Add("format", "promapi")
|
||||
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportNativeURL, values)
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportNativeURL, values, opts.Headers)
|
||||
return []byte(res)
|
||||
}
|
||||
|
||||
@@ -287,7 +297,7 @@ func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query string, opts Query
|
||||
|
||||
values := opts.asURLValues()
|
||||
values.Add("query", query)
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, values)
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1QueryResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -302,7 +312,7 @@ func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query string, opts
|
||||
values := opts.asURLValues()
|
||||
values.Add("query", query)
|
||||
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, values)
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1QueryResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -316,7 +326,7 @@ func (app *Vmsingle) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts
|
||||
values := opts.asURLValues()
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, values)
|
||||
res, _ := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1SeriesResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -330,7 +340,7 @@ func (app *Vmsingle) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *P
|
||||
values := opts.asURLValues()
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/series/count", app.httpListenAddr)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1SeriesCountResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -345,7 +355,7 @@ func (app *Vmsingle) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/labels", app.httpListenAddr)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1LabelsResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -360,7 +370,7 @@ func (app *Vmsingle) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQu
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/label/%s/values", app.httpListenAddr, labelName)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1LabelValuesResponse(t, res)
|
||||
}
|
||||
|
||||
@@ -374,7 +384,7 @@ func (app *Vmsingle) PrometheusAPIV1Metadata(t *testing.T, metric string, limit
|
||||
values.Add("limit", strconv.Itoa(limit))
|
||||
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/metadata", app.httpListenAddr)
|
||||
|
||||
res, _ := app.cli.PostForm(t, queryURL, values)
|
||||
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
return NewPrometheusAPIV1Metadata(t, res)
|
||||
}
|
||||
|
||||
@@ -389,7 +399,7 @@ func (app *Vmsingle) APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string,
|
||||
values := opts.asURLValues()
|
||||
values.Add("match[]", matchQuery)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
|
||||
}
|
||||
@@ -402,7 +412,7 @@ func (app *Vmsingle) GraphiteMetricsIndex(t *testing.T, _ QueryOpts) GraphiteMet
|
||||
t.Helper()
|
||||
|
||||
seriesURL := fmt.Sprintf("http://%s/metrics/index.json", app.httpListenAddr)
|
||||
res, statusCode := app.cli.Get(t, seriesURL)
|
||||
res, statusCode := app.cli.Get(t, seriesURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
@@ -424,7 +434,7 @@ func (app *Vmsingle) GraphiteTagsTagSeries(t *testing.T, record string, opts Que
|
||||
values := opts.asURLValues()
|
||||
values.Add("path", record)
|
||||
|
||||
_, statusCode := app.cli.PostForm(t, url, values)
|
||||
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
|
||||
if got, want := statusCode, http.StatusNotImplemented; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", got, want)
|
||||
}
|
||||
@@ -439,7 +449,7 @@ func (app *Vmsingle) GraphiteTagsTagMultiSeries(t *testing.T, records []string,
|
||||
values.Add("path", rec)
|
||||
}
|
||||
|
||||
_, statusCode := app.cli.PostForm(t, url, values)
|
||||
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
|
||||
if got, want := statusCode, http.StatusNotImplemented; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", got, want)
|
||||
}
|
||||
@@ -458,7 +468,7 @@ func (app *Vmsingle) APIV1StatusMetricNamesStats(t *testing.T, limit, le, matchP
|
||||
values.Add("match_pattern", matchPattern)
|
||||
queryURL := fmt.Sprintf("http://%s/api/v1/status/metric_names_stats", app.httpListenAddr)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
@@ -478,7 +488,7 @@ func (app *Vmsingle) APIV1AdminStatusMetricNamesStatsReset(t *testing.T, opts Qu
|
||||
values := opts.asURLValues()
|
||||
queryURL := fmt.Sprintf("http://%s/api/v1/admin/status/metric_names_stats/reset", app.httpListenAddr)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
|
||||
if statusCode != http.StatusNoContent {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
|
||||
}
|
||||
@@ -491,7 +501,7 @@ func (app *Vmsingle) APIV1AdminStatusMetricNamesStatsReset(t *testing.T, opts Qu
|
||||
func (app *Vmsingle) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
|
||||
t.Helper()
|
||||
|
||||
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), "", nil)
|
||||
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), nil, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
@@ -517,7 +527,7 @@ func (app *Vmsingle) APIV1AdminTSDBSnapshot(t *testing.T) *APIV1AdminTSDBSnapsho
|
||||
t.Helper()
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/api/v1/admin/tsdb/snapshot", app.httpListenAddr)
|
||||
data, statusCode := app.cli.Post(t, queryURL, "", nil)
|
||||
data, statusCode := app.cli.Post(t, queryURL, nil, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
@@ -538,7 +548,7 @@ func (app *Vmsingle) SnapshotList(t *testing.T) *SnapshotListResponse {
|
||||
t.Helper()
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/snapshot/list", app.httpListenAddr)
|
||||
data, statusCode := app.cli.Get(t, queryURL)
|
||||
data, statusCode := app.cli.Get(t, queryURL, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
@@ -584,7 +594,7 @@ func (app *Vmsingle) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse
|
||||
t.Helper()
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/snapshot/delete_all", app.httpListenAddr)
|
||||
data, statusCode := app.cli.Get(t, queryURL)
|
||||
data, statusCode := app.cli.Get(t, queryURL, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
@@ -615,7 +625,7 @@ func (app *Vmsingle) APIV1StatusTSDB(t *testing.T, matchQuery string, date strin
|
||||
addNonEmpty("topN", topN)
|
||||
addNonEmpty("date", date)
|
||||
|
||||
res, statusCode := app.cli.PostForm(t, seriesURL, values)
|
||||
res, statusCode := app.cli.PostForm(t, seriesURL, values, opts.Headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
|
||||
}
|
||||
@@ -641,7 +651,9 @@ func (app *Vmsingle) ZabbixConnectorHistory(t *testing.T, records []string, opts
|
||||
url += "?" + uvs
|
||||
}
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
_, statusCode := app.cli.Post(t, url, "application/json", data)
|
||||
headers := opts.getHeaders()
|
||||
headers.Set("Content-Type", "application/json")
|
||||
_, statusCode := app.cli.Post(t, url, data, headers)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ func (app *Vmstorage) ForceFlush(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
forceFlushURL := fmt.Sprintf("http://%s/internal/force_flush", app.httpListenAddr)
|
||||
_, statusCode := app.cli.Get(t, forceFlushURL)
|
||||
_, statusCode := app.cli.Get(t, forceFlushURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
@@ -88,7 +88,7 @@ func (app *Vmstorage) ForceMerge(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
forceMergeURL := fmt.Sprintf("http://%s/internal/force_merge", app.httpListenAddr)
|
||||
_, statusCode := app.cli.Get(t, forceMergeURL)
|
||||
_, statusCode := app.cli.Get(t, forceMergeURL, nil)
|
||||
if statusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
|
||||
}
|
||||
@@ -101,7 +101,7 @@ func (app *Vmstorage) ForceMerge(t *testing.T) {
|
||||
func (app *Vmstorage) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
|
||||
t.Helper()
|
||||
|
||||
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), "", nil)
|
||||
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), nil, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
@@ -127,7 +127,7 @@ func (app *Vmstorage) SnapshotList(t *testing.T) *SnapshotListResponse {
|
||||
t.Helper()
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/snapshot/list", app.httpListenAddr)
|
||||
data, statusCode := app.cli.Get(t, queryURL)
|
||||
data, statusCode := app.cli.Get(t, queryURL, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
@@ -173,7 +173,7 @@ func (app *Vmstorage) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse
|
||||
t.Helper()
|
||||
|
||||
queryURL := fmt.Sprintf("http://%s/snapshot/delete_all", app.httpListenAddr)
|
||||
data, statusCode := app.cli.Post(t, queryURL, "", nil)
|
||||
data, statusCode := app.cli.Post(t, queryURL, nil, nil)
|
||||
if got, want := statusCode, http.StatusOK; got != want {
|
||||
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
|
||||
}
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
# see https://docs.codecov.com/docs/common-recipe-list#set-non-blocking-status-checks
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
informational: true
|
||||
patch:
|
||||
default:
|
||||
informational: true
|
||||
@@ -151,7 +151,7 @@ Some alerting rules thresholds are just recommendations and could require an adj
|
||||
The list of alerting rules is the following:
|
||||
* [alerts-health.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-health.yml):
|
||||
alerting rules related to all VictoriaMetrics components for tracking their "health" state;
|
||||
* [alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts.yml):
|
||||
* [alerts-single-node.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-single-node.yml):
|
||||
alerting rules related to [single-server VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) installation;
|
||||
* [alerts-cluster.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-cluster.yml):
|
||||
alerting rules related to [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/);
|
||||
|
||||
@@ -3,7 +3,7 @@ services:
|
||||
# It scrapes targets defined in --promscrape.config
|
||||
# And forward them to --remoteWrite.url
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.139.0
|
||||
image: victoriametrics/vmagent:v1.140.0
|
||||
depends_on:
|
||||
- "vmauth"
|
||||
ports:
|
||||
@@ -42,14 +42,14 @@ services:
|
||||
# vmstorage shards. Each shard receives 1/N of all metrics sent to vminserts,
|
||||
# where N is number of vmstorages (2 in this case).
|
||||
vmstorage-1:
|
||||
image: victoriametrics/vmstorage:v1.139.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.140.0-cluster
|
||||
volumes:
|
||||
- strgdata-1:/storage
|
||||
command:
|
||||
- "--storageDataPath=/storage"
|
||||
restart: always
|
||||
vmstorage-2:
|
||||
image: victoriametrics/vmstorage:v1.139.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.140.0-cluster
|
||||
volumes:
|
||||
- strgdata-2:/storage
|
||||
command:
|
||||
@@ -59,7 +59,7 @@ services:
|
||||
# vminsert is ingestion frontend. It receives metrics pushed by vmagent,
|
||||
# pre-process them and distributes across configured vmstorage shards.
|
||||
vminsert-1:
|
||||
image: victoriametrics/vminsert:v1.139.0-cluster
|
||||
image: victoriametrics/vminsert:v1.140.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -68,7 +68,7 @@ services:
|
||||
- "--storageNode=vmstorage-2:8400"
|
||||
restart: always
|
||||
vminsert-2:
|
||||
image: victoriametrics/vminsert:v1.139.0-cluster
|
||||
image: victoriametrics/vminsert:v1.140.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -80,7 +80,7 @@ services:
|
||||
# vmselect is a query fronted. It serves read queries in MetricsQL or PromQL.
|
||||
# vmselect collects results from configured `--storageNode` shards.
|
||||
vmselect-1:
|
||||
image: victoriametrics/vmselect:v1.139.0-cluster
|
||||
image: victoriametrics/vmselect:v1.140.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -90,7 +90,7 @@ services:
|
||||
- "--vmalert.proxyURL=http://vmalert:8880"
|
||||
restart: always
|
||||
vmselect-2:
|
||||
image: victoriametrics/vmselect:v1.139.0-cluster
|
||||
image: victoriametrics/vmselect:v1.140.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
@@ -105,7 +105,7 @@ services:
|
||||
# read requests from Grafana, vmui, vmalert among vmselects.
|
||||
# It can be used as an authentication proxy.
|
||||
vmauth:
|
||||
image: victoriametrics/vmauth:v1.139.0
|
||||
image: victoriametrics/vmauth:v1.140.0
|
||||
depends_on:
|
||||
- "vmselect-1"
|
||||
- "vmselect-2"
|
||||
@@ -119,13 +119,13 @@ services:
|
||||
|
||||
# vmalert executes alerting and recording rules
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.139.0
|
||||
image: victoriametrics/vmalert:v1.140.0
|
||||
depends_on:
|
||||
- "vmauth"
|
||||
ports:
|
||||
- 8880:8880
|
||||
volumes:
|
||||
- ./rules/alerts-cluster.yml:/etc/alerts/alerts.yml
|
||||
- ./rules/alerts-cluster.yml:/etc/alerts/alerts-cluster.yml
|
||||
- ./rules/alerts-health.yml:/etc/alerts/alerts-health.yml
|
||||
- ./rules/alerts-vmagent.yml:/etc/alerts/alerts-vmagent.yml
|
||||
- ./rules/alerts-vmalert.yml:/etc/alerts/alerts-vmalert.yml
|
||||
|
||||
@@ -3,7 +3,7 @@ services:
|
||||
# It scrapes targets defined in --promscrape.config
|
||||
# And forward them to --remoteWrite.url
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.139.0
|
||||
image: victoriametrics/vmagent:v1.140.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -18,7 +18,7 @@ services:
|
||||
# VictoriaMetrics instance, a single process responsible for
|
||||
# storing metrics and serve read requests.
|
||||
victoriametrics:
|
||||
image: victoriametrics/victoria-metrics:v1.139.0
|
||||
image: victoriametrics/victoria-metrics:v1.140.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
- 8089:8089
|
||||
@@ -59,14 +59,14 @@ services:
|
||||
|
||||
# vmalert executes alerting and recording rules
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.139.0
|
||||
image: victoriametrics/vmalert:v1.140.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
- "alertmanager"
|
||||
ports:
|
||||
- 8880:8880
|
||||
volumes:
|
||||
- ./rules/alerts.yml:/etc/alerts/alerts.yml
|
||||
- ./rules/alerts-single-node.yml:/etc/alerts/alerts-single-node.yml
|
||||
- ./rules/alerts-health.yml:/etc/alerts/alerts-health.yml
|
||||
- ./rules/alerts-vmagent.yml:/etc/alerts/alerts-vmagent.yml
|
||||
- ./rules/alerts-vmalert.yml:/etc/alerts/alerts-vmalert.yml
|
||||
|
||||
@@ -170,3 +170,57 @@ groups:
|
||||
is saturated by more than 90% and vminsert won't be able to keep up.\n
|
||||
This usually means that more vminsert or vmstorage nodes must be added to the cluster in order to increase
|
||||
the total number of vminsert -> vmstorage links."
|
||||
|
||||
- alert: MetadataCacheUtilizationIsTooHigh
|
||||
expr: |
|
||||
vm_metrics_metadata_storage_size_bytes / vm_metrics_metadata_storage_max_size_bytes > 0.95
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Metadata cache capacity on {{ $labels.instance }} (job={{ $labels.job }}) is utilized for more than 95% for the last 15min"
|
||||
description: "Metadata cache stores meta information about ingested time series - see https://docs.victoriametrics.com/victoriametrics/#metrics-metadata.
|
||||
When cache is overutilized, the oldest entries will be dropped out automatically. It may result into incomplete
|
||||
response for /api/v1/metadata API calls. It doesn't impact regular queries or alerts. Cache size is controlled
|
||||
via -storage.maxMetadataStorageSize cmd-line flag."
|
||||
|
||||
- alert: MetricNameStatsCacheUtilizationIsTooHigh
|
||||
expr: |
|
||||
vm_cache_size_bytes{type="storage/metricNamesStatsTracker"} / vm_cache_size_max_bytes{type="storage/metricNamesStatsTracker"} > 0.95
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Cache capacity for tracking metric names usage on {{ $labels.instance }} (job={{ $labels.job }}) is utilized for more than 95% during the last 15min"
|
||||
description: "Metric names usage cache stores information about unique metric names and how frequently they are queried - see https://docs.victoriametrics.com/victoriametrics/#track-ingested-metrics-usage.
|
||||
When cache is overutilized, it will stop tracking the new metric names. It has no other negative impact.
|
||||
Usually, the number of unique metric names is very limited (thousands). The cache can be overutilized only if metric names
|
||||
are changing too frequently or if the cache size is too low. There are following ways to mitigate cache overutilization:
|
||||
- disable cache via `--storage.trackMetricNamesStats=false` flag, so metric names usage will stop tracking
|
||||
- increase the cache size via `--storage.cacheSizeMetricNamesStats` flag
|
||||
- reset the cache (see docs for details)"
|
||||
|
||||
- alert: IndexDBRecordsDrop
|
||||
expr: increase(vm_indexdb_items_dropped_total[5m]) > 0
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "IndexDB skipped registering items during data ingestion with reason={{ $labels.reason }}."
|
||||
description: |
|
||||
VictoriaMetrics could skip registering new timeseries during ingestion if they fail the validation process.
|
||||
For example, `reason=too_long_item` means that time series cannot exceed 64KB. Please, reduce the number
|
||||
of labels or label values for such series. Or enforce these limits via `-maxLabelsPerTimeseries` and
|
||||
`-maxLabelValueLen` command-line flags.
|
||||
|
||||
- alert: TooManyTSIDMisses
|
||||
expr: increase(vm_missing_tsids_for_metric_id_total[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Unexpected TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes"
|
||||
description: |
|
||||
Unexpected TSID misses for \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes.
|
||||
If this happens after unclean shutdown of VictoriaMetrics process (via \"kill -9\", OOM or power off),
|
||||
then this is OK - the alert must go away in a few minutes after the restart.
|
||||
Otherwise this may point to the corruption of index data.
|
||||
@@ -82,19 +82,6 @@ groups:
|
||||
Check the logs for the given target. Check also the \"location\" label at the vm_log_messages_total metric if -loggerLevel command-line flag is set to value other than INFO.
|
||||
This label contains code locations responsible for generating log messages suppressed by -loggerLevel.
|
||||
|
||||
- alert: TooManyTSIDMisses
|
||||
expr: increase(vm_missing_tsids_for_metric_id_total[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Unexpected TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes"
|
||||
description: |
|
||||
Unexpected TSID misses for \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes.
|
||||
If this happens after unclean shutdown of VictoriaMetrics process (via \"kill -9\", OOM or power off),
|
||||
then this is OK - the alert must go away in a few minutes after the restart.
|
||||
Otherwise this may point to the corruption of index data.
|
||||
|
||||
- alert: ConcurrentInsertsHitTheLimit
|
||||
expr: avg_over_time(vm_concurrent_insert_current[1m]) >= vm_concurrent_insert_capacity
|
||||
for: 15m
|
||||
@@ -109,28 +96,6 @@ groups:
|
||||
making write attempts. If vmagent's or vminsert's CPU usage and network saturation are at normal level, then
|
||||
it might be worth adjusting `-maxConcurrentInserts` cmd-line flag.
|
||||
|
||||
- alert: IndexDBRecordsDrop
|
||||
expr: increase(vm_indexdb_items_dropped_total[5m]) > 0
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "IndexDB skipped registering items during data ingestion with reason={{ $labels.reason }}."
|
||||
description: |
|
||||
VictoriaMetrics could skip registering new timeseries during ingestion if they fail the validation process.
|
||||
For example, `reason=too_long_item` means that time series cannot exceed 64KB. Please, reduce the number
|
||||
of labels or label values for such series. Or enforce these limits via `-maxLabelsPerTimeseries` and
|
||||
`-maxLabelValueLen` command-line flags.
|
||||
|
||||
- alert: RowsRejectedOnIngestion
|
||||
expr: rate(vm_rows_ignored_total[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Some rows are rejected on \"{{ $labels.instance }}\" on ingestion attempt"
|
||||
description: "Ingested rows on instance \"{{ $labels.instance }}\" are rejected due to the
|
||||
following reason: \"{{ $labels.reason }}\""
|
||||
|
||||
- alert: TooHighQueryLoad
|
||||
expr: increase(vm_concurrent_select_limit_timeout_total[5m]) > 0
|
||||
for: 15m
|
||||
@@ -148,3 +113,14 @@ groups:
|
||||
* increase compute resources or number of replicas;
|
||||
* adjust limits `-search.maxConcurrentRequests` and `-search.maxQueueDuration`.
|
||||
See more at https://docs.victoriametrics.com/victoriametrics/troubleshooting/#slow-queries.
|
||||
|
||||
- alert: RowsRejectedOnIngestion
|
||||
expr: rate(vm_rows_ignored_total[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Some rows are rejected on \"{{ $labels.instance }}\" on ingestion attempt"
|
||||
description: "Ingested rows on instance \"{{ $labels.instance }}\" are rejected due to the
|
||||
following reason: \"{{ $labels.reason }}\""
|
||||
|
||||
|
||||
@@ -148,4 +148,45 @@ groups:
|
||||
description: "Metadata cache stores meta information about ingested time series - see https://docs.victoriametrics.com/victoriametrics/#metrics-metadata.
|
||||
When cache is overutilized, the oldest entries will be dropped out automatically. It may result into incomplete
|
||||
response for /api/v1/metadata API calls. It doesn't impact regular queries or alerts. Cache size is controlled
|
||||
via -storage.maxMetadataStorageSize cmd-line flag."
|
||||
via -storage.maxMetadataStorageSize cmd-line flag."
|
||||
|
||||
- alert: MetricNameStatsCacheUtilizationIsTooHigh
|
||||
expr: |
|
||||
vm_cache_size_bytes{type="storage/metricNamesStatsTracker"} / vm_cache_size_max_bytes{type="storage/metricNamesStatsTracker"} > 0.95
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Cache capacity for tracking metric names usage on {{ $labels.instance }} (job={{ $labels.job }}) is utilized for more than 95% during the last 15min"
|
||||
description: "Metric names usage cache stores information about unique metric names and how frequently they are queried - see https://docs.victoriametrics.com/victoriametrics/#track-ingested-metrics-usage.
|
||||
When cache is overutilized, it will stop tracking the new metric names. It has no other negative impact.
|
||||
Usually, the number of unique metric names is very limited (thousands). The cache can be overutilized only if metric names
|
||||
are changing too frequently or if the cache size is too low. There are following ways to mitigate cache overutilization:
|
||||
- disable cache via `--storage.trackMetricNamesStats=false` flag, so metric names usage will stop tracking
|
||||
- increase the cache size via `--storage.cacheSizeMetricNamesStats` flag
|
||||
- reset the cache (see docs for details)"
|
||||
|
||||
- alert: IndexDBRecordsDrop
|
||||
expr: increase(vm_indexdb_items_dropped_total[5m]) > 0
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "IndexDB skipped registering items during data ingestion with reason={{ $labels.reason }}."
|
||||
description: |
|
||||
VictoriaMetrics could skip registering new timeseries during ingestion if they fail the validation process.
|
||||
For example, `reason=too_long_item` means that time series cannot exceed 64KB. Please, reduce the number
|
||||
of labels or label values for such series. Or enforce these limits via `-maxLabelsPerTimeseries` and
|
||||
`-maxLabelValueLen` command-line flags.
|
||||
|
||||
- alert: TooManyTSIDMisses
|
||||
expr: increase(vm_missing_tsids_for_metric_id_total[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Unexpected TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes"
|
||||
description: |
|
||||
Unexpected TSID misses for \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes.
|
||||
If this happens after unclean shutdown of VictoriaMetrics process (via \"kill -9\", OOM or power off),
|
||||
then this is OK - the alert must go away in a few minutes after the restart.
|
||||
Otherwise this may point to the corruption of index data.
|
||||
@@ -1,6 +1,6 @@
|
||||
services:
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.139.0
|
||||
image: victoriametrics/vmagent:v1.140.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -14,7 +14,7 @@ services:
|
||||
restart: always
|
||||
|
||||
victoriametrics:
|
||||
image: victoriametrics/victoria-metrics:v1.139.0
|
||||
image: victoriametrics/victoria-metrics:v1.140.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
@@ -40,7 +40,7 @@ services:
|
||||
restart: always
|
||||
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.139.0
|
||||
image: victoriametrics/vmalert:v1.140.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -59,7 +59,7 @@ services:
|
||||
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr": },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
|
||||
restart: always
|
||||
vmanomaly:
|
||||
image: victoriametrics/vmanomaly:v1.29.2
|
||||
image: victoriametrics/vmanomaly:v1.29.3
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
||||
@@ -41,18 +41,8 @@ docs-debug: docs docs-image
|
||||
$(foreach dir,$(wildcard ./docs/$(dir)/*), -v ./docs/$(notdir $(dir)):/opt/docs/content/$(notdir $(dir))) \
|
||||
vmdocs-docker-package
|
||||
|
||||
docs-update-version: docs-image
|
||||
$(if $(filter v%,$(PKG_TAG)), \
|
||||
docker run \
|
||||
--rm \
|
||||
--entrypoint /usr/bin/find \
|
||||
--platform $(DOCKER_PLATFORM) \
|
||||
--name vmdocs-docker-container \
|
||||
-v ./docs:/opt/docs/content/victoriametrics vmdocs-docker-package \
|
||||
content \
|
||||
-regex ".*\.md" \
|
||||
-exec sed -i 's/{{% available_from "#" %}}/{{% available_from "$(PKG_TAG)" %}}/g' {} \;, \
|
||||
$(info "Skipping docs version update, invalid $$PKG_TAG: $(PKG_TAG)"))
|
||||
docs-update-version:
|
||||
find docs/victoriametrics/ -name '*.md' -exec sed -i 's/{{% available_from "#" %}}/{{% available_from "$(TAG)" %}}/g' {} \;
|
||||
|
||||
# Converts images at docs folder to webp format
|
||||
# See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#images-in-documentation
|
||||
@@ -342,4 +332,4 @@ endif
|
||||
$(MAKE) docs-update-vmagent-flags && git checkout "$$orig_branch" && \
|
||||
$(MAKE) docs-update-vmselect-flags && git checkout "$$orig_branch" && \
|
||||
$(MAKE) docs-update-vminsert-flags && git checkout "$$orig_branch" && \
|
||||
$(MAKE) docs-update-vmstorage-flags && git checkout "$$orig_branch"
|
||||
$(MAKE) docs-update-vmstorage-flags && git checkout "$$orig_branch"
|
||||
|
||||
@@ -14,6 +14,11 @@ aliases:
|
||||
---
|
||||
Please find the changelog for VictoriaMetrics Anomaly Detection below.
|
||||
|
||||
## v1.29.3
|
||||
Released: 2026-04-16
|
||||
|
||||
- UI: Updated [vmanomaly UI](https://docs.victoriametrics.com/anomaly-detection/ui/) from [v1.6.0](https://docs.victoriametrics.com/anomaly-detection/ui/#v160) to [v1.6.1](https://docs.victoriametrics.com/anomaly-detection/ui/#v161), see respective [release notes](https://docs.victoriametrics.com/anomaly-detection/ui/#v161) for details.
|
||||
|
||||
## v1.29.2
|
||||
Released: 2026-04-02
|
||||
|
||||
|
||||
@@ -48,13 +48,15 @@ Please see example graph illustrating this logic below:
|
||||
|
||||
## What data does vmanomaly operate on?
|
||||
|
||||
`vmanomaly` operates on timeseries (metrics) data, and supports both **VictoriaMetrics** and **VictoriaLogs** as data sources. Choose the source depending on the use case.
|
||||
> [!NOTE]
|
||||
> `vmanomaly` operates on timeseries (metrics) data, and supports both **VictoriaMetrics** and **VictoriaLogs/VictoriaTraces** as data sources to get metrics-compatible data. Choose the source depending on the use case. Single-node / Cluster and OpenSource / Enterprise datasources are supported as well, `vmanomaly` is compatible with both, yet itself requires an [Enterprise license](https://victoriametrics.com/products/enterprise/) to run.
|
||||
|
||||
**VictoriaMetrics (metrics):** use full [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) for selection, sampling, and processing; [global filters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#prometheus-querying-api-enhancements) are also supported. See the [VmReader](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) for the details.
|
||||
|
||||
**VictoriaLogs (logs → metrics):** {{% available_from "v1.26.0" anomaly %}} use [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) via the [`VLogsReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vlogs-reader) to create log-derived metrics for anomaly detection (e.g., error rates, request latencies).
|
||||
**VictoriaLogs (logs → metrics):** {{% available_from "v1.26.0" anomaly %}} use [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) via the [`VLogsReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vlogs-reader) to create log-derived or traces-derived metrics for anomaly detection (e.g., error rates, request latencies, error spans count).
|
||||
|
||||
> Please note that only LogsQL queries with [stats pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe) functions [subset](https://docs.victoriametrics.com/anomaly-detection/components/reader/#valid-stats-functions) are supported, as they produce **numeric** time series.
|
||||
> [!NOTE]
|
||||
> Please note that only LogsQL queries with [stats pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe) functions [subset](https://docs.victoriametrics.com/anomaly-detection/components/reader/#valid-stats-functions) are supported, as they produce **numeric** time series.
|
||||
|
||||
|
||||
## Using offsets
|
||||
@@ -421,7 +423,7 @@ services:
|
||||
# ...
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.29.2
|
||||
image: victoriametrics/vmanomaly:v1.29.3
|
||||
# ...
|
||||
restart: always
|
||||
volumes:
|
||||
@@ -639,7 +641,7 @@ options:
|
||||
Here’s an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.29.2 && docker image tag victoriametrics/vmanomaly:v1.29.2 vmanomaly
|
||||
docker pull victoriametrics/vmanomaly:v1.29.3 && docker image tag victoriametrics/vmanomaly:v1.29.3 vmanomaly
|
||||
```
|
||||
|
||||
```sh
|
||||
|
||||
@@ -45,8 +45,8 @@ There are 2 types of compatibility to consider when migrating in stateful mode:
|
||||
|
||||
| Group start | Group end | Compatibility | Notes |
|
||||
|---------|--------- |------------|-------|
|
||||
| [v1.29.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1292) | Latest* | Fully Compatible | Just a placeholder for new releases |
|
||||
| [v1.29.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1291) | [v1.29.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1292) | Fully Compatible | - |
|
||||
| [v1.29.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1293) | Latest* | Fully Compatible | Just a placeholder for new releases |
|
||||
| [v1.29.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1291) | [v1.29.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1293) | Fully Compatible | - |
|
||||
| [v1.28.7](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1287) | [v1.29.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1290) | Partially compatible* | Dumped models of class [prophet](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) and [seasonal quantile](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-seasonal-quantile) have problems with loading to [v1.29.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1290) due to dropped `pytz` library. **Upgrading directly from v1.28.7 to [v1.29.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1291) with a fix is suggested** |
|
||||
| [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1260) | [v1.28.7](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1287) | Fully Compatible | [v1.28.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1280) introduced [rolling](https://docs.victoriametrics.com/anomaly-detection/components/models/#rolling-models) model class drop in favor of [online](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models) models (`rolling_quantile` and `std` models), however, it does not impact compatibility, as artifacts were not produced by default for rolling models. Also, offline `mad` and `zscore` models are redirecting to their respective online counterparts since [v1.28.4](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1284). |
|
||||
| [v1.25.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1253) | [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1260) | Partially Compatible* | [v1.25.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1253) introduced `forecast_at` argument for base [univariate](https://docs.victoriametrics.com/anomaly-detection/components/models/#univariate-models) and `Prophet` [models](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet), however, itself remains backward-reversible from newer states like [v1.26.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1262), [v1.27.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1270). (All models except `isolation_forest_multivariate` class will be dropped) |
|
||||
@@ -81,4 +81,4 @@ In stateless mode, the migration process is almost straightforward as there are
|
||||
# Other VmReader settings...
|
||||
sampling_period: 1m
|
||||
...
|
||||
```
|
||||
```
|
||||
|
||||
@@ -122,7 +122,7 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
|
||||
1. Pull Docker image:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.29.2
|
||||
docker pull victoriametrics/vmanomaly:v1.29.3
|
||||
```
|
||||
|
||||
2. Create the license file with your license key.
|
||||
@@ -142,7 +142,7 @@ docker run -it \
|
||||
-v ./license:/license \
|
||||
-v ./config.yaml:/config.yaml \
|
||||
-p 8490:8490 \
|
||||
victoriametrics/vmanomaly:v1.29.2 \
|
||||
victoriametrics/vmanomaly:v1.29.3 \
|
||||
/config.yaml \
|
||||
--licenseFile=/license \
|
||||
--loggerLevel=INFO \
|
||||
@@ -159,7 +159,7 @@ docker run -it \
|
||||
-e VMANOMALY_DATA_DUMPS_DIR=/tmp/vmanomaly/data \
|
||||
-e VMANOMALY_MODEL_DUMPS_DIR=/tmp/vmanomaly/models \
|
||||
-p 8490:8490 \
|
||||
victoriametrics/vmanomaly:v1.29.2 \
|
||||
victoriametrics/vmanomaly:v1.29.3 \
|
||||
/config.yaml \
|
||||
--licenseFile=/license \
|
||||
--loggerLevel=INFO \
|
||||
@@ -172,7 +172,7 @@ services:
|
||||
# ...
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.29.2
|
||||
image: victoriametrics/vmanomaly:v1.29.3
|
||||
# ...
|
||||
restart: always
|
||||
volumes:
|
||||
|
||||
@@ -9,14 +9,17 @@ sitemap:
|
||||
|
||||
In today's fast-paced and complex landscape of system monitoring, [VictoriaMetrics Anomaly Detection](https://victoriametrics.com/products/enterprise/anomaly-detection/) (`vmanomaly`), a part of our [Enterprise offering](https://victoriametrics.com/products/enterprise/), serves as an **observability layer** for SREs and DevOps teams atop of collected data to **automate the detection of anomalies in time-series data**, reducing manual efforts required to identify abnormal system behavior.
|
||||
|
||||
Unlike traditional threshold-based alerting, which relies on **raw metric values** and requires constant tuning and maintenance of thresholds and alerting rules, `vmanomaly` introduces a **unified, interpretable [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score)** - a **de-trended, de-seasonalized metric** generated through machine learning. This approach eliminates the need for frequent manual adjustments by enabling **stable, long-term static thresholds (as simple as `anomaly_score > 1`)** that remain effective over time through continuous model retraining.
|
||||
Unlike traditional threshold-based alerting, which relies on **raw metric values** and requires constant tuning and maintenance of thresholds and alerting rules, `vmanomaly` introduces a **unified, interpretable [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score)** - a **de-trended, de-seasonalized metric** generated through machine learning. This approach eliminates the need for frequent manual adjustments by enabling **stable, long-term static thresholds (as simple as `anomaly_score > 1`)** that remain effective over time through continuous model retraining and updates.
|
||||
|
||||
By shifting to anomaly-based detection, teams can **identify and respond to potential issues faster**, enhancing system reliability and operational efficiency while significantly **reducing the engineering effort spent on handcrafting and maintaining alerting rules**.
|
||||
|
||||
|
||||
## What does it do?
|
||||
|
||||
`vmanomaly` is designed to **periodically analyze new data points** across selected metrics (either requested from [VictoriaMetrics TSDB](https://docs.victoriametrics.com/victoriametrics/) or produced by [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/) metrics [endpoint](https://docs.victoriametrics.com/victorialogs/querying/#querying-log-range-stats)), generating a **unified metric** called [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score).
|
||||
`vmanomaly` is designed to **periodically analyze new data points** across selected metrics - either requested from [VictoriaMetrics TSDB](https://docs.victoriametrics.com/victoriametrics/) or produced by [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/) or [VictoriaTraces](https://docs.victoriametrics.com/victoriatraces/) metrics [endpoint](https://docs.victoriametrics.com/victorialogs/querying/#querying-log-range-stats) - to generate a **unified metric** called [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score).
|
||||
|
||||
> [!NOTE]
|
||||
> `vmanomaly` can use both single-node and cluster versions of VictoriaMetrics/VictoriaLogs/VictoriaTraces as a data source, and is compatible with both OpenSource and Enterprise versions of it. However, `vmanomaly` itself requires an Enterprise license to run, and is part of our [Enterprise offering](https://victoriametrics.com/products/enterprise/).
|
||||
|
||||
Key functions:
|
||||
- **Automated anomaly detection** - continuously scans time-series data to identify deviations from expected behavior.
|
||||
|
||||
@@ -315,7 +315,7 @@ docker run -it --rm \
|
||||
-e VMANOMALY_MCP_SERVER_URL=http://mcp-vmanomaly:8081/mcp \
|
||||
-p 8080:8080 \
|
||||
-p 8490:8490 \
|
||||
victoriametrics/vmanomaly:v1.29.2 \
|
||||
victoriametrics/vmanomaly:v1.29.3 \
|
||||
vmanomaly_config.yaml
|
||||
```
|
||||
|
||||
@@ -640,6 +640,23 @@ If the **results** look good and the **model configuration should be deployed in
|
||||
|
||||
## Changelog
|
||||
|
||||
### v1.6.1
|
||||
Released: 2026-04-16
|
||||
|
||||
vmanomaly version: [v1.29.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1293)
|
||||
|
||||
- IMPROVEMENT: Consecutive anomalies (when "streaks" option is enabled) are now grouped in the Visualization Panel as a single anomaly line instead of multiple dots for reduced visual noise and better representation of prolonged anomalous periods, while still showing the exact anomaly score and labels on hover.
|
||||
|
||||
- IMPROVEMENT: Raw query results now refresh automatically after time range changes; however, anomaly detection results are preserved until the "Detect Anomalies" button is clicked again, to avoid recalculating anomalies on the new time range without explicit user action, which could be costly if the new time range is large and the model is complex.
|
||||
|
||||
- IMPROVEMENT: Table legend view is now enabled by default, making sorting and filtering available out of the box.
|
||||
|
||||
- BUGFIX: Generated config and example alert outputs now preserve configured fit/infer values correctly and avoid invalid float-based duration strings in generated YAML, which could lead to data validation errors if copied to production configuration without adjustments.
|
||||
|
||||
- BUGFIX: Fixed multiple confusing anomaly UI behaviors around scheduler fields (fit_every, infer_every) and generated artifacts.
|
||||
|
||||
- BUGFIX: Chart y-axis range is now updating after legend series selection (regression introduced in v1.6.0).
|
||||
|
||||
### v1.6.0
|
||||
Released: 2026-04-02
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 51 KiB After Width: | Height: | Size: 357 KiB |
|
Before Width: | Height: | Size: 54 KiB After Width: | Height: | Size: 467 KiB |
|
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 188 KiB |
@@ -395,7 +395,7 @@ services:
|
||||
restart: always
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.29.2
|
||||
image: victoriametrics/vmanomaly:v1.29.3
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
||||
|
Before Width: | Height: | Size: 25 KiB After Width: | Height: | Size: 234 KiB |
|
Before Width: | Height: | Size: 106 KiB After Width: | Height: | Size: 282 KiB |
|
Before Width: | Height: | Size: 181 KiB After Width: | Height: | Size: 1.0 MiB |
|
Before Width: | Height: | Size: 164 KiB After Width: | Height: | Size: 929 KiB |
|
Before Width: | Height: | Size: 104 KiB After Width: | Height: | Size: 563 KiB |
|
Before Width: | Height: | Size: 331 KiB After Width: | Height: | Size: 871 KiB |
|
Before Width: | Height: | Size: 93 KiB After Width: | Height: | Size: 310 KiB |
|
Before Width: | Height: | Size: 122 KiB After Width: | Height: | Size: 944 KiB |
|
Before Width: | Height: | Size: 2.6 KiB After Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 303 KiB |
|
Before Width: | Height: | Size: 140 KiB After Width: | Height: | Size: 681 KiB |
|
Before Width: | Height: | Size: 109 KiB After Width: | Height: | Size: 805 KiB |
|
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 111 KiB |
|
Before Width: | Height: | Size: 160 KiB After Width: | Height: | Size: 1.2 MiB |
|
Before Width: | Height: | Size: 236 KiB After Width: | Height: | Size: 189 KiB |
|
Before Width: | Height: | Size: 26 KiB After Width: | Height: | Size: 206 KiB |
|
Before Width: | Height: | Size: 26 KiB After Width: | Height: | Size: 206 KiB |
|
Before Width: | Height: | Size: 1.5 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 53 KiB After Width: | Height: | Size: 206 KiB |
|
Before Width: | Height: | Size: 42 KiB After Width: | Height: | Size: 428 KiB |
|
Before Width: | Height: | Size: 9.7 KiB After Width: | Height: | Size: 37 KiB |
|
Before Width: | Height: | Size: 34 KiB After Width: | Height: | Size: 740 KiB |
|
Before Width: | Height: | Size: 2.4 KiB After Width: | Height: | Size: 11 KiB |
|
Before Width: | Height: | Size: 35 KiB After Width: | Height: | Size: 225 KiB |
|
Before Width: | Height: | Size: 98 KiB After Width: | Height: | Size: 189 KiB |
|
Before Width: | Height: | Size: 11 KiB After Width: | Height: | Size: 26 KiB |
|
Before Width: | Height: | Size: 25 KiB After Width: | Height: | Size: 88 KiB |
|
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 160 KiB |
|
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 93 KiB |
|
Before Width: | Height: | Size: 80 KiB After Width: | Height: | Size: 425 KiB |
|
Before Width: | Height: | Size: 53 KiB After Width: | Height: | Size: 514 KiB |
|
Before Width: | Height: | Size: 32 KiB After Width: | Height: | Size: 193 KiB |
|
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 64 KiB |
@@ -238,23 +238,23 @@ vmagent will write data into VictoriaMetrics single-node and cluster (with tenan
|
||||
# compose.yaml
|
||||
services:
|
||||
vmsingle:
|
||||
image: victoriametrics/victoria-metrics:v1.139.0
|
||||
image: victoriametrics/victoria-metrics:v1.140.0
|
||||
|
||||
vmstorage:
|
||||
image: victoriametrics/vmstorage:v1.139.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.140.0-cluster
|
||||
|
||||
vminsert:
|
||||
image: victoriametrics/vminsert:v1.139.0-cluster
|
||||
image: victoriametrics/vminsert:v1.140.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8400
|
||||
|
||||
vmselect:
|
||||
image: victoriametrics/vmselect:v1.139.0-cluster
|
||||
image: victoriametrics/vmselect:v1.140.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8401
|
||||
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.139.0
|
||||
image: victoriametrics/vmagent:v1.140.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
command:
|
||||
@@ -306,7 +306,7 @@ Now add the vmauth service to `compose.yaml`:
|
||||
# compose.yaml
|
||||
services:
|
||||
vmauth:
|
||||
image: docker.io/victoriametrics/vmauth:v1.139.0
|
||||
image: docker.io/victoriametrics/vmauth:v1.140.0
|
||||
ports:
|
||||
- 8427:8427
|
||||
volumes:
|
||||
@@ -420,7 +420,7 @@ Create two Prometheus datasources in Grafana with the following URLs: `http://vm
|
||||

|
||||
|
||||
You can also use the VictoriaMetrics [Grafana datasource](https://github.com/VictoriaMetrics/victoriametrics-datasource) plugin.
|
||||
See installation instructions in [Grafana datasource - Installation](https://docs.victoriametrics.com/victoriametrics/victoriametrics-datasource/#installation).
|
||||
See installation instructions in [Grafana datasource - Installation](https://docs.victoriametrics.com/victoriametrics/integrations/grafana/#victoriametrics-datasource).
|
||||
|
||||
Users with the `vm_access` claim will be able to query metrics from the specified tenant with extra filters applied.
|
||||
|
||||
|
||||
@@ -155,15 +155,15 @@ These services will store and query the metrics scraped by vmagent.
|
||||
# compose.yaml
|
||||
services:
|
||||
vmstorage:
|
||||
image: victoriametrics/vmstorage:v1.139.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.140.0-cluster
|
||||
|
||||
vminsert:
|
||||
image: victoriametrics/vminsert:v1.139.0-cluster
|
||||
image: victoriametrics/vminsert:v1.140.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8400
|
||||
|
||||
vmselect:
|
||||
image: victoriametrics/vmselect:v1.139.0-cluster
|
||||
image: victoriametrics/vmselect:v1.140.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8401
|
||||
ports:
|
||||
@@ -196,7 +196,7 @@ Add the vmauth service to `compose.yaml`:
|
||||
# compose.yaml
|
||||
services:
|
||||
vmauth:
|
||||
image: victoriametrics/vmauth:v1.139.0-enterprise
|
||||
image: victoriametrics/vmauth:v1.140.0-enterprise
|
||||
ports:
|
||||
- 8427:8427
|
||||
volumes:
|
||||
@@ -251,7 +251,7 @@ Add the vmagent service to `compose.yaml` with OAuth2 configuration:
|
||||
# compose.yaml
|
||||
services:
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.139.0
|
||||
image: victoriametrics/vmagent:v1.140.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
command:
|
||||
|
||||
@@ -107,7 +107,7 @@ The final piece is the Docker Compose file. This ties all the services together
|
||||
# compose.yml
|
||||
services:
|
||||
victoriametrics:
|
||||
image: victoriametrics/victoria-metrics:v1.139.0
|
||||
image: victoriametrics/victoria-metrics:v1.140.0
|
||||
command:
|
||||
- "--storageDataPath=/victoria-metrics-data"
|
||||
- "--selfScrapeInterval=10s"
|
||||
@@ -128,7 +128,7 @@ services:
|
||||
- ./alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
|
||||
|
||||
vmalert:
|
||||
image: victoriametrics/vmalert:v1.139.0
|
||||
image: victoriametrics/vmalert:v1.140.0
|
||||
depends_on:
|
||||
- victoriametrics
|
||||
- alertmanager
|
||||
@@ -198,7 +198,7 @@ If you open the sidebar and select **Alerting** > **Alert rules**, you should be
|
||||
|
||||
Open the sidebar again and go to **Alerting** > **Active notifications** to see the active alert reported by Alertmanager.
|
||||
|
||||

|
||||

|
||||
|
||||
You can also see the alerts in VMUI by opening the browser in `http://localhost:8428/vmui/?#/rules`. This is possible only when we have configured `-vmalert.proxyURL` in VictoriaMetrics.
|
||||
|
||||
|
||||
@@ -10,5 +10,11 @@ tags:
|
||||
- logs
|
||||
- traces
|
||||
- playground
|
||||
aliases:
|
||||
- /playgrounds/victoriametrics/
|
||||
- /playgrounds/victorialogs/
|
||||
- /playgrounds/victoriatraces/
|
||||
- /playgrounds/cloud/
|
||||
- /playgrounds/vmanomaly/
|
||||
---
|
||||
{{% content "README.md" %}}
|
||||
|
||||
@@ -113,6 +113,7 @@ See also [case studies](https://docs.victoriametrics.com/victoriametrics/casestu
|
||||
* [FreeBSD: monitoring with VictoriaMetrics and Grafana](https://setevoy.medium.com/freebsd-monitoring-with-victoriametrics-and-grafana-f789904f2628)
|
||||
* [QCon London 2026: Wrangling Telemetry at Scale, a Guide to Self-Hosted Observability](https://www.infoq.com/news/2026/03/self-hosted-observability/)
|
||||
* [How We Made Telemetry Queries 10x Faster: Chunk-Split Caching for Metrics, Logs, and Traces](https://mirastacklabs.ai/blog/chunk-split-caching/)
|
||||
* [Building a high-volume metrics pipeline with OpenTelemetry and vmagent](https://medium.com/airbnb-engineering/building-a-high-volume-metrics-pipeline-with-opentelemetry-and-vmagent-c714d6910b45)
|
||||
|
||||
## Third-party articles and slides about VictoriaLogs
|
||||
|
||||
|
||||
@@ -62,11 +62,13 @@ Pull requests requirements:
|
||||
1. The pull request must conform to [VictoriaMetrics development goals](https://docs.victoriametrics.com/victoriametrics/goals/).
|
||||
1. Don't use `master` branch for making PRs, as it makes it impossible for reviewers to modify the changes.
|
||||
1. All commits need to be [signed](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits).
|
||||
1. A commit message should contain clear and concise description of what was done and for what purpose.
|
||||
Use the imperative, present tense: "change" not "changed" nor "changes". Read your commit message as "This commit will ...", don't capitalize the first letter.
|
||||
Message should be prefixed with `<dir>/<component>:` to show what component has been changed, i.e. `app/vmalert: fix...`.
|
||||
1. Pull request title should be prefixed with `<dir>/<component>:` to show what component has been changed, i.e. `app/vmalert: fix...`.
|
||||
Pull request description should contain clear and concise description of what was done, why it is needed and for what purpose.
|
||||
Use clear language, so reviewers can quickly understand the change and its impact.
|
||||
1. A link to the issue(s) related to the change, if any. Use `Fixes [issue link]` if the PR resolves the issue, or `Related to [issue link]` for reference.
|
||||
1. Tests proving that the change is effective. See [this style guide](https://itnext.io/f-tests-as-a-replacement-for-table-driven-tests-in-go-8814a8b19e9e) for tests.
|
||||
1. Tests proving that the change is effective. Tests are expected for non-trivial new functionality or non-trivial modifications.
|
||||
Bug fixes must include tests unless a maintainer explicitly agrees otherwise.
|
||||
See [this style guide](https://itnext.io/f-tests-as-a-replacement-for-table-driven-tests-in-go-8814a8b19e9e) for tests.
|
||||
To run tests and code checks locally, execute commands `make test-full` and `make check-all`.
|
||||
1. Try to not extend the scope of the pull requests outside the issue, do not make unrelated changes.
|
||||
1. Update [docs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/docs) if needed. For example, adding a new flag or changing behavior of existing flags or features
|
||||
|
||||
@@ -556,7 +556,7 @@ See more details on [how to monitor VictoriaMetrics components](https://docs.vic
|
||||
- `-storage.maxHourlySeries` is the limit on the number of [active time series](https://docs.victoriametrics.com/victoriametrics/faq/#what-is-an-active-time-series) during the last hour.
|
||||
- `-storage.maxDailySeries` is the limit on the number of unique time series during the day. This limit can be used for limiting daily [time series churn rate](https://docs.victoriametrics.com/victoriametrics/faq/#what-is-high-churn-rate).
|
||||
|
||||
It is possible to use `-1` as a value for these flags{{% available_from "#" %}} in order to enable series tracking but set limit to maximum possible value.
|
||||
It is possible to use `-1` as a value for these flags{{% available_from "v1.140.0" %}} in order to enable series tracking but set limit to maximum possible value.
|
||||
This is useful in order to estimate the number of unique series written to `vmstorage` without enforcing limits.
|
||||
|
||||
Note that these limits are set and applied individually per each `vmstorage` node in the cluster. So, if the cluster has `N` `vmstorage` nodes, then the cluster-level limits will be `N` times bigger than the per-`vmstorage` limits.
|
||||
|
||||
@@ -27,5 +27,5 @@ to [the latest available releases](https://docs.victoriametrics.com/victoriametr
|
||||
|
||||
## Currently supported LTS release lines
|
||||
|
||||
- v1.136.x - the latest one is [v1.136.3 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.136.3)
|
||||
- v1.122.x - the latest one is [v1.122.18 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.18)
|
||||
- v1.136.x - the latest one is [v1.136.4 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.136.4)
|
||||
- v1.122.x - the latest one is [v1.122.19 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.19)
|
||||
|
||||