Compare commits

..

1 Commits

Author SHA1 Message Date
Andrii Chubatiuk
305f1c91f8 lib/{fs,filestream}: use single ParallelExecutor for fs and filestream tasks 2025-12-31 11:51:32 +02:00
147 changed files with 7127 additions and 12751 deletions

View File

@@ -9,14 +9,14 @@ import (
"sync"
"sync/atomic"
"github.com/VictoriaMetrics/metrics"
"gopkg.in/yaml.v2"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"go.yaml.in/yaml/v3"
"github.com/VictoriaMetrics/metrics"
)
var (
@@ -139,7 +139,6 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
remoteWriteRelabelConfigData.Store(&rawCfg)
rcs.global = global
}
if len(*relabelConfigPaths) > len(*remoteWriteURLs) {
return nil, fmt.Errorf("too many -remoteWrite.urlRelabelConfig args: %d; it mustn't exceed the number of -remoteWrite.url args: %d",
len(*relabelConfigPaths), (len(*remoteWriteURLs)))
@@ -177,9 +176,19 @@ type relabelConfigs struct {
perURL []*promrelabel.ParsedConfigs
}
// isSet indicates whether (global or per-URL) command-line flags is set
func (rcs *relabelConfigs) isSet() bool {
return *relabelConfigPathGlobal != "" || len(*relabelConfigPaths) > 0
if rcs == nil {
return false
}
if rcs.global.Len() > 0 {
return true
}
for _, pc := range rcs.perURL {
if pc.Len() > 0 {
return true
}
}
return false
}
// initLabelsGlobal must be called after parsing command-line flags.

View File

@@ -80,15 +80,14 @@ func (as AlertState) String() string {
// AlertTplData is used to execute templating
type AlertTplData struct {
Type string
Labels map[string]string
Value float64
Expr string
AlertID uint64
GroupID uint64
ActiveAt time.Time
For time.Duration
IsPartial bool
Type string
Labels map[string]string
Value float64
Expr string
AlertID uint64
GroupID uint64
ActiveAt time.Time
For time.Duration
}
var tplHeaders = []string{
@@ -102,7 +101,6 @@ var tplHeaders = []string{
"{{ $groupID := .GroupID }}",
"{{ $activeAt := .ActiveAt }}",
"{{ $for := .For }}",
"{{ $isPartial := .IsPartial }}",
}
// ExecTemplate executes the Alert template for given

View File

@@ -346,8 +346,6 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
ls.processed[l.Name] = l.Value
}
// labels only support limited templating variables,
// including `labels`, `value` and `expr`, to avoid breaking alert states or causing cardinality issue with results
extraLabels, err := notifier.ExecTemplate(qFn, ar.Labels, notifier.AlertTplData{
Labels: ls.origin,
Value: m.Values[0],
@@ -389,7 +387,11 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
return nil, err
}
alertID := hash(ls.processed)
a := ar.newAlert(s, time.Time{}, ls.processed, nil) // initial alert
as, err := ar.expandAnnotationTemplates(s, qFn, time.Time{}, ls)
if err != nil {
return nil, err
}
a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert
prevT := time.Time{}
for i := range s.Values {
@@ -405,6 +407,8 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
// reset to Pending if there are gaps > EvalInterval between DPs
a.State = notifier.StatePending
a.ActiveAt = at
// re-template the annotations as active timestamp is changed
a.Annotations, _ = ar.expandAnnotationTemplates(s, qFn, at, ls)
a.Start = time.Time{}
} else if at.Sub(a.ActiveAt) >= ar.For && a.State != notifier.StateFiring {
a.State = notifier.StateFiring
@@ -459,8 +463,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
}
isPartial := isPartialResponse(res)
ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartial)
ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))
qFn := func(query string) ([]datasource.Metric, error) {
res, _, err := ar.q.Query(ctx, query, ts)
return res.Data, err
@@ -486,7 +489,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
at = a.ActiveAt
}
}
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls, isPartial)
as, err := ar.expandAnnotationTemplates(m, qFn, at, ls)
if err != nil {
// only set error in current state, but do not break alert processing
curState.Err = err
@@ -604,17 +607,16 @@ func (ar *AlertingRule) expandLabelTemplates(m datasource.Metric, qFn templates.
return ls, nil
}
func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet, isPartial bool) (map[string]string, error) {
func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templates.QueryFn, activeAt time.Time, ls *labelSet) (map[string]string, error) {
tplData := notifier.AlertTplData{
Value: m.Values[0],
Type: ar.Type.String(),
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: activeAt,
For: ar.For,
IsPartial: isPartial,
Value: m.Values[0],
Type: ar.Type.String(),
Labels: ls.origin,
Expr: ar.Expr,
AlertID: hash(ls.processed),
GroupID: ar.GroupID,
ActiveAt: activeAt,
For: ar.For,
}
as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
if err != nil {

View File

@@ -664,7 +664,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-pending",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-pending"},
Annotations: map[string]string{},
Annotations: map[string]string{"activeAt": "5000"},
State: notifier.StatePending,
ActiveAt: time.Unix(5, 0),
Value: 1,
@@ -684,7 +684,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-firing",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-firing"},
Annotations: map[string]string{},
Annotations: map[string]string{"activeAt": "1000"},
State: notifier.StateFiring,
ActiveAt: time.Unix(1, 0),
Start: time.Unix(5, 0),
@@ -705,7 +705,7 @@ func TestAlertingRuleExecRange(t *testing.T) {
Name: "for-hold-pending",
Type: config.NewPrometheusType().String(),
Labels: map[string]string{"alertname": "for-hold-pending"},
Annotations: map[string]string{},
Annotations: map[string]string{"activeAt": "5000"},
State: notifier.StatePending,
ActiveAt: time.Unix(5, 0),
Value: 1,
@@ -1120,7 +1120,7 @@ func TestAlertingRuleLimit_Success(t *testing.T) {
}
func TestAlertingRule_Template(t *testing.T) {
f := func(rule *AlertingRule, metrics []datasource.Metric, isResponsePartial bool, alertsExpected map[uint64]*notifier.Alert) {
f := func(rule *AlertingRule, metrics []datasource.Metric, alertsExpected map[uint64]*notifier.Alert) {
t.Helper()
fakeGroup := Group{
@@ -1133,7 +1133,6 @@ func TestAlertingRule_Template(t *testing.T) {
entries: make([]StateEntry, 10),
}
fq.Add(metrics...)
fq.SetPartialResponse(isResponsePartial)
if _, err := rule.exec(context.TODO(), time.Now(), 0); err != nil {
t.Fatalf("unexpected error: %s", err)
@@ -1164,7 +1163,7 @@ func TestAlertingRule_Template(t *testing.T) {
}, []datasource.Metric{
metricWithValueAndLabels(t, 1, "instance", "foo"),
metricWithValueAndLabels(t, 1, "instance", "bar"),
}, false, map[uint64]*notifier.Alert{
}, map[uint64]*notifier.Alert{
hash(map[string]string{alertNameLabel: "common", "region": "east", "instance": "foo"}): {
Annotations: map[string]string{
"summary": `common: Too high connection number for "foo"`,
@@ -1193,14 +1192,14 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "{{ $labels.instance }}",
},
Annotations: map[string]string{
"summary": `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}".{{ if $isPartial }} WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.{{ end }}`,
"summary": `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}"`,
"description": `{{ $labels.alertname}}: It is {{ $value }} connections for "{{ $labels.instance }}"`,
},
alerts: make(map[uint64]*notifier.Alert),
}, []datasource.Metric{
metricWithValueAndLabels(t, 2, "__name__", "first", "instance", "foo", alertNameLabel, "override"),
metricWithValueAndLabels(t, 10, "__name__", "second", "instance", "bar", alertNameLabel, "override"),
}, false, map[uint64]*notifier.Alert{
}, map[uint64]*notifier.Alert{
hash(map[string]string{alertNameLabel: "override label", "exported_alertname": "override", "instance": "foo"}): {
Labels: map[string]string{
alertNameLabel: "override label",
@@ -1208,7 +1207,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "foo",
},
Annotations: map[string]string{
"summary": `first: Too high connection number for "foo".`,
"summary": `first: Too high connection number for "foo"`,
"description": `override: It is 2 connections for "foo"`,
},
},
@@ -1219,7 +1218,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "bar",
},
Annotations: map[string]string{
"summary": `second: Too high connection number for "bar".`,
"summary": `second: Too high connection number for "bar"`,
"description": `override: It is 10 connections for "bar"`,
},
},
@@ -1232,7 +1231,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "{{ $labels.instance }}",
},
Annotations: map[string]string{
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}.{{ if $isPartial }} WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.{{ end }}`,
"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}`,
},
alerts: make(map[uint64]*notifier.Alert),
}, []datasource.Metric{
@@ -1240,7 +1239,7 @@ func TestAlertingRule_Template(t *testing.T) {
alertNameLabel, "originAlertname",
alertGroupNameLabel, "originGroupname",
"instance", "foo"),
}, true, map[uint64]*notifier.Alert{
}, map[uint64]*notifier.Alert{
hash(map[string]string{
alertNameLabel: "OriginLabels",
"exported_alertname": "originAlertname",
@@ -1256,7 +1255,7 @@ func TestAlertingRule_Template(t *testing.T) {
"instance": "foo",
},
Annotations: map[string]string{
"summary": `Alert "originAlertname(originGroupname)" for instance foo. WARNING: Partial response detected - this alert may be incomplete. Please verify the results manually.`,
"summary": `Alert "originAlertname(originGroupname)" for instance foo`,
},
},
})
@@ -1386,7 +1385,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert",
"alertname": "ConfigurationReloadFailure",
"alertgroup": "vmalert",
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}
expectedProcessedLabels := map[string]string{
@@ -1396,7 +1395,7 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"exported_alertname": "ConfigurationReloadFailure",
"group": "vmalert",
"alertgroup": "vmalert",
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}
ls, err := ar.toLabels(metric, nil)

View File

@@ -394,7 +394,7 @@ func (bu *backendURL) runHealthCheck() {
if errors.Is(bu.healthCheckContext.Err(), context.Canceled) {
return
}
logger.Warnf("ignoring the backend at %s for %s because of dial error: %s", addr, *failTimeout, err)
logger.Warnf("ignoring the backend at %s for %s becasue of dial error: %s", addr, *failTimeout, err)
continue
}
@@ -809,7 +809,7 @@ func reloadAuthConfig() (bool, error) {
ok, err := reloadAuthConfigData(data)
if err != nil {
return false, fmt.Errorf("failed to parse -auth.config=%q: %w", *authConfigPath, err)
return false, fmt.Errorf("failed to pars -auth.config=%q: %w", *authConfigPath, err)
}
if !ok {
return false, nil

View File

@@ -156,10 +156,6 @@ func requestHandlerWithInternalRoutes(w http.ResponseWriter, r *http.Request) bo
}
func requestHandler(w http.ResponseWriter, r *http.Request) bool {
if r.Body != nil {
r.Body = &readDurationTrackingBody{r: r.Body}
}
ats := getAuthTokensFromRequest(r)
if len(ats) == 0 {
// Process requests for unauthorized users
@@ -353,37 +349,14 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
err = ctxErr
}
if err != nil {
if errors.Is(err, errReadTimeout) {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
// Do not retry canceled or timed out requests
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Warnf("remoteAddr: %s; requestURI: %s; client %s request exceeded single read timeout -readTimeout=%s, closing connection", remoteAddr, requestURI, ui.name(), *readTimeout)
rejectSlowClientRequests.Inc()
if w1, ok := w.(http.Hijacker); ok {
conn, _, connErr := w1.Hijack()
if connErr != nil {
logger.Errorf("cannot hijack connection for slow read timeout handling for %s: %s", targetURL, connErr)
return true, false
}
_ = conn.Close()
return true, false
if errors.Is(err, context.DeadlineExceeded) {
// Timed out request must be counted as errors, since this usually means that the backend is slow.
logger.Warnf("remoteAddr: %s; requestURI: %s; timeout while proxying the response from %s: %s", remoteAddr, requestURI, targetURL, err)
}
return true, false
}
// Do not retry canceled
if errors.Is(err, context.Canceled) {
clientCanceledRequests.Inc()
return true, false
}
// Do not retry timed out requests
if errors.Is(err, context.DeadlineExceeded) {
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
// Timed out request must be counted as errors, since this usually means that the backend is slow.
logger.Warnf("remoteAddr: %s; requestURI: %s; timeout while proxying the response from %s: %s", remoteAddr, requestURI, targetURL, err)
return false, false
}
if !rtbOK || !rtb.canRetry() {
@@ -440,10 +413,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
err = copyStreamToClient(w, res.Body)
_ = res.Body.Close()
if errors.Is(err, context.Canceled) {
clientCanceledRequests.Inc()
return true, false
} else if err != nil && !netutil.IsTrivialNetworkError(err) {
if err != nil && !netutil.IsTrivialNetworkError(err) && !errors.Is(err, context.Canceled) {
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
@@ -576,8 +546,6 @@ var (
configReloadRequests = metrics.NewCounter(`vmauth_http_requests_total{path="/-/reload"}`)
invalidAuthTokenRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="invalid_auth_token"}`)
missingRouteRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
clientCanceledRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="client_canceled"}`)
rejectSlowClientRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="reject_slow_client"}`)
)
func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, insecureSkipVerifyP *bool) (http.RoundTripper, error) {
@@ -665,7 +633,6 @@ func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err err
if errors.Is(ctx.Err(), context.Canceled) {
// Do not return any response for the request canceled by the client,
// since the connection to the client is already closed.
clientCanceledRequests.Inc()
return
}
@@ -804,34 +771,3 @@ func debugInfo(u *url.URL, r *http.Request) string {
fmt.Fprint(s, ")")
return s.String()
}
var slowReadDuration = metrics.NewSummary(`vmauth_request_slow_read_duration_seconds`)
var readTimeout = flag.Duration("readTimeout", 0, "The maximum duration for a single read call when exceeded the connection is closed. Zero disables request read timeout. "+
"See also -writeTimeout")
var errReadTimeout = fmt.Errorf("request read timeout")
type readDurationTrackingBody struct {
r io.ReadCloser
}
func (r *readDurationTrackingBody) Read(p []byte) (n int, err error) {
start := time.Now()
n, err = r.r.Read(p)
dur := time.Since(start)
// Record slow read durations only to avoid overhead for fast reads.
if dur > time.Millisecond {
slowReadDuration.Update(dur.Seconds())
}
if err == nil && *readTimeout > 0 && dur > *readTimeout {
return n, errReadTimeout
}
return n, err
}
func (r *readDurationTrackingBody) Close() error {
return r.r.Close()
}

View File

@@ -468,7 +468,7 @@ var (
Name: vmNativeFilterMatch,
Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " +
"match all series with \"instance\" label different to \"localhost\".\n" +
" See more details here https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-data-in-native-format",
" See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format",
Value: `{__name__!=""}`,
},
&cli.StringFlag{

View File

@@ -232,7 +232,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
firehose.WriteSuccessResponse(w, r)
return true
case "/zabbixconnector/api/v1/history":
case "zabbixconnector/api/v1/history":
zabbixconnectorHistoryRequests.Inc()
if err := zabbixconnector.InsertHandlerForHTTP(r); err != nil {
zabbixconnectorHistoryErrors.Inc()
@@ -241,7 +241,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
fmt.Fprintf(w, `{"error":%q}`, err.Error())
return true
}
w.WriteHeader(http.StatusOK)
w.WriteHeader(http.StatusAccepted)
return true
case "/newrelic":
newrelicCheckRequest.Inc()

View File

@@ -520,7 +520,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
fmt.Fprintf(w, "%s", `{"status":"error","msg":"for accessing vmalert flag '-vmalert.proxyURL' must be configured"}`)
return true
}
proxyVMAlertRequests(w, r, path)
proxyVMAlertRequests(w, r)
return true
}
@@ -558,7 +558,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
case "/api/v1/rules", "/rules":
rulesRequests.Inc()
if len(*vmalertProxyURL) > 0 {
proxyVMAlertRequests(w, r, path)
proxyVMAlertRequests(w, r)
return true
}
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules
@@ -568,7 +568,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
case "/api/v1/alerts", "/alerts":
alertsRequests.Inc()
if len(*vmalertProxyURL) > 0 {
proxyVMAlertRequests(w, r, path)
proxyVMAlertRequests(w, r)
return true
}
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#alerts
@@ -578,7 +578,7 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
case "/api/v1/notifiers", "/notifiers":
notifiersRequests.Inc()
if len(*vmalertProxyURL) > 0 {
proxyVMAlertRequests(w, r, path)
proxyVMAlertRequests(w, r)
return true
}
w.Header().Set("Content-Type", "application/json")
@@ -725,7 +725,7 @@ var (
metricNamesStatsResetErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/admin/status/metric_names_stats/reset"}`)
)
func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request, path string) {
func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request) {
defer func() {
err := recover()
if err == nil || err == http.ErrAbortHandler {
@@ -736,10 +736,8 @@ func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request, path string) {
// Forward other panics to the caller.
panic(err)
}()
req := r.Clone(r.Context())
req.URL.Path = strings.TrimPrefix(path, "prometheus")
req.Host = vmalertProxyHost
vmalertProxy.ServeHTTP(w, req)
r.Host = vmalertProxyHost
vmalertProxy.ServeHTTP(w, r)
}
var (

View File

@@ -785,8 +785,7 @@ func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
// - rollupFunc(m) if iafc is nil
// - aggrFunc(rollupFunc(m)) if iafc isn't nil
func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr,
re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext,
) ([]*timeseries, error) {
re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
if re.At == nil {
return evalRollupFuncWithoutAt(qt, ec, funcName, rf, expr, re, iafc)
}
@@ -836,8 +835,7 @@ func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf
}
func evalRollupFuncWithoutAt(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext,
) ([]*timeseries, error) {
expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
funcName = strings.ToLower(funcName)
ecNew := ec
var offset int64
@@ -1060,8 +1058,7 @@ func removeNanValues(dstValues []float64, dstTimestamps []int64, values []float6
// evalInstantRollup evaluates instant rollup where ec.Start == ec.End.
func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window int64,
) ([]*timeseries, error) {
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window int64) ([]*timeseries, error) {
if ec.Start != ec.End {
logger.Panicf("BUG: evalInstantRollup cannot be called on non-empty time range; got %s", ec.timeRangeString())
}
@@ -1086,12 +1083,10 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
rollupResultCacheV.DeleteInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
}
getCachedSeries := func(qt *querytracer.Tracer) ([]*timeseries, int64, error) {
rollupResultCacheV.rollupResultCacheRequests.Inc()
again:
offset := int64(0)
tssCached := rollupResultCacheV.GetInstantValues(qt, expr, window, ec.Step, ec.EnforcedTagFilterss)
if len(tssCached) == 0 {
rollupResultCacheV.rollupResultCacheMisses.Inc()
// Cache miss. Re-populate the missing data.
start := int64(fasttime.UnixTimestamp()*1000) - cacheTimestampOffset.Milliseconds()
offset = timestamp - start
@@ -1134,7 +1129,6 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
deleteCachedSeries(qt)
goto again
}
rollupResultCacheV.rollupResultCachePartialHits.Inc()
ec.QueryStats.addSeriesFetched(len(tssCached))
return tssCached, offset, nil
}
@@ -1543,11 +1537,16 @@ func assertInstantValues(tss []*timeseries) {
}
}
var memoryIntensiveQueries = metrics.NewCounter(`vm_memory_intensive_queries_total`)
var (
rollupResultCacheFullHits = metrics.NewCounter(`vm_rollup_result_cache_full_hits_total`)
rollupResultCachePartialHits = metrics.NewCounter(`vm_rollup_result_cache_partial_hits_total`)
rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
memoryIntensiveQueries = metrics.NewCounter(`vm_memory_intensive_queries_total`)
)
func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr,
) ([]*timeseries, error) {
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
window, err := windowExpr.NonNegativeDuration(ec.Step)
if err != nil {
return nil, fmt.Errorf("cannot parse lookbehind window in square brackets at %s: %w", expr.AppendString(nil), err)
@@ -1583,20 +1582,19 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
}
// Search for cached results.
rollupResultCacheV.rollupResultCacheRequests.Inc()
tssCached, start := rollupResultCacheV.GetSeries(qt, ec, expr, window)
ec.QueryStats.addSeriesFetched(len(tssCached))
if start > ec.End {
qt.Printf("the result is fully cached")
rollupResultCacheV.rollupResultCacheFullHits.Inc()
rollupResultCacheFullHits.Inc()
return tssCached, nil
}
if start > ec.Start {
qt.Printf("partial cache hit")
rollupResultCacheV.rollupResultCachePartialHits.Inc()
rollupResultCachePartialHits.Inc()
} else {
qt.Printf("cache miss")
rollupResultCacheV.rollupResultCacheMisses.Inc()
rollupResultCacheMiss.Inc()
}
// Fetch missing results, which aren't cached yet.
@@ -1632,8 +1630,7 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
//
// pointsPerSeries is used only for estimating the needed memory for query processing
func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window, pointsPerSeries int64,
) ([]*timeseries, error) {
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, window, pointsPerSeries int64) ([]*timeseries, error) {
if qt.Enabled() {
qt = qt.NewChild("rollup %s: timeRange=%s, step=%d, window=%d", expr.AppendString(nil), ec.timeRangeString(), ec.Step, window)
defer qt.Done()
@@ -1756,8 +1753,7 @@ func maxSilenceInterval() int64 {
func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool,
iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64,
) ([]*timeseries, error) {
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
qt = qt.NewChild("rollup %s() with incremental aggregation %s() over %d series; rollupConfigs=%s", funcName, iafc.ae.Name, rss.Len(), rcs)
defer qt.Done()
var samplesScannedTotal atomic.Uint64
@@ -1796,8 +1792,7 @@ func evalRollupWithIncrementalAggregate(qt *querytracer.Tracer, funcName string,
}
func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, keepMetricNames bool, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64,
) ([]*timeseries, error) {
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
qt = qt.NewChild("rollup %s() over %d series; rollupConfigs=%s", funcName, rss.Len(), rcs)
defer qt.Done()
@@ -1837,8 +1832,7 @@ func evalRollupNoIncrementalAggregate(qt *querytracer.Tracer, funcName string, k
}
func doRollupForTimeseries(funcName string, keepMetricNames bool, rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName,
valuesSrc []float64, timestampsSrc []int64, sharedTimestamps []int64,
) uint64 {
valuesSrc []float64, timestampsSrc []int64, sharedTimestamps []int64) uint64 {
tsDst.MetricName.CopyFrom(mnSrc)
if len(rc.TagValue) > 0 {
tsDst.MetricName.AddTag("rollup", rc.TagValue)

View File

@@ -83,11 +83,9 @@ func checkRollupResultCacheReset() {
const checkRollupResultCacheResetInterval = 5 * time.Second
var (
needRollupResultCacheReset atomic.Bool
checkRollupResultCacheResetOnce sync.Once
rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]
)
var needRollupResultCacheReset atomic.Bool
var checkRollupResultCacheResetOnce sync.Once
var rollupResultResetMetricRowSample atomic.Pointer[storage.MetricRow]
var rollupResultCacheV = &rollupResultCache{
c: workingsetcache.New(1024 * 1024), // This is a cache for testing.
@@ -180,12 +178,6 @@ func InitRollupResultCache(cachePath string) {
rollupResultCacheV = &rollupResultCache{
c: c,
rollupResultCacheRequests: metrics.GetOrCreateCounter(`vm_rollup_result_cache_requests_total`),
rollupResultCacheFullHits: metrics.GetOrCreateCounter(`vm_rollup_result_cache_full_hits_total`),
rollupResultCachePartialHits: metrics.GetOrCreateCounter(`vm_rollup_result_cache_partial_hits_total`),
rollupResultCacheMisses: metrics.GetOrCreateCounter(`vm_rollup_result_cache_miss_total`),
rollupResultCacheResets: metrics.GetOrCreateCounter(`vm_rollup_result_cache_resets_total`),
}
}
@@ -201,18 +193,13 @@ func StopRollupResultCache() {
type rollupResultCache struct {
c *workingsetcache.Cache
rollupResultCacheRequests *metrics.Counter
rollupResultCacheFullHits *metrics.Counter
rollupResultCachePartialHits *metrics.Counter
rollupResultCacheMisses *metrics.Counter
rollupResultCacheResets *metrics.Counter
}
var rollupResultCacheResets = metrics.NewCounter(`vm_cache_resets_total{type="promql/rollupResult"}`)
// ResetRollupResultCache resets rollup result cache.
func ResetRollupResultCache() {
rollupResultCacheV.rollupResultCacheResets.Inc()
rollupResultCacheResets.Inc()
rollupResultCacheKeyPrefix.Add(1)
logger.Infof("rollupResult cache has been cleared")
}

View File

@@ -29,8 +29,7 @@ import (
)
var (
retentionPeriod = flagutil.NewRetentionDuration("retentionPeriod", "1M", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. "+
"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention. See also -retentionFilter")
retentionPeriod = flagutil.NewRetentionDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter")
snapshotAuthKey = flagutil.NewPassword("snapshotAuthKey", "authKey, which must be passed in query string to /snapshot* pages. It overrides -httpAuth.*")
forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages. It overrides -httpAuth.*")
forceFlushAuthKey = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush pages. It overrides -httpAuth.*")
@@ -389,23 +388,11 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
case "/create":
snapshotsCreateTotal.Inc()
w.Header().Set("Content-Type", "application/json")
snapshotName := Storage.MustCreateSnapshot()
// Verify whether the client already closed the connection.
// In this case it is better to drop the created snapshot, since the client isn't interested in it.
if err := r.Context().Err(); err != nil {
logger.Infof("deleting already created snapshot at %s because the client canceled the request", snapshotName)
if err := deleteSnapshot(snapshotName); err != nil {
logger.Infof("cannot delete just created snapshot: %s", err)
return true
}
return true
}
snapshotPath := Storage.MustCreateSnapshot()
if prometheusCompatibleResponse {
fmt.Fprintf(w, `{"status":"success","data":{"name":%s}}`, stringsutil.JSONString(snapshotName))
fmt.Fprintf(w, `{"status":"success","data":{"name":%s}}`, stringsutil.JSONString(snapshotPath))
} else {
fmt.Fprintf(w, `{"status":"ok","snapshot":%s}`, stringsutil.JSONString(snapshotName))
fmt.Fprintf(w, `{"status":"ok","snapshot":%s}`, stringsutil.JSONString(snapshotPath))
}
return true
case "/list":
@@ -425,12 +412,23 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
snapshotsDeleteTotal.Inc()
w.Header().Set("Content-Type", "application/json")
snapshotName := r.FormValue("snapshot")
if err := deleteSnapshot(snapshotName); err != nil {
jsonResponseError(w, err)
snapshotsDeleteErrorsTotal.Inc()
return true
snapshots := Storage.MustListSnapshots()
for _, snName := range snapshots {
if snName == snapshotName {
if err := Storage.DeleteSnapshot(snName); err != nil {
err = fmt.Errorf("cannot delete snapshot %q: %w", snName, err)
jsonResponseError(w, err)
snapshotsDeleteErrorsTotal.Inc()
return true
}
fmt.Fprintf(w, `{"status":"ok"}`)
return true
}
}
fmt.Fprintf(w, `{"status":"ok"}`)
err := fmt.Errorf("cannot find snapshot %q", snapshotName)
jsonResponseError(w, err)
return true
case "/delete_all":
snapshotsDeleteAllTotal.Inc()
@@ -451,19 +449,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
}
func deleteSnapshot(snapshotName string) error {
snapshots := Storage.MustListSnapshots()
for _, snName := range snapshots {
if snName == snapshotName {
if err := Storage.DeleteSnapshot(snName); err != nil {
return fmt.Errorf("cannot delete snapshot %q: %w", snName, err)
}
return nil
}
}
return fmt.Errorf("cannot find snapshot %q", snapshotName)
}
func initStaleSnapshotsRemover(strg *storage.Storage) {
staleSnapshotsRemoverCh = make(chan struct{})
if snapshotsMaxAge.Duration() <= 0 {

View File

@@ -46,7 +46,7 @@ export default [...compat.extends(
settings: {
react: {
pragma: "React",
version: "19.0",
version: "detect",
},
linkComponents: ["Hyperlink", {
@@ -69,11 +69,10 @@ export default [...compat.extends(
"varsIgnorePattern": "^_",
"ignoreRestSiblings": true
}],
"unused-imports/no-unused-imports": "error",
"react/jsx-closing-bracket-location": [1, "line-aligned"],
"object-curly-spacing": [2, "always"],
"react/jsx-max-props-per-line": [1, {
maximum: 1,
@@ -82,23 +81,13 @@ export default [...compat.extends(
"react/jsx-first-prop-new-line": [1, "multiline"],
// Disable core indent rule due to recursion issues in ESLint 9; use JSX-specific rules instead
indent: ["error", 2, {
SwitchCase: 1,
ignoredNodes: [
"JSXElement",
"JSXElement *",
"JSXFragment",
"JSXFragment *",
],
}],
indent: "off",
"react/jsx-indent": ["error", 2],
"react/jsx-indent-props": ["error", 2],
"linebreak-style": ["error", "unix"],
quotes: ["error", "double"],
semi: ["error", "always"],
// Formatting rules moved out of ESLint core; omit here to avoid deprecation noise
"react/prop-types": 0,
"react/react-in-jsx-scope": "off",
},
}];

File diff suppressed because it is too large Load Diff

View File

@@ -18,48 +18,47 @@
"preview": "vite preview",
"typecheck": "tsc --noEmit",
"test": "vitest run",
"test:dev": "vitest",
"precommit": "npm run lint:local && npm run typecheck && npm run test"
"test:dev": "vitest"
},
"dependencies": {
"classnames": "^2.5.1",
"dayjs": "^1.11.19",
"dayjs": "^1.11.13",
"lodash.debounce": "^4.0.8",
"marked": "^17.0.1",
"preact": "^10.28.2",
"qs": "^6.14.1",
"marked": "^16.0.0",
"preact": "^10.26.9",
"qs": "^6.14.0",
"react-input-mask": "^2.0.4",
"react-router-dom": "^7.12.0",
"react-router-dom": "^7.6.3",
"uplot": "^1.6.32",
"vite": "^7.3.1",
"web-vitals": "^5.1.0"
"vite": "^7.1.11",
"web-vitals": "^5.0.3"
},
"devDependencies": {
"@eslint/eslintrc": "^3.3.3",
"@eslint/js": "^9.39.2",
"@eslint/eslintrc": "^3.3.1",
"@eslint/js": "^9.30.1",
"@preact/preset-vite": "^2.10.2",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/jest-dom": "^6.6.3",
"@testing-library/preact": "^3.2.4",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "^25.0.8",
"@types/node": "^24.0.12",
"@types/qs": "^6.14.0",
"@types/react": "^19.2.8",
"@types/react": "^19.1.8",
"@types/react-input-mask": "^3.0.6",
"@types/react-router-dom": "^5.3.3",
"@typescript-eslint/eslint-plugin": "^8.53.0",
"@typescript-eslint/parser": "^8.53.0",
"cross-env": "^10.1.0",
"eslint": "^9.39.2",
"@typescript-eslint/eslint-plugin": "^8.36.0",
"@typescript-eslint/parser": "^8.36.0",
"cross-env": "^7.0.3",
"eslint": "^9.30.1",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-unused-imports": "^4.3.0",
"globals": "^17.0.0",
"eslint-plugin-unused-imports": "^4.1.4",
"globals": "^16.3.0",
"http-proxy-middleware": "^3.0.5",
"jsdom": "^27.4.0",
"jsdom": "^26.1.0",
"postcss": "^8.5.6",
"rollup-plugin-visualizer": "^6.0.5",
"sass-embedded": "^1.97.2",
"typescript": "^5.9.3",
"vitest": "^4.0.17"
"rollup-plugin-visualizer": "^6.0.3",
"sass-embedded": "^1.89.2",
"typescript": "^5.8.3",
"vitest": "^3.2.4"
},
"browserslist": {
"production": [

View File

@@ -39,14 +39,14 @@ const LegendGroup: FC<LegendGroupProps> = ({ labels, group, isAnomalyView, onCha
const Content = isTableView ? LegendTable : LegendLines;
const disableAutoCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE") === "false";
const defaultExpanded = disableAutoCollapse ? true : sortedLabels.length <= LEGEND_COLLAPSE_SERIES_LIMIT;
const disableAutoCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE") === "false"
const defaultExpanded = disableAutoCollapse ? true : sortedLabels.length <= LEGEND_COLLAPSE_SERIES_LIMIT
const expandedWarning = (
<span className="vm-legend-group-header__warning">
Legend collapsed by default ({sortedLabels.length} series) click to expand.
</span>
);
)
return (
<div

View File

@@ -29,7 +29,7 @@ const LimitsConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
const { seriesLimits } = useCustomPanelState();
const customPanelDispatch = useCustomPanelDispatch();
const storageCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE");
const storageCollapse = getFromStorage("LEGEND_AUTO_COLLAPSE")
const [legendCollapse, setLegendCollapse] = useState(storageCollapse ? storageCollapse === "true" : true);
const [limits, setLimits] = useState(seriesLimits);
@@ -58,7 +58,7 @@ const LimitsConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
}, [limits]);
useEffect(() => {
saveToStorage("LEGEND_AUTO_COLLAPSE", `${legendCollapse}`);
saveToStorage("LEGEND_AUTO_COLLAPSE", `${legendCollapse}`)
}, [legendCollapse]);
useImperativeHandle(ref, () => ({ handleApply }), [handleApply]);

View File

@@ -9,6 +9,7 @@ import { getFromStorage, removeFromStorage, saveToStorage } from "../../../../ut
import useBoolean from "../../../../hooks/useBoolean";
import { ChildComponentHandle } from "../GlobalSettings";
import { useAppDispatch, useAppState } from "../../../../state/common/StateContext";
import { getTenantIdFromUrl } from "../../../../utils/tenants";
interface ServerConfiguratorProps {
onClose: () => void;
@@ -38,6 +39,10 @@ const ServerConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
};
const handleApply = useCallback(() => {
const tenantIdFromUrl = getTenantIdFromUrl(serverUrl);
if (tenantIdFromUrl !== "") {
dispatch({ type: "SET_TENANT_ID", payload: tenantIdFromUrl });
}
dispatch({ type: "SET_SERVER", payload: serverUrl });
onClose();
}, [serverUrl]);
@@ -55,6 +60,12 @@ const ServerConfigurator = forwardRef<ChildComponentHandle, ServerConfiguratorPr
}
}, [enabledStorage]);
useEffect(() => {
if (enabledStorage) {
saveToStorage("SERVER_URL", serverUrl);
}
}, [serverUrl]);
useEffect(() => {
// the tenant selector can change the serverUrl
if (stateServerUrl === serverUrl) return;

View File

@@ -1,4 +1,4 @@
import { FC, useState, useRef, useMemo } from "preact/compat";
import { FC, useState, useRef, useEffect, useMemo } from "preact/compat";
import { useAppDispatch, useAppState } from "../../../../state/common/StateContext";
import { useTimeDispatch } from "../../../../state/time/TimeStateContext";
import { ArrowDownIcon, StorageIcon } from "../../../Main/Icons";
@@ -10,14 +10,14 @@ import { getAppModeEnable } from "../../../../utils/app-mode";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import TextField from "../../../Main/TextField/TextField";
import { replaceTenantId } from "../../../../utils/tenants";
import { getTenantIdFromUrl, replaceTenantId } from "../../../../utils/tenants";
import useBoolean from "../../../../hooks/useBoolean";
const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
const appModeEnable = getAppModeEnable();
const { isMobile } = useDeviceDetect();
const { tenantId, serverUrl } = useAppState();
const { tenantId: tenantIdState, serverUrl } = useAppState();
const dispatch = useAppDispatch();
const timeDispatch = useTimeDispatch();
@@ -48,8 +48,10 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
}, [accountIds]);
const createHandlerChange = (value: string) => () => {
const tenant = value;
dispatch({ type: "SET_TENANT_ID", payload: tenant });
if (serverUrl) {
const updateServerUrl = replaceTenantId(serverUrl, value);
const updateServerUrl = replaceTenantId(serverUrl, tenant);
if (updateServerUrl === serverUrl) return;
dispatch({ type: "SET_SERVER", payload: updateServerUrl });
timeDispatch({ type: "RUN_QUERY" });
@@ -57,6 +59,16 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
handleCloseOptions();
};
useEffect(() => {
const id = getTenantIdFromUrl(serverUrl);
if (tenantIdState && tenantIdState !== id) {
createHandlerChange(tenantIdState)();
} else {
createHandlerChange(id)();
}
}, [serverUrl]);
if (!showTenantSelector) return null;
return (
@@ -71,7 +83,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
<span className="vm-mobile-option__icon"><StorageIcon/></span>
<div className="vm-mobile-option-text">
<span className="vm-mobile-option-text__label">Tenant ID</span>
<span className="vm-mobile-option-text__value">{tenantId}</span>
<span className="vm-mobile-option-text__value">{tenantIdState}</span>
</div>
<span className="vm-mobile-option__arrow"><ArrowDownIcon/></span>
</div>
@@ -94,7 +106,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
)}
onClick={toggleOpenOptions}
>
{tenantId}
{tenantIdState}
</Button>
)}
</div>
@@ -126,7 +138,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
className={classNames({
"vm-list-item": true,
"vm-list-item_mobile": isMobile,
"vm-list-item_active": id === tenantId
"vm-list-item_active": id === tenantIdState
})}
key={id}
onClick={createHandlerChange(id)}

View File

@@ -3,18 +3,19 @@ import { useEffect, useMemo, useState } from "preact/compat";
import { ErrorTypes } from "../../../../../types";
import { getAccountIds } from "../../../../../api/accountId";
import { getAppModeEnable, getAppModeParams } from "../../../../../utils/app-mode";
import { getTenantIdFromUrl } from "../../../../../utils/tenants";
export const useFetchAccountIds = () => {
const { useTenantID } = getAppModeParams();
const appModeEnable = getAppModeEnable();
const { tenantId, serverUrl } = useAppState();
const { serverUrl } = useAppState();
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();
const [accountIds, setAccountIds] = useState<string[]>([]);
const fetchUrl = useMemo(() => getAccountIds(serverUrl), [serverUrl]);
const isServerUrlWithTenant = useMemo(() => !!tenantId, [tenantId]);
const isServerUrlWithTenant = useMemo(() => !!getTenantIdFromUrl(serverUrl), [serverUrl]);
const preventFetch = appModeEnable ? !useTenantID : !isServerUrlWithTenant;
useEffect(() => {

View File

@@ -17,4 +17,4 @@ export const formatDuration = (raw: number) => {
export const formatEventTime = (raw: string) => {
const t = dayjs(raw);
return t.year() <= 1 ? "Never" : t.format("DD MMM YYYY HH:mm:ss");
};
}

View File

@@ -1,5 +1,4 @@
@use "src/styles/variables" as *;
@use 'sass:meta';
$button-radius: 6px;
@@ -43,8 +42,6 @@ $button-radius: 6px;
svg {
width: 14px;
min-width: 14px;
max-width: 14px;
}
}
@@ -54,8 +51,6 @@ $button-radius: 6px;
svg {
width: 16px;
min-width: 16px;
max-width: 16px;
}
}
@@ -65,8 +60,6 @@ $button-radius: 6px;
svg {
width: 18px;
min-width: 18px;
max-width: 18px;
line-height: 16px;
}
}
@@ -135,14 +128,8 @@ $button-radius: 6px;
);
@each $name, $color in $button-colors {
@if $name == white {
@include contained-button($name, $color, $color-black);
@include outlined-button($name, $color, $color-white);
@include text-button($name, $color-white);
} @else {
@include contained-button($name, $color, $color-white);
@include outlined-button($name, $color, $color);
@include text-button($name, $color);
}
@include contained-button($name, $color, if($name == white, $color-black, $color-white));
@include outlined-button($name, $color, if($name == white, $color-white, $color));
@include text-button($name, if($name == white, $color-white, $color));
}
}

View File

@@ -1,8 +1,4 @@
import {
getFromStorage,
saveToStorage,
StorageKeys,
} from "../../utils/storage";
import { getFromStorage, removeFromStorage, saveToStorage, StorageKeys } from "../../utils/storage";
import { QueryHistoryType } from "../../state/query/reducer";
import { MAX_QUERIES_HISTORY, MAX_QUERY_FIELDS } from "../../constants/graph";
@@ -77,3 +73,17 @@ export const getUpdatedHistory = (query: string, queryHistory?: QueryHistoryType
values: newValues
};
};
const migrateMetricsQueryHistoryToHistoryByKey = () => {
const migrateHistory = (type: HistoryType) => {
const queryList = getFromStorage(type) as string;
if (queryList) {
const queryHistory: string[][] = JSON.parse(queryList);
saveHistoryToStorage("METRICS_QUERY_HISTORY", type, queryHistory);
removeFromStorage([type]);
}
};
migrateHistory("QUERY_HISTORY");
migrateHistory("QUERY_FAVORITES");
};
migrateMetricsQueryHistoryToHistoryByKey();

View File

@@ -1,69 +0,0 @@
import { useEffect } from "react";
import { StorageErrorCode } from "./types";
import { useSnack } from "../../contexts/Snackbar";
import { storageErrorInfo } from "./storageErrors";
import "./style.scss";
const classifyStorageException = (e: unknown): StorageErrorCode => {
if (!(e instanceof DOMException)) return StorageErrorCode.UNKNOWN;
switch (e.name) {
case "QuotaExceededError":
return StorageErrorCode.QUOTA_EXCEEDED;
case "SecurityError":
return StorageErrorCode.SECURITY_ERROR;
default:
return StorageErrorCode.UNKNOWN;
}
};
const getStorageError = (storage: Storage | null | undefined): StorageErrorCode | null => {
if (!storage) {
return StorageErrorCode.NO_STORAGE;
}
try {
const key = "__vmui_test__";
storage.setItem(key, "1");
storage.removeItem(key);
return null;
} catch (e) {
return classifyStorageException(e);
}
};
const WebStorageCheck = () => {
const { showInfoMessage } = useSnack();
useEffect(() => {
const error = getStorageError(window.localStorage);
if (error) {
const { title, description, fix } = storageErrorInfo[error];
const text = (
<div className="vm-storage-check">
<h3>{title}</h3>
<p>{description}</p>
{!!fix?.length && (
<div className="vm-storage-check__fix">
<div>Try this:</div>
<ul>
{fix.map((step, i) => (
<li key={`${i}-${step}`}>{step}</li>
))}
</ul>
</div>
)}
</div>
);
showInfoMessage({ text: text, type: "error", timeout: 600000 });
}
}, []);
return null;
};
export default WebStorageCheck;

View File

@@ -1,47 +0,0 @@
import { StorageError, StorageErrorCode } from "./types";
export const storageErrorInfo: Record<StorageErrorCode, StorageError> = {
[StorageErrorCode.NO_STORAGE]: {
title: "Storage unavailable",
description:
"Browser storage is not available for this website.",
fix: [
"Disable Private/Incognito mode and reload the page.",
"Disable privacy or ad-blocking extensions for this site and reload.",
"Open the site in another browser.",
],
},
[StorageErrorCode.SECURITY_ERROR]: {
title: "Storage access blocked",
description:
"Browser settings or an extension are blocking access to browser storage.",
fix: [
"Disable Private/Incognito mode and reload the page.",
"Disable privacy or ad-blocking extensions for this site and reload.",
"Open the site in a regular browser tab (not embedded).",
],
},
[StorageErrorCode.QUOTA_EXCEEDED]: {
title: "Storage quota exceeded",
description:
"The storage limit for this website has been reached.",
fix: [
"Clear this websites stored data and reload the page.",
"Close other tabs for this website and try again.",
"Use another browser or browser profile.",
],
},
[StorageErrorCode.UNKNOWN]: {
title: "Storage error",
description:
"An unexpected error occurred while accessing browser storage.",
fix: [
"Reload the page.",
"Update the browser and try again.",
"Disable browser extensions and reload.",
],
},
};

View File

@@ -1,11 +0,0 @@
@use "src/styles/variables" as *;
.vm-storage-check {
h3 {
font-weight: bold;
}
p {
margin-bottom: $padding-global
}
}

View File

@@ -1,13 +0,0 @@
export enum StorageErrorCode {
NO_STORAGE = "NO_STORAGE",
SECURITY_ERROR = "SECURITY_ERROR",
QUOTA_EXCEEDED = "QUOTA_EXCEEDED",
UNKNOWN = "UNKNOWN",
}
export type StorageError = {
title: string;
description: string;
fix: string[]
}

View File

@@ -12,8 +12,6 @@ import useDeviceDetect from "../../hooks/useDeviceDetect";
import ControlsMainLayout from "./ControlsMainLayout";
import useFetchDefaultTimezone from "../../hooks/useFetchDefaultTimezone";
import useFetchAppConfig from "../../hooks/useFetchAppConfig";
import WebStorageCheck from "../../components/WebStorageCheck/WebStorageCheck";
import { migrateStorageToPrefixedKeys } from "../../utils/storage";
const MainLayout: FC = () => {
const appModeEnable = getAppModeEnable();
@@ -47,13 +45,6 @@ const MainLayout: FC = () => {
useEffect(setDocumentTitle, [pathname]);
useEffect(redirectSearchToHashParams, []);
useEffect(() => {
const migrateStorage = migrateStorageToPrefixedKeys();
if (migrateStorage.removed.length || migrateStorage.migrated.length) {
console.info(migrateStorage);
}
}, []);
return <section className="vm-container">
<Header controlsComponent={ControlsMainLayout}/>
<div
@@ -66,8 +57,6 @@ const MainLayout: FC = () => {
<Outlet/>
</div>
{!appModeEnable && <Footer/>}
<WebStorageCheck/>
</section>;
};

View File

@@ -7,6 +7,7 @@ import AppConfigurator from "../appConfigurator";
import { useSearchParams } from "react-router-dom";
import dayjs from "dayjs";
import { DATE_FORMAT } from "../../../constants/date";
import { getTenantIdFromUrl } from "../../../utils/tenants";
import usePrevious from "../../../hooks/usePrevious";
export const useFetchQuery = (): {
@@ -26,7 +27,7 @@ export const useFetchQuery = (): {
const prevDate = usePrevious(date);
const prevTotal = useRef<{ data: TSDBStatus }>();
const { tenantId, serverUrl } = useAppState();
const { serverUrl } = useAppState();
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();
const [tsdbStatus, setTSDBStatus] = useState<TSDBStatus>(appConfigurator.defaultTSDBStatus);
@@ -157,8 +158,9 @@ export const useFetchQuery = (): {
}, [error]);
useEffect(() => {
setIsCluster(!!tenantId);
}, [tenantId]);
const id = getTenantIdFromUrl(serverUrl);
setIsCluster(!!id);
}, [serverUrl]);
appConfigurator.tsdbStatusData = tsdbStatus;

View File

@@ -1,6 +1,7 @@
import { useEffect, useState } from "react";
import { useTimeDispatch, useTimeState } from "../../../state/time/TimeStateContext";
import { useCustomPanelDispatch, useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
import { useAppDispatch, useAppState } from "../../../state/common/StateContext";
import { useQueryDispatch, useQueryState } from "../../../state/query/QueryStateContext";
import { displayTypeTabs } from "../DisplayTypeSwitch";
import { useGraphDispatch, useGraphState } from "../../../state/graph/GraphStateContext";
@@ -14,12 +15,14 @@ import { arrayEquals } from "../../../utils/array";
import { isEqualURLSearchParams } from "../../../utils/url";
export const useSetQueryParams = () => {
const { tenantId } = useAppState();
const { displayType } = useCustomPanelState();
const { query } = useQueryState();
const { duration, relativeTime, period: { date, step } } = useTimeState();
const { customStep } = useGraphState();
const [searchParams, setSearchParams] = useSearchParams();
const dispatch = useAppDispatch();
const timeDispatch = useTimeDispatch();
const graphDispatch = useGraphDispatch();
const queryDispatch = useQueryDispatch();
@@ -69,6 +72,10 @@ export const useSetQueryParams = () => {
if (searchParams.get(`${group}.tab`) !== displayTypeCode) {
newSearchParams.set(`${group}.tab`, `${displayTypeCode}`);
}
if (searchParams.get(`${group}.tenantID`) !== tenantId && tenantId) {
newSearchParams.set(`${group}.tenantID`, tenantId);
}
});
// Remove extra parameters that exceed the request size
@@ -82,7 +89,7 @@ export const useSetQueryParams = () => {
if (isEqualURLSearchParams(newSearchParams, searchParams) || !newSearchParams.size) return;
setSearchParams(newSearchParams);
}, [displayType, query, duration, relativeTime, date, step, customStep]);
}, [tenantId, displayType, query, duration, relativeTime, date, step, customStep]);
useEffect(() => {
const timer = setTimeout(setterSearchParams, 200);
@@ -107,6 +114,11 @@ export const useSetQueryParams = () => {
customPanelDispatch({ type: "SET_DISPLAY_TYPE", payload: displayTypeFromUrl });
}
const tenantIdFromUrl = searchParams.get("g0.tenantID") || "";
if (tenantIdFromUrl !== tenantId) {
dispatch({ type: "SET_TENANT_ID", payload: tenantIdFromUrl });
}
const queryFromUrl = getQueryArray();
if (!arrayEquals(queryFromUrl, query)) {
queryDispatch({ type: "SET_QUERY", payload: queryFromUrl });

View File

@@ -1,8 +1,7 @@
import { createContext, FC, useContext, useEffect, useMemo, useReducer } from "preact/compat";
import { createContext, FC, useContext, useMemo, useReducer } from "preact/compat";
import { Action, AppState, initialState, reducer } from "./reducer";
import { getQueryStringValue } from "../../utils/query-string";
import { Dispatch } from "react";
import { getFromStorage, removeFromStorage, saveToStorage } from "../../utils/storage";
type StateContextType = { state: AppState, dispatch: Dispatch<Action> };
@@ -24,17 +23,6 @@ export const AppStateProvider: FC = ({ children }) => {
return { state, dispatch };
}, [state, dispatch]);
useEffect(() => {
if (!state.serverUrl) return;
const enabledStorage = !!getFromStorage("SERVER_URL");
if (enabledStorage) {
saveToStorage("SERVER_URL", state.serverUrl);
} else {
removeFromStorage(["SERVER_URL"]);
}
}, [state.serverUrl]);
return <StateContext.Provider value={contextValue}>
{children}
</StateContext.Provider>;

View File

@@ -1,9 +1,9 @@
import { getDefaultServer } from "../../utils/default-server-url";
import { getQueryStringValue } from "../../utils/query-string";
import { getFromStorage, saveToStorage } from "../../utils/storage";
import { AppConfig, Theme } from "../../types";
import { isDarkTheme } from "../../utils/theme";
import { removeTrailingSlash } from "../../utils/url";
import { getTenantIdFromUrl } from "../../utils/tenants";
export interface AppState {
serverUrl: string;
@@ -16,14 +16,15 @@ export interface AppState {
export type Action =
| { type: "SET_SERVER", payload: string }
| { type: "SET_THEME", payload: Theme }
| { type: "SET_TENANT_ID", payload: string }
| { type: "SET_APP_CONFIG", payload: AppConfig }
| { type: "SET_DARK_THEME" }
const serverUrl = removeTrailingSlash(getDefaultServer());
const tenantId = getQueryStringValue("g0.tenantID", "") as string;
export const initialState: AppState = {
serverUrl,
tenantId: getTenantIdFromUrl(serverUrl),
serverUrl: removeTrailingSlash(getDefaultServer(tenantId)),
tenantId,
theme: (getFromStorage("THEME") || Theme.system) as Theme,
isDarkTheme: null,
appConfig: {}
@@ -34,9 +35,13 @@ export function reducer(state: AppState, action: Action): AppState {
case "SET_SERVER":
return {
...state,
tenantId: getTenantIdFromUrl(action.payload),
serverUrl: removeTrailingSlash(action.payload)
};
case "SET_TENANT_ID":
return {
...state,
tenantId: action.payload
};
case "SET_THEME":
saveToStorage("THEME", action.payload);
return {

View File

@@ -1,4 +1,5 @@
import { getAppModeParams } from "./app-mode";
import { replaceTenantId } from "./tenants";
import { APP_TYPE, AppType } from "../constants/appType";
import { getFromStorage } from "./storage";
@@ -6,7 +7,7 @@ export const getDefaultURL = (u: string) => {
return u.replace(/(\/(?:prometheus\/)?(?:graph|vmui)\/.*|\/#\/.*)/, "/prometheus");
};
export const getDefaultServer = (): string => {
export const getDefaultServer = (tenantId?: string): string => {
const { serverURL } = getAppModeParams();
const storageURL = getFromStorage("SERVER_URL") as string;
const anomalyURL = `${window.location.origin}${window.location.pathname.replace(/^\/vmui/, "")}`;
@@ -17,6 +18,6 @@ export const getDefaultServer = (): string => {
case AppType.vmanomaly:
return storageURL || anomalyURL;
default:
return url;
return tenantId ? replaceTenantId(url, tenantId) : url;
}
};

View File

@@ -1,105 +1,48 @@
const STORAGE_PREFIX = "VMUI:" as const;
export const ALL_STORAGE_KEYS = [
"AUTOCOMPLETE",
"NO_CACHE",
"QUERY_TRACING",
"SERIES_LIMITS",
"LEGEND_AUTO_COLLAPSE",
"TABLE_COMPACT",
"TIMEZONE",
"DISABLED_DEFAULT_TIMEZONE",
"THEME",
"EXPLORE_METRICS_TIPS",
"METRICS_QUERY_HISTORY",
"SERVER_URL",
"POINTS_SHOW_ALL",
] as const;
export type StorageKeys = (typeof ALL_STORAGE_KEYS)[number];
type PrefixedStorageKeys = `${typeof STORAGE_PREFIX}${StorageKeys}`;
const toPrefixedKey = (key: StorageKeys): PrefixedStorageKeys => {
return `${STORAGE_PREFIX}${key}`;
};
type StorageValue = string | boolean | Record<string, unknown>;
export const saveToStorage = (key: StorageKeys, value: StorageValue, withPrefix = true): void => {
try {
const storageKey = withPrefix ? toPrefixedKey(key) : key;
if (value) {
// keeping object in storage so that keeping the string is not different from keeping
window.localStorage.setItem(storageKey, JSON.stringify({ value }));
} else {
window.localStorage.removeItem(storageKey);
}
window.dispatchEvent(new Event("storage"));
} catch (e) {
console.error(e);
}
};
export const getFromStorage = (key: StorageKeys, withPrefix = true): undefined | StorageValue => {
const storageKey = withPrefix ? toPrefixedKey(key) : key;
const valueObj = window.localStorage.getItem(storageKey);
if (valueObj === null) return undefined;
try {
return JSON.parse(valueObj)?.value; // see comment in "saveToStorage"
} catch (e) {
return valueObj; // fallback for corrupted json
}
};
export const removeFromStorage = (keys: StorageKeys[], withPrefix = true): void => {
const storageKeys = withPrefix ? keys.map(toPrefixedKey) : keys;
storageKeys.forEach(k => window.localStorage.removeItem(k));
};
/**
* Migrates legacy (unprefixed) localStorage keys to the new prefixed format (`${STORAGE_PREFIX}*`).
* Keeps the prefixed value if it already exists, then removes the legacy key.
*/
* Do not use this type in local storage type
* @deprecated
* */
type DeprecatedStorageKeys = "QUERY_HISTORY" | "QUERY_FAVORITES";
type StorageMigrationResult = {
migrated: StorageKeys[];
removed: StorageKeys[];
skipped: StorageKeys[];
};
export type StorageKeys = "AUTOCOMPLETE"
| "NO_CACHE"
| "QUERY_TRACING"
| "SERIES_LIMITS"
| "LEGEND_AUTO_COLLAPSE"
| "TABLE_COMPACT"
| "TIMEZONE"
| "DISABLED_DEFAULT_TIMEZONE"
| "THEME"
| "EXPLORE_METRICS_TIPS"
| "METRICS_QUERY_HISTORY"
| "SERVER_URL"
| "RAW_JSON_LIVE_VIEW"
| "POINTS_SHOW_ALL"
| DeprecatedStorageKeys;
export const migrateStorageToPrefixedKeys = (): StorageMigrationResult => {
const res: StorageMigrationResult = {
migrated: [],
removed: [],
skipped: [],
};
for (const key of ALL_STORAGE_KEYS) {
const legacyKey = key as StorageKeys; // unprefixed
const legacyValue = getFromStorage(legacyKey, false);
const prefixedValue = getFromStorage(legacyKey, true);
if (legacyValue === undefined) {
res.skipped.push(legacyKey);
continue;
}
// prefixed exists -> keep it, just remove legacy
if (prefixedValue !== undefined) {
removeFromStorage([legacyKey], false);
res.removed.push(legacyKey);
continue;
}
// prefixed missing -> copy legacy -> prefixed, then remove legacy
saveToStorage(legacyKey, legacyValue, true);
removeFromStorage([legacyKey], false);
res.migrated.push(legacyKey);
export const saveToStorage = (key: StorageKeys, value: string | boolean | Record<string, unknown>): void => {
if (value) {
// keeping object in storage so that keeping the string is not different from keeping
window.localStorage.setItem(key, JSON.stringify({ value }));
} else {
removeFromStorage([key]);
}
return res;
window.dispatchEvent(new Event("storage"));
};
// TODO: make this aware of data type that is stored
export const getFromStorage = (key: StorageKeys): undefined | boolean | string | Record<string, unknown> => {
const valueObj = window.localStorage.getItem(key);
if (valueObj === null) {
return undefined;
} else {
try {
return JSON.parse(valueObj)?.value; // see comment in "saveToStorage"
} catch (e) {
return valueObj; // fallback for corrupted json
}
}
};
export const removeFromStorage = (keys: StorageKeys[]): void => keys.forEach(k => window.localStorage.removeItem(k));

View File

@@ -1,89 +0,0 @@
import { describe, it, expect } from "vitest";
import {
replaceTenantId,
getTenantIdFromUrl,
getUrlWithoutTenant,
} from "./tenants";
describe("tenant url helpers", () => {
describe("getTenantIdFromUrl", () => {
it("returns accountID", () => {
expect(getTenantIdFromUrl("http://vmselect:8481/select/0/vmui/")).toBe("0");
});
it("returns accountID:projectID", () => {
expect(getTenantIdFromUrl("http://vmselect:8481/select/12:7/vmui/")).toBe("12:7");
});
it("returns empty string if tenant is missing", () => {
expect(getTenantIdFromUrl("http://vmselect:8481/select/vmui/")).toBe("");
});
it("returns empty string for unrelated paths", () => {
expect(getTenantIdFromUrl("http://vmselect:8481/foo/bar")).toBe("");
});
it("returns accountID when url ends right after tenant", () => {
expect(getTenantIdFromUrl("http://vmselect:8481/select/0")).toBe("0");
});
});
describe("replaceTenantId", () => {
it("replaces accountID with another accountID", () => {
expect(
replaceTenantId("http://vmselect:8481/select/0/vmui/", "2")
).toBe("http://vmselect:8481/select/2/vmui/");
});
it("replaces accountID with accountID:projectID", () => {
expect(
replaceTenantId("http://vmselect:8481/select/0/prometheus/", "1:9")
).toBe("http://vmselect:8481/select/1:9/prometheus/");
});
it("keeps the rest of the path intact", () => {
expect(
replaceTenantId("http://vmselect:8481/select/3:4/prometheus/api/v1/query", "7")
).toBe("http://vmselect:8481/select/7/prometheus/api/v1/query");
});
it("does not change url if it doesn't match expected pattern", () => {
expect(
replaceTenantId("http://vmselect:8481/foo/bar", "2")
).toBe("http://vmselect:8481/foo/bar");
});
});
describe("getUrlWithoutTenant", () => {
it("removes /select/<tenant>/... and returns base url", () => {
expect(
getUrlWithoutTenant("http://vmselect:8481/select/0/vmui/")
).toBe("http://vmselect:8481");
});
it("removes /select/<tenant>/... for accountID:projectID and returns base url", () => {
expect(
getUrlWithoutTenant("http://vmselect:8481/select/5:6/prometheus/")
).toBe("http://vmselect:8481");
});
it("works with deep paths and returns base url", () => {
expect(
getUrlWithoutTenant("http://vmselect:8481/select/1:2/prometheus/api/v1/query")
).toBe("http://vmselect:8481");
});
it("does not change url if it doesn't match expected pattern", () => {
expect(
getUrlWithoutTenant("http://vmselect:8481/foo/bar")
).toBe("http://vmselect:8481/foo/bar");
});
it("removes url ending right after tenant", () => {
expect(
getUrlWithoutTenant("http://vmselect:8481/select/0")
).toBe("http://vmselect:8481");
});
});
});

View File

@@ -1,21 +1,13 @@
const TENANT_REGEXP = /(\/select\/)(\d+(?::\d+)?)(\/.*)?$/;
const regexp = /(\/select\/)([^/])(\/)(.+)/;
export const replaceTenantId = (serverUrl: string, tenantId: string) => {
return serverUrl.replace(TENANT_REGEXP, `$1${tenantId}$3`);
return serverUrl.replace(regexp, `$1${tenantId}/$4`);
};
export const getTenantIdFromUrl = (url: string): string => {
return url.match(TENANT_REGEXP)?.[2] ?? "";
return url.match(regexp)?.[2] || "";
};
export const getUrlWithoutTenant = (url: string): string => {
return url.replace(TENANT_REGEXP, "");
};
export const updateBrowserUrlTenant = (tenantId: string) => {
const base = `${window.location.origin}${window.location.pathname}${window.location.search}`;
const nextBase = replaceTenantId(base, tenantId);
const nextUrl = `${nextBase}${window.location.hash}`;
window.history.replaceState(null, "", nextUrl);
return url.replace(regexp, "");
};

View File

@@ -31,7 +31,7 @@ const shortDurations = supportedDurations.map(d => d.short);
export const sameTs = (a: number, b: number) => {
return roundToThousandths(a) === roundToThousandths(b);
};
}
export const humanizeSeconds = (num: number): string => {
return getDurationFromMilliseconds(dayjs.duration(num, "seconds").asMilliseconds());

View File

@@ -44,7 +44,7 @@ export const getTimeSeries = (
const tStart = roundToThousandths(period.start);
const tEnd = roundToThousandths(period.end);
const baseStep = getSecondsFromDuration(stepDuration) || 0.001;
const step = Math.max(0.001, roundToThousandths(baseStep));
const step = Math.max(0.001, roundToThousandths(baseStep))
const anchor = roundToThousandths(tsAnchor ?? tStart);

View File

@@ -1,7 +1,7 @@
{
"compilerOptions": {
"target": "ESNext",
"types": ["vite/client", "vitest/globals", "node"],
"types": ["vite/client", "vitest/globals"],
"lib": [
"dom",
"dom.iterable",

View File

@@ -265,36 +265,6 @@ func TestSingleIngestionProtocols(t *testing.T) {
{Timestamp: 1707123456800, Value: 20}, // 2024-02-05T08:57:36.700Z
},
})
// zabbixconnector format
sut.ZabbixConnectorHistory(t,
[]string{
`{"host":{"host":"h1","name":"n1"},"item_tags":[], "itemid":1,"name":"zabbixconnector_series","clock":1707123456,"ns":700000000,"value":10,"type":0}`,
`{"host":{"host":"h2","name":"n2"},"item_tags":[{"tag":"foo2","value":"value1"}], "itemid":1,"name":"zabbixconnector_series2","clock":1707123456,"ns":800000000,"value":20,"type":0}`,
},
apptest.QueryOpts{})
sut.ForceFlush(t)
f(sut, &opts{
query: `{__name__=~"zabbixconnector.+"}`,
wantMetrics: []map[string]string{
{
"__name__": "zabbixconnector_series",
"host": "h1",
"hostname": "n1",
},
{
"__name__": "zabbixconnector_series2",
"host": "h2",
"hostname": "n2",
"tag_foo2": "value1",
},
},
wantSamples: []*apptest.Sample{
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456800, Value: 20}, // 2024-02-05T08:57:36.700Z
},
})
}
func TestClusterIngestionProtocols(t *testing.T) {
@@ -561,33 +531,4 @@ func TestClusterIngestionProtocols(t *testing.T) {
{Timestamp: 1707123456800, Value: 20}, // 2024-02-05T08:57:36.700Z
},
})
// zabbixconnector format
vminsert.ZabbixConnectorHistory(t,
[]string{
`{"host":{"host":"h1","name":"n1"},"item_tags":[], "itemid":1,"name":"zabbixconnector_series","clock":1707123456,"ns":700000000,"value":10,"type":0}`,
`{"host":{"host":"h2","name":"n2"},"item_tags":[{"tag":"foo2","value":"value1"}], "itemid":1,"name":"zabbixconnector_series2","clock":1707123456,"ns":800000000,"value":20,"type":0}`,
},
apptest.QueryOpts{})
vmstorage.ForceFlush(t)
f(&opts{
query: `{__name__=~"zabbixconnector.+"}`,
wantMetrics: []map[string]string{
{
"__name__": "zabbixconnector_series",
"host": "h1",
"hostname": "n1",
},
{
"__name__": "zabbixconnector_series2",
"host": "h2",
"hostname": "n2",
"tag_foo2": "value1",
},
},
wantSamples: []*apptest.Sample{
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456800, Value: 20}, // 2024-02-05T08:57:36.700Z
},
})
}

View File

@@ -255,28 +255,6 @@ func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
})
}
// ZabbixConnectorHistory is a test helper function that inserts a
// collection of records in zabbixconnector format by sending a HTTP
// POST request to /zabbixconnector/api/v1/history vmsingle endpoint.
func (app *Vminsert) ZabbixConnectorHistory(t *testing.T, records []string, opts QueryOpts) {
t.Helper()
url := fmt.Sprintf("http://%s/insert/%s/zabbixconnector/api/v1/history", app.httpListenAddr, opts.getTenant())
uv := opts.asURLValues()
uvs := uv.Encode()
if len(uvs) > 0 {
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
app.sendBlocking(t, len(records), func() {
_, statusCode := app.cli.Post(t, url, "application/json", data)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
})
}
// String returns the string representation of the vminsert app state.
func (app *Vminsert) String() string {
return fmt.Sprintf("{app: %s httpListenAddr: %q}", app.app, app.httpListenAddr)

View File

@@ -597,27 +597,8 @@ func (app *Vmsingle) APIV1StatusTSDB(t *testing.T, matchQuery string, date strin
return status
}
// ZabbixConnectorHistory is a test helper function that inserts a
// collection of records in zabbixconnector format by sending a HTTP
// POST request to /zabbixconnector/api/v1/history vmsingle endpoint.
func (app *Vmsingle) ZabbixConnectorHistory(t *testing.T, records []string, opts QueryOpts) {
t.Helper()
url := fmt.Sprintf("http://%s/zabbixconnector/api/v1/history", app.httpListenAddr)
uv := opts.asURLValues()
uvs := uv.Encode()
if len(uvs) > 0 {
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
_, statusCode := app.cli.Post(t, url, "application/json", data)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
}
// HTTPAddr returns the address at which the vminsert process is
// listening for incoming HTTP requests.
// HTTPAddr returns the address at which the vmstorage process is listening
// for http connections.
func (app *Vmsingle) HTTPAddr() string {
return app.httpListenAddr
}

View File

@@ -452,7 +452,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(increase(vm_backup_errors_total{job=~\"$job\", instance=~\"$instance\"}[$__range]))",
"expr": "sum(increase(vm_backup_errors_total{job=~\"$job\", instance=~\"$instance\"}[1h]))",
"legendFormat": "__auto",
"range": true,
"refId": "A"
@@ -605,7 +605,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(increase(vm_retention_errors_total{job=~\"$job\", instance=~\"$instance\"}[$__range]))",
"expr": "sum(increase(vm_retention_errors_total{job=~\"$job\", instance=~\"$instance\"}[1h]))",
"legendFormat": "__auto",
"range": true,
"refId": "A"

View File

@@ -8966,113 +8966,6 @@
],
"title": "Network usage: vmstorage ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the [rollup result cache](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#rollup-result-cache) miss ratio for query when cache is enabled. \nRollup cache is typically hit in two scenarios:\n1. Repeated [range queries](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#range-query) with increasing time, start and end arguments;\n2. Repeated [instant queries](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#instant-query) containing rollup functions with lookbehind window exceeding `-search.minWindowForInstantRollupOptimization`.\n\nA lower value indicates high cache utilization, suggesting that most queries are repeated from stable clients such as vmalert rules or Grafana dashboards.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"links": [],
"mappings": [],
"max": 1,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8424
},
"id": 226,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_rollup_result_cache_miss_total{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval]))\n/\nsum(rate(vm_rollup_result_cache_requests_total{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "miss",
"range": true,
"refId": "A"
}
],
"title": "Rollup result cache miss ratio ($instance)",
"type": "timeseries"
}
],
"title": "vmselect ($instance)",
@@ -11458,4 +11351,4 @@
"title": "VictoriaMetrics - cluster",
"uid": "oS7Bi_0Wz",
"version": 1
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -453,7 +453,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(increase(vm_backup_errors_total{job=~\"$job\", instance=~\"$instance\"}[$__range]))",
"expr": "sum(increase(vm_backup_errors_total{job=~\"$job\", instance=~\"$instance\"}[1h]))",
"legendFormat": "__auto",
"range": true,
"refId": "A"
@@ -606,7 +606,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(increase(vm_retention_errors_total{job=~\"$job\", instance=~\"$instance\"}[$__range]))",
"expr": "sum(increase(vm_retention_errors_total{job=~\"$job\", instance=~\"$instance\"}[1h]))",
"legendFormat": "__auto",
"range": true,
"refId": "A"

View File

@@ -8967,113 +8967,6 @@
],
"title": "Network usage: vmstorage ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the [rollup result cache](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#rollup-result-cache) miss ratio for query when cache is enabled. \nRollup cache is typically hit in two scenarios:\n1. Repeated [range queries](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#range-query) with increasing time, start and end arguments;\n2. Repeated [instant queries](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#instant-query) containing rollup functions with lookbehind window exceeding `-search.minWindowForInstantRollupOptimization`.\n\nA lower value indicates high cache utilization, suggesting that most queries are repeated from stable clients such as vmalert rules or Grafana dashboards.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"links": [],
"mappings": [],
"max": 1,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8424
},
"id": 226,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_rollup_result_cache_miss_total{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval]))\n/\nsum(rate(vm_rollup_result_cache_requests_total{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "miss",
"range": true,
"refId": "A"
}
],
"title": "Rollup result cache miss ratio ($instance)",
"type": "timeseries"
}
],
"title": "vmselect ($instance)",
@@ -11459,4 +11352,4 @@
"title": "VictoriaMetrics - cluster (VM)",
"uid": "oS7Bi_0Wz_vm",
"version": 1
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -604,13 +604,224 @@
"h": 1,
"w": 24,
"x": 0,
"y": 8
"y": 9
},
"id": 13,
"panels": [],
"title": "Overview",
"type": "row"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 10
},
"id": 11,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vmauth_user_requests_total{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}[$__rate_interval])) by(username)",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vmauth_unauthorized_user_requests_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "B"
}
],
"title": "Requests rate",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows percent utilization of per concurrent requests capacity.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "dashed"
}
},
"mappings": [],
"max": 1,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 0.9
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 10
},
"id": 14,
"options": {
"legend": {
"calcs": [
"max",
"mean"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Mean",
"sortDesc": true
},
"tooltip": {
"mode": "multi",
"sort": "desc"
}
},
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "max(\nmax_over_time(vmauth_user_concurrent_requests_current{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}[$__rate_interval])\n/ \nvmauth_user_concurrent_requests_capacity{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}\n) by(username) > 0\n",
"hide": false,
"interval": "5m",
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "User concurrent requests usage",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
@@ -629,7 +840,6 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -646,7 +856,6 @@
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -662,7 +871,7 @@
"steps": [
{
"color": "green",
"value": 0
"value": null
},
{
"color": "red",
@@ -677,7 +886,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 9
"y": 19
},
"id": 16,
"options": {
@@ -688,12 +897,10 @@
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
@@ -729,7 +936,6 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
@@ -746,7 +952,6 @@
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -762,7 +967,7 @@
"steps": [
{
"color": "green",
"value": 0
"value": null
},
{
"color": "red",
@@ -777,7 +982,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 9
"y": 19
},
"id": 10,
"options": {
@@ -788,12 +993,10 @@
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
@@ -835,336 +1038,6 @@
"title": "Concurrent limit reached",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 18
},
"id": 11,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vmauth_user_requests_total{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}[$__rate_interval])) by(username)",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vmauth_unauthorized_user_requests_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
"hide": false,
"legendFormat": "unauthorized_user",
"range": true,
"refId": "B"
}
],
"title": "User requests rate",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 18
},
"id": 37,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vmauth_user_request_errors_total{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}[$__rate_interval])) by (username) > 0",
"hide": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vmauth_unauthorized_user_request_errors_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) > 0",
"hide": false,
"legendFormat": "unauthorized_user",
"range": true,
"refId": "B"
}
],
"title": "User requests error rate",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows percent utilization of per concurrent requests capacity.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "dashed"
}
},
"mappings": [],
"max": 1,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 0.9
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 27
},
"id": 14,
"options": {
"legend": {
"calcs": [
"max",
"mean"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Mean",
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "max(\nmax_over_time(vmauth_user_concurrent_requests_current{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}[$__rate_interval])\n/ \nvmauth_user_concurrent_requests_capacity{job=~\"$job\", instance=~\"$instance\", username=~\"$user\"}\n) by(username) > 0\n",
"hide": false,
"interval": "5m",
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "User concurrent requests usage",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
@@ -1183,7 +1056,6 @@
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -1200,7 +1072,6 @@
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -1216,7 +1087,7 @@
"steps": [
{
"color": "green",
"value": 0
"value": null
},
{
"color": "red",
@@ -1231,7 +1102,7 @@
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"x": 0,
"y": 27
},
"id": 19,
@@ -1248,12 +1119,10 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": {

File diff suppressed because it is too large Load Diff

View File

@@ -3,9 +3,9 @@
DOCKER_REGISTRIES ?= docker.io quay.io
DOCKER_NAMESPACE ?= victoriametrics
ROOT_IMAGE ?= alpine:3.23.2
ROOT_IMAGE ?= alpine:3.22.2
ROOT_IMAGE_SCRATCH ?= scratch
CERTS_IMAGE := alpine:3.23.2
CERTS_IMAGE := alpine:3.22.2
GO_BUILDER_IMAGE := golang:1.25.5

View File

@@ -3,7 +3,7 @@ services:
# It scrapes targets defined in --promscrape.config
# And forward them to --remoteWrite.url
vmagent:
image: victoriametrics/vmagent:v1.133.0
image: victoriametrics/vmagent:v1.132.0
depends_on:
- "vmauth"
ports:
@@ -37,14 +37,14 @@ services:
# vmstorage shards. Each shard receives 1/N of all metrics sent to vminserts,
# where N is number of vmstorages (2 in this case).
vmstorage-1:
image: victoriametrics/vmstorage:v1.133.0-cluster
image: victoriametrics/vmstorage:v1.132.0-cluster
volumes:
- strgdata-1:/storage
command:
- "--storageDataPath=/storage"
restart: always
vmstorage-2:
image: victoriametrics/vmstorage:v1.133.0-cluster
image: victoriametrics/vmstorage:v1.132.0-cluster
volumes:
- strgdata-2:/storage
command:
@@ -54,7 +54,7 @@ services:
# vminsert is ingestion frontend. It receives metrics pushed by vmagent,
# pre-process them and distributes across configured vmstorage shards.
vminsert-1:
image: victoriametrics/vminsert:v1.133.0-cluster
image: victoriametrics/vminsert:v1.132.0-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@@ -63,7 +63,7 @@ services:
- "--storageNode=vmstorage-2:8400"
restart: always
vminsert-2:
image: victoriametrics/vminsert:v1.133.0-cluster
image: victoriametrics/vminsert:v1.132.0-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@@ -75,7 +75,7 @@ services:
# vmselect is a query fronted. It serves read queries in MetricsQL or PromQL.
# vmselect collects results from configured `--storageNode` shards.
vmselect-1:
image: victoriametrics/vmselect:v1.133.0-cluster
image: victoriametrics/vmselect:v1.132.0-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@@ -85,7 +85,7 @@ services:
- "--vmalert.proxyURL=http://vmalert:8880"
restart: always
vmselect-2:
image: victoriametrics/vmselect:v1.133.0-cluster
image: victoriametrics/vmselect:v1.132.0-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@@ -100,7 +100,7 @@ services:
# read requests from Grafana, vmui, vmalert among vmselects.
# It can be used as an authentication proxy.
vmauth:
image: victoriametrics/vmauth:v1.133.0
image: victoriametrics/vmauth:v1.132.0
depends_on:
- "vmselect-1"
- "vmselect-2"
@@ -114,7 +114,7 @@ services:
# vmalert executes alerting and recording rules
vmalert:
image: victoriametrics/vmalert:v1.133.0
image: victoriametrics/vmalert:v1.132.0
depends_on:
- "vmauth"
ports:

View File

@@ -3,7 +3,7 @@ services:
# It scrapes targets defined in --promscrape.config
# And forward them to --remoteWrite.url
vmagent:
image: victoriametrics/vmagent:v1.133.0
image: victoriametrics/vmagent:v1.132.0
depends_on:
- "victoriametrics"
ports:
@@ -18,7 +18,7 @@ services:
# VictoriaMetrics instance, a single process responsible for
# storing metrics and serve read requests.
victoriametrics:
image: victoriametrics/victoria-metrics:v1.133.0
image: victoriametrics/victoria-metrics:v1.132.0
ports:
- 8428:8428
- 8089:8089
@@ -54,7 +54,7 @@ services:
# vmalert executes alerting and recording rules
vmalert:
image: victoriametrics/vmalert:v1.133.0
image: victoriametrics/vmalert:v1.132.0
depends_on:
- "victoriametrics"
- "alertmanager"

View File

@@ -1,6 +1,6 @@
services:
vmagent:
image: victoriametrics/vmagent:v1.133.0
image: victoriametrics/vmagent:v1.132.0
depends_on:
- "victoriametrics"
ports:
@@ -14,7 +14,7 @@ services:
restart: always
victoriametrics:
image: victoriametrics/victoria-metrics:v1.133.0
image: victoriametrics/victoria-metrics:v1.132.0
ports:
- 8428:8428
volumes:
@@ -40,7 +40,7 @@ services:
restart: always
vmalert:
image: victoriametrics/vmalert:v1.133.0
image: victoriametrics/vmalert:v1.132.0
depends_on:
- "victoriametrics"
ports:
@@ -59,7 +59,7 @@ services:
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr": },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
restart: always
vmanomaly:
image: victoriametrics/vmanomaly:v1.28.4
image: victoriametrics/vmanomaly:v1.28.2
depends_on:
- "victoriametrics"
ports:

View File

@@ -67,181 +67,6 @@ docs-images-to-webp: docs-image
-regex ".*\.\(png\|jpg\|jpeg\)" \
-exec sh -c 'cwebp -preset drawing -m 6 -o $$(echo {} | cut -f-1 -d.).webp {} && rm -rf {}' {} \;
docs-update-vmsingle-flags:
(cd /tmp/vm-enterprise-single-node && make victoria-metrics)
(cd /tmp/vm-opensource-single-node && make victoria-metrics)
(cd /tmp/vm-enterprise-single-node && ./bin/victoria-metrics -help 2>&1) > /tmp/vm-enterprise-single-node/victoria_metrics_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-single-node && ./bin/victoria-metrics -help 2>&1) > /tmp/vm-opensource-single-node/victoria_metrics_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/victoria_metrics_common_flags.md
cat /tmp/vm-opensource-single-node/victoria_metrics_common_flags_tmp.md >> docs/victoriametrics/victoria_metrics_common_flags.md
printf -- '```\n' >> docs/victoriametrics/victoria_metrics_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/victoria_metrics_enterprise_flags.md
diff /tmp/vm-enterprise-single-node/victoria_metrics_enterprise_flags_tmp.md /tmp/vm-opensource-single-node/victoria_metrics_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/victoria_metrics_enterprise_flags.md
printf -- '```\n' >> docs/victoriametrics/victoria_metrics_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/victoria_metrics_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/victoria_metrics_enterprise_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/victoria_metrics_common_flags.md
sed -i '/The maximum number of concurrent search requests\./ s/(default [0-9]\+)/(default vmselect.getDefaultMaxConcurrentRequests())/' docs/victoriametrics/victoria_metrics_common_flags.md
sed -i '/The maximum number of CPU cores a single query can use\./ s/(default [0-9]\+)/(default netstorage.defaultMaxWorkersPerQuery())/' docs/victoriametrics/victoria_metrics_common_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/victoria_metrics_common_flags.md
docs-update-vmauth-flags:
# ---- vmauth
(cd /tmp/vm-enterprise-single-node && make vmauth)
(cd /tmp/vm-opensource-single-node && make vmauth)
(cd /tmp/vm-enterprise-single-node && ./bin/vmauth -help 2>&1) > /tmp/vm-enterprise-single-node/vmauth_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-single-node && ./bin/vmauth -help 2>&1) > /tmp/vm-opensource-single-node/vmauth_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmauth_common_flags.md
cat /tmp/vm-opensource-single-node/vmauth_common_flags_tmp.md >> docs/victoriametrics/vmauth_common_flags.md
printf -- '```\n' >> docs/victoriametrics/vmauth_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmauth_enterprise_flags.md
diff /tmp/vm-enterprise-single-node/vmauth_enterprise_flags_tmp.md /tmp/vm-opensource-single-node/vmauth_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/vmauth_enterprise_flags.md
printf -- '```' >> docs/victoriametrics/vmauth_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmauth_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/vmauth_enterprise_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmauth_common_flags.md
docs-update-vmagent-flags:
(cd /tmp/vm-enterprise-single-node && make vmagent)
(cd /tmp/vm-opensource-single-node && make vmagent)
(cd /tmp/vm-enterprise-single-node && ./bin/vmagent -help 2>&1) > /tmp/vm-enterprise-single-node/vmagent_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-single-node && ./bin/vmagent -help 2>&1) > /tmp/vm-opensource-single-node/vmagent_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmagent_common_flags.md
cat /tmp/vm-opensource-single-node/vmagent_common_flags_tmp.md >> docs/victoriametrics/vmagent_common_flags.md
printf -- '```\n' >> docs/victoriametrics/vmagent_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmagent_enterprise_flags.md
diff /tmp/vm-enterprise-single-node/vmagent_enterprise_flags_tmp.md /tmp/vm-opensource-single-node/vmagent_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/vmagent_enterprise_flags.md
printf -- '```\n' >> docs/victoriametrics/vmagent_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmagent_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/vmagent_enterprise_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmagent_common_flags.md
sed -i '/The number of concurrent queues to each -remoteWrite.url./ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmagent_common_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmagent_common_flags.md
docs-update-vmalert-flags:
(cd /tmp/vm-enterprise-single-node && make vmalert)
(cd /tmp/vm-opensource-single-node && make vmalert)
(cd /tmp/vm-enterprise-single-node && ./bin/vmalert -help 2>&1) > /tmp/vm-enterprise-single-node/vmalert_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-single-node && ./bin/vmalert -help 2>&1) > /tmp/vm-opensource-single-node/vmalert_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmalert_common_flags.md
cat /tmp/vm-opensource-single-node/vmalert_common_flags_tmp.md >> docs/victoriametrics/vmalert_common_flags.md
printf -- '```\n' >> docs/victoriametrics/vmalert_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmalert_enterprise_flags.md
diff /tmp/vm-enterprise-single-node/vmalert_enterprise_flags_tmp.md /tmp/vm-opensource-single-node/vmalert_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/vmalert_enterprise_flags.md
printf -- '```' >> docs/victoriametrics/vmalert_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmalert_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/vmalert_enterprise_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/Defines number of writers for concurrent writing into remote write endpoint./ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmalert_common_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmalert_common_flags.md
docs-update-vmselect-flags:
(cd /tmp/vm-enterprise-cluster && make vmselect)
(cd /tmp/vm-opensource-cluster && make vmselect)
(cd /tmp/vm-enterprise-cluster && ./bin/vmselect -help 2>&1) > /tmp/vm-enterprise-cluster/vmselect_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-cluster && ./bin/vmselect -help 2>&1) > /tmp/vm-opensource-cluster/vmselect_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmselect_common_flags.md
cat /tmp/vm-opensource-cluster/vmselect_common_flags_tmp.md >> docs/victoriametrics/vmselect_common_flags.md
printf -- '```\n' >> docs/victoriametrics/vmselect_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmselect_enterprise_flags.md
diff /tmp/vm-enterprise-cluster/vmselect_enterprise_flags_tmp.md /tmp/vm-opensource-cluster/vmselect_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/vmselect_enterprise_flags.md
printf -- '```' >> docs/victoriametrics/vmselect_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmselect_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/vmselect_enterprise_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent search requests\./ s/(default [0-9]\+)/(default vmselect.getDefaultMaxConcurrentRequests())/' docs/victoriametrics/vmselect_common_flags.md
sed -i '/The maximum number of CPU cores a single query can use\./ s/(default [0-9]\+)/(default netstorage.defaultMaxWorkersPerQuery())/' docs/victoriametrics/vmselect_common_flags.md
sed -i '/The maximum number of concurrent vmselect requests the server can process at -clusternativeListenAddr/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmselect_common_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmselect_common_flags.md
docs-update-vminsert-flags:
(cd /tmp/vm-enterprise-cluster && make vminsert)
(cd /tmp/vm-opensource-cluster && make vminsert)
(cd /tmp/vm-enterprise-cluster && ./bin/vminsert -help 2>&1) > /tmp/vm-enterprise-cluster/vminsert_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-cluster && ./bin/vminsert -help 2>&1) > /tmp/vm-opensource-cluster/vminsert_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vminsert_common_flags.md
cat /tmp/vm-opensource-cluster/vminsert_common_flags_tmp.md >> docs/victoriametrics/vminsert_common_flags.md
printf -- '```\n' >> docs/victoriametrics/vminsert_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vminsert_enterprise_flags.md
diff /tmp/vm-enterprise-cluster/vminsert_enterprise_flags_tmp.md /tmp/vm-opensource-cluster/vminsert_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/vminsert_enterprise_flags.md
printf -- '```' >> docs/victoriametrics/vminsert_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vminsert_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/vminsert_enterprise_flags.md
# uncomment and adjust if you need to remove some flags from the documentation.
# should be used as a temporary workaround only.
#awk -i inplace '\
# /^ -promscrape./ {skip=1; next}\
# skip && /^ / {next}\
# skip {skip=0}\
# {print}\
# ' docs/victoriametrics/vminsert_common_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vminsert_common_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vminsert_common_flags.md
docs-update-vmstorage-flags:
(cd /tmp/vm-enterprise-cluster && make vmstorage)
(cd /tmp/vm-opensource-cluster && make vmstorage)
(cd /tmp/vm-enterprise-cluster && ./bin/vmstorage -help 2>&1) > /tmp/vm-enterprise-cluster/vmstorage_enterprise_flags_tmp.md
(cd /tmp/vm-opensource-cluster && ./bin/vmstorage -help 2>&1) > /tmp/vm-opensource-cluster/vmstorage_common_flags_tmp.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmstorage_common_flags.md
cat /tmp/vm-opensource-cluster/vmstorage_common_flags_tmp.md >> docs/victoriametrics/vmstorage_common_flags.md
printf -- '```\n' >> docs/victoriametrics/vmstorage_common_flags.md
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmstorage_enterprise_flags.md
diff /tmp/vm-enterprise-cluster/vmstorage_enterprise_flags_tmp.md /tmp/vm-opensource-cluster/vmstorage_common_flags_tmp.md |grep '^<' | sed 's/^< //' >> docs/victoriametrics/vmstorage_enterprise_flags.md
printf -- '```' >> docs/victoriametrics/vmstorage_enterprise_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmstorage_common_flags.md
sed -i 's/\t/ /g' docs/victoriametrics/vmstorage_enterprise_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmstorage_common_flags.md
sed -i '/The maximum number of concurrent vmselect requests the vmstorage can process at./ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmstorage_common_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmstorage_common_flags.md
# docs-update-flags updates flags in the documentation using the actual binaries compiled
# from the latest enterprise-single-node and enterprise-cluster branches (hardcoded for now).
# The command also normalizes the output a bit.
@@ -261,7 +86,6 @@ docs-update-flags:
# Add tools to PATH see how in `brew info gnu-sed` and `brew info gawk
git fetch enterprise
git fetch opensource
rm -rf /tmp/vm-enterprise-cluster
git worktree remove /tmp/vm-enterprise-cluster || true
@@ -271,19 +95,115 @@ docs-update-flags:
git worktree remove /tmp/vm-enterprise-single-node || true
git worktree add /tmp/vm-enterprise-single-node enterprise/enterprise-single-node
# ---- victoria-metrics
echo "$$FLAGS_HEADER" > docs/victoriametrics/victoria_metrics_flags.md
(cd /tmp/vm-enterprise-single-node && make victoria-metrics)
(cd /tmp/vm-enterprise-single-node && ./bin/victoria-metrics -help 2>&1) >> docs/victoriametrics/victoria_metrics_flags.md
printf -- '```' >> docs/victoriametrics/victoria_metrics_flags.md
rm -rf /tmp/vm-opensource-cluster
git worktree remove /tmp/vm-opensource-cluster || true
git worktree add /tmp/vm-opensource-cluster opensource/cluster
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/victoria_metrics_flags.md
rm -rf /tmp/vm-opensource-single-node
git worktree remove /tmp/vm-opensource-single-node || true
git worktree add /tmp/vm-opensource-single-node opensource/master
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/victoria_metrics_flags.md
sed -i '/The maximum number of concurrent search requests\./ s/(default [0-9]\+)/(default vmselect.getDefaultMaxConcurrentRequests())/' docs/victoriametrics/victoria_metrics_flags.md
sed -i '/The maximum number of CPU cores a single query can use\./ s/(default [0-9]\+)/(default netstorage.defaultMaxWorkersPerQuery())/' docs/victoriametrics/victoria_metrics_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/victoria_metrics_flags.md
make docs-update-vmsingle-flags
make docs-update-vmalert-flags
make docs-update-vmauth-flags
make docs-update-vmagent-flags
make docs-update-vmselect-flags
make docs-update-vminsert-flags
make docs-update-vmstorage-flags
# ---- vmagent
(cd /tmp/vm-enterprise-single-node && make vmagent)
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmagent_flags.md
(cd /tmp/vm-enterprise-single-node && ./bin/vmagent -help 2>&1) >> docs/victoriametrics/vmagent_flags.md
echo '```' >> docs/victoriametrics/vmagent_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmagent_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmagent_flags.md
sed -i '/The number of concurrent queues to each -remoteWrite.url./ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmagent_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmagent_flags.md
# ---- vmalert
(cd /tmp/vm-enterprise-single-node && make vmalert)
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmalert_flags.md
(cd /tmp/vm-enterprise-single-node && ./bin/vmalert -help 2>&1) >> docs/victoriametrics/vmalert_flags.md
echo '```' >> docs/victoriametrics/vmalert_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmalert_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/Defines number of writers for concurrent writing into remote write endpoint./ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmalert_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmalert_flags.md
# ---- vminsert
(cd /tmp/vm-enterprise-cluster && make vminsert)
echo "$$FLAGS_HEADER" > docs/victoriametrics/vminsert_flags.md
(cd /tmp/vm-enterprise-cluster && ./bin/vminsert -help 2>&1) >> docs/victoriametrics/vminsert_flags.md
echo '```' >> docs/victoriametrics/vminsert_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vminsert_flags.md
# uncomment and adjust if you need to remove some flags from the documentation.
# should be used as a temporary workaround only.
#awk -i inplace '\
# /^ -promscrape./ {skip=1; next}\
# skip && /^ / {next}\
# skip {skip=0}\
# {print}\
# ' docs/victoriametrics/vminsert_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vminsert_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vminsert_flags.md
# ---- vmselect
(cd /tmp/vm-enterprise-cluster && make vmselect)
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmselect_flags.md
(cd /tmp/vm-enterprise-cluster && ./bin/vmselect -help 2>&1) >> docs/victoriametrics/vmselect_flags.md
echo '```' >> docs/victoriametrics/vmselect_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmselect_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent search requests\./ s/(default [0-9]\+)/(default vmselect.getDefaultMaxConcurrentRequests())/' docs/victoriametrics/vmselect_flags.md
sed -i '/The maximum number of CPU cores a single query can use\./ s/(default [0-9]\+)/(default netstorage.defaultMaxWorkersPerQuery())/' docs/victoriametrics/vmselect_flags.md
sed -i '/The maximum number of concurrent vmselect requests the server can process at -clusternativeListenAddr/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmselect_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmselect_flags.md
# ---- vmstorage
(cd /tmp/vm-enterprise-cluster && make vmstorage)
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmstorage_flags.md
(cd /tmp/vm-enterprise-cluster && ./bin/vmstorage -help 2>&1) >> docs/victoriametrics/vmstorage_flags.md
echo '```' >> docs/victoriametrics/vmstorage_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmstorage_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent insert requests/ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmstorage_flags.md
sed -i '/The maximum number of concurrent vmselect requests the vmstorage can process at./ s/(default [0-9]\+)/(default 2*cgroup.AvailableCPUs())/' docs/victoriametrics/vmstorage_flags.md
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmstorage_flags.md
# ---- vmauth
(cd /tmp/vm-enterprise-cluster && make vmauth)
echo "$$FLAGS_HEADER" > docs/victoriametrics/vmauth_flags.md
(cd /tmp/vm-enterprise-cluster && ./bin/vmauth -help 2>&1) >> docs/victoriametrics/vmauth_flags.md
echo '```' >> docs/victoriametrics/vmauth_flags.md
# replace tabs in output with one space
sed -i 's/\t/ /g' docs/victoriametrics/vmauth_flags.md
# adjust flags with dynamic default values
# remove after https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9680 implemented
sed -i '/The maximum number of concurrent goroutines to work with files;/ s/(default [0-9]\+)/(default fsutil.getDefaultConcurrency())/' docs/victoriametrics/vmauth_flags.md

View File

@@ -14,20 +14,6 @@ aliases:
---
Please find the changelog for VictoriaMetrics Anomaly Detection below.
## v1.28.4
Released: 2026-01-12
- IMPROVEMENT: Migrate `MADModel` and `ZScoreModel` to their respective [online model](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models) implementations by default. The previous offline versions of these models are now deprecated and will raise warnings when used. Users are encouraged to switch to the new online versions or use the provided aliases (`mad_online`, `zscore_online`) for seamless transition. This change enhances performance and efficiency in processing streaming data without the limitations of offline models. See [online models FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#online-models) for more details.
- UI: Updated [vmanomaly UI](https://docs.victoriametrics.com/anomaly-detection/ui/) from [v1.4.0](https://docs.victoriametrics.com/anomaly-detection/ui/#v140) to [v1.4.1](https://docs.victoriametrics.com/anomaly-detection/ui/#v141), see respective [release notes](https://docs.victoriametrics.com/anomaly-detection/ui/#v141) for details.
- BUGFIX: Restored expected behavior when `fit_every` equals `infer_every` in [`PeriodicScheduler`](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler) - now full data range `fit_window` is fetched for model trainings instead of a last point from that interval.
## v1.28.3
Released: 2025-12-17
- IMPROVEMENT: Aligned service endpoints for `vmanomaly` [MCP Server](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly) integration.
## v1.28.2
Released: 2025-12-11
@@ -37,8 +23,6 @@ Released: 2025-12-11
## v1.28.1
Released: 2025-12-01
- FEATURE: Added TTL support for service artifacts (such as stored model instances). Disabled by default, it can be enabled via new `retention` section in the [settings](https://docs.victoriametrics.com/anomaly-detection/components/settings/#retention). When set, the service will periodically check and clean up model instances that have not been used for inference or refitting within the specified period of time, helping to manage resources in long-running deployments.
- UI: Updated [vmanomaly UI](https://docs.victoriametrics.com/anomaly-detection/ui/) from [v1.2.0](https://docs.victoriametrics.com/anomaly-detection/ui/#v120) to [v1.3.0](https://docs.victoriametrics.com/anomaly-detection/ui/#v130), see respective [release notes](https://docs.victoriametrics.com/anomaly-detection/ui/#v130) for details.
- IMPROVEMENT: Add optional `compression` argument block to [`ProphetModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) for a time-based downsampling of input data during model **fitting**. This feature significantly reduces memory/disk consumption and **proportionally speeds up training for high-frequency data**, still allowing to make infer calls at initial frequency.

View File

@@ -138,9 +138,6 @@ Please refer to the [state restoration section](https://docs.victoriametrics.com
For information on migrating between different versions of `vmanomaly`, please refer to the [Migration section](https://docs.victoriametrics.com/anomaly-detection/migration/) for compatibility considerations and steps for a smooth transition.
## Choosing the right model for vmanomaly
> {{% available_from "v1.28.3" anomaly %}} Try our [MCP Server](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly) to get AI-assisted recommendations on selecting the best model and its configuration for your use case. See [installation guide](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly#installation) for more details.
Selecting the best model for `vmanomaly` depends on the data's nature and the [types of anomalies](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-2/#categories-of-anomalies) to detect. For instance, [Z-score](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-z-score) is suitable for data without trends or seasonality, while more complex patterns might require models like [Prophet](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet).
Also, there is an option to auto-tune the most important (hyper)parameters of selected model class {{% available_from "v1.12.0" anomaly %}}, find [the details here](https://docs.victoriametrics.com/anomaly-detection/components/models/#autotuned).
@@ -151,19 +148,17 @@ Still not 100% sure what to use? We are [here to help](https://docs.victoriametr
## Incorporating domain knowledge
> {{% available_from "v1.28.3" anomaly %}} Try our [MCP Server](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly) to get AI-assisted recommendations on incorporating domain knowledge into your anomaly detection models. See [installation guide](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly#installation) for more details.
Anomaly detection models can significantly improve when incorporating business-specific assumptions about the data and what constitutes an anomaly. `vmanomaly` supports various [business-side configuration parameters](https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args) across all built-in models to **reduce [false positives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-positive)** and **align model behavior with business needs**, for example:
- **Setting `detection_direction`** - use [`detection_direction`](https://docs.victoriametrics.com/anomaly-detection/components/models/#detection-direction) to specify whether anomalies occur **above or below expectations**:
- **Setting `detection_direction`** — use [`detection_direction`](https://docs.victoriametrics.com/anomaly-detection/components/models/#detection-direction) to specify whether anomalies occur **above or below expectations**:
- Set to `above_expected` for metrics like error rates, where spikes indicate anomalies.
- Set to `below_expected` for metrics like customer satisfaction scores or SLAs, where drops indicate anomalies.
- **Defining a `data_range`** - configure [`data_range`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#config-parameters) for the models input query to **automatically assign anomaly scores > 1** for values (`y`) that fall outside the defined range.
- **Defining a `data_range`** — configure [`data_range`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#config-parameters) for the models input query to **automatically assign anomaly scores > 1** for values (`y`) that fall outside the defined range.
- **Filtering minor fluctuations with `min_dev_from_expected`** use [`min_dev_from_expected`](https://docs.victoriametrics.com/anomaly-detection/components/models/#minimal-deviation-from-expected) to **ignore insignificant deviations** and prevent small fluctuations from triggering [false positives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-positive).
- **Applying `scale` for asymmetric confidence adjustments** - use [`scale`](https://docs.victoriametrics.com/anomaly-detection/components/models/#scale) to adjust confidence intervals **differently for spikes and drops**, ensuring more appropriate anomaly detection.
- **Applying `scale` for asymmetric confidence adjustments** — use [`scale`](https://docs.victoriametrics.com/anomaly-detection/components/models/#scale) to adjust confidence intervals **differently for spikes and drops**, ensuring more appropriate anomaly detection.
**Example:**
@@ -212,32 +207,22 @@ While `vmanomaly` detects anomalies and produces scores, it *does not directly g
<img src="https://docs.victoriametrics.com/anomaly-detection/guides/guide-vmanomaly-vmalert/guide-vmanomaly-vmalert_overview.webp" alt="node_exporter_example_diagram" style="width:60%"/>
Once anomaly scores are written back to VictoriaMetrics, you can use [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions in `vmalert` to define alerting rules based on these scores. Reasonable defaults are based around default threshold of `anomaly_score > 1`:
Once anomaly scores are written back to VictoriaMetrics, you can use [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions subset in `vmalert` to define alerting rules based on these scores. Reasonable defaults are `anomaly_score > 1`:
```yaml
groups:
- name: VMAnomalyAlerts
interval: 60s
- name: vmanomaly_alerts
rules:
- alert: HighAnomalyScore
expr: min(anomaly_score) without (model_alias, scheduler_alias) >= 1
for: 5m # adjust to your needs based on data frequency and alerting policies
expr: anomaly_score > 1 # or similar expressions, like `min(anomaly_score{...}) by (...) > 1`
for: 5m
labels:
severity: warning
query_alias: explore
model_alias: default
scheduler_alias: periodic
preset: ui
annotations:
summary: High anomaly score detected.
description: Anomaly score exceeded threshold ({{ $value }}) for more than
{{ $for }} for query {{ $labels.for }}.
summary: "Anomaly score > 1 for {{ $labels.for }} query"
description: "Anomaly score is {{ $value }} for query {{ $labels.for }}. Value: {{ $value }}."
```
> {{% available_from "v1.27.0" anomaly %}} You can also use the [vmanomaly UI](https://docs.victoriametrics.com/anomaly-detection/ui/) to generate alerting rules automatically based on your model configurations and selected thresholds.
> {{% available_from "v1.28.3" anomaly %}} Check out our [MCP Server](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly) to get AI-assisted recommendations on setting up alerting rules based on produced anomaly scores. See [installation guide](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly#installation) for more details.
## Preventing alert fatigue
Produced anomaly scores are designed in such a way that values from 0.0 to 1.0 indicate non-anomalous data, while a value greater than 1.0 is generally classified as an anomaly. However, there are no perfect models for anomaly detection, which is why reasonable default expressions like `anomaly_score > 1` may not work 100% of the time. Anomaly scores produced by `vmanomaly` are written back as metrics to VictoriaMetrics, where tools like [`vmalert`](https://docs.victoriametrics.com/victoriametrics/vmalert/) can use [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions to fine-tune alerting thresholds and conditions, balancing between avoiding [false negatives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-negative) and reducing [false positives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-positive).
@@ -419,7 +404,7 @@ services:
# ...
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.28.4
image: victoriametrics/vmanomaly:v1.28.2
# ...
restart: always
volumes:
@@ -456,7 +441,7 @@ With the introduction of [online models](https://docs.victoriametrics.com/anomal
- **Optimized resource utilization**: By spreading the computational load over time and reducing peak demands, online models make more efficient use of resources and induce less data transfer from VictoriaMetrics TSDB, improving overall system performance.
- **Faster convergence**: Online models can adapt {{% available_from "v1.23.0" anomaly %}} to changes in data patterns more quickly, which is particularly beneficial in dynamic environments where data characteristics may shift frequently. See `decay` argument description [here](https://docs.victoriametrics.com/anomaly-detection/components/models/#decay).
> {{% available_from "v1.24.0" anomaly %}} Online models are best used in conjunction with [stateful mode](https://docs.victoriametrics.com/anomaly-detection/components/settings/#state-restoration) to preserve the model state across service restarts. This allows the model to continue adapting to new data without losing previously learned patterns, thus avoiding the need for a full `fit` stage to start working again. {{% available_from "v1.28.1" anomaly %}} Additionally, setting [retention policies](https://docs.victoriametrics.com/anomaly-detection/components/settings/#retention) helps manage disk space or RAM used by periodical cleanup of old model instances.
> {{% available_from "v1.24.0" anomaly %}} Online models are best used in conjunction with [stateful mode](https://docs.victoriametrics.com/anomaly-detection/components/settings/#state-restoration) to preserve the model state across service restarts. This allows the model to continue adapting to new data without losing previously learned patterns, thus avoiding the need for a full `fit` stage to start working again.
Here's an example of how we can switch from (offline) [Z-score model](https://docs.victoriametrics.com/anomaly-detection/components/models/#z-score) to [Online Z-score model](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-z-score):
@@ -481,9 +466,6 @@ to something like
```yaml
settings:
restore_state: True # to restore model state from previous runs if restarted, available since v1.24.0
retention: # to cleanup old model instances, available since v1.28.1
ttl: '1d' # if model instances are not used in infer calls for more than 1 day, they will be marked for deletion
check_interval: '1h' # how often to check for outdated model instances and delete them
schedulers:
periodic:
@@ -637,7 +619,7 @@ options:
Here's an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
```sh
docker pull victoriametrics/vmanomaly:v1.28.4 && docker image tag victoriametrics/vmanomaly:v1.28.4 vmanomaly
docker pull victoriametrics/vmanomaly:v1.28.2 && docker image tag victoriametrics/vmanomaly:v1.28.2 vmanomaly
```
```sh

View File

@@ -45,8 +45,8 @@ There are 2 types of compatibility to consider when migrating in stateful mode
| Group start | Group end | Compatibility | Notes |
|---------|--------- |------------|-------|
| [v1.28.4](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1284) | Latest* | Fully Compatible | Just a placeholder for new releases |
| [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1262) | [v1.28.4](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1284) | Fully Compatible | [v1.28.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1280) introduced [rolling](https://docs.victoriametrics.com/anomaly-detection/components/models/#rolling-models) model class drop in favor of [online](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models) models (`rolling_quantile` and `std` models), however, it does not impact compatibility, as artifacts were not produced by default for rolling models. Also, offline `mad` and `zscore` models are redirecting to their respective online counterparts since [v1.28.4](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1284). |
| [v1.28.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1282) | Latest* | Fully Compatible | Just a placeholder for new releases |
| [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1262) | [v1.28.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1282) | Fully Compatible | [v1.28.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1280) introduced [rolling](https://docs.victoriametrics.com/anomaly-detection/components/models/#rolling-models) model class drop in favor of [online](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models) models (`rolling_quantile` and `std` models), however, it does not impact compatibility, as artifacts were not produced by default for rolling models. |
| [v1.25.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1253) | [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1270) | Partially Compatible* | [v1.25.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1253) introduced `forecast_at` argument for base [univariate](https://docs.victoriametrics.com/anomaly-detection/components/models/#univariate-models) and `Prophet` [models](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet), however, itself remains backward-reversible from newer states like [v1.26.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1262), [v1.27.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1270). (All models except `isolation_forest_multivariate` class will be dropped) |
| [v1.25.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1251) | [v1.25.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1252) | Fully Compatible | In [v1.25.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1251) there was a change to `vmanomaly.db` metadata database format, so migrating from v1.24.0-v1.25.0 requires deletion of a state, see note above the table |
| [v1.24.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1241) | [v1.25.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1250) | Partially Compatible* | In [v1.25.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1250) there were changes to **data dump layout** and to `online_quantile` and `isolation_forest_multivariate` [model](https://docs.victoriametrics.com/anomaly-detection/components/models/) states, so to migrate from v1.24.0-v1.24.1 it is recommended to drop the state |

View File

@@ -121,7 +121,7 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
1. Pull Docker image:
```sh
docker pull victoriametrics/vmanomaly:v1.28.4
docker pull victoriametrics/vmanomaly:v1.28.2
```
2. Create the license file with your license key.
@@ -141,7 +141,7 @@ docker run -it \
-v ./license:/license \
-v ./config.yaml:/config.yaml \
-p 8490:8490 \
victoriametrics/vmanomaly:v1.28.4 \
victoriametrics/vmanomaly:v1.28.2 \
/config.yaml \
--licenseFile=/license \
--loggerLevel=INFO \
@@ -158,7 +158,7 @@ docker run -it \
-e VMANOMALY_DATA_DUMPS_DIR=/tmp/vmanomaly/data \
-e VMANOMALY_MODEL_DUMPS_DIR=/tmp/vmanomaly/models \
-p 8490:8490 \
victoriametrics/vmanomaly:v1.28.4 \
victoriametrics/vmanomaly:v1.28.2 \
/config.yaml \
--licenseFile=/license \
--loggerLevel=INFO \
@@ -171,7 +171,7 @@ services:
# ...
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.28.4
image: victoriametrics/vmanomaly:v1.28.2
# ...
restart: always
volumes:

View File

@@ -55,7 +55,6 @@ Get started with VictoriaMetrics Anomaly Detection by following our guides and i
- **Quickstart**: Learn how to quickly set up `vmanomaly` by following the [Quickstart Guide](https://docs.victoriametrics.com/anomaly-detection/quickstart/).
- **UI**: Explore anomaly detection configurations through the [vmanomaly UI](https://docs.victoriametrics.com/anomaly-detection/ui/).
- **MCP**: Allow AI to assist you in generating service and alerting configurations, answering questions, planning migration with the [MCP Server](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly). Find the setup guide how to setup and use it [here](https://github.com/VictoriaMetrics-Community/mcp-vmanomaly?tab=readme-ov-file#installation).
- **Integration**: Integrate anomaly detection into your existing observability stack. Find detailed steps [here](https://docs.victoriametrics.com/anomaly-detection/guides/guide-vmanomaly-vmalert/).
- **Anomaly Detection Presets**: Enable anomaly detection on predefined sets of metrics. Learn more [here](https://docs.victoriametrics.com/anomaly-detection/presets/).

View File

@@ -253,8 +253,6 @@ server:
port: 8490
# Limit on concurrent tasks to manage UI load (default: 2)
max_concurrent_tasks: 5
# path_prefix: /my-app # optional, available from v1.28.4
# To locate the UI at http://<vmanomaly-host>:8490/my-app/vmui/
settings:
# Number of workers for single job speed-ups (default: 1)
@@ -289,10 +287,6 @@ server:
port: 8490
# Limit on concurrent tasks to manage UI load (default: 2)
max_concurrent_tasks: 5
# override server's configured URL path prefix for all HTTP routes
# e.g. locate the UI at http://<vmanomaly-host>:8490/my-app/vmui/
# available from v1.28.4
# path_prefix: /my-app
# other production components, e.g. schedulers, models, reader, writer, etc.
```
@@ -394,16 +388,8 @@ If the **results** look good and the **model configuration should be deployed in
## Changelog
### v1.4.1
Released: 2026-01-12
vmanomaly version: [v1.28.4](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1284)
- FEATURE: Allow `path_prefix` parameter to override the server's configured URL path prefix for all HTTP routes. This is useful when the UI is served behind a reverse proxy that modifies the base path. For example, if the server is configured with `path_prefix: /my-app`, accessing the UI at `/my-app/` will work correctly even if the proxy serves it at a different base path.
### v1.4.0
Released: 2025-12-11
vmanomaly version: [v1.28.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1282)
- FEATURE: Added an option to show **consecutive anomalies** (if N points in a row exceed anomaly threshold T) in the Visualization Panel, to reduce visual clutter when many anomalies are detected in a row. The option is available as "Streaks" button in the [Model Panel](#model-panel). Respective "streaks: N" stats appears in legend for each series. Example alerting rule's `for` parameter is adjusted accordingly if streaks are used.

View File

@@ -38,9 +38,6 @@ settings:
n_workers: 4 # number of workers to run models in parallel
anomaly_score_outside_data_range: 5.0 # default anomaly score for anomalies outside expected data range
restore_state: True # restore state from previous run, if available
retention: # how long to keep stale models on disk/in memory
ttl: "1d" # time-to-live duration, if the model was not used for inference within this duration, it will be considered stale
check_every: "1h" # how often to check for stale models and remove them
# how and when to run the models is defined by schedulers
# https://docs.victoriametrics.com/anomaly-detection/components/scheduler/

View File

@@ -436,13 +436,14 @@ models:
There are **2 model types**, supported in `vmanomaly`, resulting in **4 possible combinations**:
By input data handling:
- [Univariate models](#univariate-models) - models fit/used per each individual time series, producing **individual** [output](#vmanomaly-output)
- [Multivariate models](#multivariate-models) - models fit/used on a set of time series simultaneously, producing shared [output](#vmanomaly-output)
- [Univariate models](#univariate-models)
- [Multivariate models](#multivariate-models)
By update strategy:
- [Offline models](#offline-models) - models that require **full re-fit** on a defined `fit_window` of data to update their parameters defined by `fit_every` schedule in [scheduler](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler)
- [Online (incremental) models](#online-models) {{% available_from "v1.15.0" anomaly %}} - models that support **incremental updates** of their parameters on each `infer_every` step, even on a single datapoint, without the need for a full re-fit on a `fit_window` of data. This is a recommended approach to reduce data burden from VictoriaMetrics or other data sources.
Each of these models can be of type
- [Rolling](#rolling-models) - **no longer present {{% deprecated_from "v1.28.0" anomaly %}}, being reworked into [online models](#online-models)**
- [Non-rolling](#non-rolling-models)
Moreover, {{% available_from "v1.15.0" anomaly %}}, there exist **[online (incremental) models](#online-models)** subclass for effective streaming-like data processing. Please refer to the [correspondent section](#online-models) for more details.
### Univariate Models
@@ -476,9 +477,47 @@ If during an inference, you got a **different amount of series** or some series
![vmanomaly-model-type-multivariate](model-lifecycle-multivariate.webp)
### Rolling Models
> Rolling models as a class were deprecated {{% deprecated_from "v1.28.0" anomaly %}} in favor of [online models](#online-models), which provide similar benefits with additional advantages. Respective rolling models are refactored into online models (e.g., [RollingQuantile](#rolling-quantile)). Existing configurations that use rolling models' aliases will continue to function, with fewer limitations (e.g. no constraint on `fit_every` == `infer_every`).
A rolling model is a model that, once trained, **cannot be (naturally) used to make inference on data, not seen during its fit phase**.
An instance of rolling model is **simultaneously fit and used for inference** during its `infer` method call.
As a result, such model instances are **not stored** between consecutive re-fit calls (defined by `fit_every` [arg](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler) in `PeriodicScheduler`), leading to **lower RAM** consumption.
Such models put **more pressure** on your reader's source, i.e. if your model should be fit on a large amount of data (say, 14 days with 1-minute resolution) and at the same time you have **frequent inference** (say, once per minute) on new chunks of data - that's because such models require a (fit + infer) window of data to be fit first to be used later in each inference call.
> Rolling models require `fit_every` either to be missing or explicitly set equal to `infer_every` in your [PeriodicScheduler](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler).
**Examples:** [RollingQuantile](#rolling-quantile)
![vmanomaly-model-type-rolling](model-type-rolling.webp)
### Non-Rolling Models
> Every model type is now {{% available_from "v1.28.0" anomaly %}} non-rolling. Configurations that used rolling models' aliases will continue to function, with fewer limitations (e.g. no constraint on `fit_every` == `infer_every`).
Everything that is not classified as [rolling](#rolling-models).
Produced models can be explicitly used to **infer on data, not seen during its fit phase**, thus, it **doesn't require re-fit procedure**.
Such models put **less pressure** on your reader's source, i.e. if you fit on a large amount of data (say, 14 days with 1-minute resolution) but do it occasionally (say, once per day), at the same time you have **frequent inference** (say, once per minute) on new chunks of data
> However, it's still highly recommended, to keep your model up-to-date with tendencies found in your data as it evolves in time.
Produced model instances are **stored in-memory** between consecutive re-fit calls (defined by `fit_every` [arg](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler) in `PeriodicScheduler`), leading to **higher RAM** consumption.
**Examples:** [Prophet](#prophet)
![vmanomaly-model-type-non-rolling](model-type-non-rolling.webp)
### Online Models
> Online models are best used **in combination with [stateful service](https://docs.victoriametrics.com/anomaly-detection/components/settings/#state-restoration) {{% available_from "v1.24.0" anomaly %}} to ensure that the model state is preserved if the service restarts and any aggregated model updates are not lost**. E.g. if the model was already trained on many weeks of data and is being updated on new datapoints every minute, there is no need to re-train it from scratch on the same data after each restart, as it can continue to update restored state on new datapoints. Also it is worth setting [retention policy](https://docs.victoriametrics.com/anomaly-detection/components/settings/#retention) {{% available_from "v1.28.1" anomaly %}} for such models to periodically clean up outdated artifacts, e.g. due to high churn rate of unique labelsets in input data.
> Online models are best used **in combination with [stateful service](https://docs.victoriametrics.com/anomaly-detection/components/settings/#state-restoration) {{% available_from "v1.24.0" anomaly %}} to ensure that the model state is preserved if the service restarts and any aggregated model updates are not lost**. E.g. if the model was already trained on many weeks of data and is being updated on new datapoints every minute, there is no need to re-train it from scratch on the same data after each restart, as it can continue to update restored state on new datapoints.
Online (incremental) models {{% available_from "v1.15.0" anomaly %}} allow defining a smaller frame `fit_window` and less frequent `fit` calls to reduce the data burden from VictoriaMetrics. They make incremental updates to model parameters during each `infer_every` call, even on a single datapoint.
If the model doesn't support online mode, it's called **offline** (its parameters are only updated during `fit` calls).
@@ -531,13 +570,15 @@ Built-in models support 2 groups of arguments:
**Models**:
* [AutoTuned](#autotuned) - designed to take the cognitive load off the user, allowing any of built-in models below to be re-tuned for best hyperparameters on data seen during each `fit` phase of the algorithm. Tradeoff is between increased computational time and optimized results / simpler maintenance.
* [Prophet](#prophet) - the most versatile one for production usage, especially for complex data ([trends](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend), [change points](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-2/#novelties), [multi-seasonality](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality))
* [Online Z-score](#online-z-score) - useful for initial testing and for simpler data ([de-trended](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) data without strict [seasonality](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality) and with anomalies of similar magnitude as your "normal" data)
* [MAD](#online-mad) - similarly to [Z-score](#online-z-score), is effective for **identifying outliers in relatively consistent data**. Useful for detecting sudden, stark deviations from the median, being less prone to outlier's magnitude than z-score.
* [Z-score](#z-score) - useful for initial testing and for simpler data ([de-trended](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) data without strict [seasonality](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality) and with anomalies of similar magnitude as your "normal" data)
* [Online Z-score](#online-z-score) - [online](#online-models) alternative to [Z-score](#z-score) model with exact same behavior and use cases.
* [Holt-Winters](#holt-winters) - well-suited for **data with moderate complexity**, exhibiting distinct [trends](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) and/or [single seasonal pattern](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality).
* [MAD (Median Absolute Deviation)](#mad-median-absolute-deviation) - similarly to [Z-score](#z-score), is effective for **identifying outliers in relatively consistent data** (useful for detecting sudden, stark deviations from the median).
* [Online MAD](#online-mad) - approximate [online](#online-models) alternative to [MAD model](#mad-median-absolute-deviation), appropriate for the same use cases.
* [Rolling Quantile](#rolling-quantile) - best for **data with evolving patterns**, as it adapts to changes over a rolling window.
* [Online Seasonal Quantile](#online-seasonal-quantile) - best used on **[de-trended](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) data with strong (possibly multiple) [seasonalities](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality)**. Can act as a (slightly less powerful) [online](#online-models) replacement to [`ProphetModel`](#prophet).
* [Seasonal Trend Decomposition](#seasonal-trend-decomposition) - similarly to Holt-Winters, is best for **data with pronounced [seasonal](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality) and [trend](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) components**
* [Isolation forest (Multivariate)](#isolation-forest-multivariate) - useful for **metrics data interaction** (several queries/metrics -> single anomaly score) and **efficient in detecting anomalies in high-dimensional datasets**
* [Holt-Winters](#holt-winters) - well-suited for **data with moderate complexity**, exhibiting distinct [trends](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) and/or [single seasonal pattern](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality).
* [Custom model](#custom-model-guide) - benefit from your own models and expertise to better support your **unique use case**.
@@ -723,9 +764,42 @@ models:
Resulting metrics of the model are described [here](#vmanomaly-output)
### [Z-score](https://en.wikipedia.org/wiki/Standard_score)
> `ZScoreModel` is a [univariate](#univariate-models), [offline](#offline-models) model.
Model is useful for initial testing and for simpler data ([de-trended](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) data without strict [seasonality](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality) and with anomalies of similar magnitude as your "normal" data).
*Parameters specific for vmanomaly*:
* `class` (string) - model class name `"model.zscore.ZscoreModel"` (or `zscore` with class alias support{{% available_from "v1.13.0" anomaly %}})
* `z_threshold` (float, optional) - [standard score](https://en.wikipedia.org/wiki/Standard_score) for calculation boundaries and anomaly score. Defaults to `2.5`.
*Config Example*
```yaml
models:
your_desired_alias_for_a_model:
class: "zscore" # or 'model.zscore.ZscoreModel' until v1.13.0
z_threshold: 3.5
# Common arguments for built-in model, if not set, default to
# See https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args
#
# provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
# schedulers: [all scheduler aliases defined in `scheduler` section]
# queries: [all query aliases defined in `reader.queries` section]
# detection_direction: 'both' # meaning both drops and spikes will be captured
# min_dev_from_expected: [0.0, 0.0] # meaning, no minimal threshold is applied to prevent smaller anomalies
# scale: [1.0, 1.0] # if needed, prediction intervals' width can be increased (>1) or narrowed (<1)
# clip_predictions: False # if data_range for respective `queries` is set in reader, `yhat.*` columns will be clipped
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).
### Online Z-score
> `OnlineZscoreModel` is a [univariate](#univariate-models), [online](#online-models) model.
> `OnlineZScoreModel` is a [univariate](#univariate-models), [online](#online-models) model.
Online version of existing [Z-score](#z-score) implementation with the same exact behavior and implications {{% available_from "v1.15.0" anomaly %}}.
@@ -760,6 +834,98 @@ models:
Resulting metrics of the model are described [here](#vmanomaly-output).
### [Holt-Winters](https://en.wikipedia.org/wiki/Exponential_smoothing)
> `HoltWinters` is a [univariate](#univariate-models), [offline](#offline-models) model.
Here we use Holt-Winters Exponential Smoothing implementation from `statsmodels` [library](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing). All parameters from this library can be passed to the model.
*Parameters specific for vmanomaly*:
* `class` (string) - model class name `"model.holtwinters.HoltWinters"` (or `holtwinters` with class alias support{{% available_from "v1.13.0" anomaly %}})
* `frequency` (string) - Must be set equal to sampling_period. Model needs to know expected data-points frequency (e.g. '10m'). If omitted, frequency is guessed during fitting as **the median of intervals between fitting data timestamps**. During inference, if incoming data doesn't have the same frequency, then it will be interpolated. E.g. data comes at 15 seconds resolution, and our resample_freq is '1m'. Then fitting data will be downsampled to '1m' and internal model is trained at '1m' intervals. So, during inference, prediction data would be produced at '1m' intervals, but interpolated to "15s" to match with expected output, as output data must have the same timestamps. As accepted by pandas.Timedelta (e.g. '5m').
* `seasonality` (string, optional) - As accepted by pandas.Timedelta.
* If `seasonal_periods` is not specified, it is calculated as `seasonality` / `frequency`
Used to compute "seasonal_periods" param for the model (e.g. '1D' or '1W').
* `z_threshold` (float, optional) - [standard score](https://en.wikipedia.org/wiki/Standard_score) for calculating boundaries to define anomaly score. Defaults to 2.5.
*Default model parameters*:
* If [parameter](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing#statsmodels.tsa.holtwinters.ExponentialSmoothing-parameters) `seasonal` is not specified, default value will be `add`.
* If [parameter](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing#statsmodels.tsa.holtwinters.ExponentialSmoothing-parameters) `initialization_method` is not specified, default value will be `estimated`.
* `args` (dict, optional) - Inner model args (key-value pairs). See accepted params in [model documentation](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing#statsmodels.tsa.holtwinters.ExponentialSmoothing-parameters). Defaults to empty (not provided). Example: {"seasonal": "add", "initialization_method": "estimated"}
*Config Example*
```yaml
models:
your_desired_alias_for_a_model:
class: "holtwinters" # or 'model.holtwinters.HoltWinters' until v1.13.0
seasonality: '1d'
frequency: '1h'
# Inner model args (key-value pairs) accepted by statsmodels.tsa.holtwinters.ExponentialSmoothing
args:
seasonal: 'add'
initialization_method: 'estimated'
# Common arguments for built-in model, if not set, default to
# See https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args
#
# provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
# schedulers: [all scheduler aliases defined in `scheduler` section]
# queries: [all query aliases defined in `reader.queries` section]
# detection_direction: 'both' # meaning both drops and spikes will be captured
# min_dev_from_expected: [0.0, 0.0] # meaning, no minimal threshold is applied to prevent smaller anomalies
# scale: [1.0, 1.0] # if needed, prediction intervals' width can be increased (>1) or narrowed (<1)
# clip_predictions: False # if data_range for respective `queries` is set in reader, `yhat.*` columns will be clipped
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).
### [MAD (Median Absolute Deviation)](https://en.wikipedia.org/wiki/Median_absolute_deviation)
> `MADModel` is a [univariate](#univariate-models), [offline](#offline-models) model.
The MAD model is a robust method for anomaly detection that is *less sensitive* to outliers in data compared to standard deviation-based models. It considers a point as an anomaly if the absolute deviation from the median is significantly large.
*Parameters specific for vmanomaly*:
* `class` (string) - model class name `"model.mad.MADModel"` (or `mad` with class alias support{{% available_from "v1.13.0" anomaly %}})
* `threshold` (float, optional) - The threshold multiplier for the MAD to determine anomalies. Defaults to `2.5`. Higher values will identify fewer points as anomalies.
*Config Example*
```yaml
models:
your_desired_alias_for_a_model:
class: "mad" # or 'model.mad.MADModel' until v1.13.0
threshold: 2.5
# Common arguments for built-in model, if not set, default to
# See https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args
#
# provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
# schedulers: [all scheduler aliases defined in `scheduler` section]
# queries: [all query aliases defined in `reader.queries` section]
# detection_direction: 'both' # meaning both drops and spikes will be captured
# min_dev_from_expected: [0.0, 0.0] # meaning, no minimal threshold is applied to prevent smaller anomalies
# scale: [1.0, 1.0] # if needed, prediction intervals' width can be increased (>1) or narrowed (<1)
# clip_predictions: False # if data_range for respective `queries` is set in reader, `yhat.*` columns will be clipped
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).
### Online MAD
> `OnlineMADModel` is a [univariate](#univariate-models), [online](#online-models) model.
@@ -989,60 +1155,6 @@ models:
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).
### [Holt-Winters](https://en.wikipedia.org/wiki/Exponential_smoothing)
> `HoltWinters` is a [univariate](#univariate-models), [offline](#offline-models) model.
Here we use Holt-Winters Exponential Smoothing implementation from `statsmodels` [library](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing). All parameters from this library can be passed to the model.
*Parameters specific for vmanomaly*:
* `class` (string) - model class name `"model.holtwinters.HoltWinters"` (or `holtwinters` with class alias support{{% available_from "v1.13.0" anomaly %}})
* `frequency` (string) - Must be set equal to sampling_period. Model needs to know expected data-points frequency (e.g. '10m'). If omitted, frequency is guessed during fitting as **the median of intervals between fitting data timestamps**. During inference, if incoming data doesn't have the same frequency, then it will be interpolated. E.g. data comes at 15 seconds resolution, and our resample_freq is '1m'. Then fitting data will be downsampled to '1m' and internal model is trained at '1m' intervals. So, during inference, prediction data would be produced at '1m' intervals, but interpolated to "15s" to match with expected output, as output data must have the same timestamps. As accepted by pandas.Timedelta (e.g. '5m').
* `seasonality` (string, optional) - As accepted by pandas.Timedelta.
* If `seasonal_periods` is not specified, it is calculated as `seasonality` / `frequency`
Used to compute "seasonal_periods" param for the model (e.g. '1D' or '1W').
* `z_threshold` (float, optional) - [standard score](https://en.wikipedia.org/wiki/Standard_score) for calculating boundaries to define anomaly score. Defaults to 2.5.
*Default model parameters*:
* If [parameter](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing#statsmodels.tsa.holtwinters.ExponentialSmoothing-parameters) `seasonal` is not specified, default value will be `add`.
* If [parameter](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing#statsmodels.tsa.holtwinters.ExponentialSmoothing-parameters) `initialization_method` is not specified, default value will be `estimated`.
* `args` (dict, optional) - Inner model args (key-value pairs). See accepted params in [model documentation](https://www.statsmodels.org/dev/generated/statsmodels.tsa.holtwinters.ExponentialSmoothing#statsmodels.tsa.holtwinters.ExponentialSmoothing-parameters). Defaults to empty (not provided). Example: {"seasonal": "add", "initialization_method": "estimated"}
*Config Example*
```yaml
models:
your_desired_alias_for_a_model:
class: "holtwinters" # or 'model.holtwinters.HoltWinters' until v1.13.0
seasonality: '1d'
frequency: '1h'
# Inner model args (key-value pairs) accepted by statsmodels.tsa.holtwinters.ExponentialSmoothing
args:
seasonal: 'add'
initialization_method: 'estimated'
# Common arguments for built-in model, if not set, default to
# See https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args
#
# provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
# schedulers: [all scheduler aliases defined in `scheduler` section]
# queries: [all query aliases defined in `reader.queries` section]
# detection_direction: 'both' # meaning both drops and spikes will be captured
# min_dev_from_expected: [0.0, 0.0] # meaning, no minimal threshold is applied to prevent smaller anomalies
# scale: [1.0, 1.0] # if needed, prediction intervals' width can be increased (>1) or narrowed (<1)
# clip_predictions: False # if data_range for respective `queries` is set in reader, `yhat.*` columns will be clipped
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).
@@ -1219,7 +1331,7 @@ monitoring:
Let's pull the docker image for `vmanomaly`:
```sh
docker pull victoriametrics/vmanomaly:v1.28.4
docker pull victoriametrics/vmanomaly:v1.28.2
```
Now we can run the docker container putting as volumes both config and model file:
@@ -1233,7 +1345,7 @@ docker run -it \
-v $(PWD)/license:/license \
-v $(PWD)/custom_model.py:/vmanomaly/model/custom.py \
-v $(PWD)/custom.yaml:/config.yaml \
victoriametrics/vmanomaly:v1.28.4 /config.yaml \
victoriametrics/vmanomaly:v1.28.2 /config.yaml \
--licenseFile=/license
--watch
```
@@ -1249,117 +1361,3 @@ In this particular example, 2 metrics will be produced. Also, there will be adde
{__name__="custom_anomaly_score", for="ingestion_rate", model_alias="custom_model", scheduler_alias="s1", run="test-format"},
{__name__="custom_anomaly_score", for="churn_rate", model_alias="custom_model", scheduler_alias="s1", run="test-format"}
```
## Deprecations
Here is a list of all deprecated model types and specific models with respective version info and suggestions for migration.
### Deprecated types
[Rolling models](#rolling-models) - starting from [v1.28.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1280) all rolling models are deprecated in favor of their online counterparts with respective documentation adjustments. **Now every model class is [non-rolling](#non-rolling-models)**.
#### Rolling Models
> Rolling models as a class were deprecated {{% deprecated_from "v1.28.0" anomaly %}} in favor of [online models](#online-models), which provide similar benefits with additional advantages. Respective rolling models are refactored into online models (e.g., [RollingQuantile](#rolling-quantile)). Existing configurations that use rolling models' aliases will continue to function, with less limitations (e.g. no constraint on `fit_every` == `infer_every`). **Description below is kept for older deployments and hyperlinks consistency**.
A rolling model is a model that, once trained, **cannot be (naturally) used to make inference on data, not seen during its fit phase**.
An instance of rolling model is **simultaneously fit and used for inference** during its `infer` method call.
As a result, such model instances are **not stored** between consecutive re-fit calls (defined by `fit_every` [arg](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler) in `PeriodicScheduler`), leading to **lower RAM** consumption.
Such models put **more pressure** on your reader's source, i.e. if your model should be fit on a large amount of data (say, 14 days with 1-minute resolution) and at the same time you have **frequent inference** (say, once per minute) on new chunks of data - that's because such models require a (fit + infer) window of data to be fit first to be used later in each inference call.
> Rolling models require `fit_every` to be either unset or explicitly set equal to `infer_every` in [PeriodicScheduler](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler).
**Examples:** [RollingQuantile](#rolling-quantile), **prior to [v1.28.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1280)** where it became an online model.
![vmanomaly-model-type-rolling](model-type-rolling.webp)
#### Non-Rolling Models
> The section is moved to deprecations as **Every model class is now {{% available_from "v1.28.0" anomaly %}} non-rolling**. Configurations that used rolling models' aliases will continue to function, with less limitations (e.g. no constraint on `fit_every` == `infer_every`). **Description below is kept for older deployments and hyperlinks consistency**.
Everything that is not classified as [rolling](#rolling-models).
Produced models can be explicitly used to **infer on data, not seen during its fit phase**, thus, it **doesn't require re-fit procedure**.
Such models put **less pressure** on your reader's source, i.e. if you fit on a large amount of data (say, 14 days with 1-minute resolution) but do it occasionally (say, once per day), at the same time you have **frequent inference** (say, once per minute) on new chunks of data.
> However, it's still highly recommended, to keep your model up-to-date with tendencies found in your data as it evolves in time.
Produced model instances are **stored in-memory** between consecutive re-fit calls (defined by `fit_every` [arg](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler) in `PeriodicScheduler`), leading to **higher RAM** consumption.
**Examples:** [Prophet](#prophet)
![vmanomaly-model-type-non-rolling](model-type-non-rolling.webp)
### Deprecated models
#### [Z-score](https://en.wikipedia.org/wiki/Standard_score)
> `ZScoreModel` is a [univariate](#univariate-models), [offline](#offline-models) model. {{% deprecated_from "v1.28.4" anomaly %}} Was removed in favor of its online version to improve data efficiency and reduce user confusion. **Configs that used this model (where model's class is `zscore` or `model.zscore.ZscoreModel`) will continue to work, with a warning raised while actually changed to the online version under the hood. Suggestion is to replace it with `zscore_online` or `model.online.OnlineZscoreModel` class explicitly to get [online model benefits](https://docs.victoriametrics.com/anomaly-detection/faq/#online-models) for data querying by reducing `fit_every` frequency**.
Model is useful for initial testing and for simpler data ([de-trended](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#trend) data without strict [seasonality](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#seasonality) and with anomalies of similar magnitude as your "normal" data).
*Parameters specific for vmanomaly*:
* `class` (string) - model class name `"model.zscore.ZscoreModel"` (or `zscore` with class alias support{{% available_from "v1.13.0" anomaly %}})
* `z_threshold` (float, optional) - [standard score](https://en.wikipedia.org/wiki/Standard_score) for calculating boundaries and anomaly score. Defaults to `2.5`.
*Config Example*
```yaml
models:
your_desired_alias_for_a_model:
class: "zscore" # or 'model.zscore.ZscoreModel' until v1.13.0
z_threshold: 3.5
# Common arguments for built-in model, if not set, default to
# See https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args
#
# provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
# schedulers: [all scheduler aliases defined in `scheduler` section]
# queries: [all query aliases defined in `reader.queries` section]
# detection_direction: 'both' # meaning both drops and spikes will be captured
# min_dev_from_expected: [0.0, 0.0] # meaning, no minimal threshold is applied to prevent smaller anomalies
# scale: [1.0, 1.0] # if needed, prediction intervals' width can be increased (>1) or narrowed (<1)
# clip_predictions: False # if data_range for respective `queries` is set in reader, `yhat.*` columns will be clipped
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).
#### [MAD (Median Absolute Deviation)](https://en.wikipedia.org/wiki/Median_absolute_deviation)
> `MADModel` is a [univariate](#univariate-models), [offline](#offline-models) model. {{% deprecated_from "v1.28.4" anomaly %}} Was removed in favor of its online version to improve data efficiency and reduce user confusion. **Configs that used this model (where model's class is `mad` or `model.mad.MADModel`) will continue to work, with a warning raised while actually changed to the online version under the hood. Suggestion is to replace it with `mad_online` or `model.online.OnlineMADModel` class explicitly to get [online model benefits](https://docs.victoriametrics.com/anomaly-detection/faq/#online-models) for data querying by reducing `fit_every` frequency**.
The MAD model is a robust method for anomaly detection that is *less sensitive* to outliers in data compared to standard deviation-based models. It considers a point as an anomaly if the absolute deviation from the median is significantly large.
*Parameters specific for vmanomaly*:
* `class` (string) - model class name `"model.mad.MADModel"` (or `mad` with class alias support{{% available_from "v1.13.0" anomaly %}})
* `threshold` (float, optional) - The threshold multiplier for the MAD to determine anomalies. Defaults to `2.5`. Higher values will identify fewer points as anomalies.
*Config Example*
```yaml
models:
your_desired_alias_for_a_model:
class: "mad" # or 'model.mad.MADModel' until v1.13.0
threshold: 2.5
# Common arguments for built-in model, if not set, default to
# See https://docs.victoriametrics.com/anomaly-detection/components/models/#common-args
#
# provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
# schedulers: [all scheduler aliases defined in `scheduler` section]
# queries: [all query aliases defined in `reader.queries` section]
# detection_direction: 'both' # meaning both drops and spikes will be captured
# min_dev_from_expected: [0.0, 0.0] # meaning, no minimal threshold is applied to prevent smaller anomalies
# scale: [1.0, 1.0] # if needed, prediction intervals' width can be increased (>1) or narrowed (<1)
# clip_predictions: False # if data_range for respective `queries` is set in reader, `yhat.*` columns will be clipped
# anomaly_score_outside_data_range: 1.01 # auto anomaly score (1.01) if `y` (real value) is outside of data_range, if set
```
Resulting metrics of the model are described [here](#vmanomaly-output).

View File

@@ -482,7 +482,7 @@ reader:
## VictoriaLogs reader
{{% available_from "v1.26.0" anomaly %}} `vmanomaly` adds support for reading data from [VictoriaLogs stats queries](https://docs.victoriametrics.com/victorialogs/querying/#querying-log-range-stats) endpoint with `VLogsReader`. This reader allows querying and analyzing log data stored in [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/), enabling anomaly detection on metrics generated from logs. **Querying [VictoriaTraces](https://docs.victoriametrics.com/victoriatraces/) is supported with the same reader, as the endpoints for both are equivalent.**
{{% available_from "v1.26.0" anomaly %}} `vmanomaly` adds support for reading data from [VictoriaLogs stats queries](https://docs.victoriametrics.com/victorialogs/querying/#querying-log-range-stats) endpoint with `VLogsReader`. This reader allows querying and analyzing log data stored in VictoriaLogs, enabling anomaly detection on metrics generated from logs.
Its queries should be expressed in a subset of [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/), which is similar to MetricsQL/PromQL but adapted for log data.
@@ -508,7 +508,7 @@ The supported stats functions currently include:
### Query Examples
> You can test your LogsQL queries with stats pipe functions using our [VictoriaLogs playground](https://play-vmlogs.victoriametrics.com/) or [VictoriaTraces playground](https://play-vtraces.victoriametrics.com/). Use either UI to access graphical results or the `/select/logsql/stats_query_range` endpoint to run your queries and see the raw results, e.g. as this [sample query](https://play-vmlogs.victoriametrics.com/select/logsql/stats_query_range?query=_time%3A5m%20%7C%20stats%20by%20%28_stream%29%20count%28%29%20as%20sample_row&step=1m).
> You can test your LogsQL queries with stats pipe functions using our [VictoriaLogs playground](https://play-vmlogs.victoriametrics.com/). Use either UI to access graphical results or the `/select/logsql/stats_query_range` endpoint to run your queries and see the raw results, e.g. as this [sample query](https://play-vmlogs.victoriametrics.com/select/logsql/stats_query_range?query=_time%3A5m%20%7C%20stats%20by%20%28_stream%29%20count%28%29%20as%20sample_row&step=1m).
Here are examples of simple valid LogsQL queries with stats pipe functions that can be used with `VLogsReader`.
@@ -557,7 +557,7 @@ The class name of the reader, must be `vlogs` (or `reader.vlogs.VLogsReader`).
See [per-query config example](#per-query-config-example-1) below
</td>
<td>
Dictionary of queries. Keys are query aliases, values are LogsQL queries to select data in format: `QUERY_ALIAS:<query>`, as accepted by `/select/logsql/stats_query_range?query=%s` VictoriaLogs/VictoriaTraces endpoint. The `<query>` must contain `stats` [pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe-functions). The calculated stats is converted into metrics with labels from `by(...)` clause of the `| stats by(...)` pipe. Only functions returning numeric values are supported, e.g. `count()`, `sum()`, `avg()`, `count_uniq()`, `median()`, `quantile()`, etc.
Dictionary of queries. Keys are query aliases, values are LogsQL queries to select data in format: `QUERY_ALIAS:<query>`, as accepted by `/select/logsql/stats_query_range?query=%s` VictoriaLogs endpoint. The `<query>` must contain `stats` [pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe-functions). The calculated stats is converted into metrics with labels from `by(...)` clause of the `| stats by(...)` pipe. Only functions returning numeric values are supported, e.g. `count()`, `sum()`, `avg()`, `count_uniq()`, `median()`, `quantile()`, etc.
</td>
</tr>
<tr>
@@ -569,7 +569,7 @@ Dictionary of queries. Keys are query aliases, values are LogsQL queries to sele
`https://play-vmlogs.victoriametrics.com/`
</td>
<td>
URL address of the VictoriaLogs/VictoriaTraces datasource. Must be a valid URL.
URL address of the VictoriaLogs datasource. Must be a valid URL.
</td>
</tr>
<tr>
@@ -766,7 +766,6 @@ reader:
class: 'vlogs' # or 'reader.vlogs.VLogsReader'
# don't include /select/stats_query_range part in the URL, it is added automatically
datasource_url: 'https://play-vmlogs.victoriametrics.com/' # source victorialogs
# datasource_url: 'https://play-vtraces.victoriametrics.com/' # source victoriatraces
# tenant_id: '0:0' # for cluster version only
sampling_period: '1m'
max_points_per_query: 10000

View File

@@ -144,6 +144,7 @@ monitoring:
# other monitoring settings
```
## State Restoration
> This feature is best used with config [hot-reloading](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) {{% available_from "v1.25.0" anomaly %}} for increased deployment flexibility.
@@ -305,80 +306,6 @@ This means that the service upon restart:
1. Won't restore the state of `zscore_online` model, because its `z_threshold` argument **has changed**, retraining from scratch is needed on the last `fit_window` = 24 hours of data for `q1`, `q2` and `q3` (as model's `queries` arg is not set so it defaults to all queries found in the reader).
2. Will **partially** restore the state of `prophet` model, because its class and schedulers are unchanged, but **only instances trained on timeseries returned by `q1` query**. New fit/infer jobs will be set for new query `q3`. The old query `q2` artifacts will be dropped upon restart - all respective models and data for (`prophet`, `q2`) combination will be removed from the database file and from the disk.
## Retention
{{% available_from "v1.28.1" anomaly %}} The `retention` argument allows setting a [time-to-live](https://en.wikipedia.org/wiki/Time_to_live) (TTL) for service artifacts, such as stored model instances and their training data. When enabled, the service will periodically check (controlled by `check_interval` period) and clean up model instances that have not been used for inference or refitting within the specified period of time (defined in `ttl` argument as a valid period). This helps to manage resources in long-running deployments by removing stale or unused artifacts.
### Use Cases
- With **[online models](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models)** as they continuously create model instances for new timeseries over time during inference calls, especially when combined with [periodic schedulers](https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler) with infrequent `fit_every` (say, `90d`).
- In deployments where **the set of monitored timeseries changes frequently**, leading to accumulation of unused model instances and training data over time, due to high churn rate or relabeling of metrics.
- When using **[state restoration](https://docs.victoriametrics.com/anomaly-detection/components/settings/#state-restoration) feature** which improves fault tolerance, but may retain all model instances and their training data for considerable time, potentially leading to high disk or RAM usage.
### Configuration
The section is **backward-compatible and disabled by default**, meaning that all model instances and their training data are retained unless:
- The service is restarted with `restore_state` set to `false`, which triggers a cleanup of all stored artifacts.
- The models are marked as outdated once scheduled re-fitting is due, leading to retraining and replacement of previous artifacts.
`ttl` argument defines the time-to-live period for model instances and their training data. It should be a valid period string (e.g., `7d` for 7 days, `30d` for 30 days, etc.). If a model instance or its training data has not been used for inference or refitting within this period, it will be considered stale and eligible for cleanup.
> If set higher than respective scheduler's `fit_every` period, the ttl will have no effect, as models will always be refitted before they become stale.
`check_interval` argument defines how often the service should check for stale artifacts. It should be a valid period string (e.g., `1h` for 1 hour, `24h` for 24 hours, etc.). During each check, the service will evaluate all stored model instances and their training data against the defined `ttl` and remove those that are stale.
> Check interval should be set to a value smaller than `ttl` and smaller than the smallest `fit_every` period among all schedulers used in the config to ensure timely cleanup of stale artifacts, otherwise stale artifacts may persist longer than intended.
### Example
Here's an example configuration that enables retention with a TTL of 1 day and a check interval of 30 minutes, where inference is performed every 15 minutes.
- Model instances and their training data that have not been used for inference or refitting within the last day will be cleaned up every 30 minutes (m2 example on a diagram)
- While model instances used for inference within the last day at least 1 time will be retained (m1 example on a diagram)
![Retention Example Diagram](vmanomaly-ttl-example.webp)
```yaml
schedulers:
s1:
class: periodic
infer_every: 15m
# other scheduler args
# other schedulers
reader:
class: vm
datasource_url: 'https://play.victoriametrics.com'
tenant_id: "0"
queries:
q1:
expr: 'some_metricsql_query_1' # returns active timeseries
q2:
expr: 'some_metricsql_query_2' # returns high-churn timeseries
sampling_period: 30s
# other reader args
models:
m1: # model instances will be retained due to stable data returned by q1
class: zscore_online
schedulers: ['s1']
queries: ['q1']
# other model args
m2: # model instances will be likely dropped during retention checks due to high churn rate
class: prophet
schedulers: ['s1']
queries: ['q2']
# other model args
# other models
# other sections like schedulers, models, reader, writer, monitoring, etc.
settings:
# other settings
restore_state: True # enables state restoration
retention:
ttl: 24h # time-to-live for model instances and their training data
check_interval: 30m # interval to check for stale artifacts
```
## Logger Levels

Binary file not shown.

Before

Width:  |  Height:  |  Size: 107 KiB

After

Width:  |  Height:  |  Size: 123 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 424 KiB

View File

@@ -10,9 +10,9 @@ sitemap:
- To use *vmanomaly*, part of the enterprise package, a license key is required. Obtain your key [here](https://victoriametrics.com/products/enterprise/trial/) for this tutorial or for enterprise use.
- In the tutorial, we'll be using the following VictoriaMetrics components:
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) (v1.133.0)
- [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/) (v1.133.0)
- [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) (v1.133.0)
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) (v1.132.0)
- [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/) (v1.132.0)
- [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) (v1.132.0)
- [Grafana](https://grafana.com/) (v.10.2.1)
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/)
- [Node exporter](https://github.com/prometheus/node_exporter#node-exporter) (v1.7.0) and [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (v0.27.0)
@@ -323,7 +323,7 @@ Let's wrap it all up together into the `docker-compose.yml` file.
services:
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.133.0
image: victoriametrics/vmagent:v1.132.0
depends_on:
- "victoriametrics"
ports:
@@ -340,7 +340,7 @@ services:
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.133.0
image: victoriametrics/victoria-metrics:v1.132.0
ports:
- 8428:8428
volumes:
@@ -373,7 +373,7 @@ services:
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.133.0
image: victoriametrics/vmalert:v1.132.0
depends_on:
- "victoriametrics"
ports:
@@ -395,7 +395,7 @@ services:
restart: always
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.28.4
image: victoriametrics/vmanomaly:v1.28.2
depends_on:
- "victoriametrics"
ports:

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

After

Width:  |  Height:  |  Size: 33 KiB

View File

@@ -249,27 +249,27 @@ services:
- grafana_data:/var/lib/grafana/
vmsingle:
image: victoriametrics/victoria-metrics:v1.133.0
image: victoriametrics/victoria-metrics:v1.132.0
command:
- -httpListenAddr=0.0.0.0:8429
vmstorage:
image: victoriametrics/vmstorage:v1.133.0-cluster
image: victoriametrics/vmstorage:v1.132.0-cluster
vminsert:
image: victoriametrics/vminsert:v1.133.0-cluster
image: victoriametrics/vminsert:v1.132.0-cluster
command:
- -storageNode=vmstorage:8400
- -httpListenAddr=0.0.0.0:8480
vmselect:
image: victoriametrics/vmselect:v1.133.0-cluster
image: victoriametrics/vmselect:v1.132.0-cluster
command:
- -storageNode=vmstorage:8401
- -httpListenAddr=0.0.0.0:8481
vmagent:
image: victoriametrics/vmagent:v1.133.0
image: victoriametrics/vmagent:v1.132.0
volumes:
- ./scrape.yaml:/etc/vmagent/config.yaml
command:
@@ -278,7 +278,7 @@ services:
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
vmgateway-cluster:
image: victoriametrics/vmgateway:v1.133.0-enterprise
image: victoriametrics/vmgateway:v1.132.0-enterprise
ports:
- 8431:8431
volumes:
@@ -294,7 +294,7 @@ services:
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
vmgateway-single:
image: victoriametrics/vmgateway:v1.133.0-enterprise
image: victoriametrics/vmgateway:v1.132.0-enterprise
ports:
- 8432:8431
volumes:
@@ -405,7 +405,7 @@ Once iDP configuration is done, vmagent configuration needs to be updated to use
```yaml
vmagent:
image: victoriametrics/vmagent:v1.133.0
image: victoriametrics/vmagent:v1.132.0
volumes:
- ./scrape.yaml:/etc/vmagent/config.yaml
- ./vmagent-client-secret:/etc/vmagent/oauth2-client-secret

View File

@@ -106,7 +106,6 @@ See also [case studies](https://docs.victoriametrics.com/victoriametrics/casestu
* [Why I Switched to VictoriaMetrics: Scaling from Small Business to Enterprise](https://blackmetalz.github.io/why-i-switched-to-victoriametrics-scaling-from-small-business-to-enterprise.html)
* [Backing up VictoriaMetrics Data: A Complete Guide](https://medium.com/@kanakaraju896/backing-up-victoriametrics-data-a-complete-guide-24473c74450f)
* [Unlocking the Power of VictoriaMetrics: A Prometheus Alternative](https://developer-friendly.blog/blog/2024/06/17/unlocking-the-power-of-victoriametrics-a-prometheus-alternative/)
* [How to Master Kubernetes Observability: Multi-Cluster Monitoring with VictoriaMetrics, Loki, and Grafana](https://www.keyvalue.systems/blog/kubernetes-observability-with-victoriametrics-loki-grafana/)
## Third-party articles and slides about VictoriaLogs

View File

@@ -1147,37 +1147,16 @@ Report bugs and propose new features in our [GitHub Issues](https://github.com/V
### List of command-line flags for vminsert
Pass `-help` to vminsert in order to see the list of supported command-line flags with their description.
#### Common vminsert flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "vminsert_common_flags.md" %}}
#### Enterprise vminsert flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "vminsert_enterprise_flags.md" %}}
Below is the output for `/path/to/vminsert -help`:
{{% content "vminsert_flags.md" %}}
### List of command-line flags for vmselect
Pass `-help` to vmselect in order to see the list of supported command-line flags with their description.
Below is the output for `/path/to/vmselect -help`:
#### Common vmselect flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "vmselect_common_flags.md" %}}
#### Enterprise vmselect flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "vmselect_enterprise_flags.md" %}}
{{% content "vmselect_flags.md" %}}
### List of command-line flags for vmstorage
Pass `-help` to vmstorage in order to see the list of supported command-line flags with their description.
#### Common vmstorage flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "vmstorage_common_flags.md" %}}
#### Enterprise vmstorage flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "vmstorage_enterprise_flags.md" %}}
{{% content "vmstorage_flags.md" %}}

View File

@@ -27,5 +27,5 @@ to [the latest available releases](https://docs.victoriametrics.com/victoriametr
## Currently supported LTS release lines
- v1.122.x - the latest one is [v1.122.12 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.12)
- v1.110.x - the latest one is [v1.110.27 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.27)
- v1.122.x - the latest one is [v1.122.11 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.11)
- v1.110.x - the latest one is [v1.110.26 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.26)

View File

@@ -58,9 +58,9 @@ Download the newest available [VictoriaMetrics release](https://docs.victoriamet
from [DockerHub](https://hub.docker.com/r/victoriametrics/victoria-metrics) or [Quay](https://quay.io/repository/victoriametrics/victoria-metrics?tab=tags):
```sh
docker pull victoriametrics/victoria-metrics:v1.133.0
docker pull victoriametrics/victoria-metrics:v1.132.0
docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 \
victoriametrics/victoria-metrics:v1.133.0 --selfScrapeInterval=5s -storageDataPath=victoria-metrics-data
victoriametrics/victoria-metrics:v1.132.0 --selfScrapeInterval=5s -storageDataPath=victoria-metrics-data
```
_For Enterprise images see [this link](https://docs.victoriametrics.com/victoriametrics/enterprise/#docker-images)._

View File

@@ -1542,9 +1542,9 @@ See also [Why IndexDB size is so large?](https://docs.victoriametrics.com/victor
## Retention
Retention is configured with the `-retentionPeriod` command-line flag, which takes a number followed by a time unit
character - `h(ours)`, `d(ays)`, `w(eeks)`, `M(onth)`, `y(ears)`. If the time unit is not specified, a month (31 days) is assumed.
character - `h(ours)`, `d(ays)`, `w(eeks)`, `y(ears)`. If the time unit is not specified, a month (31 days) is assumed.
For instance, `-retentionPeriod=3` means that the data will be stored for 3 months (93 days) and then deleted.
The default retention period is one month: 1M (31 days). The **minimum retention** period is 24h or 1d.
The default retention period is one month. The **minimum retention** period is 24h or 1d.
Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
**Data partitions** outside the configured retention are deleted **on the first day of the new month**.
@@ -2443,13 +2443,7 @@ Files included in each folder:
Pass `-help` to VictoriaMetrics in order to see the list of supported command-line flags with their description:
### Common flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "victoria_metrics_common_flags.md" %}}
### Enterprise flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "victoria_metrics_enterprise_flags.md" %}}
{{% content "victoria_metrics_flags.md" %}}
---

View File

@@ -114,7 +114,6 @@ and the candidate is deployed to the sandbox environment.
1. Make sure that the release branches have no security issues.
1. Update release versions if needed in [SECURITY.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/SECURITY.md).
1. Run `PKG_TAG=v1.xx.y make docs-update-version` command to update version help tooltips.
1. Run `make docs-update-flags` command to update command-line flags in the documentation. [Commit example](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/4d42b291e55ac9211130efbd5a56aa819998516d).
1. Cut new version in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/changelog/CHANGELOG.md) and commit it. See example in this [commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/b771152039d23b5ccd637a23ea748bc44a9511a7).
1. Create the following release tags:
* `git tag -s v1.xx.y` in `master` branch

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
---
weight: 8
weight: 7
title: Year 2020
search:
weight: 0.1
@@ -7,7 +7,7 @@ menu:
docs:
identifier: vm-changelog-2020
parent: vm-changelog
weight: 8
weight: 7
tags:
- metrics
aliases:

View File

@@ -1,5 +1,5 @@
---
weight: 7
weight: 6
title: Year 2021
search:
weight: 0.1
@@ -7,7 +7,7 @@ menu:
docs:
identifier: vm-changelog-2021
parent: vm-changelog
weight: 7
weight: 6
tags:
- metrics
aliases:

View File

@@ -1,5 +1,5 @@
---
weight: 6
weight: 5
title: Year 2022
search:
weight: 0.1
@@ -7,7 +7,7 @@ menu:
docs:
identifier: vm-changelog-2022
parent: vm-changelog
weight: 6
weight: 5
tags:
- metrics
aliases:

View File

@@ -1,5 +1,5 @@
---
weight: 5
weight: 4
title: Year 2023
search:
weight: 0.1
@@ -7,7 +7,7 @@ menu:
docs:
identifier: vm-changelog-2023
parent: vm-changelog
weight: 5
weight: 4
tags:
- metrics
aliases:

View File

@@ -1,5 +1,5 @@
---
weight: 4
weight: 3
title: Year 2024
search:
weight: 0.1
@@ -7,7 +7,7 @@ menu:
docs:
identifier: vm-changelog-2024
parent: vm-changelog
weight: 4
weight: 3
tags:
- metrics
aliases:

File diff suppressed because it is too large Load Diff

View File

@@ -1,19 +0,0 @@
---
weight: 2
title: Year 2026
search:
weight: 0.1
menu:
docs:
identifier: vm-changelog-2026
parent: vm-changelog
weight: 2
tags:
- metrics
aliases:
- /CHANGELOG_2026.html
- /changelog_2026
- /changelog/changelog_2026/index.html
- /changelog/changelog_2026/
---
{{% content "CHANGELOG.md" %}}

View File

@@ -117,7 +117,7 @@ It is allowed to run VictoriaMetrics and VictoriaLogs Enterprise components in [
Binary releases of Enterprise components are available at [the releases page for VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
and [the releases page for VictoriaLogs](https://github.com/VictoriaMetrics/VictoriaLogs/releases/latest).
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.133.0-enterprise.tar.gz`.
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.132.0-enterprise.tar.gz`.
In order to run binary release of Enterprise component, please download the `*-enterprise.tar.gz` archive for your OS and architecture
from the corresponding releases page and unpack it. Then run the unpacked binary.
@@ -135,8 +135,8 @@ For example, the following command runs VictoriaMetrics Enterprise binary with t
obtained at [this page](https://victoriametrics.com/products/enterprise/trial/):
```sh
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.133.0/victoria-metrics-linux-amd64-v1.133.0-enterprise.tar.gz
tar -xzf victoria-metrics-linux-amd64-v1.133.0-enterprise.tar.gz
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.132.0/victoria-metrics-linux-amd64-v1.132.0-enterprise.tar.gz
tar -xzf victoria-metrics-linux-amd64-v1.132.0-enterprise.tar.gz
./victoria-metrics-prod -license=BASE64_ENCODED_LICENSE_KEY
```
@@ -151,7 +151,7 @@ Alternatively, VictoriaMetrics Enterprise license can be stored in the file and
It is allowed to run VictoriaMetrics and VictoriaLogs Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
Docker images for Enterprise components are available at [VictoriaMetrics Docker Hub](https://hub.docker.com/u/victoriametrics) and [VictoriaMetrics Quay](https://quay.io/organization/victoriametrics).
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.133.0-enterprise`.
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.132.0-enterprise`.
In order to run Docker image of VictoriaMetrics Enterprise component, it is required to provide the license key via the command-line
flag as described in the [binary-releases](#binary-releases) section.
@@ -161,13 +161,13 @@ Enterprise license key can be obtained at [this page](https://victoriametrics.co
For example, the following command runs VictoriaMetrics Enterprise Docker image with the specified license key:
```sh
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.133.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.132.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
```
Alternatively, the license code can be stored in the file and then referred via `-licenseFile` command-line flag:
```sh
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.133.0-enterprise -licenseFile=/path/to/vm-license
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.132.0-enterprise -licenseFile=/path/to/vm-license
```
Example docker-compose configuration:
@@ -177,7 +177,7 @@ version: "3.5"
services:
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.133.0
image: victoriametrics/victoria-metrics:v1.132.0
ports:
- 8428:8428
volumes:
@@ -209,7 +209,7 @@ is used to provide the license key in plain-text:
```yaml
server:
image:
tag: v1.133.0-enterprise
tag: v1.132.0-enterprise
license:
key: {BASE64_ENCODED_LICENSE_KEY}
@@ -220,7 +220,7 @@ In order to provide the license key via existing secret, the following values fi
```yaml
server:
image:
tag: v1.133.0-enterprise
tag: v1.132.0-enterprise
license:
secret:
@@ -270,7 +270,7 @@ spec:
license:
key: {BASE64_ENCODED_LICENSE_KEY}
image:
tag: v1.133.0-enterprise
tag: v1.132.0-enterprise
```
In order to provide the license key via an existing secret, the following custom resource is used:
@@ -287,7 +287,7 @@ spec:
name: vm-license
key: license
image:
tag: v1.133.0-enterprise
tag: v1.132.0-enterprise
```
Example secret with license key:
@@ -338,7 +338,7 @@ Builds are available for amd64 and arm64 architectures.
Example archive:
`victoria-metrics-linux-amd64-v1.133.0-enterprise.tar.gz`
`victoria-metrics-linux-amd64-v1.132.0-enterprise.tar.gz`
Includes:
@@ -347,7 +347,7 @@ Includes:
Example Docker image:
`victoriametrics/victoria-metrics:v1.133.0-enterprise-fips` uses the FIPS-compatible binary and is based on the `scratch` image.
`victoriametrics/victoria-metrics:v1.132.0-enterprise-fips` uses the FIPS-compatible binary and is based on the `scratch` image.
## Monitoring license expiration

View File

@@ -51,31 +51,14 @@ Comma-separated list of expected databases can be passed to VictoriaMetrics via
## InfluxDB v2 format
VictoriaMetrics exposes endpoint for InfluxDB v2 HTTP API at `/influx/api/v2/write` and `/api/v2/write`.
Here's an example writing data with `curl`:
```sh
curl --data-binary 'measurement1,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://<victoriametrics-addr>:8428/api/v2/write'
```
And to write multiple lines of data at once, prepare a file (e.g., `influx.data`) with your data:
```text
measurement2,tag1=value1,tag2=value2 field1=456,field2=4.56
measurement3,tag1=value1,tag2=value2 field1=789,field2=7.89
```
And execute this command to import the data:
```sh
curl -X POST 'http://<victoriametrics-addr>:8428/api/v2/write' --data-binary @influx.data
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/api/v2/write'
```
The `/api/v1/export` endpoint should return the following response:
```json
{"metric":{"__name__":"measurement1_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1766983684142]}
{"metric":{"__name__":"measurement1_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1766983684142]}
{"metric":{"__name__":"measurement2_field1","tag1":"value1","tag2":"value2"},"values":[456],"timestamps":[1767012583021]}
{"metric":{"__name__":"measurement2_field2","tag1":"value1","tag2":"value2"},"values":[4.56],"timestamps":[1767012583021]}
{"metric":{"__name__":"measurement3_field1","tag1":"value1","tag2":"value2"},"values":[789],"timestamps":[1767012583021]}
{"metric":{"__name__":"measurement3_field2","tag1":"value1","tag2":"value2"},"values":[7.89],"timestamps":[1767012583021]}
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[123],"timestamps":[1695902762311]}
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[1.23],"timestamps":[1695902762311]}
```
## Data transformations
@@ -109,13 +92,13 @@ foo_field2{tag1="value1", tag2="value2"} 40
Example for writing data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
to local VictoriaMetrics using `curl`:
```sh
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://<victoriametrics-addr>:8428/write'
curl -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'
```
An arbitrary number of lines delimited by '\n' (aka newline char) can be sent in a single request.
After that the data may be read via [/api/v1/export](https://docs.victoriametrics.com/victoriametrics/#how-to-export-data-in-json-line-format) endpoint:
```sh
curl -G 'http://<victoriametrics-addr>:8428/api/v1/export' -d 'match={__name__=~"measurement_.*"}'
curl -G 'http://localhost:8428/api/v1/export' -d 'match={__name__=~"measurement_.*"}'
```
The `/api/v1/export` endpoint should return the following response:

View File

@@ -35,8 +35,8 @@ scrape_configs:
After you created the `scrape.yaml` file, download and unpack [single-node VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) to the same directory:
```sh
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.133.0/victoria-metrics-linux-amd64-v1.133.0.tar.gz
tar xzf victoria-metrics-linux-amd64-v1.133.0.tar.gz
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.132.0/victoria-metrics-linux-amd64-v1.132.0.tar.gz
tar xzf victoria-metrics-linux-amd64-v1.132.0.tar.gz
```
Then start VictoriaMetrics and instruct it to scrape targets defined in `scrape.yaml` and save scraped metrics
@@ -150,8 +150,8 @@ Then start [single-node VictoriaMetrics](https://docs.victoriametrics.com/victor
```yaml
# Download and unpack single-node VictoriaMetrics
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.133.0/victoria-metrics-linux-amd64-v1.133.0.tar.gz
tar xzf victoria-metrics-linux-amd64-v1.133.0.tar.gz
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.132.0/victoria-metrics-linux-amd64-v1.132.0.tar.gz
tar xzf victoria-metrics-linux-amd64-v1.132.0.tar.gz
# Run single-node VictoriaMetrics with the given scrape.yaml
./victoria-metrics-prod -promscrape.config=scrape.yaml

View File

@@ -1,51 +0,0 @@
---
build:
list: never
publishResources: false
render: never
sitemap:
disable: true
---
<!-- This file has to be manually updated during feature work in a PR; the `make docs-update-flags` command can be run periodically to ensure the flags stay in sync. -->
```shellhelp
-downsampling.period array
Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. The 'offset' must be a multiple of 'interval', and when setting multiple downsampling periods for a single filter, those periods must also be multiples of each other. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#downsampling for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-retentionFilter array
Retention filter in the format 'filter:retention'. For example, '{env="dev"}:3d' configures the retention for time series with env="dev" label to 3 days. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention-filters for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-search.logSlowQueryStats duration
Log query statistics if execution time exceeding this value - see https://docs.victoriametrics.com/victoriametrics/query-stats . Zero disables slow query statistics logging. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 5s)
-search.logSlowQueryStatsHeaders array
HTTP request header keys to log for queries exceeding the threshold set by -search.logSlowQueryStats. Case-insensitive. By default, no headers are logged. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/query-stats/#log-fields for details and examples.
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
```

View File

@@ -42,6 +42,10 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#query-tracing
-disablePerDayIndex
Disable per-day index and use global index for all searches. This may improve performance and decrease disk space usage for the use cases with fixed set of timeseries scattered across a big time range (for example, when loading years of historical data). See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#index-tuning
-downsampling.period array
Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. The 'offset' must be a multiple of 'interval', and when setting multiple downsampling periods for a single filter, those periods must also be multiples of each other. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#downsampling for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-dryRun
Whether to check config files without running VictoriaMetrics. The following config files are checked: -promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
-enableMetadata
@@ -52,6 +56,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Whether to enable reading flags from environment variables in addition to the command line. Command line flag values have priority over values from environment vars. Flags are read only from the command line if this flag isn't set. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#environment-variables for more details
-envflag.prefix string
Prefix for environment variables if -envflag.enable is set
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-filestream.disableFadvise
Whether to disable fadvise() syscall when reading large data files. The fadvise() syscall prevents from eviction of recently accessed data from OS page cache during background merges and backups. In some rare cases it is better to disable the syscall if it uses too much CPU
-finalMergeDelay duration
@@ -155,6 +161,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-logNewSeries
Whether to log new series. This option is for debug purposes only. It can lead to performance issues when big number of new series are ingested into VictoriaMetrics
-logNewSeriesAuthKey value
@@ -207,6 +221,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file.
Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-newrelic.maxInsertRequestSize size
The maximum size in bytes of a single NewRelic request to /newrelic/infra/v2/metrics/events/bulk
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
@@ -371,9 +393,13 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings.
Flag value can be read from the given file when using -reloadAuthKey=file:///abs/path/to/file or -reloadAuthKey=file://./relative/path/to/file.
Flag value can be read from the given http/https url when using -reloadAuthKey=http://host/path or -reloadAuthKey=https://host/path
-retentionFilter array
Retention filter in the format 'filter:retention'. For example, '{env="dev"}:3d' configures the retention for time series with env="dev" label to 3 days. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention-filters for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-retentionPeriod value
Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention. See also -retentionFilter
The following optional suffixes are supported: s (second), h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1M)
Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter
The following optional suffixes are supported: s (second), h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1)
-retentionTimezoneOffset duration
The offset for performing indexdb rotation. If set to 0, then the indexdb rotation is performed at 4am UTC time per each -retentionPeriod. If set to 2h, then the indexdb rotation is performed at 4am EET time (the timezone with +2h offset)
-search.cacheTimestampOffset duration
@@ -402,6 +428,12 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
-search.logSlowQueryDuration duration
Log queries with execution time exceeding this value. Zero disables slow query logging. See also -search.logQueryMemoryUsage (default 5s)
-search.logSlowQueryStats duration
Log query statistics if execution time exceeding this value - see https://docs.victoriametrics.com/victoriametrics/query-stats . Zero disables slow query statistics logging. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 5s)
-search.logSlowQueryStatsHeaders array
HTTP request header keys to log for queries exceeding the threshold set by -search.logSlowQueryStats. Case-insensitive. By default, no headers are logged. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/query-stats/#log-fields for details and examples.
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-search.maxBinaryOpPushdownLabelValues int
The maximum number of values for a label in the first expression that can be extracted as a common label filter and pushed down to the second expression in a binary operation. A larger value makes the pushed-down filter more complex but fewer time series will be returned. This flag is useful when selective label (e.g., 'instance') contains numerous values, and storage resources are abundant. (default 100)
-search.maxConcurrentRequests int
@@ -578,6 +610,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
Whether to enable TLS for incoming HTTP requests at the given -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsCertFile array
Path to file with TLS certificate for the corresponding -httpListenAddr if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. See also -tlsAutocertHosts
Supports an array of values separated by comma or specified via multiple flags.
@@ -613,4 +653,4 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/
-zabbixconnector.maxLineLen size
The maximum length in bytes of a single line accepted by /zabbixconnector/api/v1/history
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 33554432)
```
```

View File

@@ -9,10 +9,4 @@ aliases:
- /grafana-datasource/
- /grafana-datasource.html
---
###### VictoriaMetrics datasource
Moved to [victoriametrics/integrations/grafana](https://docs.victoriametrics.com/victoriametrics/integrations/grafana/#victoriametrics-datasource).
###### Prometheus datasource
Moved to [victoriametrics/integrations/grafana](https://docs.victoriametrics.com/victoriametrics/integrations/grafana/#prometheus-datasource).
{{% content "integrations/grafana/datasource/_index.md" %}}

View File

@@ -1244,15 +1244,9 @@ It is safe sharing the collected profiles from security point of view, since the
## Advanced usage
`vmagent` can be fine-tuned with various command-line flags. Run `./vmagent -help` in order to see the full list of these flags with their descriptions and default value.
`vmagent` can be fine-tuned with various command-line flags. Run `./vmagent -help` in order to see the full list of these flags with their descriptions and default values:
### Common flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "vmagent_common_flags.md" %}}
### Enterprise flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "vmagent_enterprise_flags.md" %}}
{{% content "vmagent_flags.md" %}}
---

View File

@@ -1,109 +0,0 @@
---
build:
list: never
publishResources: false
render: never
sitemap:
disable: true
---
<!-- The file has to be manually updated during feature work in PR; the make docs-update-flags command can be used periodically to ensure the flags are in sync. -->
```shellhelp
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-gcp.pubsub.publish.byteThreshold int
Publish a batch when its size in bytes reaches this value. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 1000000)
-gcp.pubsub.publish.countThreshold int
Publish a batch when it has this many messages. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 100)
-gcp.pubsub.publish.credentialsFile string
Path to file with GCP credentials to use for PubSub client. If not set, default credentials will be used (see Workload Identity for K8S or https://cloud.google.com/docs/authentication/application-default-credentials). See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-gcp.pubsub.publish.delayThreshold duration
Publish a non-empty batch after this delay has passed. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 10ms)
-gcp.pubsub.publish.maxOutstandingBytes int
The maximum size of buffered messages to be published. If less than or equal to zero, this is disabled. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default -1)
-gcp.pubsub.publish.maxOutstandingMessages int
The maximum number of buffered messages to be published. If less than or equal to zero, this is disabled. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 100)
-gcp.pubsub.publish.timeout duration
The maximum time that the client will attempt to publish a bundle of messages. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 1m0s)
-gcp.pubsub.subscribe.credentialsFile string
Path to file with GCP credentials to use for PubSub client. If not set, default credentials are used (see Workload Identity for K8S or https://cloud.google.com/docs/authentication/application-default-credentials ). See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-gcp.pubsub.subscribe.defaultMessageFormat string
Default message format if -gcp.pubsub.subscribe.topicSubscription.messageFormat is missing. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default "promremotewrite")
-gcp.pubsub.subscribe.topicSubscription array
GCP PubSub topic subscription in the format: projects/<project-id>/subscriptions/<subscription-name>. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-gcp.pubsub.subscribe.topicSubscription.concurrency array
The number of concurrently processed messages for topic subscription specified via -gcp.pubsub.subscribe.topicSubscription flag. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 0)
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to default value.
-gcp.pubsub.subscribe.topicSubscription.isGzipped array
Enables gzip decompression for messages payload at the corresponding -gcp.pubsub.subscribe.topicSubscription. Only prometheus, jsonline, graphite and influx formats accept gzipped messages. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-gcp.pubsub.subscribe.topicSubscription.messageFormat array
Message format for the corresponding -gcp.pubsub.subscribe.topicSubscription. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline . See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic array
Kafka topic names for data consumption. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.basicAuth.password array
Optional basic auth password for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN' . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.basicAuth.username array
Optional basic auth username for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN' . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.brokers array
List of brokers to connect for given topic, e.g. -kafka.consumer.topic.broker=host-1:9092;host-2:9092 . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.concurrency array
Configures consumer concurrency for topic specified via -kafka.consumer.topic flag. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 1)
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to default value.
-kafka.consumer.topic.defaultFormat string
Expected data format in the topic if -kafka.consumer.topic.format is skipped. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default "promremotewrite")
-kafka.consumer.topic.format array
data format for corresponding kafka topic. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline and opentelemetry. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.groupID array
Defines group.id for topic. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.isGzipped array
	Enables gzip setting for topic messages payload. Only prometheus, jsonline, graphite and influx formats accept gzipped messages. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-kafka.consumer.topic.options array
Optional key=value;key1=value2 settings for topic consumer. See full configuration options at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
```

View File

@@ -42,6 +42,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
Whether to enable reading flags from environment variables in addition to the command line. Command line flag values have priority over values from environment vars. Flags are read only from the command line if this flag isn't set. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#environment-variables for more details
-envflag.prefix string
Prefix for environment variables if -envflag.enable is set
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-filestream.disableFadvise
Whether to disable fadvise() syscall when reading large data files. The fadvise() syscall prevents from eviction of recently accessed data from OS page cache during background merges and backups. In some rare cases it is better to disable the syscall if it uses too much CPU
-flagsAuthKey value
@@ -52,6 +54,40 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
Whether to use pread() instead of mmap() for reading data files. By default, mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. mmap() is usually faster for reading small data chunks than pread()
-fs.maxConcurrency int
The maximum number of concurrent goroutines to work with files; smaller values may help reducing Go scheduling latency on systems with small number of CPU cores; higher values may help reducing data ingestion latency on systems with high-latency storage such as NFS or Ceph (default fsutil.getDefaultConcurrency())
-gcp.pubsub.publish.byteThreshold int
Publish a batch when its size in bytes reaches this value. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 1000000)
-gcp.pubsub.publish.countThreshold int
Publish a batch when it has this many messages. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 100)
-gcp.pubsub.publish.credentialsFile string
Path to file with GCP credentials to use for PubSub client. If not set, default credentials will be used (see Workload Identity for K8S or https://cloud.google.com/docs/authentication/application-default-credentials). See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-gcp.pubsub.publish.delayThreshold duration
Publish a non-empty batch after this delay has passed. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 10ms)
-gcp.pubsub.publish.maxOutstandingBytes int
The maximum size of buffered messages to be published. If less than or equal to zero, this is disabled. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default -1)
-gcp.pubsub.publish.maxOutstandingMessages int
The maximum number of buffered messages to be published. If less than or equal to zero, this is disabled. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 100)
-gcp.pubsub.publish.timeout duration
The maximum time that the client will attempt to publish a bundle of messages. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#writing-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 1m0s)
-gcp.pubsub.subscribe.credentialsFile string
Path to file with GCP credentials to use for PubSub client. If not set, default credentials are used (see Workload Identity for K8S or https://cloud.google.com/docs/authentication/application-default-credentials ). See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-gcp.pubsub.subscribe.defaultMessageFormat string
Default message format if -gcp.pubsub.subscribe.topicSubscription.messageFormat is missing. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default "promremotewrite")
-gcp.pubsub.subscribe.topicSubscription array
GCP PubSub topic subscription in the format: projects/<project-id>/subscriptions/<subscription-name>. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-gcp.pubsub.subscribe.topicSubscription.concurrency array
The number of concurrently processed messages for topic subscription specified via -gcp.pubsub.subscribe.topicSubscription flag. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 0)
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to default value.
-gcp.pubsub.subscribe.topicSubscription.isGzipped array
Enables gzip decompression for messages payload at the corresponding -gcp.pubsub.subscribe.topicSubscription. Only prometheus, jsonline, graphite and influx formats accept gzipped messages. See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-gcp.pubsub.subscribe.topicSubscription.messageFormat array
Message format for the corresponding -gcp.pubsub.subscribe.topicSubscription. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline . See https://docs.victoriametrics.com/victoriametrics/integrations/pubsub/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-graphite.sanitizeMetricName
Sanitize metric names for the ingested Graphite data. See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting
-graphiteListenAddr string
@@ -133,6 +169,52 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
-kafka.consumer.topic array
Kafka topic names for data consumption. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.basicAuth.password array
Optional basic auth password for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN' . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.basicAuth.username array
Optional basic auth username for -kafka.consumer.topic. Must be used in conjunction with any supported auth methods for kafka client, specified by flag -kafka.consumer.topic.options='security.protocol=SASL_SSL;sasl.mechanisms=PLAIN' . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.brokers array
List of brokers to connect for given topic, e.g. -kafka.consumer.topic.broker=host-1:9092;host-2:9092 . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.concurrency array
Configures consumer concurrency for topic specified via -kafka.consumer.topic flag. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default 1)
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to default value.
-kafka.consumer.topic.defaultFormat string
Expected data format in the topic if -kafka.consumer.topic.format is skipped. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default "promremotewrite")
-kafka.consumer.topic.format array
data format for corresponding kafka topic. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline and opentelemetry. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.groupID array
Defines group.id for topic. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-kafka.consumer.topic.isGzipped array
	Enables gzip setting for topic messages payload. Only prometheus, jsonline, graphite and influx formats accept gzipped messages. See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-kafka.consumer.topic.options array
Optional key=value;key1=value2 settings for topic consumer. See full configuration options at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md . See https://docs.victoriametrics.com/victoriametrics/integrations/kafka/#reading-metrics . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-loggerDisableTimestamps
Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int
@@ -175,6 +257,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file.
Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-newrelic.maxInsertRequestSize size
The maximum size in bytes of a single NewRelic request to /newrelic/infra/v2/metrics/events/bulk
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
@@ -599,6 +689,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
Whether to enable TLS for incoming HTTP requests at the given -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsCertFile array
Path to file with TLS certificate for the corresponding -httpListenAddr if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. See also -tlsAutocertHosts
Supports an array of values separated by comma or specified via multiple flags.

View File

@@ -286,7 +286,7 @@ expr: <string>
# In case of conflicts, original labels are kept with prefix `exported_`.
#
# Labels only support limited templating variables in https://docs.victoriametrics.com/victoriametrics/vmalert/#templating,
# including `$labels`, `$value` and `$expr`, to avoid breaking alert states or causing cardinality issue with results.
# including `$labels`, `$value` and `$expr`, to avoid breaking alert states or causing cardinality issue with results.
# Note: be careful when setting dynamic label values like `$value`, because each time the $value changes a new alert will be
# generated, which also breaks the `for` condition.
labels:
@@ -316,7 +316,6 @@ The following variables are available in templating:
| $for or .For | Alert's configured for param. | Number of connections is too high for more than {{ .For }} |
| $externalLabels or .ExternalLabels | List of labels configured via `-external.label` command-line flag. | Issues with {{ $labels.instance }} (datacenter-{{ $externalLabels.dc }}) |
| $externalURL or .ExternalURL | URL configured via `-external.url` command-line flag. Used for cases when vmalert is hidden behind proxy. | Visit {{ $externalURL }} for more details |
| $isPartial or .IsPartial | Indicates whether the latest rule query response from the datasource(that supports returning `isPartial` option, such as vmcluster) could be partial. | {{ if $isPartial }}WARNING: The latest alert state may be a false alarm due to a partial response from the datasource.{{ end }}
Additionally, `vmalert` provides some extra templating functions listed in [template functions](#template-functions) and [reusable templates](#reusable-templates).
@@ -859,8 +858,6 @@ ALERTS{alertname="your_alertname", alertstate="firing"}
Execute the query against storage which was used for `-remoteWrite.url` during the `replay`.
> Since alerting rule annotations are attached to alert messages sent to the notifier (such as Alertmanager), and vmalert does not send alert messages to notifier in replay mode, all rule annotations will be ignored.
### Additional configuration
There are following non-required `replay` flags:
@@ -1172,13 +1169,7 @@ command-line flags with their descriptions.
The shortlist of configuration flags is the following:
#### Common flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "vmalert_common_flags.md" %}}
#### Enterprise flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "vmalert_enterprise_flags.md" %}}
{{% content "vmalert_flags.md" %}}
### Hot config reload

View File

@@ -1,57 +0,0 @@
---
build:
list: never
publishResources: false
render: never
sitemap:
disable: true
---
<!-- The file has to be manually updated during feature work in PR, make docs-update-flags command could be used periodically to ensure the flags in sync. -->
```shellhelp
-clusterMode
If clusterMode is enabled, then vmalert automatically adds the tenant specified in config groups to -datasource.url, -remoteWrite.url and -remoteRead.url. See https://docs.victoriametrics.com/victoriametrics/vmalert/#multitenancy . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/victoriametrics/vmalert/#multitenancy . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-defaultTenant.prometheus string
Default tenant for Prometheus alerting groups. See https://docs.victoriametrics.com/victoriametrics/vmalert/#multitenancy . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-rule.stripFilePath
Whether to strip file path in responses from the api/v1/rules API for files configured via -rule cmd-line flag. For example, the file path '/path/to/tenant_id/rules.yml' will be stripped to just 'rules.yml'. This flag might be useful to hide sensitive information in file path such as tenant ID. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.configFilePath string
Path to file with S3 configs. Configs are loaded from default location if not set.
See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.configProfile string
Profile name for S3 configs. If not set, the value of the environment variable will be loaded (AWS_PROFILE or AWS_DEFAULT_PROFILE), or if both not set, DefaultSharedConfigProfile is used. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.credsFilePath string
Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.customEndpoint string
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.forcePathStyle
Prefixing endpoint with bucket name when set false, true by default. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default true)
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
```

View File

@@ -15,6 +15,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
-blockcache.missesBeforeCaching int
The number of cache misses before putting the block into cache. Higher values may reduce indexdb/dataBlocks cache size at the cost of higher CPU and disk read usage (default 2)
-clusterMode
If clusterMode is enabled, then vmalert automatically adds the tenant specified in config groups to -datasource.url, -remoteWrite.url and -remoteRead.url. See https://docs.victoriametrics.com/victoriametrics/vmalert/#multitenancy . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default, the checking is disabled. Send SIGHUP signal in order to force config check for changes.
-datasource.appendTypePrefix
@@ -69,6 +71,10 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect endpoint. Required parameter. Supports address in the form of IP address with a port (e.g., http://127.0.0.1:8428) or DNS SRV record. See also -remoteRead.disablePathAppend and -datasource.showURL
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/victoriametrics/vmalert/#multitenancy . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-defaultTenant.prometheus string
Default tenant for Prometheus alerting groups. See https://docs.victoriametrics.com/victoriametrics/vmalert/#multitenancy . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-disableAlertgroupLabel
Whether to disable adding group's Name as label to generated alerts and time series.
-dryRun
@@ -79,6 +85,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Whether to enable reading flags from environment variables in addition to the command line. Command line flag values have priority over values from environment vars. Flags are read only from the command line if this flag isn't set. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#environment-variables for more details
-envflag.prefix string
Prefix for environment variables if -envflag.enable is set
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-evaluationInterval duration
How often to evaluate the rules (default 1m0s)
-external.alert.source string
@@ -143,6 +151,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-loggerDisableTimestamps
Whether to disable writing timestamps in logs
-loggerErrorsPerSecondLimit int
@@ -172,6 +188,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file.
Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-notifier.basicAuth.password array
Optional basic auth password for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
@@ -432,6 +456,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Minimum amount of time to wait before resending an alert to notifier.
-rule.resultsLimit int
Limits the number of alerts or recording results a single rule can produce. Can be overridden by the limit option under group if specified. If exceeded, the rule will be marked with an error and all its results will be discarded. 0 means no limit.
-rule.stripFilePath
Whether to strip file path in responses from the api/v1/rules API for files configured via -rule cmd-line flag. For example, the file path '/path/to/tenant_id/rules.yml' will be stripped to just 'rules.yml'. This flag might be useful to hide sensitive information in file path such as tenant ID. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-rule.templates array
Path or glob pattern to location with go template definitions for rules annotations templating. Flag can be specified multiple times.
Examples:
@@ -448,6 +474,18 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Whether to validate rules expressions for different types. (default true)
-rule.validateTemplates
Whether to validate annotation and label templates (default true)
-s3.configFilePath string
Path to file with S3 configs. Configs are loaded from default location if not set.
See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.configProfile string
Profile name for S3 configs. If not set, the value of the environment variable will be loaded (AWS_PROFILE or AWS_DEFAULT_PROFILE), or if both not set, DefaultSharedConfigProfile is used. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.credsFilePath string
Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.customEndpoint string
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-s3.forcePathStyle
Prefixing endpoint with bucket name when set false, true by default. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/ (default true)
-secret.flags array
Comma-separated list of flag names with secret values. Values for these flags are hidden in logs and on /metrics page
Supports an array of values separated by comma or specified via multiple flags.
@@ -456,6 +494,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmalert/ .
Whether to enable TLS for incoming HTTP requests at the given -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsCertFile array
Path to file with TLS certificate for the corresponding -httpListenAddr if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. See also -tlsAutocertHosts
Supports an array of values separated by comma or specified via multiple flags.

View File

@@ -1207,10 +1207,4 @@ It is safe to share the collected profiles from security point of view, since th
Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
### Common flags
These flags are available in both VictoriaMetrics OSS and VictoriaMetrics Enterprise.
{{% content "vmauth_common_flags.md" %}}
### Enterprise flags
These flags are available only in [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
{{% content "vmauth_enterprise_flags.md" %}}
{{% content "vmauth_flags.md" %}}

View File

@@ -1,39 +0,0 @@
---
build:
list: never
publishResources: false
render: never
sitemap:
disable: true
---
<!-- The file has to be manually updated during feature work in PR, make docs-update-flags command could be used periodically to ensure the flags in sync. -->
```shellhelp
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-httpRealIPHeader string
HTTP request header to use for obtaining IP address of client for applying 'ip_filters'. By default vmauth uses the IP address of the TCP client. Useful if vmauth is behind reverse-proxy. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
```

View File

@@ -39,6 +39,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
Whether to enable reading flags from environment variables in addition to the command line. Command line flag values have priority over values from environment vars. Flags are read only from the command line if this flag isn't set. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#environment-variables for more details
-envflag.prefix string
Prefix for environment variables if -envflag.enable is set
-eula
Deprecated, please use -license or -licenseFile flags instead. By specifying this flag, you confirm that you have an enterprise license and accept the ESA https://victoriametrics.com/legal/esa/ . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-failTimeout duration
Sets a delay period for load balancing to skip a malfunctioning backend (default 3s)
-filestream.disableFadvise
@@ -93,6 +95,8 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
Whether to use proxy protocol for connections accepted at the corresponding -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-httpRealIPHeader string
HTTP request header to use for obtaining IP address of client for applying 'ip_filters'. By default vmauth uses the IP address of the TCP client. Useful if vmauth is behind reverse-proxy
-idleConnTimeout duration
The timeout for HTTP keep-alive connections to backend services. It is recommended setting this value to values smaller than -http.idleConnTimeout set at backend services (default 50s)
-internStringCacheExpireDuration duration
@@ -101,6 +105,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
-internStringMaxLen int
The maximum length for strings to intern. A lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
-license string
License key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed via file specified by -licenseFile command-line flag
-license.forceOffline
Whether to enable offline verification for VictoriaMetrics Enterprise license key, which has been passed either via -license or via -licenseFile command-line flag. The issued license key must support offline verification feature. Contact info@victoriametrics.com if you need offline license verification. This flag is available only in Enterprise binaries
-licenseFile string
Path to file with license key for VictoriaMetrics Enterprise. See https://victoriametrics.com/products/enterprise/ . Trial Enterprise license can be obtained from https://victoriametrics.com/products/enterprise/trial/ . This flag is available only in Enterprise binaries. The license key can be also passed inline via -license command-line flag
-licenseFile.reloadInterval duration
Interval for reloading the license file specified via -licenseFile. See https://victoriametrics.com/products/enterprise/ . This flag is available only in Enterprise binaries (default 1h0m0s)
-loadBalancingPolicy string
The default load balancing policy to use for backend urls specified inside url_prefix section. Supported policies: least_loaded, first_available. See https://docs.victoriametrics.com/victoriametrics/vmauth/#load-balancing (default "least_loaded")
-logInvalidAuthTokens
@@ -149,6 +161,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file.
Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls array
Whether to require valid client certificate for https requests to the corresponding -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-mtlsCAFile array
Optional path to TLS Root CA for verifying client certificates at the corresponding -httpListenAddr when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-pprofAuthKey value
Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides -httpAuth.*
Flag value can be read from the given file when using -pprofAuthKey=file:///abs/path/to/file or -pprofAuthKey=file://./relative/path/to/file.
@@ -189,6 +209,14 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
Whether to enable TLS for incoming HTTP requests at the given -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-tlsAutocertCacheDir string
Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertEmail string
Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
-tlsAutocertHosts array
Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Each array item can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-tlsCertFile array
Path to file with TLS certificate for the corresponding -httpListenAddr if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated. See also -tlsAutocertHosts
Supports an array of values separated by comma or specified via multiple flags.

Some files were not shown because too many files have changed in this diff Show More