Compare commits

..

1 Commits

Author SHA1 Message Date
Vadim Alekseev
de3690671b lib/stringsutil: optimize AppendLowercase
The optimization includes the following improvements:
- Implementation of a function that processes 8 bytes per loop iteration to locate ASCII characters using bitwise operations.
- Implementation of the ToLowercaseFunc function that avoids copying the string when it is already lowercase.
- Use of a lookup table for converting ASCII characters to lowercase, with logic copied from the VictoriaLogs repository.
2026-04-16 02:22:45 +04:00
125 changed files with 1475 additions and 1459 deletions

0
.codex
View File

View File

@@ -1 +1,10 @@
Before creating the PR, please read [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist) and remove this line after confirming you understand and follow them.
### Describe Your Changes
Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
### Checklist
The following checks are **mandatory**:
- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development goals](https://docs.victoriametrics.com/victoriametrics/goals/).

View File

@@ -27,7 +27,7 @@ jobs:
- run: go version
- name: Cache Go artifacts
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/go-build

View File

@@ -40,7 +40,7 @@ jobs:
- run: go version
- name: Cache Go artifacts
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
@@ -50,14 +50,14 @@ jobs:
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-
- name: Initialize CodeQL
uses: github/codeql-action/init@v4.35.1
uses: github/codeql-action/init@v4
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v4.35.1
uses: github/codeql-action/autobuild@v4
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4.35.1
uses: github/codeql-action/analyze@v4
with:
category: 'language:go'

View File

@@ -47,7 +47,7 @@ jobs:
- run: go version
- name: Cache golangci-lint
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/golangci-lint

View File

@@ -118,8 +118,8 @@ func main() {
logger.Fatalf("cannot stop the webservice: %s", err)
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
vminsertcommon.StopIngestionRateLimiter()
vminsert.Stop()
vminsertcommon.StopIngestionRateLimiter()
vmstorage.Stop()
vmselect.Stop()

View File

@@ -102,8 +102,6 @@ var (
"cannot be pushed into the configured -remoteWrite.url systems in a timely manner. See https://docs.victoriametrics.com/victoriametrics/vmagent/#disabling-on-disk-persistence")
disableMetadataPerURL = flagutil.NewArrayBool("remoteWrite.disableMetadata", "Whether to disable sending metadata to the corresponding -remoteWrite.url. "+
"By default, metadata sending is controlled by the global -enableMetadata flag")
enableRerouting = flag.Bool("remoteWrite.enableRerouting", false, "Whether to reroute samples to available remote storage systems when there's any remote storage system and its persistent queue can not "+
"keep up with the data ingestion rate. If this flag is not set, then it will be calculated automatically based on -remoteWrite.disableOnDiskQueue. See https://docs.victoriametrics.com/victoriametrics/vmagent/#disabling-on-disk-persistence")
)
var (
@@ -217,10 +215,6 @@ func Init() {
// to the remaining -remoteWrite.url and dropping them on the blocked queue.
dropSamplesOnFailureGlobal = *dropSamplesOnOverload || disableOnDiskQueueAny && len(*remoteWriteURLs) > 1
if *shardByURL && !flagutil.IsSet("remoteWrite.enableRerouting") {
*enableRerouting = disableOnDiskQueueAny
}
dropDanglingQueues()
// Start config reloader.
@@ -504,13 +498,11 @@ func tryPush(at *auth.Token, wr *prompb.WriteRequest, forceDropSamplesOnFailure
//
// calculateHealthyRwctxIdx will rely on the order of rwctx to be in ascending order.
func getEligibleRemoteWriteCtxs(tss []prompb.TimeSeries, forceDropSamplesOnFailure bool) ([]*remoteWriteCtx, bool) {
if (*shardByURL && !*enableRerouting) || !disableOnDiskQueueAny {
if !disableOnDiskQueueAny {
return rwctxsGlobal, true
}
// This code is applicable when:
// 1. remoteWrite.shardByUrl is disabled and at least a single remote storage has -disableOnDiskQueue.
// 2. remoteWrite.shardByUrl is enabled and remoteWrite.enableRerouting is set to true.
// This code is applicable if at least a single remote storage has -disableOnDiskQueue
rwctxs := make([]*remoteWriteCtx, 0, len(rwctxsGlobal))
for _, rwctx := range rwctxsGlobal {
if !rwctx.fq.IsWriteBlocked() {

View File

@@ -222,9 +222,6 @@ func (r *Rule) Validate() error {
if r.Expr == "" {
return fmt.Errorf("expression can't be empty")
}
if _, ok := r.Labels["__name__"]; ok {
return fmt.Errorf("invalid rule label __name__")
}
return checkOverflow(r.XXX, "rule")
}

View File

@@ -136,9 +136,6 @@ func TestRuleValidate(t *testing.T) {
if err := (&Rule{Alert: "alert"}).Validate(); err == nil {
t.Fatalf("expected empty expr error")
}
if err := (&Rule{Record: "record", Expr: "sum(test)", Labels: map[string]string{"__name__": "test"}}).Validate(); err == nil {
t.Fatalf("invalid rule label; got %s", err)
}
if err := (&Rule{Alert: "alert", Expr: "test>0"}).Validate(); err != nil {
t.Fatalf("expected valid rule; got %s", err)
}

View File

@@ -87,7 +87,6 @@ func (m *Metric) DelLabel(key string) {
for i, l := range m.Labels {
if l.Name == key {
m.Labels = append(m.Labels[:i], m.Labels[i+1:]...)
break
}
}
}

View File

@@ -312,11 +312,9 @@ type labelSet struct {
// On k conflicts in origin set, the original value is preferred and copied
// to processed with `exported_%k` key. The copy happens only if passed v isn't equal to origin[k] value.
func (ls *labelSet) add(k, v string) {
// do not add label with empty value to the result, as it has no meaning:
// if the label already exists in the original query result, remove it to preserve compatibility with relabeling, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766.
// otherwise, ignore the label, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984.
// do not add label with empty value, since it has no meaning.
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
if v == "" {
delete(ls.processed, k)
return
}
ls.processed[k] = v

View File

@@ -1363,7 +1363,6 @@ func TestAlertingRule_ToLabels(t *testing.T) {
{Name: "instance", Value: "0.0.0.0:8800"},
{Name: "group", Value: "vmalert"},
{Name: "alertname", Value: "ConfigurationReloadFailure"},
{Name: "pod", Value: "vmalert-0"},
},
Values: []float64{1},
Timestamps: []int64{time.Now().UnixNano()},
@@ -1375,7 +1374,6 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert", // this shouldn't have effect since value in metric is equal
"invalid_label": "{{ .Values.mustRuntimeFail }}",
"empty_label": "", // this should be dropped
"pod": "", // this should remove the pod label from query result
},
Expr: "sum(vmalert_alerting_rules_error) by(instance, group, alertname) > 0",
Name: "AlertingRulesError",
@@ -1387,7 +1385,6 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert",
"alertname": "ConfigurationReloadFailure",
"alertgroup": "vmalert",
"pod": "vmalert-0",
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}

View File

@@ -409,9 +409,6 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
g.mu.Unlock()
defer g.evalCancel()
// start the interval ticker before the first evaluation,
// so that the evaluation timestamps of groups with the `eval_offset` option are also aligned,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10773
t := time.NewTicker(g.Interval)
defer t.Stop()

View File

@@ -293,11 +293,9 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompb.TimeSeries {
}
// add extra labels configured by user
for k := range rr.Labels {
// do not add label with empty value to the result, as it has no meaning:
// if the label already exists in the original query result, remove it to preserve compatibility with relabeling, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766.
// otherwise, ignore the label, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984.
// do not add label with empty value, since it has no meaning.
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
if rr.Labels[k] == "" {
m.DelLabel(k)
continue
}
existingLabel := promrelabel.GetLabelByName(m.Labels, k)

View File

@@ -163,13 +163,11 @@ func TestRecordingRule_Exec(t *testing.T) {
f(&RecordingRule{
Name: "job:foo",
Labels: map[string]string{
"source": "test",
"empty_label": "", // this should be dropped
"pod": "", // this should remove the pod label from query result
"source": "test",
},
}, [][]datasource.Metric{{
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo", "pod", "vmalert-0"),
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin", "pod", "vmalert-1"),
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo"),
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin"),
metricWithValueAndLabels(t, 1, "__name__", "baz", "job", "baz", "source", "test"),
}}, [][]prompb.TimeSeries{{
newTimeSeries([]float64{2}, []int64{ts.UnixNano()}, []prompb.Label{

View File

@@ -481,9 +481,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
canRetry := !bbOK || bb.canRetry()
res, err := ui.rt.RoundTrip(req)
if err == nil {
defer func() { _ = res.Body.Close() }()
}
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not retry canceled requests.
@@ -553,6 +550,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
w.WriteHeader(res.StatusCode)
err = copyStreamToClient(w, res.Body)
_ = res.Body.Close()
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not retry canceled requests.

File diff suppressed because it is too large Load Diff

View File

@@ -23,14 +23,14 @@
"classnames": "^2.5.1",
"dayjs": "^1.11.20",
"lodash.debounce": "^4.0.8",
"marked": "^18.0.0",
"preact": "^10.29.1",
"qs": "^6.15.1",
"marked": "^17.0.5",
"preact": "^10.29.0",
"qs": "^6.15.0",
"react-input-mask": "^2.0.4",
"react-router-dom": "^7.14.1",
"react-router-dom": "^7.13.2",
"uplot": "^1.6.32",
"vite": "^8.0.8",
"web-vitals": "^5.2.0"
"vite": "^8.0.7",
"web-vitals": "^5.1.0"
},
"devDependencies": {
"@eslint/eslintrc": "^3.3.5",
@@ -39,24 +39,24 @@
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/preact": "^3.2.4",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "^25.6.0",
"@types/node": "^25.5.0",
"@types/qs": "^6.15.0",
"@types/react": "^19.2.14",
"@types/react-input-mask": "^3.0.6",
"@types/react-router-dom": "^5.3.3",
"@typescript-eslint/eslint-plugin": "^8.58.2",
"@typescript-eslint/parser": "^8.58.2",
"@typescript-eslint/eslint-plugin": "^8.57.2",
"@typescript-eslint/parser": "^8.57.2",
"cross-env": "^10.1.0",
"eslint": "^9.39.2",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-unused-imports": "^4.4.1",
"globals": "^17.5.0",
"globals": "^17.4.0",
"http-proxy-middleware": "^3.0.5",
"jsdom": "^29.0.2",
"postcss": "^8.5.10",
"sass-embedded": "^1.99.0",
"typescript": "^6.0.2",
"vitest": "^4.1.4"
"jsdom": "^29.0.1",
"postcss": "^8.5.8",
"sass-embedded": "^1.98.0",
"typescript": "^5.9.3",
"vitest": "^4.1.1"
},
"browserslist": {
"production": [

View File

@@ -1,7 +1,7 @@
import { useMemo } from "preact/compat";
import "./style.scss";
import { Alert as APIAlert, Group } from "../../../types";
import { Link } from "react-router-dom";
import { Alert as APIAlert } from "../../../types";
import { createSearchParams } from "react-router-dom";
import Button from "../../Main/Button/Button";
import Badges, { BadgeColor } from "../Badges";
import { formatEventTime } from "../helpers";
@@ -9,14 +9,12 @@ import {
SearchIcon,
} from "../../Main/Icons";
import CodeExample from "../../Main/CodeExample/CodeExample";
import router from "../../../router";
interface BaseAlertProps {
item: APIAlert;
group?: Group;
}
const BaseAlert = ({ item, group }: BaseAlertProps) => {
const BaseAlert = ({ item }: BaseAlertProps) => {
const query = item?.expression;
const alertLabels = item?.labels || {};
const alertLabelsItems = useMemo(() => {
@@ -26,19 +24,13 @@ const BaseAlert = ({ item, group }: BaseAlertProps) => {
}]));
}, [alertLabels]);
const queryLink = useMemo(() => {
if (!group?.interval) return;
const params = new URLSearchParams({
const openQueryLink = () => {
const params = {
"g0.expr": query,
"g0.end_time": item.activeAt,
// Interval is the Group's evaluation interval in float seconds as present in the file. See: /app/vmalert/rule/web.go
"g0.step_input": `${group.interval}s`,
"g0.relative_time": "none",
});
return `${router.home}?${params.toString()}`;
}, [query, item.activeAt, group?.interval]);
"g0.end_time": ""
};
window.open(`#/?${createSearchParams(params).toString()}`, "_blank", "noopener noreferrer");
};
return (
<div className="vm-explore-alerts-alert-item">
@@ -53,22 +45,15 @@ const BaseAlert = ({ item, group }: BaseAlertProps) => {
style={{ "text-align": "end" }}
colSpan={2}
>
{queryLink && (
<Link
to={queryLink}
target={"_blank"}
rel="noreferrer"
>
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
>
<span className="vm-button-text">Run query</span>
</Button>
</Link>
)}
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
onClick={openQueryLink}
>
<span className="vm-button-text">Run query</span>
</Button>
</td>
</tr>
<tr>

View File

@@ -1,21 +1,19 @@
import { useMemo } from "preact/compat";
import "./style.scss";
import { Group, Rule as APIRule } from "../../../types";
import { useNavigate, Link } from "react-router-dom";
import { Rule as APIRule } from "../../../types";
import { useNavigate, createSearchParams } from "react-router-dom";
import { SearchIcon, DetailsIcon } from "../../Main/Icons";
import Button from "../../Main/Button/Button";
import Alert from "../../Main/Alert/Alert";
import Badges, { BadgeColor } from "../Badges";
import { formatDuration, formatEventTime } from "../helpers";
import CodeExample from "../../Main/CodeExample/CodeExample";
import router from "../../../router";
interface BaseRuleProps {
item: APIRule;
group?: Group;
}
const BaseRule = ({ item, group }: BaseRuleProps) => {
const BaseRule = ({ item }: BaseRuleProps) => {
const query = item?.query;
const navigate = useNavigate();
const openAlertLink = (id: string) => {
@@ -35,19 +33,13 @@ const BaseRule = ({ item, group }: BaseRuleProps) => {
}]));
}, [ruleLabels]);
const queryLink = useMemo(() => {
if (!group?.interval) return;
const params = new URLSearchParams({
const openQueryLink = () => {
const params = {
"g0.expr": query,
"g0.end_time": item.lastEvaluation,
// Interval is the Group's evaluation interval in float seconds as present in the file. See: /app/vmalert/rule/web.go
"g0.step_input": `${group.interval}s`,
"g0.relative_time": "none",
});
return `${router.home}?${params.toString()}`;
}, [query, item.lastEvaluation, group?.interval]);
"g0.end_time": ""
};
window.open(`#/?${createSearchParams(params).toString()}`, "_blank", "noopener noreferrer");
};
return (
<div className="vm-explore-alerts-rule-item">
@@ -62,22 +54,15 @@ const BaseRule = ({ item, group }: BaseRuleProps) => {
style={{ "text-align": "end" }}
colSpan={2}
>
{queryLink && (
<Link
to={queryLink}
target={"_blank"}
rel="noreferrer"
>
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
>
<span className="vm-button-text">Run query</span>
</Button>
</Link>
)}
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
onClick={openQueryLink}
>
<span className="vm-button-text">Run query</span>
</Button>
</td>
</tr>
<tr>

View File

@@ -2,16 +2,15 @@ import { FC } from "preact/compat";
import ItemHeader from "../ItemHeader";
import Accordion from "../../Main/Accordion/Accordion";
import "./style.scss";
import { Group, Rule as APIRule } from "../../../types";
import { Rule as APIRule } from "../../../types";
import BaseRule from "../BaseRule";
interface RuleProps {
states: Record<string, number>;
rule: APIRule;
group: Group;
}
const Rule: FC<RuleProps> = ({ states, rule, group }) => {
const Rule: FC<RuleProps> = ({ states, rule }) => {
const state = Object.keys(states).length > 0 ? Object.keys(states)[0] : "ok";
return (
<div className={`vm-explore-alerts-rule vm-badge-item ${state.replace(" ", "-")}`}>
@@ -26,10 +25,7 @@ const Rule: FC<RuleProps> = ({ states, rule, group }) => {
name={rule.name}
/>}
>
<BaseRule
item={rule}
group={group}
/>
<BaseRule item={rule} />
</Accordion>
</div>
);

View File

@@ -50,6 +50,7 @@ const RulesHeader = ({
label="Rule type"
placeholder="Please select rule type"
onChange={onChangeRuleType}
autofocus={!!types.length && !isMobile}
includeAll
searchable
/>

View File

@@ -17,7 +17,7 @@ export const formatDuration = (raw: number) => {
export const formatEventTime = (raw: string) => {
const t = dayjs(raw);
return t.year() <= 1 ? "Never" : t.tz().format("DD MMM YYYY HH:mm:ss");
return t.year() <= 1 ? "Never" : t.format("DD MMM YYYY HH:mm:ss");
};
export const getStates = (rule: Rule) => {

View File

@@ -2,11 +2,10 @@ import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import { useFetchItem } from "./hooks/useFetchItem";
import "./style.scss";
import { Alert as APIAlert, Group as APIGroup } from "../../types";
import { Alert as APIAlert } from "../../types";
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
import BaseAlert from "../../components/ExploreAlerts/BaseAlert";
import Modal from "../../components/Main/Modal/Modal";
import { useFetchGroup } from "./hooks/useFetchGroup";
interface ExploreAlertProps {
groupId: string;
@@ -18,19 +17,10 @@ interface ExploreAlertProps {
const ExploreAlert = ({ groupId, id, mode, onClose }: ExploreAlertProps) => {
const {
item,
isLoading: isLoadingItem,
error: errorItem,
isLoading,
error,
} = useFetchItem<APIAlert>({ groupId, id, mode });
const {
group,
isLoading: isLoadingGroup,
error: errorGroup,
} = useFetchGroup<APIGroup>({ id: groupId });
const error = errorItem || errorGroup;
const isLoading = isLoadingItem || isLoadingGroup;
if (isLoading) return (
<Spinner />
);
@@ -61,12 +51,7 @@ const ExploreAlert = ({ groupId, id, mode, onClose }: ExploreAlertProps) => {
onClose={onClose}
>
<div className="vm-explore-alerts">
{item ? (
<BaseAlert
item={item}
group={group}
/>
) : (
{item && (<BaseAlert item={item} />) || (
<Alert variant="info">{noItemFound}</Alert>
)}
</div>

View File

@@ -2,12 +2,11 @@ import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import { useFetchItem } from "./hooks/useFetchItem";
import "./style.scss";
import { Group as APIGroup, Rule as APIRule } from "../../types";
import { Rule as APIRule } from "../../types";
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
import BaseRule from "../../components/ExploreAlerts/BaseRule";
import Modal from "../../components/Main/Modal/Modal";
import { getStates } from "../../components/ExploreAlerts/helpers";
import { useFetchGroup } from "./hooks/useFetchGroup";
interface ExploreRuleProps {
groupId: string;
@@ -19,19 +18,10 @@ interface ExploreRuleProps {
const ExploreRule = ({ groupId, id, mode, onClose }: ExploreRuleProps) => {
const {
item,
isLoading: isLoadingItem,
error: errorItem,
isLoading,
error,
} = useFetchItem<APIRule>({ groupId, id, mode });
const {
group,
isLoading: isLoadingGroup,
error: errorGroup,
} = useFetchGroup<APIGroup>({ id: groupId });
const error = errorItem || errorGroup;
const isLoading = isLoadingItem || isLoadingGroup;
if (isLoading) return (
<Spinner />
);
@@ -59,12 +49,7 @@ const ExploreRule = ({ groupId, id, mode, onClose }: ExploreRuleProps) => {
onClose={onClose}
>
<div className="vm-explore-alerts">
{item ? (
<BaseRule
item={item}
group={group}
/>
) : (
{item && (<BaseRule item={item} />) || (
<Alert variant="info">{noItemFound}</Alert>
)}
</div>

View File

@@ -132,7 +132,7 @@ const ExploreRules: FC = () => {
newParams.set("page_num", "1");
setSearchParams(newParams);
const changes = getChanges(title, states);
setStates(changes.length === allStates.length ? [] : changes);
setStates(changes.length == allStates.length ? [] : changes);
}, [states, searchParams]);
const handleChangeRuleType = useCallback((title: string) => {
@@ -186,7 +186,6 @@ const ExploreRules: FC = () => {
<Rule
key={`rule-${rule.id}`}
rule={rule}
group={group}
states={getStates(rule)}
/>
))}

View File

@@ -15,12 +15,13 @@
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true,
"module": "esnext",
"moduleResolution": "bundler",
"moduleResolution": "node",
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
"jsxImportSource": "preact",
"downlevelIteration": true,
"noUnusedLocals": true,
"paths": {
"react": ["./node_modules/preact/compat/"],
@@ -31,8 +32,5 @@
},
"include": [
"src"
],
"exclude": [
"scripts/**/*.ts"
]
}

View File

@@ -33,41 +33,37 @@ func (c *Client) CloseConnections() {
c.httpCli.CloseIdleConnections()
}
// Get sends an HTTP GET request, returns
// Get sends a HTTP GET request, returns
// the response body and status code to the caller.
func (c *Client) Get(t *testing.T, url string, headers http.Header) (string, int) {
func (c *Client) Get(t *testing.T, url string) (string, int) {
t.Helper()
return c.do(t, http.MethodGet, url, nil, headers)
return c.do(t, http.MethodGet, url, "", nil)
}
// Post sends an HTTP POST request, returns
// Post sends a HTTP POST request, returns
// the response body and status code to the caller.
func (c *Client) Post(t *testing.T, url string, data []byte, headers http.Header) (string, int) {
func (c *Client) Post(t *testing.T, url, contentType string, data []byte) (string, int) {
t.Helper()
return c.do(t, http.MethodPost, url, data, headers)
return c.do(t, http.MethodPost, url, contentType, data)
}
// PostForm sends an HTTP POST request containing the POST-form data with attached getHeaders, returns
// PostForm sends a HTTP POST request containing the POST-form data, returns
// the response body and status code to the caller.
func (c *Client) PostForm(t *testing.T, url string, data url.Values, headers http.Header) (string, int) {
func (c *Client) PostForm(t *testing.T, url string, data url.Values) (string, int) {
t.Helper()
if headers == nil {
headers = make(http.Header)
}
headers.Set("Content-Type", "application/x-www-form-urlencoded")
return c.Post(t, url, []byte(data.Encode()), headers)
return c.Post(t, url, "application/x-www-form-urlencoded", []byte(data.Encode()))
}
// Delete sends an HTTP DELETE request and returns the response body and status code
// Delete sends a HTTP DELETE request and returns the response body and status code
// to the caller.
func (c *Client) Delete(t *testing.T, url string) (string, int) {
t.Helper()
return c.do(t, http.MethodDelete, url, nil, nil)
return c.do(t, http.MethodDelete, url, "", nil)
}
// do prepares an HTTP request, sends it to the server, receives the response
// do prepares a HTTP request, sends it to the server, receives the response
// from the server, returns the response body and status code to the caller.
func (c *Client) do(t *testing.T, method, url string, data []byte, headers http.Header) (string, int) {
func (c *Client) do(t *testing.T, method, url, contentType string, data []byte) (string, int) {
t.Helper()
req, err := http.NewRequest(method, url, bytes.NewReader(data))
@@ -75,7 +71,9 @@ func (c *Client) do(t *testing.T, method, url string, data []byte, headers http.
t.Fatalf("could not create a HTTP request: %v", err)
}
req.Header = headers
if len(contentType) > 0 {
req.Header.Add("Content-Type", contentType)
}
res, err := c.httpCli.Do(req)
if err != nil {
t.Fatalf("could not send HTTP request: %v", err)
@@ -137,7 +135,7 @@ func (app *ServesMetrics) GetIntMetric(t *testing.T, metricName string) int {
func (app *ServesMetrics) GetMetric(t *testing.T, metricName string) float64 {
t.Helper()
metrics, statusCode := app.cli.Get(t, app.metricsURL, nil)
metrics, statusCode := app.cli.Get(t, app.metricsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -163,7 +161,7 @@ func (app *ServesMetrics) GetMetricsByPrefix(t *testing.T, prefix string) []floa
values := []float64{}
metrics, statusCode := app.cli.Get(t, app.metricsURL, nil)
metrics, statusCode := app.cli.Get(t, app.metricsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -192,7 +190,7 @@ func (app *ServesMetrics) GetMetricsByRegexp(t *testing.T, re *regexp.Regexp) []
values := []float64{}
metrics, statusCode := app.cli.Get(t, app.metricsURL, nil)
metrics, statusCode := app.cli.Get(t, app.metricsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}

View File

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"math"
"net/http"
"net/url"
"slices"
"sort"
@@ -90,22 +89,6 @@ type QueryOpts struct {
LatencyOffset string
Format string
NoCache string
Headers http.Header
}
// getTenant returns tenant with optional default value
func (qos *QueryOpts) getTenant() string {
if qos.Tenant == "" {
return "0"
}
return qos.Tenant
}
func (qos *QueryOpts) getHeaders() http.Header {
if qos.Headers == nil {
qos.Headers = make(http.Header)
}
return qos.Headers
}
func (qos *QueryOpts) asURLValues() url.Values {
@@ -135,6 +118,14 @@ func (qos *QueryOpts) asURLValues() url.Values {
return uv
}
// getTenant returns tenant with optional default value
func (qos *QueryOpts) getTenant() string {
if qos.Tenant == "" {
return "0"
}
return qos.Tenant
}
// PrometheusAPIV1QueryResponse is an inmemory representation of the
// /prometheus/api/v1/query or /prometheus/api/v1/query_range response.
type PrometheusAPIV1QueryResponse struct {

View File

@@ -76,13 +76,11 @@ func (app *Vmagent) APIV1ImportPrometheus(t *testing.T, records []string, opts Q
// Flushing may still be in progress on the function return.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1importprometheus
func (app *Vmagent) APIV1ImportPrometheusNoWaitFlush(t *testing.T, records []string, opts QueryOpts) {
func (app *Vmagent) APIV1ImportPrometheusNoWaitFlush(t *testing.T, records []string, _ QueryOpts) {
t.Helper()
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
_, statusCode := app.cli.Post(t, app.apiV1ImportPrometheusURL, data, headers)
_, statusCode := app.cli.Post(t, app.apiV1ImportPrometheusURL, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}

View File

@@ -114,10 +114,8 @@ func (app *Vminsert) InfluxWrite(t *testing.T, records []string, opts QueryOpts)
}
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
app.sendBlocking(t, len(records), func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -148,10 +146,8 @@ func (app *Vminsert) PrometheusAPIV1ImportCSV(t *testing.T, records []string, op
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
app.sendBlocking(t, len(records), func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -172,10 +168,8 @@ func (app *Vminsert) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts
if len(uvs) > 0 {
url += "?" + uvs
}
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
app.sendBlocking(t, 1, func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -197,10 +191,8 @@ func (app *Vminsert) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOp
url += "?" + uvs
}
data := []byte("[" + strings.Join(records, ",") + "]")
headers := opts.getHeaders()
headers.Set("Content-Type", "application/json")
app.sendBlocking(t, len(records), func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "application/json", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -219,10 +211,8 @@ func (app *Vminsert) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest,
if prommetadata.IsEnabled() {
recordsCount += len(wr.Metadata)
}
headers := opts.getHeaders()
headers.Set("Content-Type", "application/x-protobuf")
app.sendBlocking(t, recordsCount, func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "application/x-protobuf", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -271,10 +261,8 @@ func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
if prommetadata.IsEnabled() {
recordsCount += metadataRecords
}
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
app.sendBlocking(t, recordsCount, func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -294,10 +282,8 @@ func (app *Vminsert) ZabbixConnectorHistory(t *testing.T, records []string, opts
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "application/json")
app.sendBlocking(t, len(records), func() {
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "application/json", data)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}

View File

@@ -76,7 +76,7 @@ func (app *Vmselect) PrometheusAPIV1Export(t *testing.T, query string, opts Quer
values := opts.asURLValues()
values.Add("match[]", query)
values.Add("format", "promapi")
res, _ := app.cli.PostForm(t, exportURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, exportURL, values)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@@ -92,7 +92,7 @@ func (app *Vmselect) PrometheusAPIV1ExportNative(t *testing.T, query string, opt
values := opts.asURLValues()
values.Add("match[]", query)
values.Add("format", "promapi")
res, _ := app.cli.PostForm(t, exportURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, exportURL, values)
return []byte(res)
}
@@ -108,7 +108,7 @@ func (app *Vmselect) PrometheusAPIV1Query(t *testing.T, query string, opts Query
values := opts.asURLValues()
values.Add("query", query)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@@ -124,7 +124,7 @@ func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query string, opts
values := opts.asURLValues()
values.Add("query", query)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@@ -139,7 +139,7 @@ func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res, _ := app.cli.PostForm(t, seriesURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, seriesURL, values)
return NewPrometheusAPIV1SeriesResponse(t, res)
}
@@ -153,7 +153,7 @@ func (app *Vmselect) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *P
seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series/count", app.httpListenAddr, opts.getTenant())
values := opts.asURLValues()
res, _ := app.cli.PostForm(t, seriesURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, seriesURL, values)
return NewPrometheusAPIV1SeriesCountResponse(t, res)
}
@@ -168,7 +168,7 @@ func (app *Vmselect) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts
values.Add("match[]", matchQuery)
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/labels", app.httpListenAddr, opts.getTenant())
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1LabelsResponse(t, res)
}
@@ -183,7 +183,7 @@ func (app *Vmselect) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQu
values.Add("match[]", matchQuery)
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/label/%s/values", app.httpListenAddr, opts.getTenant(), labelName)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1LabelValuesResponse(t, res)
}
@@ -197,7 +197,7 @@ func (app *Vmselect) PrometheusAPIV1Metadata(t *testing.T, metric string, limit
values.Add("limit", strconv.Itoa(limit))
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/metadata", app.httpListenAddr, opts.getTenant())
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1Metadata(t, res)
}
@@ -212,7 +212,7 @@ func (app *Vmselect) APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string,
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, queryURL, values)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
}
@@ -231,7 +231,7 @@ func (app *Vmselect) MetricNamesStats(t *testing.T, limit, le, matchPattern stri
values.Add("match_pattern", matchPattern)
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/status/metric_names_stats", app.httpListenAddr, opts.getTenant())
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, queryURL, values)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}
@@ -251,7 +251,7 @@ func (app *Vmselect) MetricNamesStatsReset(t *testing.T, opts QueryOpts) {
values := opts.asURLValues()
queryURL := fmt.Sprintf("http://%s/admin/api/v1/admin/status/metric_names_stats/reset", app.httpListenAddr)
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, queryURL, values)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
}
@@ -275,7 +275,7 @@ func (app *Vmselect) APIV1StatusTSDB(t *testing.T, matchQuery string, date strin
addNonEmpty("topN", topN)
addNonEmpty("date", date)
res, statusCode := app.cli.PostForm(t, seriesURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, seriesURL, values)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}
@@ -295,7 +295,7 @@ func (app *Vmselect) GraphiteMetricsIndex(t *testing.T, opts QueryOpts) Graphite
t.Helper()
seriesURL := fmt.Sprintf("http://%s/select/%s/graphite/metrics/index.json", app.httpListenAddr, opts.getTenant())
res, statusCode := app.cli.Get(t, seriesURL, opts.Headers)
res, statusCode := app.cli.Get(t, seriesURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}
@@ -317,7 +317,7 @@ func (app *Vmselect) GraphiteTagsTagSeries(t *testing.T, record string, opts Que
values := opts.asURLValues()
values.Add("path", record)
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
_, statusCode := app.cli.PostForm(t, url, values)
if got, want := statusCode, http.StatusNotImplemented; got != want {
t.Fatalf("unexpected status code: got %d, want %d", got, want)
}
@@ -332,7 +332,7 @@ func (app *Vmselect) GraphiteTagsTagMultiSeries(t *testing.T, records []string,
values.Add("path", rec)
}
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
_, statusCode := app.cli.PostForm(t, url, values)
if got, want := statusCode, http.StatusNotImplemented; got != want {
t.Fatalf("unexpected status code: got %d, want %d", got, want)
}
@@ -343,7 +343,7 @@ func (app *Vmselect) APIV1AdminTenants(t *testing.T) *AdminTenantsResponse {
t.Helper()
tenantsURL := fmt.Sprintf("http://%s/admin/tenants", app.httpListenAddr)
res, statusCode := app.cli.Get(t, tenantsURL, nil)
res, statusCode := app.cli.Get(t, tenantsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}

View File

@@ -98,7 +98,7 @@ func StartVmsingleAt(instance, binary string, flags []string, cli *Client, outpu
func (app *Vmsingle) ForceFlush(t *testing.T) {
t.Helper()
_, statusCode := app.cli.Get(t, app.forceFlushURL, nil)
_, statusCode := app.cli.Get(t, app.forceFlushURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -108,7 +108,7 @@ func (app *Vmsingle) ForceFlush(t *testing.T) {
func (app *Vmsingle) ForceMerge(t *testing.T) {
t.Helper()
_, statusCode := app.cli.Get(t, app.forceMergeURL, nil)
_, statusCode := app.cli.Get(t, app.forceMergeURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -130,9 +130,8 @@ func (app *Vmsingle) InfluxWrite(t *testing.T, records []string, opts QueryOpts)
if len(uvs) > 0 {
url += "?" + uvs
}
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -162,9 +161,7 @@ func (app *Vmsingle) PrometheusAPIV1ImportCSV(t *testing.T, records []string, op
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -184,9 +181,7 @@ func (app *Vmsingle) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts
if len(uvs) > 0 {
url += "?" + uvs
}
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -208,9 +203,7 @@ func (app *Vmsingle) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOp
url += "?" + uvs
}
data := []byte("[" + strings.Join(records, ",") + "]")
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -219,13 +212,11 @@ func (app *Vmsingle) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOp
// PrometheusAPIV1Write is a test helper function that inserts a
// collection of records in Prometheus remote-write format by sending a HTTP
// POST request to /prometheus/api/v1/write vmsingle endpoint.
func (app *Vmsingle) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, opts QueryOpts) {
func (app *Vmsingle) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, _ QueryOpts) {
t.Helper()
data := snappy.Encode(nil, wr.MarshalProtobuf(nil))
headers := opts.getHeaders()
headers.Set("Content-Type", "application/x-protobuf")
_, statusCode := app.cli.Post(t, app.prometheusAPIV1WriteURL, data, headers)
_, statusCode := app.cli.Post(t, app.prometheusAPIV1WriteURL, "application/x-protobuf", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -246,10 +237,9 @@ func (app *Vmsingle) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
if len(uvs) > 0 {
url += "?" + uvs
}
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
data := []byte(strings.Join(records, "\n"))
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
@@ -266,7 +256,7 @@ func (app *Vmsingle) PrometheusAPIV1Export(t *testing.T, query string, opts Quer
values.Add("match[]", query)
values.Add("format", "promapi")
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportURL, values)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@@ -283,7 +273,7 @@ func (app *Vmsingle) PrometheusAPIV1ExportNative(t *testing.T, query string, opt
values.Add("match[]", query)
values.Add("format", "promapi")
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportNativeURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportNativeURL, values)
return []byte(res)
}
@@ -297,7 +287,7 @@ func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query string, opts Query
values := opts.asURLValues()
values.Add("query", query)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, values)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@@ -312,7 +302,7 @@ func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query string, opts
values := opts.asURLValues()
values.Add("query", query)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, values)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@@ -326,7 +316,7 @@ func (app *Vmsingle) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, values)
return NewPrometheusAPIV1SeriesResponse(t, res)
}
@@ -340,7 +330,7 @@ func (app *Vmsingle) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *P
values := opts.asURLValues()
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/series/count", app.httpListenAddr)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1SeriesCountResponse(t, res)
}
@@ -355,7 +345,7 @@ func (app *Vmsingle) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts
values.Add("match[]", matchQuery)
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/labels", app.httpListenAddr)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1LabelsResponse(t, res)
}
@@ -370,7 +360,7 @@ func (app *Vmsingle) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQu
values.Add("match[]", matchQuery)
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/label/%s/values", app.httpListenAddr, labelName)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1LabelValuesResponse(t, res)
}
@@ -384,7 +374,7 @@ func (app *Vmsingle) PrometheusAPIV1Metadata(t *testing.T, metric string, limit
values.Add("limit", strconv.Itoa(limit))
queryURL := fmt.Sprintf("http://%s/prometheus/api/v1/metadata", app.httpListenAddr)
res, _ := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, _ := app.cli.PostForm(t, queryURL, values)
return NewPrometheusAPIV1Metadata(t, res)
}
@@ -399,7 +389,7 @@ func (app *Vmsingle) APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string,
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, queryURL, values)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
}
@@ -412,7 +402,7 @@ func (app *Vmsingle) GraphiteMetricsIndex(t *testing.T, _ QueryOpts) GraphiteMet
t.Helper()
seriesURL := fmt.Sprintf("http://%s/metrics/index.json", app.httpListenAddr)
res, statusCode := app.cli.Get(t, seriesURL, nil)
res, statusCode := app.cli.Get(t, seriesURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}
@@ -434,7 +424,7 @@ func (app *Vmsingle) GraphiteTagsTagSeries(t *testing.T, record string, opts Que
values := opts.asURLValues()
values.Add("path", record)
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
_, statusCode := app.cli.PostForm(t, url, values)
if got, want := statusCode, http.StatusNotImplemented; got != want {
t.Fatalf("unexpected status code: got %d, want %d", got, want)
}
@@ -449,7 +439,7 @@ func (app *Vmsingle) GraphiteTagsTagMultiSeries(t *testing.T, records []string,
values.Add("path", rec)
}
_, statusCode := app.cli.PostForm(t, url, values, opts.Headers)
_, statusCode := app.cli.PostForm(t, url, values)
if got, want := statusCode, http.StatusNotImplemented; got != want {
t.Fatalf("unexpected status code: got %d, want %d", got, want)
}
@@ -468,7 +458,7 @@ func (app *Vmsingle) APIV1StatusMetricNamesStats(t *testing.T, limit, le, matchP
values.Add("match_pattern", matchPattern)
queryURL := fmt.Sprintf("http://%s/api/v1/status/metric_names_stats", app.httpListenAddr)
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, queryURL, values)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}
@@ -488,7 +478,7 @@ func (app *Vmsingle) APIV1AdminStatusMetricNamesStatsReset(t *testing.T, opts Qu
values := opts.asURLValues()
queryURL := fmt.Sprintf("http://%s/api/v1/admin/status/metric_names_stats/reset", app.httpListenAddr)
res, statusCode := app.cli.PostForm(t, queryURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, queryURL, values)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
}
@@ -501,7 +491,7 @@ func (app *Vmsingle) APIV1AdminStatusMetricNamesStatsReset(t *testing.T, opts Qu
func (app *Vmsingle) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
t.Helper()
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), nil, nil)
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), "", nil)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}
@@ -527,7 +517,7 @@ func (app *Vmsingle) APIV1AdminTSDBSnapshot(t *testing.T) *APIV1AdminTSDBSnapsho
t.Helper()
queryURL := fmt.Sprintf("http://%s/api/v1/admin/tsdb/snapshot", app.httpListenAddr)
data, statusCode := app.cli.Post(t, queryURL, nil, nil)
data, statusCode := app.cli.Post(t, queryURL, "", nil)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}
@@ -548,7 +538,7 @@ func (app *Vmsingle) SnapshotList(t *testing.T) *SnapshotListResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/snapshot/list", app.httpListenAddr)
data, statusCode := app.cli.Get(t, queryURL, nil)
data, statusCode := app.cli.Get(t, queryURL)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}
@@ -594,7 +584,7 @@ func (app *Vmsingle) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse
t.Helper()
queryURL := fmt.Sprintf("http://%s/snapshot/delete_all", app.httpListenAddr)
data, statusCode := app.cli.Get(t, queryURL, nil)
data, statusCode := app.cli.Get(t, queryURL)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}
@@ -625,7 +615,7 @@ func (app *Vmsingle) APIV1StatusTSDB(t *testing.T, matchQuery string, date strin
addNonEmpty("topN", topN)
addNonEmpty("date", date)
res, statusCode := app.cli.PostForm(t, seriesURL, values, opts.Headers)
res, statusCode := app.cli.PostForm(t, seriesURL, values)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
}
@@ -651,9 +641,7 @@ func (app *Vmsingle) ZabbixConnectorHistory(t *testing.T, records []string, opts
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "application/json")
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, url, "application/json", data)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}

View File

@@ -77,7 +77,7 @@ func (app *Vmstorage) ForceFlush(t *testing.T) {
t.Helper()
forceFlushURL := fmt.Sprintf("http://%s/internal/force_flush", app.httpListenAddr)
_, statusCode := app.cli.Get(t, forceFlushURL, nil)
_, statusCode := app.cli.Get(t, forceFlushURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -88,7 +88,7 @@ func (app *Vmstorage) ForceMerge(t *testing.T) {
t.Helper()
forceMergeURL := fmt.Sprintf("http://%s/internal/force_merge", app.httpListenAddr)
_, statusCode := app.cli.Get(t, forceMergeURL, nil)
_, statusCode := app.cli.Get(t, forceMergeURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -101,7 +101,7 @@ func (app *Vmstorage) ForceMerge(t *testing.T) {
func (app *Vmstorage) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
t.Helper()
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), nil, nil)
data, statusCode := app.cli.Post(t, app.SnapshotCreateURL(), "", nil)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}
@@ -127,7 +127,7 @@ func (app *Vmstorage) SnapshotList(t *testing.T) *SnapshotListResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/snapshot/list", app.httpListenAddr)
data, statusCode := app.cli.Get(t, queryURL, nil)
data, statusCode := app.cli.Get(t, queryURL)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}
@@ -173,7 +173,7 @@ func (app *Vmstorage) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse
t.Helper()
queryURL := fmt.Sprintf("http://%s/snapshot/delete_all", app.httpListenAddr)
data, statusCode := app.cli.Post(t, queryURL, nil, nil)
data, statusCode := app.cli.Post(t, queryURL, "", nil)
if got, want := statusCode, http.StatusOK; got != want {
t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, data)
}

View File

@@ -151,7 +151,7 @@ Some alerting rules thresholds are just recommendations and could require an adj
The list of alerting rules is the following:
* [alerts-health.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-health.yml):
alerting rules related to all VictoriaMetrics components for tracking their "health" state;
* [alerts-single-node.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-single-node.yml):
* [alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts.yml):
alerting rules related to [single-server VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) installation;
* [alerts-cluster.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-cluster.yml):
alerting rules related to [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/);

View File

@@ -125,7 +125,7 @@ services:
ports:
- 8880:8880
volumes:
- ./rules/alerts-cluster.yml:/etc/alerts/alerts-cluster.yml
- ./rules/alerts-cluster.yml:/etc/alerts/alerts.yml
- ./rules/alerts-health.yml:/etc/alerts/alerts-health.yml
- ./rules/alerts-vmagent.yml:/etc/alerts/alerts-vmagent.yml
- ./rules/alerts-vmalert.yml:/etc/alerts/alerts-vmalert.yml

View File

@@ -66,7 +66,7 @@ services:
ports:
- 8880:8880
volumes:
- ./rules/alerts-single-node.yml:/etc/alerts/alerts-single-node.yml
- ./rules/alerts.yml:/etc/alerts/alerts.yml
- ./rules/alerts-health.yml:/etc/alerts/alerts-health.yml
- ./rules/alerts-vmagent.yml:/etc/alerts/alerts-vmagent.yml
- ./rules/alerts-vmalert.yml:/etc/alerts/alerts-vmalert.yml

View File

@@ -170,57 +170,3 @@ groups:
is saturated by more than 90% and vminsert won't be able to keep up.\n
This usually means that more vminsert or vmstorage nodes must be added to the cluster in order to increase
the total number of vminsert -> vmstorage links."
- alert: MetadataCacheUtilizationIsTooHigh
expr: |
vm_metrics_metadata_storage_size_bytes / vm_metrics_metadata_storage_max_size_bytes > 0.95
for: 15m
labels:
severity: warning
annotations:
summary: "Metadata cache capacity on {{ $labels.instance }} (job={{ $labels.job }}) is utilized for more than 95% for the last 15min"
description: "Metadata cache stores meta information about ingested time series - see https://docs.victoriametrics.com/victoriametrics/#metrics-metadata.
 When the cache is overutilized, the oldest entries will be dropped automatically. This may result in incomplete
 responses for /api/v1/metadata API calls. It doesn't impact regular queries or alerts. Cache size is controlled
 via -storage.maxMetadataStorageSize cmd-line flag."
- alert: MetricNameStatsCacheUtilizationIsTooHigh
expr: |
vm_cache_size_bytes{type="storage/metricNamesStatsTracker"} / vm_cache_size_max_bytes{type="storage/metricNamesStatsTracker"} > 0.95
for: 15m
labels:
severity: warning
annotations:
summary: "Cache capacity for tracking metric names usage on {{ $labels.instance }} (job={{ $labels.job }}) is utilized for more than 95% during the last 15min"
description: "Metric names usage cache stores information about unique metric names and how frequently they are queried - see https://docs.victoriametrics.com/victoriametrics/#track-ingested-metrics-usage.
When cache is overutilized, it will stop tracking the new metric names. It has no other negative impact.
Usually, the number of unique metric names is very limited (thousands). The cache can be overutilized only if metric names
 are changing too frequently or if the cache size is too low. There are the following ways to mitigate cache overutilization:
 - disable the cache via `--storage.trackMetricNamesStats=false` flag, so metric names usage will no longer be tracked
- increase the cache size via `--storage.cacheSizeMetricNamesStats` flag
- reset the cache (see docs for details)"
- alert: IndexDBRecordsDrop
expr: increase(vm_indexdb_items_dropped_total[5m]) > 0
labels:
severity: critical
annotations:
summary: "IndexDB skipped registering items during data ingestion with reason={{ $labels.reason }}."
description: |
VictoriaMetrics could skip registering new timeseries during ingestion if they fail the validation process.
For example, `reason=too_long_item` means that time series cannot exceed 64KB. Please, reduce the number
of labels or label values for such series. Or enforce these limits via `-maxLabelsPerTimeseries` and
`-maxLabelValueLen` command-line flags.
- alert: TooManyTSIDMisses
expr: increase(vm_missing_tsids_for_metric_id_total[5m]) > 0
for: 15m
labels:
severity: critical
annotations:
summary: "Unexpected TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes"
description: |
Unexpected TSID misses for \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes.
If this happens after unclean shutdown of VictoriaMetrics process (via \"kill -9\", OOM or power off),
then this is OK - the alert must go away in a few minutes after the restart.
Otherwise this may point to the corruption of index data.

View File

@@ -82,6 +82,19 @@ groups:
Check the logs for the given target. Check also the \"location\" label at the vm_log_messages_total metric if -loggerLevel command-line flag is set to value other than INFO.
This label contains code locations responsible for generating log messages suppressed by -loggerLevel.
- alert: TooManyTSIDMisses
expr: increase(vm_missing_tsids_for_metric_id_total[5m]) > 0
for: 15m
labels:
severity: critical
annotations:
summary: "Unexpected TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes"
description: |
Unexpected TSID misses for \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes.
If this happens after unclean shutdown of VictoriaMetrics process (via \"kill -9\", OOM or power off),
then this is OK - the alert must go away in a few minutes after the restart.
Otherwise this may point to the corruption of index data.
- alert: ConcurrentInsertsHitTheLimit
expr: avg_over_time(vm_concurrent_insert_current[1m]) >= vm_concurrent_insert_capacity
for: 15m
@@ -96,6 +109,28 @@ groups:
making write attempts. If vmagent's or vminsert's CPU usage and network saturation are at normal level, then
it might be worth adjusting `-maxConcurrentInserts` cmd-line flag.
- alert: IndexDBRecordsDrop
expr: increase(vm_indexdb_items_dropped_total[5m]) > 0
labels:
severity: critical
annotations:
summary: "IndexDB skipped registering items during data ingestion with reason={{ $labels.reason }}."
description: |
VictoriaMetrics could skip registering new timeseries during ingestion if they fail the validation process.
For example, `reason=too_long_item` means that time series cannot exceed 64KB. Please, reduce the number
of labels or label values for such series. Or enforce these limits via `-maxLabelsPerTimeseries` and
`-maxLabelValueLen` command-line flags.
- alert: RowsRejectedOnIngestion
expr: rate(vm_rows_ignored_total[5m]) > 0
for: 15m
labels:
severity: warning
annotations:
summary: "Some rows are rejected on \"{{ $labels.instance }}\" on ingestion attempt"
description: "Ingested rows on instance \"{{ $labels.instance }}\" are rejected due to the
following reason: \"{{ $labels.reason }}\""
- alert: TooHighQueryLoad
expr: increase(vm_concurrent_select_limit_timeout_total[5m]) > 0
for: 15m
@@ -113,14 +148,3 @@ groups:
* increase compute resources or number of replicas;
* adjust limits `-search.maxConcurrentRequests` and `-search.maxQueueDuration`.
See more at https://docs.victoriametrics.com/victoriametrics/troubleshooting/#slow-queries.
- alert: RowsRejectedOnIngestion
expr: rate(vm_rows_ignored_total[5m]) > 0
for: 15m
labels:
severity: warning
annotations:
summary: "Some rows are rejected on \"{{ $labels.instance }}\" on ingestion attempt"
description: "Ingested rows on instance \"{{ $labels.instance }}\" are rejected due to the
following reason: \"{{ $labels.reason }}\""

View File

@@ -148,45 +148,4 @@ groups:
description: "Metadata cache stores meta information about ingested time series - see https://docs.victoriametrics.com/victoriametrics/#metrics-metadata.
 When the cache is overutilized, the oldest entries will be dropped automatically. This may result in incomplete
 responses for /api/v1/metadata API calls. It doesn't impact regular queries or alerts. Cache size is controlled
 via -storage.maxMetadataStorageSize cmd-line flag."
- alert: MetricNameStatsCacheUtilizationIsTooHigh
expr: |
vm_cache_size_bytes{type="storage/metricNamesStatsTracker"} / vm_cache_size_max_bytes{type="storage/metricNamesStatsTracker"} > 0.95
for: 15m
labels:
severity: warning
annotations:
summary: "Cache capacity for tracking metric names usage on {{ $labels.instance }} (job={{ $labels.job }}) is utilized for more than 95% during the last 15min"
description: "Metric names usage cache stores information about unique metric names and how frequently they are queried - see https://docs.victoriametrics.com/victoriametrics/#track-ingested-metrics-usage.
When cache is overutilized, it will stop tracking the new metric names. It has no other negative impact.
Usually, the number of unique metric names is very limited (thousands). The cache can be overutilized only if metric names
 are changing too frequently or if the cache size is too low. There are the following ways to mitigate cache overutilization:
 - disable the cache via `--storage.trackMetricNamesStats=false` flag, so metric names usage will no longer be tracked
- increase the cache size via `--storage.cacheSizeMetricNamesStats` flag
- reset the cache (see docs for details)"
- alert: IndexDBRecordsDrop
expr: increase(vm_indexdb_items_dropped_total[5m]) > 0
labels:
severity: critical
annotations:
summary: "IndexDB skipped registering items during data ingestion with reason={{ $labels.reason }}."
description: |
VictoriaMetrics could skip registering new timeseries during ingestion if they fail the validation process.
For example, `reason=too_long_item` means that time series cannot exceed 64KB. Please, reduce the number
of labels or label values for such series. Or enforce these limits via `-maxLabelsPerTimeseries` and
`-maxLabelValueLen` command-line flags.
- alert: TooManyTSIDMisses
expr: increase(vm_missing_tsids_for_metric_id_total[5m]) > 0
for: 15m
labels:
severity: critical
annotations:
summary: "Unexpected TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes"
description: |
Unexpected TSID misses for \"{{ $labels.job }}\" ({{ $labels.instance }}) for the last 15 minutes.
If this happens after unclean shutdown of VictoriaMetrics process (via \"kill -9\", OOM or power off),
then this is OK - the alert must go away in a few minutes after the restart.
Otherwise this may point to the corruption of index data.
via -storage.maxMetadataStorageSize cmd-line flag."

View File

@@ -59,7 +59,7 @@ services:
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr": },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
restart: always
vmanomaly:
image: victoriametrics/vmanomaly:v1.29.3
image: victoriametrics/vmanomaly:v1.29.2
depends_on:
- "victoriametrics"
ports:

View File

@@ -14,11 +14,6 @@ aliases:
---
Please find the changelog for VictoriaMetrics Anomaly Detection below.
## v1.29.3
Released: 2026-04-16
- UI: Updated [vmanomaly UI](https://docs.victoriametrics.com/anomaly-detection/ui/) from [v1.6.0](https://docs.victoriametrics.com/anomaly-detection/ui/#v160) to [v1.6.1](https://docs.victoriametrics.com/anomaly-detection/ui/#v161), see respective [release notes](https://docs.victoriametrics.com/anomaly-detection/ui/#v161) for details.
## v1.29.2
Released: 2026-04-02

View File

@@ -48,15 +48,13 @@ Please see example graph illustrating this logic below:
## What data does vmanomaly operate on?
> [!NOTE]
> `vmanomaly` operates on timeseries (metrics) data, and supports both **VictoriaMetrics** and **VictoriaLogs/VictoriaTraces** as data sources to get metrics-compatible data. Choose the source depending on the use case. Single-node / Cluster and OpenSource / Enterprise datasources are supported as well, `vmanomaly` is compatible with both, yet itself requires an [Enterprise license](https://victoriametrics.com/products/enterprise/) to run.
`vmanomaly` operates on timeseries (metrics) data, and supports both **VictoriaMetrics** and **VictoriaLogs** as data sources. Choose the source depending on the use case.
**VictoriaMetrics (metrics):** use full [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) for selection, sampling, and processing; [global filters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#prometheus-querying-api-enhancements) are also supported. See the [VmReader](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) for the details.
**VictoriaLogs (logs → metrics):** {{% available_from "v1.26.0" anomaly %}} use [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) via the [`VLogsReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vlogs-reader) to create log-derived or traces-derived metrics for anomaly detection (e.g., error rates, request latencies, error spans count).
**VictoriaLogs (logs → metrics):** {{% available_from "v1.26.0" anomaly %}} use [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/) via the [`VLogsReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vlogs-reader) to create log-derived metrics for anomaly detection (e.g., error rates, request latencies).
> [!NOTE]
> Please note that only LogsQL queries with [stats pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe) functions [subset](https://docs.victoriametrics.com/anomaly-detection/components/reader/#valid-stats-functions) are supported, as they produce **numeric** time series.
> Please note that only LogsQL queries with [stats pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stats-pipe) functions [subset](https://docs.victoriametrics.com/anomaly-detection/components/reader/#valid-stats-functions) are supported, as they produce **numeric** time series.
## Using offsets
@@ -423,7 +421,7 @@ services:
# ...
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.29.3
image: victoriametrics/vmanomaly:v1.29.2
# ...
restart: always
volumes:
@@ -641,7 +639,7 @@ options:
Here's an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
```sh
docker pull victoriametrics/vmanomaly:v1.29.3 && docker image tag victoriametrics/vmanomaly:v1.29.3 vmanomaly
docker pull victoriametrics/vmanomaly:v1.29.2 && docker image tag victoriametrics/vmanomaly:v1.29.2 vmanomaly
```
```sh

View File

@@ -45,8 +45,8 @@ There are 2 types of compatibility to consider when migrating in stateful mode:
| Group start | Group end | Compatibility | Notes |
|---------|--------- |------------|-------|
| [v1.29.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1293) | Latest* | Fully Compatible | Just a placeholder for new releases |
| [v1.29.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1291) | [v1.29.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1293) | Fully Compatible | - |
| [v1.29.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1292) | Latest* | Fully Compatible | Just a placeholder for new releases |
| [v1.29.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1291) | [v1.29.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1292) | Fully Compatible | - |
| [v1.28.7](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1287) | [v1.29.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1290) | Partially compatible* | Dumped models of class [prophet](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) and [seasonal quantile](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-seasonal-quantile) have problems with loading to [v1.29.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1290) due to dropped `pytz` library. **Upgrading directly from v1.28.7 to [v1.29.1](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1291) with a fix is suggested** |
| [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1260) | [v1.28.7](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1287) | Fully Compatible | [v1.28.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1280) introduced [rolling](https://docs.victoriametrics.com/anomaly-detection/components/models/#rolling-models) model class drop in favor of [online](https://docs.victoriametrics.com/anomaly-detection/components/models/#online-models) models (`rolling_quantile` and `std` models), however, it does not impact compatibility, as artifacts were not produced by default for rolling models. Also, offline `mad` and `zscore` models are redirecting to their respective online counterparts since [v1.28.4](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1284). |
| [v1.25.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1253) | [v1.26.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1270) | Partially Compatible* | [v1.25.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1253) introduced `forecast_at` argument for base [univariate](https://docs.victoriametrics.com/anomaly-detection/components/models/#univariate-models) and `Prophet` [models](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet), however, itself remains backward-reversible from newer states like [v1.26.2](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1262), [v1.27.0](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1270). (All models except `isolation_forest_multivariate` class will be dropped) |
@@ -81,4 +81,4 @@ In stateless mode, the migration process is almost straightforward as there are
# Other VmReader settings...
sampling_period: 1m
...
```
```

View File

@@ -122,7 +122,7 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
1. Pull Docker image:
```sh
docker pull victoriametrics/vmanomaly:v1.29.3
docker pull victoriametrics/vmanomaly:v1.29.2
```
2. Create the license file with your license key.
@@ -142,7 +142,7 @@ docker run -it \
-v ./license:/license \
-v ./config.yaml:/config.yaml \
-p 8490:8490 \
victoriametrics/vmanomaly:v1.29.3 \
victoriametrics/vmanomaly:v1.29.2 \
/config.yaml \
--licenseFile=/license \
--loggerLevel=INFO \
@@ -159,7 +159,7 @@ docker run -it \
-e VMANOMALY_DATA_DUMPS_DIR=/tmp/vmanomaly/data \
-e VMANOMALY_MODEL_DUMPS_DIR=/tmp/vmanomaly/models \
-p 8490:8490 \
victoriametrics/vmanomaly:v1.29.3 \
victoriametrics/vmanomaly:v1.29.2 \
/config.yaml \
--licenseFile=/license \
--loggerLevel=INFO \
@@ -172,7 +172,7 @@ services:
# ...
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.29.3
image: victoriametrics/vmanomaly:v1.29.2
# ...
restart: always
volumes:

View File

@@ -9,17 +9,14 @@ sitemap:
In today's fast-paced and complex landscape of system monitoring, [VictoriaMetrics Anomaly Detection](https://victoriametrics.com/products/enterprise/anomaly-detection/) (`vmanomaly`), a part of our [Enterprise offering](https://victoriametrics.com/products/enterprise/), serves as an **observability layer** for SREs and DevOps teams atop of collected data to **automate the detection of anomalies in time-series data**, reducing manual efforts required to identify abnormal system behavior.
Unlike traditional threshold-based alerting, which relies on **raw metric values** and requires constant tuning and maintenance of thresholds and alerting rules, `vmanomaly` introduces a **unified, interpretable [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score)** - a **de-trended, de-seasonalized metric** generated through machine learning. This approach eliminates the need for frequent manual adjustments by enabling **stable, long-term static thresholds (as simple as `anomaly_score > 1`)** that remain effective over time through continuous model retraining and updates.
Unlike traditional threshold-based alerting, which relies on **raw metric values** and requires constant tuning and maintenance of thresholds and alerting rules, `vmanomaly` introduces a **unified, interpretable [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score)** - a **de-trended, de-seasonalized metric** generated through machine learning. This approach eliminates the need for frequent manual adjustments by enabling **stable, long-term static thresholds (as simple as `anomaly_score > 1`)** that remain effective over time through continuous model retraining.
By shifting to anomaly-based detection, teams can **identify and respond to potential issues faster**, enhancing system reliability and operational efficiency while significantly **reducing the engineering effort spent on handcrafting and maintaining alerting rules**.
## What does it do?
`vmanomaly` is designed to **periodically analyze new data points** across selected metrics - either requested from [VictoriaMetrics TSDB](https://docs.victoriametrics.com/victoriametrics/) or produced by [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/) or [VictoriaTraces](https://docs.victoriametrics.com/victoriatraces/) metrics [endpoint](https://docs.victoriametrics.com/victorialogs/querying/#querying-log-range-stats) - to generate a **unified metric** called [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score).
> [!NOTE]
> `vmanomaly` can use both single-node and cluster versions of VictoriaMetrics/VictoriaLogs/VictoriaTraces as a data source, and is compatible with both OpenSource and Enterprise versions of it. However, `vmanomaly` itself requires an Enterprise license to run, and is part of our [Enterprise offering](https://victoriametrics.com/products/enterprise/).
`vmanomaly` is designed to **periodically analyze new data points** across selected metrics (either requested from [VictoriaMetrics TSDB](https://docs.victoriametrics.com/victoriametrics/) or produced by [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/) metrics [endpoint](https://docs.victoriametrics.com/victorialogs/querying/#querying-log-range-stats)), generating a **unified metric** called [anomaly score](https://docs.victoriametrics.com/anomaly-detection/faq/#what-is-anomaly-score).
Key functions:
- **Automated anomaly detection** - continuously scans time-series data to identify deviations from expected behavior.

View File

@@ -315,7 +315,7 @@ docker run -it --rm \
-e VMANOMALY_MCP_SERVER_URL=http://mcp-vmanomaly:8081/mcp \
-p 8080:8080 \
-p 8490:8490 \
victoriametrics/vmanomaly:v1.29.3 \
victoriametrics/vmanomaly:v1.29.2 \
vmanomaly_config.yaml
```
@@ -640,23 +640,6 @@ If the **results** look good and the **model configuration should be deployed in
## Changelog
### v1.6.1
Released: 2026-04-16
vmanomaly version: [v1.29.3](https://docs.victoriametrics.com/anomaly-detection/changelog/#v1293)
- IMPROVEMENT: Consecutive anomalies (when "streaks" option is enabled) are now grouped in the Visualization Panel as a single anomaly line instead of multiple dots for reduced visual noise and better representation of prolonged anomalous periods, while still showing the exact anomaly score and labels on hover.
- IMPROVEMENT: Raw query results now refresh automatically after time range changes; yet anomaly detection results are preserved until "Detect Anomalies" button is hit again, to avoid recalculating anomalies on the new time range without explicit user action, which could be costly if the new time range is large and the model is complex.
- IMPROVEMENT: Table legend view is now enabled by default for sorting and filtering enablement.
- BUGFIX: Generated config and example alert outputs now preserve configured fit/infer values correctly and avoid invalid float-based duration strings in generated YAML, which could lead to data validation errors if copied to production configuration without adjustments.
- BUGFIX: Fixed multiple confusing anomaly UI behaviors around scheduler fields (fit_every, infer_every) and generated artifacts.
- BUGFIX: Chart y-axis range is now updating after legend series selection (regression introduced in v1.6.0).
### v1.6.0
Released: 2026-04-02

Binary file not shown.

Before

Width:  |  Height:  |  Size: 357 KiB

After

Width:  |  Height:  |  Size: 51 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 467 KiB

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 188 KiB

After

Width:  |  Height:  |  Size: 22 KiB

View File

@@ -395,7 +395,7 @@ services:
restart: always
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.29.3
image: victoriametrics/vmanomaly:v1.29.2
depends_on:
- "victoriametrics"
ports:

Binary file not shown.

Before

Width:  |  Height:  |  Size: 234 KiB

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 282 KiB

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.0 MiB

After

Width:  |  Height:  |  Size: 181 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 929 KiB

After

Width:  |  Height:  |  Size: 164 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 563 KiB

After

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 871 KiB

After

Width:  |  Height:  |  Size: 331 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 310 KiB

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 944 KiB

After

Width:  |  Height:  |  Size: 122 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 303 KiB

After

Width:  |  Height:  |  Size: 44 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 681 KiB

After

Width:  |  Height:  |  Size: 140 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 805 KiB

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 111 KiB

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.2 MiB

After

Width:  |  Height:  |  Size: 160 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 189 KiB

After

Width:  |  Height:  |  Size: 236 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 206 KiB

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 206 KiB

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 1.5 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 206 KiB

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 428 KiB

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

After

Width:  |  Height:  |  Size: 9.7 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 740 KiB

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 11 KiB

After

Width:  |  Height:  |  Size: 2.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 225 KiB

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 189 KiB

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 26 KiB

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 88 KiB

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 160 KiB

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 93 KiB

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 425 KiB

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 514 KiB

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 193 KiB

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 64 KiB

After

Width:  |  Height:  |  Size: 19 KiB

View File

@@ -62,13 +62,11 @@ Pull requests requirements:
1. The pull request must conform to [VictoriaMetrics development goals](https://docs.victoriametrics.com/victoriametrics/goals/).
1. Don't use `master` branch for making PRs, as it makes it impossible for reviewers to modify the changes.
1. All commits need to be [signed](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits).
1. Pull request title should be prefixed with `<dir>/<component>:` to show what component has been changed, i.e. `app/vmalert: fix...`.
Pull request description should contain clear and concise description of what was done, why it is needed and for what purpose.
Use clear language, so reviewers can quickly understand the change and its impact.
1. A commit message should contain clear and concise description of what was done and for what purpose.
Use the imperative, present tense: "change" not "changed" nor "changes". Read your commit message as "This commit will ..", don't capitalize the first letter.
Message should be prefixed with `<dir>/<component>:` to show what component has been changed, i.e. `app/vmalert: fix...`.
1. A link to the issue(s) related to the change, if any. Use `Fixes [issue link]` if the PR resolves the issue, or `Related to [issue link]` for reference.
1. Tests proving that the change is effective. Tests are expected for non-trivial new functionality or non-trivial modifications.
Bug fixes must include tests unless a maintainer explicitly agrees otherwise.
See [this style guide](https://itnext.io/f-tests-as-a-replacement-for-table-driven-tests-in-go-8814a8b19e9e) for tests.
1. Tests proving that the change is effective. See [this style guide](https://itnext.io/f-tests-as-a-replacement-for-table-driven-tests-in-go-8814a8b19e9e) for tests.
To run tests and code checks locally, execute commands `make test-full` and `make check-all`.
1. Try to not extend the scope of the pull requests outside the issue, do not make unrelated changes.
1. Update [docs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/docs) if needed. For example, adding a new flag or changing behavior of existing flags or features

View File

@@ -25,30 +25,12 @@ The sandbox cluster installation runs under the constant load generated by
See also [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-releases/).
## tip
* FEATURE: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): introduce a new flag `-remoteWrite.enableRerouting` to explicitly control rerouting behavior when `vmagent` is started with `-remoteWrite.shardByUrl` and any remote storage system cannot keep up with the ingestion rate. Previously, this behavior was defined internally based on the value of `-remoteWrite.disableOnDiskQueue`. See [#10507](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10507).
* FEATURE: all VictoriaMetrics components: add support for reading cpu/memory limits configured via [systemd slices](https://www.freedesktop.org/software/systemd/man/latest/systemd.slice.html). Previously, only limits set directly on the process's own cgroup were detected. See [#10635](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10635). Thanks to @andriibeee for the contribution.
* FEATURE: [vmui](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#vmui): now `Run query` link on the Alerting Rules page correctly propagates the alerts interval and evaluation time. See [#10366](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10366).
* FEATURE: [alerts](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules): add new `MetricNameStatsCacheUtilizationIsTooHigh` alerting rule to track overutilization of [Metric names usage stats tracker](https://docs.victoriametrics.com/victoriametrics/#track-ingested-metrics-usage) (used in [Cardinality Explorer](https://docs.victoriametrics.com/victoriametrics/#cardinality-explorer)). See [#10840](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10840).
* FEATURE: [stream aggregation](https://docs.victoriametrics.com/victoriametrics/stream-aggregation/): add `vm_streamaggr_counter_resets_total` metric for `total*`, `increase*` and `rate*` outputs that is useful for aggregation behaviour tracking. These metrics help to identify issues described in [Troubleshooting: counter resets](https://docs.victoriametrics.com/victoriametrics/stream-aggregation/#counter-resets).
* BUGFIX: `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): fix increased memory usage after upgrade to [v1.140.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.140.0) by properly accounting for internal buffer count when calculating per-storage buffer size. See [#10725](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10725#issuecomment-4282256709).
* BUGFIX: all VictoriaMetrics components: properly parse IPv6 source address when accepting connections with proxy protocol v2 enabled. See [#10839](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10839). Thanks to @andriibeee for the contribution.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) and [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/): `-maxScrapeSize` is now correctly applied when reading response bodies, including non-OK scrape error responses. See [#10804](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10804).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): fix `ec2_sd_configs` returning 401 `AuthFailure` from AWS when credentials are obtained via IRSA, instance role or `AWS_CONTAINER_CREDENTIALS_*` env vars. The regression was introduced in [v1.140.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.140.0). See [#10815](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10815).
* BUGFIX: [vmauth](https://docs.victoriametrics.com/victoriametrics/vmauth/): fix leak of backend TCP connections, file descriptors and goroutines when the client cancels the request after the backend response has been received. See [#10833](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10833). Thanks to @andriibeee for the contribution.
* BUGFIX: `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): stop logging warnings about failed handshakes when the `clusternative` port receives TCP healthchecks from load balancers. See [#10786](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10786). Thanks to @andriibeee for the contribution.
* BUGFIX: [vmrestore](https://docs.victoriametrics.com/victoriametrics/vmrestore/): fix an issue where vmrestore could hang indefinitely when interrupted during backup download. See [#10794](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10794).
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/): properly execute graceful shutdown for vmsingle if `-maxIngestionRate` is configured. See [#10795](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10795).
* BUGFIX: [vmui](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#vmui): fix time display on Alerting Rules page to use selected timezone. See [#10827](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10827).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): delete labels from rule results if they are specified with an empty string value in rule or group labels. See [#10766](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766).
## [v1.140.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.140.0)
Released at 2026-04-10
**Update Note 1:** [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): [CSV export](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-export-csv-data) (`/api/v1/export/csv`) now adds a header row as the first line of the response, so existing CSV-processing scripts may need to skip this header. See [#10666](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10666).
**Update Note 2:** [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): Due to an ordering bug in binary operations, some queries may produce incorrect results. For example, `10 - (3 + 3 + 4)` is evaluated as `10 - 3 + 3 + 4`. The issue was introduced in versions v1.140.0, v1.136.4, v1.122.19, and is addressed in upcoming releases. It is strongly recommended to avoid these versions entirely. See [#10856](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10856).
* SECURITY: upgrade Go builder from Go1.26.1 to Go1.26.2. See [the list of issues addressed in Go1.26.2](https://github.com/golang/go/issues?q=milestone%3AGo1.26.2%20label%3ACherryPickApproved).
@@ -155,8 +137,6 @@ It enables back `Discovered targets` debug UI by default.
## [v1.136.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.136.4)
**Update Note 1:** [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): Due to an ordering bug in binary operations, some queries may produce incorrect results. For example, `10 - (3 + 3 + 4)` is evaluated as `10 - 3 + 3 + 4`. The issue was introduced in versions v1.140.0, v1.136.4, v1.122.19, and is addressed in upcoming releases. It is strongly recommended to avoid these versions entirely. See [#10856](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10856).
Released at 2026-04-10
**v1.136.x is a line of [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).
@@ -377,8 +357,6 @@ See changes [here](https://docs.victoriametrics.com/victoriametrics/changelog/ch
## [v1.122.19](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.19)
**Update Note 1:** [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): Due to an ordering bug in binary operations, some queries may produce incorrect results. For example, `10 - (3 + 3 + 4)` is evaluated as `10 - 3 + 3 + 4`. The issue was introduced in versions v1.140.0, v1.136.4, v1.122.19, and is addressed in upcoming releases. It is strongly recommended to avoid these versions entirely. See [#10856](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10856).
Released at 2026-04-10
**v1.122.x is a line of [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/).

View File

@@ -338,7 +338,7 @@ Following convention is a good practice.
Every measurement can contain an arbitrary number of `key="value"` labels. The good practice is to keep this number limited.
Otherwise, it would be difficult to deal with measurements containing a big number of labels.
By default, VictoriaMetrics limits the number of labels per measurement to `40` and drops other labels.
By default, VictoriaMetrics limits the number of labels per measurement to `30` and drops other labels.
This limit can be changed via `-maxLabelsPerTimeseries` command-line flag if necessary (but this isn't recommended).
Every label value can contain an arbitrary string value. The good practice is to use short and meaningful label values to

View File

@@ -571,7 +571,6 @@ Below is an example of an `aggr.yaml` configuration that drops the `replica` and
# Troubleshooting
- [Unexpected spikes for `total` or `increase` outputs](#staleness).
- [Excessively large values for `total*`, `increase*`, and `rate*` outputs](#counter-resets).
- [Lower than expected values for `total_prometheus` and `increase_prometheus` outputs](#staleness).
- [High memory usage and CPU usage](#high-resource-usage).
- [Unexpected results in vmagent cluster mode](#cluster-mode).
@@ -602,10 +601,6 @@ the following settings:
- `enable_windows` option in [aggregation config](https://docs.victoriametrics.com/victoriametrics/stream-aggregation/#stream-aggregation-config).
It allows enabling aggregation windows for a specific aggregator.
## Counter resets
If counter-specific outputs, such as `total*`, `rate*`, and `increase*`, produce values that are significantly higher than anticipated, then check the `vm_streamaggr_counter_resets_total` metric. This metric increments each time when [counter reset event](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#counter) is happening and could be caused by duplication or collision of raw samples. If you observe duplication or collision - try solving this problem by either fixing the source of these metrics or by [deduplicating](https://docs.victoriametrics.com/victoriametrics/stream-aggregation/#deduplication) these samples before aggregation.
## Staleness
The following outputs track the last seen per-series values in order to properly calculate output values:

View File

@@ -900,10 +900,6 @@ When `-remoteWrite.disableOnDiskQueue` command-line flag is set, `vmagent` may s
if it cannot keep up with the data ingestion rate. In this case [deduplication](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication)
must be enabled on all the configured remote storage systems.
If `-remoteWrite.shardByUrl` command-line flag is set and `-remoteWrite.disableOnDiskQueue` is set for at least one `-remoteWrite.url`, then if any remote storage systems
and its corresponding persistent queue can not keep up with the ingestion rate, `vmagent` will reroute samples to other available remote storage systems.
If on-disk persistence is enabled for all `-remoteWrite.url`, then `vmagent` will not perform rerouting. But the rerouting behavior can be controlled by explicitly setting `-remoteWrite.enableRerouting`.
## Cardinality limiter
By default, `vmagent` doesn't limit the number of time series each scrape target can expose.

View File

@@ -284,12 +284,9 @@ expr: <string>
# Available starting from https://docs.victoriametrics.com/victoriametrics/changelog/#v1860
[ update_entries_limit: <integer> | default 0 ]
# Labels to add or overwrite labels from other external label sources, such as group labels, for each alert.
# Labels to add or overwrite for each alert.
# Labels are merged with labels received from `expr` evaluation and uniquely identify each generated alert.
#
# In case of conflicts, original labels are kept with prefix `exported_`.
# As a special case, specifying a label with an empty string value removes the label from the result if it exists
# in the original query result; otherwise, it is ignored.
#
# Labels only support limited templating variables in https://docs.victoriametrics.com/victoriametrics/vmalert/#templating,
# including `$labels`, `$value` and `$expr`, to avoid breaking alert states or causing cardinality issue with results.
@@ -419,11 +416,8 @@ record: <string>
# must contain valid Graphite expression.
expr: <string>
# Labels to add or overwrite labels from other external label sources, such as group labels, before storing the result.
#
# Labels to add or overwrite before storing the result.
# In case of conflicts, original labels are kept with prefix `exported_`.
# As a special case, specifying a label with an empty string value removes the label from the result if it exists
# in the original query result; otherwise, it is ignored.
#
# Labels do not support templating in https://docs.victoriametrics.com/victoriametrics/vmalert/#templating due to cardinality concerns. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8171.
labels:

View File

@@ -1523,11 +1523,7 @@ It is recommended to protect the following endpoints with authKeys:
* `/metrics` with `-metricsAuthKey` command-line flag, so unauthorized users couldn't access [vmauth metrics](https://docs.victoriametrics.com/victoriametrics/vmauth/#monitoring).
* `/debug/pprof` with `-pprofAuthKey` command-line flag, so unauthorized users couldn't access [profiling information](#profiling).
As an alternative, you can serve internal API routes on a different listen address using the command-line flag `-httpInternalListenAddr=127.0.0.1:8426`{{% available_from "v1.111.0" %}}.
To enable TLS on the public listener while keeping the internal listener non-TLS, configure multiple listeners like this:
```
/path/to/vmauth -httpInternalListenAddr=,localhost:8426 -httpListenAddr=0.0.0.0:443, -tls=true,false -tlsCertFile=a-cert.crt -tlsKeyFile=a-key.key
```
As an alternative, you can serve internal API routes on a different listen address using the command-line flag `-httpInternalListenAddr=127.0.0.1:8426`. {{% available_from "v1.111.0" %}}
`vmauth` also supports restricting access by IP - see [these docs](#ip-filters). See also [concurrency limiting docs](#concurrency-limiting).

4
go.mod
View File

@@ -10,8 +10,8 @@ require (
github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0
github.com/VictoriaMetrics/easyproto v1.2.0
github.com/VictoriaMetrics/fastcache v1.13.3
github.com/VictoriaMetrics/metrics v1.43.2
github.com/VictoriaMetrics/metricsql v0.86.1
github.com/VictoriaMetrics/metrics v1.43.1
github.com/VictoriaMetrics/metricsql v0.86.0
github.com/aws/aws-sdk-go-v2 v1.41.5
github.com/aws/aws-sdk-go-v2/config v1.32.14
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.13

4
go.sum
View File

@@ -60,12 +60,8 @@ github.com/VictoriaMetrics/fastcache v1.13.3 h1:rBabE0iIxcqKEMCwUmwHZ9dgEqXerg8F
github.com/VictoriaMetrics/fastcache v1.13.3/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU=
github.com/VictoriaMetrics/metrics v1.43.1 h1:j3Ba4l2K1q3pkvzPqt6aSiQ2DBlAEj3VPVeBtpR3t/Y=
github.com/VictoriaMetrics/metrics v1.43.1/go.mod h1:xDM82ULLYCYdFRgQ2JBxi8Uf1+8En1So9YUwlGTOqTc=
github.com/VictoriaMetrics/metrics v1.43.2 h1:+8pIQEGwchKS5CYFyvv3LKvNXGi7baZ9hmIV4RHqibY=
github.com/VictoriaMetrics/metrics v1.43.2/go.mod h1:xDM82ULLYCYdFRgQ2JBxi8Uf1+8En1So9YUwlGTOqTc=
github.com/VictoriaMetrics/metricsql v0.86.0 h1:IFD08amp+nkW6I+pB3+iyamewkIrbEojkQP4cmEbwkU=
github.com/VictoriaMetrics/metricsql v0.86.0/go.mod h1:d4EisFO6ONP/HIGDYTAtwrejJBBeKGQYiRl095bS4QQ=
github.com/VictoriaMetrics/metricsql v0.86.1 h1:GuNqbbIaWZ9eNa6dOCi6itG/fJ96TGOFV3KWLnAyC2o=
github.com/VictoriaMetrics/metricsql v0.86.1/go.mod h1:d4EisFO6ONP/HIGDYTAtwrejJBBeKGQYiRl095bS4QQ=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=

View File

@@ -91,12 +91,9 @@ func NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey,
if len(secretKey) > 0 {
cfg.defaultSecretKey = secretKey
}
if len(cfg.defaultAccessKey) > 0 && len(cfg.defaultSecretKey) > 0 && len(cfg.roleARN) == 0 {
// static credentials without roleARN never need refreshing; pre-populate them.
cfg.creds = &credentials{
AccessKeyID: cfg.defaultAccessKey,
SecretAccessKey: cfg.defaultSecretKey,
}
cfg.creds = &credentials{
AccessKeyID: cfg.defaultAccessKey,
SecretAccessKey: cfg.defaultSecretKey,
}
return cfg, nil

View File

@@ -2,13 +2,11 @@ package awsapi
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
@@ -471,109 +469,6 @@ func TestGetAPICredentialsWithProfile(t *testing.T) {
}
}
// TestGetFreshAPICredentialsFetchesWhenUnset verifies that credentials are
// fetched on demand (here via the IRSA/web-identity flow) when cfg.creds
// has not been populated yet.
func TestGetFreshAPICredentialsFetchesWhenUnset(t *testing.T) {
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10815
	const stsResponse = `
<AssumeRoleWithWebIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<AssumeRoleWithWebIdentityResult>
<Credentials>
<AccessKeyId>IRSAACCESSKEYID</AccessKeyId>
<SecretAccessKey>IRSASECRETACCESSKEY</SecretAccessKey>
<SessionToken>IRSATOKEN</SessionToken>
<Expiration>2026-01-01T00:00:00Z</Expiration>
</Credentials>
</AssumeRoleWithWebIdentityResult>
<ResponseMetadata><RequestId>test</RequestId></ResponseMetadata>
</AssumeRoleWithWebIdentityResponse>
`
	// Fake STS endpoint returning the canned AssumeRoleWithWebIdentity response.
	rec := httptest.NewRecorder()
	rec.WriteHeader(http.StatusOK)
	_, _ = rec.WriteString(stsResponse)
	transport := &fakeRoundTripper{responses: map[string]*http.Response{
		"AssumeRoleWithWebIdentity": rec.Result(),
	}}

	// Web identity token file read by the IRSA flow.
	tokenPath := filepath.Join(t.TempDir(), "token")
	fs.MustWriteSync(tokenPath, []byte("webtoken"))

	cfg := &Config{
		stsEndpoint:  "http://stsendpoint",
		irsaRoleARN:  "irsarole",
		webTokenPath: tokenPath,
		client:       &http.Client{Transport: transport},
	}
	creds, err := cfg.getFreshAPICredentials()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got, want := creds.AccessKeyID, "IRSAACCESSKEYID"; got != want {
		t.Fatalf("unexpected AccessKeyID; got %q, want %q", got, want)
	}
}
func TestGetFreshAPICredentialsFetchesIMDS(t *testing.T) {
// verify that IMDS credentials are fetched when no static keys or IRSA config is set
// see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10817#issuecomment-4258125403
imdsRT := &imdsRoundTripper{
roleName: "test-role",
securityCredentials: `{
"AccessKeyId": "IMDSACCESSKEYID",
"SecretAccessKey": "IMDSSECRETACCESSKEY",
"Token": "IMDSTOKEN",
"Expiration": "2026-01-01T00:00:00Z"
}`,
}
cfg := &Config{
client: &http.Client{Transport: imdsRT},
}
creds, err := cfg.getFreshAPICredentials()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if creds.AccessKeyID != "IMDSACCESSKEYID" {
t.Fatalf("unexpected AccessKeyID; got %q, want %q", creds.AccessKeyID, "IMDSACCESSKEYID")
}
if creds.SecretAccessKey != "IMDSSECRETACCESSKEY" {
t.Fatalf("unexpected SecretAccessKey; got %q, want %q", creds.SecretAccessKey, "IMDSSECRETACCESSKEY")
}
if creds.Token != "IMDSTOKEN" {
t.Fatalf("unexpected Token; got %q, want %q", creds.Token, "IMDSTOKEN")
}
}
// imdsRoundTripper is a fake EC2 instance metadata service (IMDS) transport
// for tests. Its RoundTrip answers the IMDSv2 session-token request, the IAM
// role listing and the security-credentials request for roleName.
type imdsRoundTripper struct {
	roleName            string // IAM role name exposed by the fake metadata service
	securityCredentials string // raw JSON credentials payload returned for roleName
}
// RoundTrip implements http.RoundTripper by answering the three request
// shapes issued during IMDS credential discovery; any other request is
// reported as an error.
func (rt *imdsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// respond builds a 200 response with the given body and content type.
	respond := func(body, contentType string) (*http.Response, error) {
		return &http.Response{
			StatusCode: http.StatusOK,
			Body:       io.NopCloser(strings.NewReader(body)),
			Header:     http.Header{"Content-Type": []string{contentType}},
		}, nil
	}

	urlPath := req.URL.Path
	// IMDSv2 session token request
	if req.Method == http.MethodPut && strings.HasSuffix(urlPath, "/api/token") {
		return respond("fake-imds-token", "text/plain")
	}
	// Role name listing
	if strings.HasSuffix(urlPath, "/meta-data/iam/security-credentials/") {
		return respond(rt.roleName, "text/plain")
	}
	// Security credentials for the role
	if strings.HasSuffix(urlPath, "/meta-data/iam/security-credentials/"+rt.roleName) {
		return respond(rt.securityCredentials, "application/json")
	}
	return nil, fmt.Errorf("unexpected IMDS request: %s %s", req.Method, req.URL)
}
func mustParseRFC3339(s string) time.Time {
expTime, err := time.Parse(time.RFC3339, s)
if err != nil {

View File

@@ -96,12 +96,9 @@ func runParallelPerPathInternal(ctx context.Context, concurrency int, perPath ma
// Read results.
var err error
for range len(perPath) {
select {
case <-ctx.Done():
err = ctx.Err()
case err = <-resultCh:
}
err = <-resultCh
if err != nil {
// Stop the work.
cancelLocal()
break
}

View File

@@ -3,7 +3,6 @@ package cgroup
import (
"fmt"
"os"
"path"
"runtime"
"strconv"
"strings"
@@ -101,31 +100,17 @@ func getOnlineCPUCount() float64 {
return n
}
// See https://www.freedesktop.org/software/systemd/man/latest/systemd.slice.html
func getCPUQuotaV2(sysfsPrefix, cgroupPath string) (float64, error) {
subPath, err := readCgroupV2SubPath(cgroupPath)
func getCPUQuotaV2(sysPrefix, cgroupPath string) (float64, error) {
data, err := getFileContents("cpu.max", sysPrefix, cgroupPath, "")
if err != nil {
subPath = "/"
return 0, err
}
var minQuota float64 = -1
for {
// traverse the sub-path hierarchy and use the minimal value for the stat
data, err := os.ReadFile(path.Join(sysfsPrefix, subPath, "cpu.max"))
if err == nil {
quota, err := parseCPUMax(strings.TrimSpace(string(data)))
if err != nil {
return 0, fmt.Errorf("cannot parse cpu.max at %s: %w", subPath, err)
}
if quota > 0 && (minQuota < 0 || quota < minQuota) {
minQuota = quota
}
}
if subPath == "/" || subPath == "." {
break
}
subPath = path.Dir(subPath)
data = strings.TrimSpace(data)
n, err := parseCPUMax(data)
if err != nil {
return 0, fmt.Errorf("cannot parse cpu.max file contents: %w", err)
}
return minQuota, nil
return n, nil
}
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#cpu

View File

@@ -37,7 +37,4 @@ func TestGetCPUQuotaV2(t *testing.T) {
f("testdata/cgroup", "testdata/self/cgroupv2", 2)
f("testdata/cgroup/cpu_unset", "", -1)
f("testdata/cgroup/cpu_onlymax", "", 2)
// systemd slice
f("testdata/v2slice", "testdata/self/cgroupv2_slice", 2)
}

View File

@@ -1,12 +1,9 @@
package cgroup
import (
"fmt"
"os"
"path"
"runtime/debug"
"strconv"
"strings"
)
// GetGOGC returns GOGC value for the currently running process.
@@ -45,44 +42,15 @@ func GetMemoryLimit() int64 {
return n
}
n, err = getMemStatV2("memory.max")
if err != nil || n <= 0 {
if err != nil {
return 0
}
return n
}
func getMemStatV2(statName string) (int64, error) {
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#memory-interface-files
return getMemLimitV2("/sys/fs/cgroup", "/proc/self/cgroup", statName)
}
func getMemLimitV2(sysfsPrefix, cgroupPath, statName string) (int64, error) {
subPath, err := readCgroupV2SubPath(cgroupPath)
if err != nil {
subPath = "/"
}
var minLimit int64 = -1
for {
// traverse the sub-path hierarchy and use the minimal value for the stat
data, err := os.ReadFile(path.Join(sysfsPrefix, subPath, statName))
if err == nil {
s := strings.TrimSpace(string(data))
if s != "max" {
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse %s at %s: %w", statName, subPath, err)
}
if n > 0 && (minLimit < 0 || n < minLimit) {
minLimit = n
}
}
}
if subPath == "/" || subPath == "." {
break
}
subPath = path.Dir(subPath)
}
return minLimit, nil
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#memory-interface-files
return getStatGeneric(statName, "/sys/fs/cgroup", "/proc/self/cgroup", "")
}
func getMemStat(statName string) (int64, error) {

View File

@@ -19,22 +19,6 @@ func TestGetHierarchicalMemoryLimitSuccess(t *testing.T) {
f("testdata/cgroup", "testdata/self/cgroup", 120)
}
// TestGetMemLimitV2 checks that getMemLimitV2 reads the memory.max limit
// from cgroup v2 testdata, including the systemd-slice layout.
func TestGetMemLimitV2(t *testing.T) {
	assertLimit := func(sysPrefix, cgroupPath string, want int64) {
		t.Helper()
		got, err := getMemLimitV2(sysPrefix, cgroupPath, "memory.max")
		switch {
		case err != nil:
			t.Fatalf("unexpected error: %s", err)
		case got != want:
			t.Fatalf("unexpected result, got: %d, want %d", got, want)
		}
	}
	assertLimit("testdata/cgroup", "testdata/self/cgroupv2", 523372036854771712)
	// systemd slice
	assertLimit("testdata/v2slice", "testdata/self/cgroupv2_slice", 1073741824)
}
func TestGetHierarchicalMemoryLimitFailure(t *testing.T) {
f := func(sysPath, cgroupPath string) {
t.Helper()

Some files were not shown because too many files have changed in this diff Show More