Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2026-05-17 08:36:55 +03:00

Compare commits: weakpointe...gh-9452 (8 commits)

Commit SHA1s in this range (author/date columns were empty in the source):

- 9d862c82f5
- 7d552dbd9a
- 795c3deaee
- cb44353a36
- 7e05200c60
- a2f033ce6c
- 78b217d70c
- c9b23de9ce
@@ -15,3 +15,24 @@ export const getExportDataUrl = (server: string, query: string, period: TimeParams
   if (reduceMemUsage) params.set("reduce_mem_usage", "1");
   return `${server}/api/v1/export?${params}`;
 };
+
+export const getExportCSVDataUrl = (server: string, query: string[], period: TimeParams, reduceMemUsage: boolean): string => {
+  const params = new URLSearchParams({
+    start: period.start.toString(),
+    end: period.end.toString(),
+    format: "__name__,__value__,__timestamp__:unix_ms",
+  });
+  query.forEach(q => params.append("match[]", q));
+  if (reduceMemUsage) params.set("reduce_mem_usage", "1");
+  return `${server}/api/v1/export/csv?${params}`;
+};
+
+export const getExportJSONDataUrl = (server: string, query: string[], period: TimeParams, reduceMemUsage: boolean): string => {
+  const params = new URLSearchParams({
+    start: period.start.toString(),
+    end: period.end.toString(),
+  });
+  query.forEach(q => params.append("match[]", q));
+  if (reduceMemUsage) params.set("reduce_mem_usage", "1");
+  return `${server}/api/v1/export?${params}`;
+};
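For orientation, here is a rough sketch of the URL the new CSV builder produces. The server address and period values are invented for illustration, and `TimeParams` is assumed to expose numeric `start`/`end` fields (they are stringified via `toString()` above):

```ts
import { getExportCSVDataUrl } from "./api/query-range";
import { TimeParams } from "./types";

// Illustrative values only — not taken from this PR.
const period = { start: 1623945600, end: 1623949200 } as unknown as TimeParams;
const url = getExportCSVDataUrl("http://localhost:8428", ["up"], period, true);
// URLSearchParams percent-encodes the format string and the match[] key:
// http://localhost:8428/api/v1/export/csv?start=1623945600&end=1623949200
//   &format=__name__%2C__value__%2C__timestamp__%3Aunix_ms&match%5B%5D=up&reduce_mem_usage=1
```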
@@ -1,20 +1,18 @@
-import { FC, useCallback } from "preact/compat";
+import { useCallback, useRef } from "preact/compat";
 import Tooltip from "../Main/Tooltip/Tooltip";
 import Button from "../Main/Button/Button";
 import { DownloadIcon } from "../Main/Icons";
 import Popper from "../Main/Popper/Popper";
-import { useRef } from "react";
 import "./style.scss";
 import useBoolean from "../../hooks/useBoolean";
 
-interface DownloadButtonProps {
+interface DownloadButtonProps<T extends string> {
   title: string;
-  downloadFormatOptions?: string[];
-  onDownload: (format?: string) => void;
+  downloadFormatOptions?: T[];
+  onDownload: (format?: T) => void;
 }
 
-/** TODO: Currently unused, later will be added for the exporting metrics */
-const DownloadButton: FC<DownloadButtonProps> = ({ title, downloadFormatOptions, onDownload }) => {
+const DownloadButton = <T extends string>({ title, downloadFormatOptions, onDownload }: DownloadButtonProps<T>) => {
   const {
     value: isPopupOpen,
     setTrue: onOpenPopup,

@@ -35,9 +33,19 @@ const DownloadButton: FC<DownloadButtonProps> = ({ title, downloadFormatOptions,
     }
   }, [onDownload, onClosePopup, isPopupOpen, onOpenPopup]);
 
+  const isDownloadFormat = useCallback((format: string): format is T => {
+    return (downloadFormatOptions as string[])?.includes(format);
+  }, [downloadFormatOptions]);
+
   const onDownloadFormatClick = useCallback((event: Event) => {
     const button = event.currentTarget as HTMLButtonElement;
-    onDownload(button.textContent ?? undefined);
+    const format = button.textContent;
+    if (format && isDownloadFormat(format)) {
+      onDownload(format);
+    } else {
+      onDownload();
+    }
     onClosePopup();
   }, [onDownload]);
 
   return (
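The switch to a generic `<T extends string>` plus the `format is T` guard above is a standard TypeScript narrowing pattern. A standalone sketch of the same idea (the names below are illustrative, not project code):

```ts
const formats = ["csv", "json"] as const;
type Format = (typeof formats)[number]; // "csv" | "json"

// User-defined type guard: narrows an arbitrary string to the union type.
const isFormat = (s: string): s is Format =>
  (formats as readonly string[]).includes(s);

declare const onDownload: (format?: Format) => void;

const clicked: string | null = "csv"; // e.g. button.textContent
if (clicked && isFormat(clicked)) {
  onDownload(clicked); // `clicked` is typed as "csv" | "json" here
} else {
  onDownload();        // fall back to the default format
}
```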
@@ -578,97 +578,13 @@ export const CommentIcon = () => (
   </svg>
 );
 
+export const DebugIcon = () => (
+  <svg
+    viewBox="0 0 24 24"
+    fill="currentColor"
+  >
+    <path
+      d="M20 8h-2.81c-.45-.78-1.07-1.45-1.82-1.96L17 4.41 15.59 3l-2.17 2.17C12.96 5.06 12.49 5 12 5c-.49 0-.96.06-1.41.17L8.41 3 7 4.41l1.62 1.63C7.88 6.55 7.26 7.22 6.81 8H4v2h2.09c-.05.33-.09.66-.09 1v1H4v2h2v1c0 .34.04.67.09 1H4v2h2.81c1.04 1.79 2.97 3 5.19 3s4.15-1.21 5.19-3H20v-2h-2.09c.05-.33.09-.66.09-1v-1h2v-2h-2v-1c0-.34-.04-.67-.09-1H20V8zm-6 8h-4v-2h4v2zm0-4h-4v-2h4v2z"
+    />
+  </svg>
+);
+
 export const FilterIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path
       d="M4.25 5.61C6.27 8.2 10 13 10 13v6c0 .55.45 1 1 1h2c.55 0 1-.45 1-1v-6s3.72-4.8 5.74-7.39c.51-.66.04-1.61-.79-1.61H5.04c-.83 0-1.3.95-.79 1.61"
     ></path>
   </svg>
 );
 
 export const FilterOffIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path
       d="M19.79 5.61C20.3 4.95 19.83 4 19 4H6.83l7.97 7.97zM2.81 2.81 1.39 4.22 10 13v6c0 .55.45 1 1 1h2c.55 0 1-.45 1-1v-2.17l5.78 5.78 1.41-1.41z"
     ></path>
   </svg>
 );
 
 export const OpenNewIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path
       d="M19 19H5V5h7V3H5c-1.11 0-2 .9-2 2v14c0 1.1.89 2 2 2h14c1.1 0 2-.9 2-2v-7h-2zM14 3v2h3.59l-9.83 9.83 1.41 1.41L19 6.41V10h2V3z"
     ></path>
   </svg>
 );
 
 export const ModalIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path d="M19 4H5c-1.11 0-2 .9-2 2v12c0 1.1.89 2 2 2h14c1.1 0 2-.9 2-2V6c0-1.1-.89-2-2-2m0 14H5V8h14z"></path>
   </svg>
 );
 
 export const PauseIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path d="M6 19h4V5H6v14zm8-14v14h4V5h-4z" />
   </svg>
 );
 
 export const ScrollToTopIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path
       d="M8 12l4-4 4 4m-4-4v12"
       strokeWidth="2"
       stroke="currentColor"
       fill="none"
     />
   </svg>
 );
 
 export const SortIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path d="M4 3 L4 15 L1.5 15 L5.5 21 L9.5 15 L7 15 L7 3 Z"/>
     <path d="M13 21 L13 9 L10.5 9 L14.5 3 L18.5 9 L16 9 L16 21 Z"/>
   </svg>
 );
 
 export const SortArrowDownIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path d="M10.5 3 L10.5 15 L8 15 L12 21 L16 15 L13.5 15 L13.5 3 Z"/>
   </svg>
 );
 
 export const SortArrowUpIcon = () => (
   <svg
     viewBox="0 0 24 24"
     fill="currentColor"
   >
     <path d="M10.5 21 L10.5 9 L8 9 L12 3 L16 9 L13.5 9 L13.5 21 Z"/>
   </svg>
 );
@@ -1,5 +1,5 @@
 import { FC, useCallback, useEffect, useRef, useState } from "preact/compat";
-import { DownloadIcon } from "../../../components/Main/Icons";
+import { DebugIcon } from "../../../components/Main/Icons";
 import Button from "../../../components/Main/Button/Button";
 import Tooltip from "../../../components/Main/Tooltip/Tooltip";
 import useBoolean from "../../../hooks/useBoolean";

@@ -217,17 +217,17 @@ const DownloadReport: FC<Props> = ({ fetchUrl, reportType = ReportType.QUERY_DAT
 
   return (
     <>
-      <Tooltip title={"Export query"}>
+      <Tooltip title={"Debug query"}>
         <Button
           variant="text"
-          startIcon={<DownloadIcon/>}
+          startIcon={<DebugIcon />}
           onClick={toggleOpen}
-          ariaLabel="export query"
+          ariaLabel="Debug query"
         />
       </Tooltip>
       {openModal && (
         <Modal
-          title={"Export query"}
+          title={"Debug query"}
           onClose={handleClose}
           isOpen={openModal}
         >
@@ -1,4 +1,4 @@
-import { FC, useEffect, useState } from "preact/compat";
+import { FC, useEffect, useState, useMemo, useRef, useCallback } from "preact/compat";
 import QueryConfigurator from "./QueryConfigurator/QueryConfigurator";
 import { useFetchQuery } from "../../hooks/useFetchQuery";
 import { DisplayTypeSwitch } from "./DisplayTypeSwitch";

@@ -12,13 +12,17 @@ import Alert from "../../components/Main/Alert/Alert";
 import classNames from "classnames";
 import useDeviceDetect from "../../hooks/useDeviceDetect";
 import InstantQueryTip from "./InstantQueryTip/InstantQueryTip";
-import { useRef } from "react";
 import CustomPanelTraces from "./CustomPanelTraces/CustomPanelTraces";
 import WarningLimitSeries from "./WarningLimitSeries/WarningLimitSeries";
 import CustomPanelTabs from "./CustomPanelTabs";
 import { DisplayType } from "../../types";
 import DownloadReport from "./DownloadReport/DownloadReport";
 import WarningHeatmapToLine from "./WarningHeatmapToLine/WarningHeatmapToLine";
+import DownloadButton from "../../components/DownloadButton/DownloadButton";
+import { downloadCSV, downloadJSON } from "../../utils/file";
+import { convertMetricsDataToCSV } from "./utils";
+
+type ExportFormats = "csv" | "json";
 
 const CustomPanel: FC = () => {
   useSetQueryParams();

@@ -55,6 +59,27 @@ const CustomPanel: FC = () => {
     showAllSeries
   });
 
+  const fileDownloaders = useMemo(() => {
+    const getFilename = (format: ExportFormats) => {
+      return `vmui_export_${query.join("_")}.${format}`;
+    };
+
+    return {
+      csv: async () => {
+        if (!liveData) return;
+        const csvData = convertMetricsDataToCSV(liveData);
+        downloadCSV(csvData, getFilename("csv"));
+      },
+      json: async () => {
+        downloadJSON(JSON.stringify(liveData), getFilename("json"));
+      },
+    };
+  }, [liveData, query]);
+
+  const onDownloadClick = useCallback((format?: ExportFormats) => {
+    format && fileDownloaders[format]();
+  }, [fileDownloaders]);
+
   const showInstantQueryTip = !liveData?.length && (displayType !== DisplayType.chart);
   const showError = !hideError && error;

@@ -110,7 +135,7 @@ const CustomPanel: FC = () => {
         "vm-block_mobile": isMobile,
       })}
     >
-      {isLoading && <LineLoader />}
+      {isLoading && <LineLoader/>}
       <div
         className="vm-custom-panel-body-header"
         ref={controlsRef}

@@ -118,7 +143,13 @@ const CustomPanel: FC = () => {
         <div className="vm-custom-panel-body-header__tabs">
           <DisplayTypeSwitch/>
         </div>
-        {(graphData || liveData) && <DownloadReport fetchUrl={fetchUrl}/>}
+        {displayType === "table" && (
+          <DownloadButton
+            title={"Export query"}
+            onDownload={onDownloadClick}
+            downloadFormatOptions={["json", "csv"]}
+          />)}
+        {(graphData || liveData) && displayType !== "code" && <DownloadReport fetchUrl={fetchUrl}/>}
       </div>
       <CustomPanelTabs
         graphData={graphData}
app/vmui/packages/vmui/src/pages/CustomPanel/utils.test.ts (new file, 86 lines)
@@ -0,0 +1,86 @@
import { describe, expect, it } from "vitest";
import { convertMetricsDataToCSV } from "./utils";
import { InstantMetricResult } from "../../api/types";

describe("convertMetricsDataToCSV", () => {
  it("should return an empty string if headers are empty", () => {
    const data: InstantMetricResult[] = [];
    expect(convertMetricsDataToCSV(data)).toBe("");
  });

  it("should return a valid CSV string for single metric entry with value", () => {
    const data: InstantMetricResult[] = [
      {
        value: [1623945600, "123"],
        group: 0,
        metric: {
          header1: "123",
          header2: "value2"
        }
      },
    ];
    const result = convertMetricsDataToCSV(data);
    expect(result).toBe("header1,header2\n123,value2");
  });

  it("should return a valid CSV string for multiple metric entries with values", () => {
    const data: InstantMetricResult[] = [
      {
        value: [1623945600, "123"],
        group: 0,
        metric: {
          header1: "123",
          header2: "value2"
        }
      },
      {
        value: [1623949200, "456"],
        group: 0,
        metric: {
          header1: "456",
          header2: "value4"
        }
      },
    ];
    const result = convertMetricsDataToCSV(data);
    expect(result).toBe("header1,header2\n123,value2\n456,value4");
  });

  it("should handle metric entries with multiple values field", () => {
    const data: InstantMetricResult[] = [
      {
        values: [[1623945600, "123"], [1623949200, "456"]],
        group: 0,
        metric: {
          header1: "123-456",
          header2: "values"
        }
      },
    ];
    const result = convertMetricsDataToCSV(data);
    expect(result).toBe("header1,header2\n123-456,values");
  });

  it("should handle a combination of metric entries with value and values", () => {
    const data: InstantMetricResult[] = [
      {
        value: [1623945600, "123"],
        group: 0,
        metric: {
          header1: "123",
          header2: "first"
        }
      },
      {
        values: [[1623949200, "456"], [1623952800, "789"]],
        group: 0,
        metric: {
          header1: "456-789",
          header2: "second"
        }
      },
    ];
    const result = convertMetricsDataToCSV(data);
    expect(result).toBe("header1,header2\n123,first\n456-789,second");
  });
});
app/vmui/packages/vmui/src/pages/CustomPanel/utils.ts (new file, 18 lines)
@@ -0,0 +1,18 @@
import { InstantMetricResult } from "../../api/types";
import { getColumns, MetricCategory } from "../../hooks/useSortedCategories";
import { formatValueToCSV } from "../../utils/csv";

const getHeaders = (data: InstantMetricResult[]): string => {
  return getColumns(data).map(({ key }) => key).join(",");
};

const getRows = (data: InstantMetricResult[], headers: MetricCategory[]) => {
  return data?.map(d => headers.map(c => formatValueToCSV(d.metric[c.key] || "-")).join(","));
};

export const convertMetricsDataToCSV = (data: InstantMetricResult[]): string => {
  const headers = getHeaders(data);
  if (!headers.length) return "";
  const rows = getRows(data, getColumns(data));
  return [headers, ...rows].join("\n");
};
@@ -1,13 +1,15 @@
 import { Dispatch, SetStateAction, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
 import { MetricBase, MetricResult, ExportMetricResult } from "../../../api/types";
-import { ErrorTypes, SeriesLimits } from "../../../types";
+import { ErrorTypes, SeriesLimits, TimeParams } from "../../../types";
 import { useQueryState } from "../../../state/query/QueryStateContext";
 import { useTimeState } from "../../../state/time/TimeStateContext";
 import { useAppState } from "../../../state/common/StateContext";
 import { useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
 import { isValidHttpUrl } from "../../../utils/url";
-import { getExportDataUrl } from "../../../api/query-range";
+import { getExportCSVDataUrl, getExportDataUrl, getExportJSONDataUrl } from "../../../api/query-range";
 import { parseLineToJSON } from "../../../utils/json";
+import { downloadCSV, downloadJSON } from "../../../utils/file";
+import { useSnack } from "../../../contexts/Snackbar";
 
 interface FetchQueryParams {
   hideQuery?: number[];

@@ -16,6 +18,7 @@ interface FetchQueryParams {
 
 interface FetchQueryReturn {
   fetchUrl?: string[],
+  exportData: (format: ExportFormats) => void,
   isLoading: boolean,
   data?: MetricResult[],
   error?: ErrorTypes | string,

@@ -25,11 +28,16 @@ interface FetchQueryReturn {
   abortFetch: () => void
 }
 
+type ExportFormats = "csv" | "json";
+type FormatDownloader = (serverUrl: string, query: string[], period: TimeParams, reduceMemUsage: boolean) => void;
+type DownloadFileFormats = Record<ExportFormats, FormatDownloader>
+
 export const useFetchExport = ({ hideQuery, showAllSeries }: FetchQueryParams): FetchQueryReturn => {
   const { query } = useQueryState();
   const { period } = useTimeState();
   const { displayType, reduceMemUsage, seriesLimits: stateSeriesLimits } = useCustomPanelState();
   const { serverUrl } = useAppState();
+  const { showInfoMessage } = useSnack();
 
   const [isLoading, setIsLoading] = useState(false);
   const [data, setData] = useState<MetricResult[]>();

@@ -55,6 +63,35 @@ export const useFetchExport = ({ hideQuery, showAllSeries }: FetchQueryParams):
     }
   }, [serverUrl, period, hideQuery, reduceMemUsage]);
 
+  const fileDownloaders: DownloadFileFormats = useMemo(() => {
+    const getFilename = (format: ExportFormats) => `vmui_export_${query.join("_")}_${period.start}_${period.end}.${format}`;
+    return {
+      csv: async () => {
+        const url = getExportCSVDataUrl(serverUrl, query, period, reduceMemUsage);
+        const response = await fetch(url);
+        try {
+          let text = await response.text();
+          text = "name,value,timestamp\n" + text;
+          downloadCSV(text, getFilename("csv"));
+        } catch (e) {
+          console.error(e);
+          showInfoMessage({ text: "Couldn't fetch data for CSV export. Please try again", type: "error" });
+        }
+      },
+      json: async () => {
+        const url = getExportJSONDataUrl(serverUrl, query, period, reduceMemUsage);
+        try {
+          const response = await fetch(url);
+          const text = await response.text();
+          downloadJSON(text, getFilename("json"));
+        } catch (e) {
+          console.error(e);
+          showInfoMessage({ text: "Couldn't fetch data for JSON export. Please try again", type: "error" });
+        }
+      }
+    };
+  }, [query, period, serverUrl, reduceMemUsage]);
+
   const fetchData = useCallback(async ({ fetchUrl, stateSeriesLimits, showAllSeries }: {
     fetchUrl: string[];
     stateSeriesLimits: SeriesLimits;

@@ -144,6 +181,12 @@ export const useFetchExport = ({ hideQuery, showAllSeries }: FetchQueryParams):
     }
   }, [displayType, hideQuery]);
 
+  const exportData = useCallback((format: ExportFormats) => {
+    if (error) return;
+    const updatedPeriod = { ...period };
+    fileDownloaders[format](serverUrl, query, updatedPeriod, reduceMemUsage);
+  }, [serverUrl, query, period, reduceMemUsage, error, fileDownloaders]);
+
   const abortFetch = useCallback(() => {
     abortControllerRef.current.abort();
     setData([]);

@@ -167,5 +210,6 @@ export const useFetchExport = ({ hideQuery, showAllSeries }: FetchQueryParams):
     setQueryErrors,
     warning,
     abortFetch,
+    exportData
   };
 };
@@ -1,4 +1,4 @@
-import { FC, useState } from "preact/compat";
+import { FC, useCallback, useState } from "preact/compat";
 import LineLoader from "../../components/Main/LineLoader/LineLoader";
 import { useCustomPanelState } from "../../state/customPanel/CustomPanelStateContext";
 import { useQueryState } from "../../state/query/QueryStateContext";

@@ -17,7 +17,7 @@ import { DisplayType } from "../../types";
 import Hyperlink from "../../components/Main/Hyperlink/Hyperlink";
 import { CloseIcon } from "../../components/Main/Icons";
 import Button from "../../components/Main/Button/Button";
-import DownloadReport, { ReportType } from "../CustomPanel/DownloadReport/DownloadReport";
+import DownloadButton from "../../components/DownloadButton/DownloadButton";
 
 const RawSamplesLink = () => (
   <Hyperlink

@@ -66,7 +66,7 @@ const RawQueryPage: FC = () => {
     queryErrors,
     setQueryErrors,
     abortFetch,
-    fetchUrl,
+    exportData
   } = useFetchExport({ hideQuery, showAllSeries });
 
   const controlsRef = useRef<HTMLDivElement>(null);

@@ -85,6 +85,11 @@ const RawQueryPage: FC = () => {
     setShowPageDescription(false);
   };
 
+  const onExportClick = useCallback(async (format?: "csv" | "json") => {
+    if (!format) return;
+    exportData(format);
+  }, [exportData]);
+
   return (
     <div
       className={classNames({

@@ -159,9 +164,10 @@ const RawQueryPage: FC = () => {
           <DisplayTypeSwitch tabFilter={(tab) => (tab.value !== DisplayType.table)}/>
         </div>
         {data && (
-          <DownloadReport
-            fetchUrl={fetchUrl}
-            reportType={ReportType.RAW_DATA}
+          <DownloadButton
+            title={"Export query"}
+            downloadFormatOptions={["json", "csv"]}
+            onDownload={onExportClick}
           />
         )}
       </div>
app/vmui/packages/vmui/src/utils/csv.test.ts (new file, 34 lines)
@@ -0,0 +1,34 @@
import { describe, expect, it } from "vitest";
import { formatValueToCSV } from "./csv";

describe("formatValueToCSV", () => {
  it("should wrap value in quotes if it contains a comma", () => {
    const value = "hello,world";
    const result = formatValueToCSV(value);
    expect(result).toBe("\"hello,world\"");
  });

  it("should wrap value in quotes if it contains a newline", () => {
    const value = "hello\nworld";
    const result = formatValueToCSV(value);
    expect(result).toBe("\"hello\nworld\"");
  });

  it("should escape quotes and wrap in quotes if value contains a double quote", () => {
    const value = "hello \"world\"";
    const result = formatValueToCSV(value);
    expect(result).toBe("\"hello \"\"world\"\"\"");
  });

  it("should return the same value if it does not contain special characters", () => {
    const value = "hello world";
    const result = formatValueToCSV(value);
    expect(result).toBe("hello world");
  });

  it("should handle empty strings correctly", () => {
    const value = "";
    const result = formatValueToCSV(value);
    expect(result).toBe("");
  });
});
app/vmui/packages/vmui/src/utils/csv.ts (new file, 4 lines)
@@ -0,0 +1,4 @@
export const formatValueToCSV = (value: string) =>
  (value.includes(",") || value.includes("\n") || value.includes("\""))
    ? "\"" + value.replace(/"/g, "\"\"") + "\""
    : value;
@@ -11,38 +11,12 @@ export const downloadFile = (data: Blob, filename: string) => {
   URL.revokeObjectURL(url);
 };
 
-export const downloadCSV = (data: Record<string, string>[], filename: string) => {
-  const getHeader = (data: Record<string, string>[]) => {
-    const headersObj = data.reduce<Record<string, boolean>>((headers, row) => {
-      Object.keys(row).forEach((key) => {
-        if (key && !headers[key]) {
-          headers[key] = true;
-        }
-      });
-      return headers;
-    }, {});
-    return Object.keys(headersObj);
-  };
-
-  const formatValueToCSV = (value: string) =>
-    (value.includes(",") || value.includes("\n") || value.includes("\""))
-      ? "\"" + value.replace(/"/g, "\"\"") + "\""
-      : value;
-
-  const convertToCSV = (data: Record<string, string>[]): string => {
-    const header = getHeader(data);
-    const rows = data.map(item =>
-      header.map(fieldName => item[fieldName] ? formatValueToCSV(item[fieldName]) : "").join(",")
-    );
-    return [header.map(formatValueToCSV).join(","), ...rows].join("\r\n");
-  };
-
-  const csvContent = convertToCSV(data);
-  const blob = new Blob([csvContent], { type: "text/csv;charset=utf-8;" });
+export const downloadCSV = (data: string, filename: string) => {
+  const blob = new Blob([data], { type: "text/csv;charset=utf-8;" });
   downloadFile(blob, filename);
 };
 
 export const downloadJSON = (data: string, filename: string) => {
   const blob = new Blob([data], { type: "application/json" });
   downloadFile(blob, filename);
 };
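Only the tail of `downloadFile` is visible in the hunk above (`URL.revokeObjectURL(url)`). For context, a helper like this typically looks roughly as follows — a sketch under assumptions, not the file's verbatim contents:

```ts
// Hypothetical shape of the downloadFile helper used by downloadCSV/downloadJSON.
const downloadFile = (data: Blob, filename: string) => {
  const url = URL.createObjectURL(data);   // temporary object URL backed by the blob
  const link = document.createElement("a");
  link.href = url;
  link.download = filename;                // filename suggested to the browser
  document.body.appendChild(link);
  link.click();                            // trigger the download
  document.body.removeChild(link);
  URL.revokeObjectURL(url);                // matches the line visible in the hunk
};
```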
@@ -7,7 +7,7 @@ groups:
     # note the `job` filter and update accordingly to your setup
     rules:
       - alert: TooManyRestarts
-        expr: changes(process_start_time_seconds{job=~".*(victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert|vmsingle|vmalertmanager|vmauth|victorialogs|vlstorage|vlselect|vlinsert).*"}[15m]) > 2
+        expr: changes(process_start_time_seconds{job=~".*(victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert|vmsingle|vmalertmanager|vmauth).*"}[15m]) > 2
         labels:
           severity: critical
         annotations:

@@ -17,7 +17,7 @@ groups:
           It might be crashlooping.
 
       - alert: ServiceDown
-        expr: up{job=~".*(victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert|vmsingle|vmalertmanager|vmauth|victorialogs|vlstorage|vlselect|vlinsert).*"} == 0
+        expr: up{job=~".*(victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert|vmsingle|vmalertmanager|vmauth).*"} == 0
         for: 2m
         labels:
           severity: critical

@@ -59,7 +59,7 @@ groups:
           Consider to either increase available CPU resources or decrease the load on the process.
 
       - alert: TooHighGoroutineSchedulingLatency
-        expr: histogram_quantile(0.99, sum(rate(go_sched_latencies_seconds_bucket[5m])) by (le, job, instance)) > 0.1
+        expr: histogram_quantile(0.99, sum(rate(go_sched_latencies_seconds_bucket{job=~".*(victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert|vmsingle|vmalertmanager|vmauth).*"}[5m])) by (le, job, instance)) > 0.1
         for: 15m
         labels:
           severity: critical
@@ -1356,7 +1356,7 @@ Below is the output for `/path/to/vminsert -help`:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -1722,7 +1722,7 @@ Below is the output for `/path/to/vmselect -help`:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -2012,7 +2012,7 @@ Below is the output for `/path/to/vmstorage -help`:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.
@@ -273,9 +273,13 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](
 
 VictoriaMetrics provides UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`
 (or at `http://<vmselect>:8481/select/<accountID>/vmui/` in [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/)).
-The UI allows exploring query results via graphs and tables. It also provides the following features:
+> See [VMUI at VictoriaMetrics playground](https://play.victoriametrics.com?g0.expr=up).
 
-- View [raw samples](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#raw-samples) via `Raw Query` tab {{% available_from "v1.107.0" %}}. Helps in debugging of [unexpected query results](https://docs.victoriametrics.com/victoriametrics/troubleshooting/#unexpected-query-results).
+VMUI provides the following features:
+
+- `Query` tab for ad-hoc queries in MetricsQL, supporting time series, tables and histogram representation
+- `Raw Query` tab {{% available_from "v1.107.0" %}} for viewing [raw samples](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#raw-samples). Helps in debugging of [unexpected query results](https://docs.victoriametrics.com/victoriametrics/troubleshooting/#unexpected-query-results).
 - Explore:
   - [Metrics explorer](#metrics-explorer) - automatically builds graphs for selected metrics;
   - [Cardinality explorer](#cardinality-explorer) - stats about existing metrics in TSDB;

@@ -286,46 +290,71 @@ The UI allows exploring query results via graphs and tables. It also provides th
   - [Query analyzer](#query-tracing) - explore query results and traces loaded from JSON. See `Export query` button below;
   - [WITH expressions playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/expand-with-exprs) - test how WITH expressions work;
   - [Metric relabel debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/relabeling) - debug [relabeling](#relabeling) rules.
-  - [Downsampling filters debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/downsampling-filters-debug) - debug [downsampling](#downsampling) configs {{% available_from "v1.105.0" %}}.
-  - [Retention filters debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/retention-filters-debug) - debug [retention filter](#retention-filters) configs {{% available_from "v1.105.0" %}}.
+  - [Downsampling filters debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/downsampling-filters-debug) {{% available_from "v1.105.0" %}} - debug [downsampling](#downsampling) configs.
+  - [Retention filters debugger](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/retention-filters-debug) {{% available_from "v1.105.0" %}} - debug [retention filter](#retention-filters) configs.
 
-VMUI provides auto-completion for [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) functions, metric names, label names and label values. The auto-completion can be enabled
-by checking the `Autocomplete` toggle. When the auto-completion is disabled, it can still be triggered for the current cursor position by pressing `ctrl+space`.
+**Querying**
 
+Enter the MetricsQL query in `Query` field and hit `Enter`. Multi-line queries can be entered by pressing `Shift-Enter`.
+
+VMUI provides auto-completion for [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) functions, metric names, label names and label values.
+The auto-completion can be enabled by checking the `Autocomplete` toggle. When the auto-completion is disabled, it can
+still be triggered for the current cursor position by pressing `ctrl+space`.
+
+To correlate between multiple queries on the same graph click `Add Query` button and enter an additional query.
+Results for all the queries are displayed simultaneously on the same graph.
+
+Results of a particular query can be hidden by clicking the `eye` icon on the right side of the input field.
+Clicking on the `eye` icon while holding the `ctrl` key hides results of all other queries.
+
+VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range.
+The step value can be customized by changing `Step` value in the top-right corner.
+
+Clicking on the line on graph pins the tooltip. User can pin multiple tooltips. Press `x` icon to unpin the tooltip.
+
+Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.
 
 VMUI automatically switches from graph view to heatmap view when the query returns [histogram](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#histogram) buckets
 (both [Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram)
 and [VictoriaMetrics histograms](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) are supported).
 Try, for example, [this query](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/?g0.expr=sum%28rate%28vm_promscrape_scrape_duration_seconds_bucket%29%29+by+%28vmrange%29&g0.range_input=24h&g0.end_input=2023-04-10T17%3A46%3A12&g0.relative_time=last_24_hours&g0.step_input=31m).
 To disable heatmap view press on settings icon in the top-right corner of graph area and disable `Histogram mode` toggle.
 
-Graphs in `vmui` support scrolling and zooming:
+**Time range**
 
-* Select the needed time range on the graph in order to zoom in into the selected time range. Hold `ctrl` (or `cmd` on MacOS) and scroll down in order to zoom out.
-* Hold `ctrl` (or `cmd` on MacOS) and scroll up in order to zoom in the area under cursor.
-* Hold `ctrl` (or `cmd` on MacOS) and drag the graph to the left / right in order to move the displayed time range into the future / past.
+The time range for graphs can be adjusted in multiple ways:
 
-Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.
+* Click on time picker in the top-right corner to select a relative (`Last N minutes`) or absolute time range (specify `From` and `To`);
+* Zoom-in into graph by click-and-drag motion over the graph area;
+* When hovering cursor over the graph area, hold `ctrl` (or `cmd` on MacOS) and scroll up or down to zoom out or zoom in;
+* When hovering cursor over the graph area, hold `ctrl` (or `cmd` on MacOS) and drag the graph to the left / right to move the displayed time range into the future / past.
 
-Multi-line queries can be entered by pressing `Shift-Enter` in query input field.
+**Legend**
 
+Legend is displayed below the graph area.
+Clicking on item in legend hides all other items from displaying. Clicking on the item while holding the `ctrl` key hides
+only this item.
+
+Clicking on the label-value pair in item automatically copies it into buffer, so it can be pasted later.
+
+There are additional visualization settings in the top right-corner of the legend view: switching to table view,
+hiding common labels, etc.
+
+**Troubleshooting**
+
 When querying the [backfilled data](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#backfilling)
 or during [query troubleshooting](https://docs.victoriametrics.com/victoriametrics/troubleshooting/#unexpected-query-results),
 it may be useful disabling response cache by clicking `Disable cache` checkbox.
 
-VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range.
-The step value can be customized by changing `Step value` input.
+Query can be [traced](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#query-tracing)
+by clicking on `Trace query` toggle below query input area and executing query again. Once trace is generated, click
+on it to expand for more details.
 
-VMUI allows investigating correlations between multiple queries on the same graph. Just click `Add Query` button,
-enter an additional query in the newly appeared input field and press `Enter`.
-Results for all the queries are displayed simultaneously on the same graph.
-Graphs for a particular query can be temporarily hidden by clicking the `eye` icon on the right side of the input field.
-When the `eye` icon is clicked while holding the `ctrl` key, then query results for the rest of queries become hidden
-except of the current query results.
+The query and its trace can be exported by clicking on `debug` icon in top right corner of trace block. The exported
+file can be loaded again in VMUI on `Tools=>Query Analyzer` page.
 
-VMUI allows sharing query and [trace](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#query-tracing) results by clicking on
-`Export query` button in top right corner of the graph area. The query and trace will be exported as a file that later
-can be loaded in VMUI via `Query Analyzer` tool.
-
-See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
+`Raw query` page allows displaying raw, unmodified data. It can be useful for seeing the actual scrape interval or detecting
+sample duplicates.
 
 ### Top queries
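The export features described above sit on top of the regular `/api/v1/export` endpoint, which streams one JSON-encoded series per line. A minimal sketch of fetching the same data programmatically (endpoint as documented; the server address, match expression and timestamp below are invented):

```ts
// Sketch: pull exported series as JSON lines from a VictoriaMetrics instance.
async function exportSeries(server: string, matchExpr: string): Promise<unknown[]> {
  const params = new URLSearchParams({
    "match[]": matchExpr,
    start: "2025-08-01T00:00:00Z", // RFC3339 and unix timestamps are both accepted
  });
  const resp = await fetch(`${server}/api/v1/export?${params}`);
  const text = await resp.text();
  // One JSON object per exported series.
  return text.trim().split("\n").filter(Boolean).map(line => JSON.parse(line));
}

// usage: await exportSeries("http://localhost:8428", "up");
```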
@@ -2959,7 +2988,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.
@@ -25,11 +25,16 @@ See also [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-rel
 ## tip
 
 * FEATURE: upgrade Go builder from Go1.24.6 to Go1.25. See [Go1.25 release notes](https://tip.golang.org/doc/go1.25).
+* FEATURE: [vmui](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#vmui): add export functionality for Query (Table view) and RawQuery tabs in CSV/JSON format. See [#9332](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9332).
 
+* BUG: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): fix `timestamp` function compatibility with Prometheus when used with sub-expressions such as `timestamp(sum(foo))`. The fix applies only when `-search.disableImplicitConversion` flag is set. See more in [#9527-comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9527#issuecomment-3200646020) and [metricsql#55](https://github.com/VictoriaMetrics/metricsql/pull/55).
+
 ## [v1.124.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.124.0)
 
 Released at 2025-08-15
 
 **Update Note 1:** [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): performance regression for queries that match [previously deleted time series](https://docs.victoriametrics.com/#how-to-delete-time-series). The issue affects installations that previously deleted a big number of time series but continue querying them. More details in [#9602](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9602). The degradation will be addressed in upcoming releases.
 
 * SECURITY: upgrade Go builder from Go1.24.5 to Go1.24.6. See [the list of issues addressed in Go1.24.6](https://github.com/golang/go/issues?q=milestone%3AGo1.24.6+label%3ACherryPickApproved).
 
 * FEATURE: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/) in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): protect graphite `/render` API endpoint with new flag `-search.maxGraphitePathExpressionLen`. See this PR [#9534](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/9534) for details.
@@ -2147,7 +2147,7 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -1621,7 +1621,7 @@ The shortlist of configuration flags is the following:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -1415,7 +1415,7 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmauth/ .
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -504,7 +504,7 @@ Run `vmbackup -help` in order to see all the available options:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -636,7 +636,7 @@ command-line flags:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -508,7 +508,7 @@ Below is the list of configuration flags (it can be viewed by running `./vmgatew
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.

@@ -208,7 +208,7 @@ Run `vmrestore -help` in order to see all the available options:
   -tlsAutocertCacheDir string
      Directory to store TLS certificates issued via Let's Encrypt. Certificates are lost on restarts if this flag isn't set. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertEmail string
-     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir .This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
+     Contact email for the issued Let's Encrypt TLS certificates. See also -tlsAutocertHosts and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
   -tlsAutocertHosts array
      Optional hostnames for automatic issuing of Let's Encrypt TLS certificates. These hostnames must be reachable at -httpListenAddr . The -httpListenAddr must listen tcp port 443 . The -tlsAutocertHosts overrides -tlsCertFile and -tlsKeyFile . See also -tlsAutocertEmail and -tlsAutocertCacheDir . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
      Supports an array of values separated by comma or specified via multiple flags.
go.mod (2 changed lines)
@@ -30,7 +30,7 @@ require (
 	github.com/VictoriaMetrics/easyproto v0.1.4
 	github.com/VictoriaMetrics/fastcache v1.13.0
 	github.com/VictoriaMetrics/metrics v1.39.1
-	github.com/VictoriaMetrics/metricsql v0.84.6
+	github.com/VictoriaMetrics/metricsql v0.84.7
 	github.com/aws/aws-sdk-go-v2 v1.37.1
 	github.com/aws/aws-sdk-go-v2/config v1.30.2
 	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.2
go.sum (4 changed lines)
@@ -60,8 +60,8 @@ github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMG
 github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU=
 github.com/VictoriaMetrics/metrics v1.39.1 h1:AT7jz7oSpAK9phDl5O5Tmy06nXnnzALwqVnf4ros3Ow=
 github.com/VictoriaMetrics/metrics v1.39.1/go.mod h1:XE4uudAAIRaJE614Tl5HMrtoEU6+GDZO4QTnNSsZRuA=
-github.com/VictoriaMetrics/metricsql v0.84.6 h1:r1rl05prim/r+Me4BUULaZQYXn2eZa3dnrtk+hY3X90=
-github.com/VictoriaMetrics/metricsql v0.84.6/go.mod h1:d4EisFO6ONP/HIGDYTAtwrejJBBeKGQYiRl095bS4QQ=
+github.com/VictoriaMetrics/metricsql v0.84.7 h1:zMONjtEULMbwEYU/qL4Hkc3GDfTTrv1bO+a9lmJf3do=
+github.com/VictoriaMetrics/metricsql v0.84.7/go.mod h1:d4EisFO6ONP/HIGDYTAtwrejJBBeKGQYiRl095bS4QQ=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
@@ -28,7 +28,7 @@ func initExposeMetadata() {
 	metrics.ExposeMetadata(*exposeMetadata)
 }
 
-var versionRe = regexp.MustCompile(`v\d+\.\d+\.\d+(?:-enterprise)?(?:-cluster.*)?`)
+var versionRe = regexp.MustCompile(`v\d+\.\d+\.\d+(?:-enterprise)?(?:-cluster)?`)
 
 // WritePrometheusMetrics writes all the registered metrics to w in Prometheus exposition format.
 func WritePrometheusMetrics(w io.Writer) {
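The change drops the trailing `.*` from the `-cluster` group, so `versionRe` no longer swallows arbitrary text after `-cluster`. A quick TypeScript check of both patterns against an invented version string shows the difference:

```ts
const oldRe = /v\d+\.\d+\.\d+(?:-enterprise)?(?:-cluster.*)?/;
const newRe = /v\d+\.\d+\.\d+(?:-enterprise)?(?:-cluster)?/;

const tag = "v1.124.0-enterprise-cluster-dirty-20250815"; // invented example
console.log(tag.match(oldRe)?.[0]); // "v1.124.0-enterprise-cluster-dirty-20250815" (greedy tail)
console.log(tag.match(newRe)?.[0]); // "v1.124.0-enterprise-cluster"
```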

@@ -1,57 +0,0 @@
package promutil

import (
    "reflect"
    "runtime"
    "testing"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)

func TestLabelsCompressorV2(t *testing.T) {
    lc := NewLabelsCompressorV2()

    labels1 := []prompb.Label{
        {Name: "label1", Value: "value1"},
        {Name: "label2", Value: "value2"},
        {Name: "label3", Value: "value3"},
    }
    labels2 := []prompb.Label{
        {Name: "label3", Value: "value3"},
        {Name: "label4", Value: "value4"},
        {Name: "label5", Value: "value5"},
    }

    compressed1 := lc.Compress(labels1)
    compressed2 := lc.Compress(labels2)

    runtime.GC()
    cleaned := lc.Cleanup()
    if cleaned != 0 {
        t.Fatalf("lc.Cleanup() should've cleaned zero unused labels, got %d", cleaned)
    }

    decompressed1 := compressed1.Decompress()
    if !reflect.DeepEqual(labels1, decompressed1) {
        t.Fatalf("decompressed labels1 do not match original: got %+v, want %+v", decompressed1, labels1)
    }

    compressed1 = Key{}
    runtime.GC()
    cleaned = lc.Cleanup()
    if cleaned != 2 {
        t.Fatalf("lc.Cleanup() should've cleaned two unused labels, got %d", cleaned)
    }

    decompressed2 := compressed2.Decompress()
    if !reflect.DeepEqual(labels2, decompressed2) {
        t.Fatalf("decompressed labels2 do not match original: got %+v, want %+v", decompressed2, labels2)
    }

    compressed2 = Key{}
    runtime.GC()
    cleaned = lc.Cleanup()
    if cleaned != 3 {
        t.Fatalf("lc.Cleanup() should've cleaned three unused labels, got %d", cleaned)
    }
}

@@ -1,102 +0,0 @@
package promutil

import (
    "log"
    "sync"
    "time"
    "weak"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)

type Key struct {
    labelRefs []labelRef
}

func (k Key) Decompress() []prompb.Label {
    res := make([]prompb.Label, 0, len(k.labelRefs))
    for i := range k.labelRefs {
        res = append(res, cloneLabel(*k.labelRefs[i].label))
    }

    return res
}

type labelRef struct {
    label *prompb.Label
}

type LabelsCompressorV2 struct {
    mux    sync.Mutex
    labels map[prompb.Label]weak.Pointer[prompb.Label]
}

func NewLabelsCompressorV2() *LabelsCompressorV2 {
    lc := &LabelsCompressorV2{
        labels: make(map[prompb.Label]weak.Pointer[prompb.Label]),
    }

    go lc.cleanup()

    return lc
}

func (lc *LabelsCompressorV2) Compress(labels []prompb.Label) Key {
    lc.mux.Lock()
    defer lc.mux.Unlock()

    labelRefs := make([]labelRef, 0, len(labels))
    for i := range labels {
        wl := lc.labels[labels[i]]
        l := wl.Value()
        if l == nil {
            labelKey := cloneLabel(labels[i])
            labelVal := cloneLabel(labels[i])

            wl = weak.Make(&labelVal)
            lc.labels[labelKey] = wl

            l = wl.Value()
        }

        labelRefs = append(labelRefs, labelRef{
            label: l,
        })
    }

    return Key{
        labelRefs: labelRefs,
    }
}

func (lc *LabelsCompressorV2) cleanup() {
    t := time.NewTicker(5 * time.Minute)
    defer t.Stop()

    for {
        select {
        case <-t.C:
            lc.Cleanup()
        }
    }
}

func (lc *LabelsCompressorV2) Cleanup() int {
    lc.mux.Lock()
    defer lc.mux.Unlock()

    count := 0

    for l, wl := range lc.labels {
        if wl.Value() != nil {
            continue
        }

        log.Println(l)

        count++
        delete(lc.labels, l)
    }

    return count
}
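
A minimal usage sketch for the removed compressor, assuming the package lives at lib/promutil (the import path is an assumption) and Go 1.24+ for the weak package; runtime.GC() only makes the weakly-held labels collectable, it does not guarantee collection:

package main

import (
    "fmt"
    "runtime"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
)

func main() {
    lc := promutil.NewLabelsCompressorV2()

    // Compress interns each label behind a weak pointer; the returned Key
    // keeps strong references to the interned labels.
    key := lc.Compress([]prompb.Label{
        {Name: "job", Value: "webservice"},
        {Name: "instance", Value: "1.2.3.4"},
    })
    fmt.Println(key.Decompress()) // deep copies of the original labels

    // Dropping the Key removes the strong references; after a GC cycle,
    // Cleanup may evict the now-unreferenced labels from the intern map.
    key = promutil.Key{}
    runtime.GC()
    fmt.Println(lc.Cleanup()) // 2, if the GC collected both labels
}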

@@ -6,6 +6,7 @@ import (
    "fmt"
    "os"
    "path/filepath"
    "slices"
    "sort"
    "strings"
    "sync"
@@ -434,7 +435,21 @@ func (pt *partition) AddRows(rows []rawRow) {
            }
        }
    }
    if pt.s.enableDailyPartitioning {
        // sort rows so they can be grouped by date properly later
        slices.SortFunc(rows, func(a, b rawRow) int {
            if a.Timestamp < b.Timestamp {
                return -1
            }
            if a.Timestamp > b.Timestamp {
                return 1
            }
            return 0
        })

        pt.rawRows.addRowsGroupByDate(pt, rows)
        return
    }
    pt.rawRows.addRows(pt, rows)
}

@@ -468,6 +483,18 @@ func (rrss *rawRowsShards) addRows(pt *partition, rows []rawRow) {
    }
}

func (rrss *rawRowsShards) addRowsGroupByDate(pt *partition, rows []rawRow) {
    shards := rrss.shards
    shardsLen := uint32(len(shards))
    for len(rows) > 0 {
        n := rrss.shardIdx.Add(1)
        idx := n % shardsLen
        tailRows, rowsToFlush := shards[idx].addRowsGroupByDate(rows)
        rrss.addRowsToFlush(pt, rowsToFlush)
        rows = tailRows
    }
}

func (rrss *rawRowsShards) addRowsToFlush(pt *partition, rowsToFlush []rawRow) {
    if len(rowsToFlush) == 0 {
        return
@@ -555,6 +582,69 @@ func (rrs *rawRowsShard) addRows(rows []rawRow) ([]rawRow, []rawRow) {
    return rows, rowsToFlush
}

func (rrs *rawRowsShard) addRowsGroupByDate(rows []rawRow) ([]rawRow, []rawRow) {
    var rowsToFlush []rawRow

    rrs.mu.Lock()
    defer rrs.mu.Unlock()
    if cap(rrs.rows) == 0 {
        rrs.rows = newRawRows()
    }
    if len(rrs.rows) == 0 {
        rrs.updateFlushDeadline()
    }

    firstRowsDate := rows[0].Timestamp / msecPerDay

    // check whether the already buffered data belongs to the same date
    if len(rrs.rows) > 0 && rrs.rows[len(rrs.rows)-1].Timestamp/msecPerDay != firstRowsDate {
        // flush the buffered rows
        rowsToFlush = rrs.rows
        rrs.rows = newRawRows()
        rrs.updateFlushDeadline()
        return rows, rowsToFlush
    }

    // check whether all the rows belong to the same date;
    // rows must be sorted in ascending order
    if firstRowsDate < rows[len(rows)-1].Timestamp/msecPerDay {
        nextDateTimestamp := (firstRowsDate + 1) * msecPerDay
        i := sort.Search(len(rows), func(i int) bool {
            return rows[i].Timestamp >= nextDateTimestamp
        })

        if isDebug {
            switch i {
            case 0:
                logger.Panicf("BUG: boundary index %d must be positive; first row timestamp: %d", i, rows[0].Timestamp)
            case len(rows) - 1:
                logger.Panicf("BUG: boundary index %d must be smaller than len(rows)-1; last row timestamp: %d", i, rows[len(rows)-1].Timestamp)
            }
        }

        n := copy(rrs.rows[len(rrs.rows):cap(rrs.rows)], rows[:i])
        rrs.rows = rrs.rows[:len(rrs.rows)+n]
        rows = rows[n:]
        rowsToFlush = rrs.rows
        rrs.rows = newRawRows()
        rrs.updateFlushDeadline()
        return rows, rowsToFlush
    }
    n := copy(rrs.rows[len(rrs.rows):cap(rrs.rows)], rows)
    rrs.rows = rrs.rows[:len(rrs.rows)+n]
    rows = rows[n:]
    if len(rows) > 0 {
        rowsToFlush = rrs.rows
        rrs.rows = newRawRows()
        rrs.updateFlushDeadline()
        n = copy(rrs.rows[:cap(rrs.rows)], rows)
        rrs.rows = rrs.rows[:n]
        rows = rows[n:]
    }

    return rows, rowsToFlush
}
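
The boundary search above is plain sort.Search over timestamps that are already sorted in ascending order; a standalone sketch using int64 slices instead of rawRow:

package main

import (
    "fmt"
    "sort"
)

const msecPerDay = 24 * 3600 * 1000

// splitAtNextDay returns the prefix of ts that belongs to the first day
// and the tail that starts on a later day, mirroring the boundary search
// in addRowsGroupByDate above.
func splitAtNextDay(ts []int64) (sameDay, tail []int64) {
    firstDay := ts[0] / msecPerDay
    nextDayTimestamp := (firstDay + 1) * msecPerDay
    i := sort.Search(len(ts), func(i int) bool {
        return ts[i] >= nextDayTimestamp
    })
    return ts[:i], ts[i:]
}

func main() {
    ts := []int64{10, msecPerDay - 1, msecPerDay, msecPerDay + 5}
    sameDay, tail := splitAtNextDay(ts)
    fmt.Println(sameDay) // [10 86399999]
    fmt.Println(tail)    // [86400000 86400005]
}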

func newRawRows() []rawRow {
    return make([]rawRow, 0, maxRawRowsPerShard)
}

@@ -576,7 +666,6 @@ func (pt *partition) flushRowssToInmemoryParts(rowss [][]rawRow) {
            <-inmemoryPartsConcurrencyCh
            wg.Done()
        }()

        pw := pt.createInmemoryPart(rowsChunk)
        if pw != nil {
            pwsLock.Lock()
@@ -588,6 +677,16 @@
    wg.Wait()
    putWaitGroup(wg)

    if pt.s.enableDailyPartitioning {
        // daily partitioned parts may belong to different dates,
        // so they cannot be merged into a single part
        pt.partsLock.Lock()
        pt.inmemoryParts = append(pt.inmemoryParts, pws...)
        pt.startInmemoryPartsMergerLocked()
        pt.partsLock.Unlock()

        return
    }
    // Merge pws into a single in-memory part.
    maxPartSize := getMaxInmemoryPartSize()
    for len(pws) > 1 {
@@ -627,7 +726,12 @@ func (pt *partition) inmemoryPartsMerger() {
        return
    }
    maxOutBytes := pt.getMaxBigPartSize()

    if pt.s.enableDailyPartitioning {
        if !pt.tryMergeDailyPartitionedParts(partInmemory, inmemoryPartsConcurrencyCh, maxOutBytes) {
            return
        }
        continue
    }
    pt.partsLock.Lock()
    pws := getPartsToMerge(pt.inmemoryParts, maxOutBytes)
    pt.partsLock.Unlock()
@@ -660,7 +764,12 @@ func (pt *partition) smallPartsMerger() {
        return
    }
    maxOutBytes := pt.getMaxBigPartSize()

    if pt.s.enableDailyPartitioning {
        if !pt.tryMergeDailyPartitionedParts(partSmall, smallPartsConcurrencyCh, maxOutBytes) {
            return
        }
        continue
    }
    pt.partsLock.Lock()
    pws := getPartsToMerge(pt.smallParts, maxOutBytes)
    pt.partsLock.Unlock()
@@ -693,7 +802,12 @@ func (pt *partition) bigPartsMerger() {
        return
    }
    maxOutBytes := pt.getMaxBigPartSize()

    if pt.s.enableDailyPartitioning {
        if !pt.tryMergeDailyPartitionedParts(partBig, bigPartsConcurrencyCh, maxOutBytes) {
            return
        }
        continue
    }
    pt.partsLock.Lock()
    pws := getPartsToMerge(pt.bigParts, maxOutBytes)
    pt.partsLock.Unlock()
@@ -735,6 +849,9 @@ func putWaitGroup(wg *sync.WaitGroup) {
var wgPool sync.Pool

func (pt *partition) mustMergeInmemoryParts(pws []*partWrapper) []*partWrapper {
    if pt.s.enableDailyPartitioning {
        logger.Fatalf("BUG: mustMergeInmemoryParts cannot be called for the daily partitioning scheme")
    }
    var pwsResult []*partWrapper
    var pwsResultLock sync.Mutex
    wg := getWaitGroup()
@@ -1124,6 +1241,16 @@ func (pt *partition) flushInmemoryPartsToFiles(isFinal bool) {
    }
    pt.partsLock.Unlock()

    if pt.s.enableDailyPartitioning {
        pwss := groupPartsByDate(pws)
        for _, pws := range pwss {
            if err := pt.mergePartsToFiles(pws, nil, inmemoryPartsConcurrencyCh, false); err != nil {
                logger.Panicf("FATAL: cannot merge in-memory parts: %s", err)
            }
        }
        return
    }
    if err := pt.mergePartsToFiles(pws, nil, inmemoryPartsConcurrencyCh, false); err != nil {
        logger.Panicf("FATAL: cannot merge in-memory parts: %s", err)
    }
@@ -1142,13 +1269,13 @@ func (rrss *rawRowsShards) flush(pt *partition, isFinal bool) {
    }

    for i := range rrss.shards {
        dst = rrss.shards[i].appendRawRowsToFlush(dst, currentTimeMs, isFinal)
        dst = rrss.shards[i].appendRawRowsToFlush(dst, currentTimeMs, isFinal, pt.s.enableDailyPartitioning)
    }

    pt.flushRowssToInmemoryParts(dst)
}

func (rrs *rawRowsShard) appendRawRowsToFlush(dst [][]rawRow, currentTimeMs int64, isFinal bool) [][]rawRow {
func (rrs *rawRowsShard) appendRawRowsToFlush(dst [][]rawRow, currentTimeMs int64, isFinal bool, useDailyPartitioning bool) [][]rawRow {
    flushDeadlineMs := rrs.flushDeadlineMs.Load()
    if !isFinal && currentTimeMs < flushDeadlineMs {
        // Fast path - nothing to flush
@@ -1157,7 +1284,12 @@ func (rrs *rawRowsShard) appendRawRowsToFlush(dst [][]rawRow, currentTimeMs int6

    // Slow path - move rrs.rows to dst.
    rrs.mu.Lock()
    dst = appendRawRowss(dst, rrs.rows)
    if useDailyPartitioning {
        dst = appendRawRowssGroupByDate(dst, rrs.rows)
    } else {
        dst = appendRawRowss(dst, rrs.rows)
    }
    rrs.rows = rrs.rows[:0]
    rrs.mu.Unlock()
@@ -1189,6 +1321,41 @@ func appendRawRowss(dst [][]rawRow, src []rawRow) [][]rawRow {
    return dst
}

func appendRawRowssGroupByDate(dst [][]rawRow, src []rawRow) [][]rawRow {
    if len(src) == 0 {
        return dst
    }
    if len(dst) == 0 {
        dst = append(dst, newRawRows())
    }
    firstRowsDate := src[0].Timestamp / msecPerDay
    if firstRowsDate < src[len(src)-1].Timestamp/msecPerDay {
        logger.Panicf("BUG: src must belong to the same date, firstRowsDate: %d last row date: %d", firstRowsDate, src[len(src)-1].Timestamp/msecPerDay)
    }

    prows := &dst[len(dst)-1]
    // check if the buffered rows belong to the same date
    if len(*prows) > 0 {
        lastProwsDate := (*prows)[len(*prows)-1].Timestamp / msecPerDay
        if lastProwsDate != firstRowsDate {
            dst = append(dst, newRawRows())
        }
        prows = &dst[len(dst)-1]
    }

    n := copy((*prows)[len(*prows):cap(*prows)], src)
    *prows = (*prows)[:len(*prows)+n]
    src = src[n:]
    for len(src) > 0 {
        rows := newRawRows()
        n := copy(rows[:cap(rows)], src)
        rows = rows[:len(rows)+n]
        src = src[n:]
        dst = append(dst, rows)
    }
    return dst
}

func (pt *partition) mergePartsToFiles(pws []*partWrapper, stopCh <-chan struct{}, concurrencyCh chan struct{}, useSparseCache bool) error {
    pwsLen := len(pws)
@@ -1242,6 +1409,15 @@ func (pt *partition) ForceMergeAllParts(stopCh <-chan struct{}) error {
        return nil
    }

    if pt.s.enableDailyPartitioning {
        pwss := groupPartsByDate(pws)
        for _, pws := range pwss {
            if err := pt.mergePartsToFiles(pws, stopCh, bigPartsConcurrencyCh, true); err != nil {
                return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err)
            }
        }
        return nil
    }
    // If len(pws) == 1, then the merge must run anyway.
    // This allows applying the configured retention, removing the deleted series
    // and performing de-duplication if needed.
@@ -1496,11 +1672,17 @@ func getFlushToDiskDeadline(pws []*partWrapper) time.Time {
type partType int

var (
    partInmemory = partType(0)
    partSmall    = partType(1)
    partBig      = partType(2)
    partInmemory    = partType(0)
    partSmall       = partType(1)
    partBig         = partType(2)
    partTypesString = [3]string{0: "in-memory", 1: "small", 2: "big"}
)

// String implements the fmt.Stringer interface
func (p partType) String() string {
    return partTypesString[p]
}

func (pt *partition) getDstPartType(pws []*partWrapper, isFinal bool) partType {
    dstPartSize := getPartsSize(pws)
    if dstPartSize > pt.getMaxSmallPartSize() {
@@ -2108,3 +2290,98 @@ func mustReadPartNamesFromDir(srcDir string) []string {
func isSpecialDir(name string) bool {
    return name == "tmp" || name == "txn" || name == snapshotsDirname
}

func groupPartsByDate(src []*partWrapper) [][]*partWrapper {
    if len(src) == 0 {
        return nil
    }
    // allocate a new slice covering 30 days + 1 day reserved for migration
    dst := make([][]*partWrapper, 0, 31)

    // reserve index 0 as a destination for parts that cover more than 1 day;
    // this is the usual case when migrating from previous versions
    dst = dst[:1]
    sort.Slice(src, func(i, j int) bool { return src[i].p.ph.MinTimestamp < src[j].p.ph.MinTimestamp })
    var prevDate int64
    var prevIdx int
    for _, ptw := range src {
        currDate := ptw.p.ph.MinTimestamp / msecPerDay
        maxDate := ptw.p.ph.MaxTimestamp / msecPerDay
        if maxDate-currDate >= 1 {
            dst[0] = append(dst[0], ptw)
            continue
        }
        if currDate == prevDate {
            dst[prevIdx] = append(dst[prevIdx], ptw)
            continue
        }
        prevIdx++
        prevDate = currDate
        dst = append(dst, []*partWrapper{ptw})
    }
    if len(dst[0]) == 0 {
        dst = dst[1:]
    }
    return dst
}

// tryMergeDailyPartitionedParts handles merging when daily partitioning is enabled.
// It returns false if no further merging should continue.
func (pt *partition) tryMergeDailyPartitionedParts(partsType partType, concurrencyCh chan struct{}, maxOutBytes uint64) bool {
    pt.partsLock.Lock()

    var pws []*partWrapper
    var partPath string
    switch partsType {
    case partBig:
        partPath = pt.bigPartsPath
        pws = pt.bigParts
    case partSmall:
        partPath = pt.smallPartsPath
        pws = pt.smallParts
    case partInmemory:
        partPath = "in-memory"
        pws = pt.inmemoryParts
    default:
        logger.Fatalf("BUG: unexpected partsType: %d", partsType)
    }
    pwss := groupPartsByDate(pws)
    var cnt int
    for _, pws := range pwss {
        pws = getPartsToMerge(pws, maxOutBytes)
        if len(pws) == 0 {
            continue
        }
        pwss[cnt] = pws
        cnt++
    }
    pwss = pwss[:cnt]
    pt.partsLock.Unlock()

    if len(pwss) == 0 {
        return false
    }

    for idx, pws := range pwss {
        concurrencyCh <- struct{}{}
        err := pt.mergeParts(pws, pt.stopCh, false, false)
        <-concurrencyCh
        if err != nil {
            if errors.Is(err, errForciblyStopped) {
                if idx+1 < len(pwss) {
                    // properly release the unmerged parts from the remaining iterations,
                    // so they can be flushed when MustClose is called
                    for _, pwsToRelease := range pwss[idx+1:] {
                        pt.releasePartsToMerge(pwsToRelease)
                    }
                }

                // Nothing to do - finish the merger.
                return false
            }
            // Unexpected error.
            logger.Panicf("FATAL: unrecoverable error when merging %s parts at %q: %s", partsType, partPath, err)
        }
    }
    return true
}

@@ -323,3 +323,102 @@ func TestMustOpenPartition_smallAndBigPartsPathsAreNotTheSame(t *testing.T) {
    _ = mustOpenPartition(smallPartsPath, bigPartsPath, s)
}

func TestGroupPartsByDate(t *testing.T) {
    f := func(pws []*partWrapper, expected [][]*partWrapper) {
        t.Helper()
        got := groupPartsByDate(pws)

        if len(expected) != len(got) {
            t.Fatalf("groupPartsByDate: unexpected number of day groups: expected=%d, got=%d", len(expected), len(got))
        }

        cmpPws := func(idx int, a, b []*partWrapper) {
            t.Helper()
            if len(a) != len(b) {
                t.Fatalf("group[%d]: unexpected number of parts: expected=%d, got=%d", idx, len(a), len(b))
            }
            for i := range a {
                if a[i].p.ph.MinTimestamp != b[i].p.ph.MinTimestamp {
                    t.Fatalf("group[%d] part[%d]: MinTimestamp mismatch: expected=%d, got=%d",
                        idx, i, a[i].p.ph.MinTimestamp, b[i].p.ph.MinTimestamp)
                }
                if a[i].p.ph.MaxTimestamp != b[i].p.ph.MaxTimestamp {
                    t.Fatalf("group[%d] part[%d]: MaxTimestamp mismatch: expected=%d, got=%d",
                        idx, i, a[i].p.ph.MaxTimestamp, b[i].p.ph.MaxTimestamp)
                }
            }
        }

        for i := range expected {
            cmpPws(i, expected[i], got[i])
        }
    }

    // empty input
    f(nil, nil)

    // a single part spanning a day boundary
    src := []*partWrapper{
        {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: msecPerDay}}},
    }
    expected := [][]*partWrapper{
        {
            {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: msecPerDay}}},
        },
    }
    f(src, expected)

    // group by a single day
    src = []*partWrapper{
        {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: msecPerDay - 1024}}},
        {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: msecPerDay - 512}}},
    }
    expected = [][]*partWrapper{
        {
            {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: msecPerDay - 1024}}},
            {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: msecPerDay - 512}}},
        },
    }
    f(src, expected)

    // group into 2 days
    src = []*partWrapper{
        {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 1024}}},
        {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 512}}},
        {p: &part{ph: partHeader{MinTimestamp: 2 * msecPerDay, MaxTimestamp: 2*msecPerDay + 1024}}},
    }
    expected = [][]*partWrapper{
        {
            {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 1024}}},
            {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 512}}},
        },
        {
            {p: &part{ph: partHeader{MinTimestamp: 2 * msecPerDay, MaxTimestamp: 2*msecPerDay + 1024}}},
        },
    }
    f(src, expected)

    // group into 2 days + an extra group for multi-day (migration) parts
    src = []*partWrapper{
        {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 1024}}},
        {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 512}}},
        {p: &part{ph: partHeader{MinTimestamp: 2 * msecPerDay, MaxTimestamp: 2*msecPerDay + 1024}}},
        {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: 2 * msecPerDay}}},
        {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: 5 * msecPerDay}}},
    }
    expected = [][]*partWrapper{
        {
            {p: &part{ph: partHeader{MinTimestamp: 0, MaxTimestamp: 2 * msecPerDay}}},
            {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: 5 * msecPerDay}}},
        },
        {
            {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 1024}}},
            {p: &part{ph: partHeader{MinTimestamp: msecPerDay, MaxTimestamp: msecPerDay + 512}}},
        },
        {
            {p: &part{ph: partHeader{MinTimestamp: 2 * msecPerDay, MaxTimestamp: 2*msecPerDay + 1024}}},
        },
    }
    f(src, expected)
}

@@ -91,6 +91,8 @@ type Storage struct {

    disablePerDayIndex bool

    enableDailyPartitioning bool

    tb *table

    // Series cardinality limiters.
@@ -204,12 +206,17 @@ func MustOpenStorage(path string, opts OpenOptions) *Storage {
    if idbPrefillStart <= 0 {
        idbPrefillStart = time.Hour
    }
    var enableDailyPartitioning bool
    if retention < 30*24*time.Hour {
        enableDailyPartitioning = true
    }
    s := &Storage{
        path:                   path,
        cachePath:              filepath.Join(path, cacheDirname),
        retentionMsecs:         retention.Milliseconds(),
        stopCh:                 make(chan struct{}),
        idbPrefillStartSeconds: idbPrefillStart.Milliseconds() / 1000,
        path:                    path,
        cachePath:               filepath.Join(path, cacheDirname),
        retentionMsecs:          retention.Milliseconds(),
        stopCh:                  make(chan struct{}),
        idbPrefillStartSeconds:  idbPrefillStart.Milliseconds() / 1000,
        enableDailyPartitioning: enableDailyPartitioning,
    }
    s.logNewSeries.Store(opts.LogNewSeries)
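
There is no dedicated option for the new behavior: daily partitioning is implied by the retention passed to MustOpenStorage. A sketch, with a hypothetical data directory:

package main

import (
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func main() {
    // A retention below 30*24h triggers enableDailyPartitioning = true
    // per the condition added above.
    s := storage.MustOpenStorage("victoria-metrics-data", storage.OpenOptions{
        Retention: 10 * 24 * time.Hour,
    })
    defer s.MustClose()
}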
|
||||
|
||||
// Try opening the storage from snapshot.
|
||||
snapshotPath := filepath.Join(s.path, snapshotsDirname, snapshotName)
|
||||
s1 := MustOpenStorage(snapshotPath, OpenOptions{})
|
||||
s1 := MustOpenStorage(snapshotPath, OpenOptions{Retention: time.Hour * 24 * 10})
|
||||
|
||||
// Verify the snapshot contains rows
|
||||
var m1 Metrics
|
||||
@@ -1709,17 +1709,20 @@ func testStorageAddRows(rng *rand.Rand, s *Storage) error {
|
||||
if err := s1.ForceMergePartitions(""); err != nil {
|
||||
return fmt.Errorf("error when force merging partitions: %w", err)
|
||||
}
|
||||
ptws := s1.tb.GetPartitions(nil)
|
||||
for _, ptw := range ptws {
|
||||
pws := ptw.pt.GetParts(nil, true)
|
||||
numParts := len(pws)
|
||||
ptw.pt.PutParts(pws)
|
||||
if numParts > 1 {
|
||||
s1.tb.PutPartitions(ptws)
|
||||
return fmt.Errorf("unexpected number of parts for partition %q after force merge; got %d; want at most 1", ptw.pt.name, numParts)
|
||||
if !s.enableDailyPartitioning {
|
||||
ptws := s1.tb.GetPartitions(nil)
|
||||
for _, ptw := range ptws {
|
||||
pws := ptw.pt.GetParts(nil, true)
|
||||
numParts := len(pws)
|
||||
ptw.pt.PutParts(pws)
|
||||
if numParts > 1 {
|
||||
s1.tb.PutPartitions(ptws)
|
||||
return fmt.Errorf("unexpected number of parts for partition %q after force merge; got %d; want at most 1", ptw.pt.name, numParts)
|
||||
}
|
||||
}
|
||||
s1.tb.PutPartitions(ptws)
|
||||
|
||||
}
|
||||
s1.tb.PutPartitions(ptws)
|
||||
|
||||
s1.MustClose()
|
||||
|
||||

@@ -4679,3 +4682,37 @@ func assertIndexDBIsNotNil(t *testing.T, idb *indexDB) {
        t.Fatalf("unexpected idb: got nil, want non-nil")
    }
}

func TestStorageAddRowsDailyPartitioning(t *testing.T) {
    rng := rand.New(rand.NewSource(1))
    path := "TestStorageAddRowsDailyPartitioning"
    opts := OpenOptions{
        Retention:       10 * 24 * time.Hour,
        MaxHourlySeries: 1e5,
        MaxDailySeries:  1e5,
    }
    s := MustOpenStorage(path, opts)
    if err := testStorageAddRows(rng, s); err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    assertPartRowsBelongToTheSameDate := func(pws []*partWrapper) {
        t.Helper()
        for _, pw := range pws {
            minDate := pw.p.ph.MinTimestamp / msecPerDay
            maxDate := pw.p.ph.MaxTimestamp / msecPerDay
            if maxDate-minDate > 0 {
                t.Fatalf("part path: %s: rows must belong to the same date, minDate: %d, maxDate: %d", pw.p.path, minDate, maxDate)
            }
        }
    }
    partitions := s.tb.GetPartitions(nil)
    for _, p := range partitions {
        p.pt.partsLock.Lock()
        assertPartRowsBelongToTheSameDate(p.pt.smallParts)
        assertPartRowsBelongToTheSameDate(p.pt.bigParts)
        p.pt.partsLock.Unlock()
    }
    s.tb.PutPartitions(partitions)
    s.MustClose()
    fs.MustRemoveDir(path)
}

@@ -612,3 +612,49 @@ func benchmarkDirSize(path string) int64 {
    }
    return size
}

func BenchmarkStorageAddRowsDailyPartitioning(b *testing.B) {
    defer fs.MustRemoveDir(b.Name())

    f := func(b *testing.B, numRows int) {
        b.Helper()

        s := MustOpenStorage(b.Name(), OpenOptions{Retention: 10 * 24 * time.Hour})
        defer s.MustClose()

        var globalOffset atomic.Uint64

        globalOffset.Store(uint64(time.Now().UnixMilli() - msecPerDay*2))

        b.SetBytes(int64(numRows))
        b.ReportAllocs()
        b.ResetTimer()
        b.RunParallel(func(pb *testing.PB) {
            mrs := make([]MetricRow, numRows)
            var mn MetricName
            mn.MetricGroup = []byte("rps")
            mn.Tags = []Tag{
                {[]byte("job"), []byte("webservice")},
                {[]byte("instance"), []byte("1.2.3.4")},
            }
            for pb.Next() {
                offset := int(globalOffset.Add(uint64(numRows)))
                for i := 0; i < numRows; i++ {
                    mr := &mrs[i]
                    mr.MetricNameRaw = mn.marshalRaw(mr.MetricNameRaw[:0])
                    mr.Timestamp = int64(offset + i)
                    mr.Value = float64(offset + i)
                }
                s.AddRows(mrs, defaultPrecisionBits)
            }
        })
        b.StopTimer()
        s.DebugFlush()
    }

    for _, numRows := range []int{1, 10, 100, 1000, 10000} {
        b.Run(fmt.Sprintf("%d", numRows), func(b *testing.B) {
            f(b, numRows)
        })
    }
}

vendor/github.com/VictoriaMetrics/metricsql/utils.go (generated, vendored)
@@ -65,10 +65,10 @@ func VisitAll(e Expr, f func(expr Expr)) {
//
// These expressions are implicitly converted into other expressions, which return unexpected results most of the time:
//
// rate(default_rollup(sum(foo))[1i:1i])
// rate(default_rollup(abs(foo))[1i:1i])
// rate(default_rollup(foo + bar)[1i:1i])
// rate(default_rollup(foo > 10)[1i:1i])
// rate(sum(default_rollup(foo[1i:1i])))
// rate(abs(default_rollup(foo[1i:1i])))
// rate(default_rollup(foo[1i:1i]) + default_rollup(bar[1i:1i]))
// rate(default_rollup(foo[1i:1i]) > 10)
//
// See https://docs.victoriametrics.com/victoriametrics/metricsql/#implicit-query-conversions
//
@@ -83,6 +83,17 @@ func IsLikelyInvalid(e Expr) bool {
    if !ok {
        return
    }
    if fe.Name == `timestamp` {
        // In Prometheus, timestamp is defined as a transform function on instant vectors,
        // but its behavior is closer to a rollup since it returns raw sample timestamps.
        // VictoriaMetrics explicitly defines timestamp as a rollup function.
        // To remain consistent with Prometheus, IsLikelyInvalid does not treat timestamp
        // as an implicit conversion even when applied to non-metric expressions, like timestamp(sum(foo)).
        //
        // See more in https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9527#issuecomment-3191439447
        return
    }

    idx := GetRollupArgIdx(fe)
    if idx < 0 || idx >= len(fe.Args) {
        return
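
A small sketch of the resulting behavior, assuming metricsql.Parse from the same package; the expected outputs follow the doc comment above rather than a verified run:

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/metricsql"
)

func main() {
    for _, q := range []string{
        `timestamp(sum(foo))`, // exempted by the branch above => not flagged
        `rate(sum(foo))`,      // implicit conversion => expected to be flagged
    } {
        expr, err := metricsql.Parse(q)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s => likely invalid: %v\n", q, metricsql.IsLikelyInvalid(expr))
    }
}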

vendor/modules.txt (vendored)
@@ -142,7 +142,7 @@ github.com/VictoriaMetrics/fastcache
# github.com/VictoriaMetrics/metrics v1.39.1
## explicit; go 1.18
github.com/VictoriaMetrics/metrics
# github.com/VictoriaMetrics/metricsql v0.84.6
# github.com/VictoriaMetrics/metricsql v0.84.7
## explicit; go 1.24.2
github.com/VictoriaMetrics/metricsql
github.com/VictoriaMetrics/metricsql/binaryop