Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2026-05-17 08:36:55 +03:00
Compare commits
212 Commits
detached...feature/me
520123508e 1590626d0b d2487fdb19 0c1f624985 a3fb0fece1 0d842a7620 48bd251817 11ce870625
d84f96505b 124163d4b2 65087f08c4 ca0479fff3 644c7a97c8 098cba5b73 5d0e8c0d1b 442bfa6c35
ee031b21a7 deec361a64 fd4dce81ce 5431696d83 1b71184bfb 29c06b9543 369f3f0da1 6f93f0e1a7
96b773198f 006381266b ae1dffe5d3 80e508eac7 86d8095417 df847e62c0 1e3791cbf6 b1f3d7f3eb
c2c6f97bd3 274e38ebee e05738749c 3428d0d27d f8170b2471 57925c34e6 9e60fc8fc8 07c8a4a9a7
e098ef901f 7693c4bcce 8bb56f8ef5 b91d249a29 7b274e0d6d b3c92540e5 0dd63a60d0 32fc801519
771f742842 a845fe815a 2eadd4c9f9 eb7b088c91 19a50ae7cd 01d413873e d307a64cd2 27f1c1ab13
60396d0daa 9b2c1b00cf 892008b05d dc2da9a71b 5908ee1009 c930a81ea9 c95990f47f f0442e40a0
5d06c74e2b 63dccea932 46acf8edc0 d478d1496a 272a77a9c3 e72a3fdb67 7413000e57 ddd686c026
0e8007a02b cbd76ac4dc 96b8213b0d 971c759acc 01a7ab8bf4 d29bb97fec 48554d51b9 105a42ce08
8b58dc1892 d9064dc781 ff9cb3f821 50969ca780 195afd1c2e c5743a7099 ac11f184fc 973eb1cc4f
3382bbf285 5ec7cc5dd4 9b21dc5a30 e828f03eaa 4dc9ca26fc 63e1bf5d97 d8b36fb2e3 aa3a2b01aa
fd543883fa bbf3ab099b fa68453e41 8f47e30c1d f66981cac1 780c67d139 9244557b6e ee940e81ec
695532fc8d ccb5b47914 e0f3ecd073 646604d850 0e6b3eabb5 7165820b6a d233170ada c7f2d91d08
7f5a8af464 181a465c89 7288adab21 993a9d92d6 45c889a1cf dca5d44f2b 9e4f0cc900 54dc9cc322
67e6752b82 bb54075c23 08f5220bc3 f9015da6eb 539498058e b3d22403eb 0fce51e3b4 9e118fe1ee
3553c60399 9b54bd6e8d a90edc71c7 83deddc84c 434cb7028c 107b6517b7 f68f5b3113 d49b4a7550
bd8b4eb78b 41558066db af064ca65a eced71a96d 23fd269ccf 1f5d02e059 690aaf7d2d 1e0f7f0d28
c7a16e1df6 2cb909022f fe70b963e4 9bb726751c 3c85ffb1e6 65cb6468ac 8e645ea708 b95bdb5781
5ecc5770c2 02c03793b3 c74c4b24d7 07be0c6129 826c408e0e 913b64d9b5 6b76dead5a 41991edb34
eb7c21bde5 3cc8013dd9 1209f33c6d 3c87e361ba f5c9c5bf01 7712a34ba6 d890bf52fe f52478dac7
bcc2c85e53 001f9218b1 f7fc897f85 e58b512305 d33efbbd95 23cb0475e9 3d3fcf8fcb d99e3e52f3
bbcfc0ce59 d9ac6867cb 00712b184b 30ca617960 aba5205896 aef59d9281 b1582b3012 dd769d87c0
febe9a2882 337ccd7c62 c9789b3c18 c9db487613 77fffb4dc7 8701ec0968 94f3302aca 16909a2b6b
51fdd885ea a213f5a423 3a812a8b28 4375699013 53a6bbfdf8 897f1b97e3 309f1898b3 8998526384
e55e2a4274 29ec5d2898 adef9693af 8f01ac42a8 8223a5235f fe5f2bd5d7 00075ac4ee 3f39946f99
1ddfd55e51 5bb012b67b 78fb987bef a0084dc223
2  .github/pull_request_template.md (vendored)

@@ -6,4 +6,4 @@ Please provide a brief description of the changes you made. Be as specific as po
 The following checks are **mandatory**:

-- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/).
+- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
21  Makefile

@@ -195,6 +195,25 @@ vmutils-crossbuild: \
 	vmutils-openbsd-amd64 \
 	vmutils-windows-amd64

+publish-latest:
+	PKG_TAG=$(TAG) APP_NAME=victoria-metrics $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmagent $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmalert $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmalert-tool $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmauth $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmbackup $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmrestore $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG) APP_NAME=vmctl $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG)-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG)-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG)-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-latest && \
+	PKG_TAG=$(TAG)-enterprise APP_NAME=vmgateway $(MAKE) publish-via-docker-latest
+	PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackupmanager $(MAKE) publish-via-docker-latest
+
+publish-victoria-logs-latest:
+	PKG_TAG=$(TAG) APP_NAME=victoria-logs $(MAKE) publish-via-docker-latest
+	PKG_TAG=$(TAG) APP_NAME=vlogscli $(MAKE) publish-via-docker-latest
+
 publish-release:
 	rm -rf bin/*
 	git checkout $(TAG) && $(MAKE) release && $(MAKE) publish && \

@@ -526,7 +545,7 @@ test-full:
 test-full-386:
 	GOEXPERIMENT=synctest GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

-integration-test: victoria-metrics vmagent vmalert vmauth
+integration-test: victoria-metrics vmagent vmalert vmauth vmctl vmbackup vmrestore victoria-logs
 	go test ./apptest/... -skip="^TestCluster.*"

 benchmark:
12  README.md

@@ -40,16 +40,16 @@ VictoriaMetrics is optimized for timeseries data, even when old time series are
 * **Easy to setup**: No dependencies, single [small binary](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d), configuration through command-line flags, but the default is also fine-tuned; backup and restore with [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
 * **Global query view**: Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics and queried via a single query.
 * **Various Protocols**: Support metric scraping, ingestion and backfilling in various protocol.
-  * [Prometheus exporters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/victoriametrics/integrations/prometheus), [Prometheus exposition format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format).
-  * [InfluxDB line protocol](https://docs.victoriametrics.com/victoriametrics/integrations/influxdb) over HTTP, TCP and UDP.
+  * [Prometheus exporters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/victoriametrics/integrations/prometheus/), [Prometheus exposition format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format).
+  * [InfluxDB line protocol](https://docs.victoriametrics.com/victoriametrics/integrations/influxdb/) over HTTP, TCP and UDP.
   * [Graphite plaintext protocol](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
-  * [OpenTSDB put message](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb#sending-data-via-telnet).
-  * [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb#sending-data-via-http).
+  * [OpenTSDB put message](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-telnet).
+  * [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-http).
   * [JSON line format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-json-line-format).
   * [Arbitrary CSV data](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-csv-data).
   * [Native binary format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-native-format).
-  * [DataDog agent or DogStatsD](https://docs.victoriametrics.com/victoriametrics/integrations/datadog).
-  * [NewRelic infrastructure agent](https://docs.victoriametrics.com/victoriametrics/integrations/newrelic#sending-data-from-agent).
+  * [DataDog agent or DogStatsD](https://docs.victoriametrics.com/victoriametrics/integrations/datadog/).
+  * [NewRelic infrastructure agent](https://docs.victoriametrics.com/victoriametrics/integrations/newrelic/#sending-data-from-agent).
   * [OpenTelemetry metrics format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#sending-data-via-opentelemetry).
 * **NFS-based storages**: Supports storing data on NFS-based storages such as Amazon EFS, Google Filestore.
 * And many other features such as metrics relabeling, cardinality limiter, etc.
@@ -8,6 +8,7 @@ import (
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"

@@ -44,6 +45,8 @@ func main() {
 	vlstorage.Init()
 	vlselect.Init()
+
+	insertutil.SetLogRowsStorage(&vlstorage.Storage{})
 	vlinsert.Init()

 	go httpserver.Serve(listenAddrs, requestHandler, httpserver.ServeOptions{
@@ -11,7 +11,6 @@ import (
 	"github.com/valyala/fastjson"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"

@@ -33,10 +32,10 @@ var parserPool fastjson.ParserPool
 // RequestHandler processes Datadog insert requests
 func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 	switch path {
-	case "/api/v1/validate":
+	case "/insert/datadog/api/v1/validate":
 		fmt.Fprintf(w, `{}`)
 		return true
-	case "/api/v2/logs":
+	case "/insert/datadog/api/v2/logs":
 		return datadogLogsIngestion(w, r)
 	default:
 		return false

@@ -74,7 +73,7 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
 		cp.IgnoreFields = *datadogIgnoreFields
 	}

-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
 		httpserver.Errorf(w, r, "%s", err)
 		return true
 	}

@@ -102,7 +101,7 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
 var (
 	v2LogsRequestsTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/datadog/api/v2/logs"}`)
-	v2LogsRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
+	v2LogsRequestDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
 )

 // datadog message field has two formats:
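The `NewHistogram` → `NewSummary` swap above recurs for every ingestion endpoint in this compare. In the github.com/VictoriaMetrics/metrics package a histogram exports one `vmrange`-bucketed series per occupied bucket, while a summary exports a fixed set of pre-computed quantiles plus `_sum`/`_count`, so the switch trades bucket detail for fewer exported series. A minimal side-by-side sketch (metric names here are invented for illustration):

```go
package main

import (
	"os"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// Histogram: serializes vmrange-labeled bucket series plus _sum/_count.
	h := metrics.NewHistogram(`demo_histogram_duration_seconds`)
	// Summary: serializes quantile-labeled series (0.5, 0.9, 0.97, 0.99, 1
	// by default, per the package docs) plus _sum/_count.
	s := metrics.NewSummary(`demo_summary_duration_seconds`)

	start := time.Now()
	time.Sleep(10 * time.Millisecond)

	// Both types share the UpdateDuration helper used throughout this diff.
	h.UpdateDuration(start)
	s.UpdateDuration(start)

	// Dump both in Prometheus text exposition format to compare the output.
	metrics.WritePrometheus(os.Stdout, false)
}
```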
@@ -11,7 +11,6 @@ import (
 	"github.com/VictoriaMetrics/metrics"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"

@@ -31,36 +30,38 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 	// This header is needed for Logstash
 	w.Header().Set("X-Elastic-Product", "Elasticsearch")

-	if strings.HasPrefix(path, "/_ilm/policy") {
+	if strings.HasPrefix(path, "/insert/elasticsearch/_ilm/policy") {
 		// Return fake response for Elasticsearch ilm request.
 		fmt.Fprintf(w, `{}`)
 		return true
 	}
-	if strings.HasPrefix(path, "/_index_template") {
+	if strings.HasPrefix(path, "/insert/elasticsearch/_index_template") {
 		// Return fake response for Elasticsearch index template request.
 		fmt.Fprintf(w, `{}`)
 		return true
 	}
-	if strings.HasPrefix(path, "/_ingest") {
+	if strings.HasPrefix(path, "/insert/elasticsearch/_ingest") {
 		// Return fake response for Elasticsearch ingest pipeline request.
 		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/put-pipeline-api.html
 		fmt.Fprintf(w, `{}`)
 		return true
 	}
-	if strings.HasPrefix(path, "/_nodes") {
+	if strings.HasPrefix(path, "/insert/elasticsearch/_nodes") {
 		// Return fake response for Elasticsearch nodes discovery request.
 		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/cluster.html
 		fmt.Fprintf(w, `{}`)
 		return true
 	}
-	if strings.HasPrefix(path, "/logstash") || strings.HasPrefix(path, "/_logstash") {
+	if strings.HasPrefix(path, "/insert/elasticsearch/logstash") || strings.HasPrefix(path, "/insert/elasticsearch/_logstash") {
 		// Return fake response for Logstash APIs requests.
 		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/logstash-apis.html
 		fmt.Fprintf(w, `{}`)
 		return true
 	}
 	switch path {
-	case "/", "":
+	case "/insert/elasticsearch/", "/insert/elasticsearch":
 		// some clients may omit trailing slash
 		// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8353
 		switch r.Method {
 		case http.MethodGet:
 			// Return fake response for Elasticsearch ping request.

@@ -75,7 +76,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 		}

 		return true
-	case "/_license":
+	case "/insert/elasticsearch/_license":
 		// Return fake response for Elasticsearch license request.
 		fmt.Fprintf(w, `{
 			"license": {

@@ -86,7 +87,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 			}
 		}`)
 		return true
-	case "/_bulk":
+	case "/insert/elasticsearch/_bulk":
 		startTime := time.Now()
 		bulkRequestsTotal.Inc()

@@ -95,7 +96,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 			httpserver.Errorf(w, r, "%s", err)
 			return true
 		}
-		if err := vlstorage.CanWriteData(); err != nil {
+		if err := insertutil.CanWriteData(); err != nil {
 			httpserver.Errorf(w, r, "%s", err)
 			return true
 		}

@@ -128,7 +129,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 var (
 	bulkRequestsTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
-	bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
+	bulkRequestDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
 )

 func readBulkRequest(streamName string, r io.Reader, encoding string, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) (int, error) {
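These handlers now match the full request path, including the `/insert/elasticsearch` prefix, instead of a pre-stripped suffix; the public endpoints clients talk to are unchanged. For reference, a hypothetical client pushing two entries through the Elasticsearch-compatible bulk endpoint of a local VictoriaLogs instance (the default 9428 port and the `_time`/`_msg` field names are assumptions taken from the VictoriaLogs docs, not from this diff):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Elasticsearch bulk (NDJSON) format: an action line followed by the
	// document itself, each terminated by '\n'.
	body := `{"create":{}}
{"_time":"2025-01-01T00:00:00Z","_msg":"hello from the bulk API"}
{"create":{}}
{"_time":"2025-01-01T00:00:01Z","_msg":"second log line"}
`
	resp, err := http.Post(
		"http://localhost:9428/insert/elasticsearch/_bulk",
		"application/json",
		strings.NewReader(body),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```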
@@ -11,7 +11,6 @@ import (
 	"github.com/VictoriaMetrics/metrics"

-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"

@@ -36,6 +35,7 @@ type CommonParams struct {
 	DecolorizeFields []string
 	ExtraFields      []logstorage.Field

+	IsTimeFieldSet bool
 	Debug           bool
 	DebugRequestURI string
 	DebugRemoteAddr string

@@ -49,8 +49,10 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 		return nil, err
 	}

+	var isTimeFieldSet bool
 	timeFields := []string{"_time"}
 	if tfs := httputil.GetArray(r, "_time_field", "VL-Time-Field"); len(tfs) > 0 {
+		isTimeFieldSet = true
 		timeFields = tfs
 	}

@@ -86,9 +88,11 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 		IgnoreFields:     ignoreFields,
 		DecolorizeFields: decolorizeFields,
 		ExtraFields:      extraFields,
-		Debug:            debug,
-		DebugRequestURI:  debugRequestURI,
-		DebugRemoteAddr:  debugRemoteAddr,
+
+		IsTimeFieldSet:  isTimeFieldSet,
+		Debug:           debug,
+		DebugRequestURI: debugRequestURI,
+		DebugRemoteAddr: debugRemoteAddr,
 	}

 	return cp, nil

@@ -141,6 +145,29 @@ func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignore
 	return cp
 }

+// LogRowsStorage is an interface for ingesting logs into the storage.
+type LogRowsStorage interface {
+	// MustAddRows must add lr to the underlying storage.
+	MustAddRows(lr *logstorage.LogRows)
+
+	// CanWriteData must returns non-nil error if logs cannot be added to the underlying storage.
+	CanWriteData() error
+}
+
+var logRowsStorage LogRowsStorage
+
+// SetLogRowsStorage sets the storage for writing data to via LogMessageProcessor.
+//
+// This function must be called before using LogMessageProcessor and CanWriteData from this package.
+func SetLogRowsStorage(storage LogRowsStorage) {
+	logRowsStorage = storage
+}
+
+// CanWriteData returns non-nil error if data cannot be written to the underlying storage.
+func CanWriteData() error {
+	return logRowsStorage.CanWriteData()
+}
+
 // LogMessageProcessor is an interface for log message processors.
 type LogMessageProcessor interface {
 	// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.

@@ -264,7 +291,7 @@ func (lmp *logMessageProcessor) AddInsertRow(r *logstorage.InsertRow) {
 // flushLocked must be called under locked lmp.mu.
 func (lmp *logMessageProcessor) flushLocked() {
 	lmp.lastFlushTime = time.Now()
-	vlstorage.MustAddRows(lmp.lr)
+	logRowsStorage.MustAddRows(lmp.lr)
 	lmp.lr.ResetKeepSettings()
 }
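The added `LogRowsStorage` interface plus `SetLogRowsStorage` is a setter-injection seam: `insertutil` no longer imports `vlstorage`, and the binary's `main()` wires the concrete storage in at startup (see the earlier `main()` hunk). The same seam admits a test double — a sketch, with the `FakeStorage` type invented here purely for illustration:

```go
package insertutil_test

import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// FakeStorage is a hypothetical test double; it counts flushes instead
// of writing rows anywhere.
type FakeStorage struct {
	AddRowsCalls int
}

func (fs *FakeStorage) MustAddRows(lr *logstorage.LogRows) {
	fs.AddRowsCalls++
}

func (fs *FakeStorage) CanWriteData() error {
	// Always writable in tests; return an error here to exercise the
	// handlers' error paths instead.
	return nil
}

func Example() {
	fs := &FakeStorage{}
	// Insert handlers now flush into the fake instead of vlstorage.
	insertutil.SetLogRowsStorage(fs)
}
```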
@@ -56,6 +56,7 @@ func NewLineReader(name string, r io.Reader) *LineReader {
 // Check for Err in this case.
 func (lr *LineReader) NextLine() bool {
 	for {
+		lr.Line = nil
 		if lr.bufOffset >= len(lr.buf) {
 			if lr.err != nil || lr.eofReached {
 				return false

@@ -101,9 +102,11 @@ func (lr *LineReader) readMoreData() bool {
 	bufLen := len(lr.buf)
 	if bufLen >= MaxLineSizeBytes.IntN() {
-		logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; line contents=%q", lr.name, MaxLineSizeBytes.IntN(), lr.buf)
+		ok, skippedBytes := lr.skipUntilNextLine()
+		logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; total skipped bytes=%d",
+			lr.name, MaxLineSizeBytes.IntN(), skippedBytes)
 		tooLongLinesSkipped.Inc()
-		return lr.skipUntilNextLine()
+		return ok
 	}

 	lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())

@@ -121,26 +124,35 @@ func (lr *LineReader) readMoreData() bool {
 var tooLongLinesSkipped = metrics.NewCounter("vl_too_long_lines_skipped_total")

-func (lr *LineReader) skipUntilNextLine() bool {
+func (lr *LineReader) skipUntilNextLine() (bool, int) {
+	// Initialize skipped bytes count with MaxLineSizeBytes because
+	// we've already read that many bytes without encountering a newline,
+	// indicating the line size exceeds the maximum allowed limit.
+	skipSizeBytes := MaxLineSizeBytes.IntN()
+
 	for {
 		lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
 		n, err := lr.r.Read(lr.buf)
+		skipSizeBytes += n
 		lr.buf = lr.buf[:n]
 		if err != nil {
 			if errors.Is(err, io.EOF) {
 				lr.eofReached = true
 				lr.buf = lr.buf[:0]
-				return true
+				return true, skipSizeBytes
 			}
 			lr.err = fmt.Errorf("cannot skip the current line: %s", err)
-			return false
+			return false, skipSizeBytes
 		}
 		if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
+			// Include skipped bytes before \n, including the newline itself.
+			skipSizeBytes += n + 1 - len(lr.buf)
 			// Include \n in the buf, so too long line is replaced with an empty line.
 			// This is needed for maintaining synchorinzation consistency between lines
 			// in protocols such as Elasticsearch bulk import.
 			lr.buf = append(lr.buf[:0], lr.buf[n:]...)
-			return true
+			return true, skipSizeBytes
 		}
 	}
 }
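The skipped-byte arithmetic is easy to misread: the counter starts at `MaxLineSizeBytes` (those bytes were already consumed before `skipUntilNextLine` is called), grows by every subsequent read, and is then corrected down by `n + 1 - len(lr.buf)` so that bytes after the terminating `\n` are not counted. A toy re-enactment with a hypothetical 10-byte limit:

```go
package main

import "fmt"

func main() {
	const maxLineSize = 10

	// The caller already read maxLineSize bytes without seeing '\n',
	// so those are counted as skipped up front.
	skipped := maxLineSize

	// First simulated read: 10 more bytes, still no newline.
	skipped += 10 // 20

	// Second simulated read: 10 bytes with '\n' at index 3.
	buf := []byte("abc\nrest..")
	skipped += len(buf) // 30
	idx := 3            // bytes.IndexByte(buf, '\n')

	// Only bytes up to and including the newline belong to the skipped
	// line; subtract the part of the buffer after '\n'.
	skipped += idx + 1 - len(buf) // 30 + 4 - 10 = 24

	fmt.Println(skipped) // 24 bytes of the over-long line were discarded
}
```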
@@ -24,6 +24,9 @@ func TestLineReader_Success(t *testing.T) {
 	if lr.NextLine() {
 		t.Fatalf("expecting error on the second call to NextLine()")
 	}
+	if len(lr.Line) > 0 {
+		t.Fatalf("unexpected non-empty line after failed NextLine(): %q", lr.Line)
+	}
 	if !reflect.DeepEqual(lines, linesExpected) {
 		t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
 	}
@@ -38,7 +38,10 @@ func ExtractTimestampFromFields(timeFields []string, fields []logstorage.Field)
 }

 func parseTimestamp(s string) (int64, error) {
-	if s == "" || s == "0" {
+	// "-" is a nil timestamp value, if the syslog
+	// application is incapable of obtaining system time
+	// https://datatracker.ietf.org/doc/html/rfc5424#section-6.2.3
+	if s == "" || s == "0" || s == "-" {
 		return time.Now().UnixNano(), nil
 	}
 	if len(s) <= len("YYYY") || s[len("YYYY")] != '-' {

@@ -133,6 +133,33 @@ func TestExtractTimestampFromFields_Success(t *testing.T) {
 	}, 1718773640000000000)
 }

+func TestExtractTimestampFromFields_Now(t *testing.T) {
+	f := func(timeField string, fields []logstorage.Field) {
+		t.Helper()
+
+		nsecs, err := ExtractTimestampFromFields([]string{timeField}, fields)
+		if err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+		if nsecs < 1 {
+			t.Fatalf("expected generated timestamp, got error: %s", err)
+		}
+	}
+
+	// RFC5424 allows `-` for nil timestamp (log ingestion time)
+	f("time", []logstorage.Field{
+		{Name: "time", Value: "-"},
+	})
+
+	f("time", []logstorage.Field{
+		{Name: "time", Value: ""},
+	})
+
+	f("time", []logstorage.Field{
+		{Name: "time", Value: "0"},
+	})
+}
+
 func TestExtractTimestampFromFields_Error(t *testing.T) {
 	f := func(s string) {
 		t.Helper()
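For context, RFC 5424 section 6.2.3 defines `-` as the NILVALUE a syslog sender emits when it cannot obtain system time; with the change above such rows receive the ingestion timestamp instead of failing to parse. A sample frame (contents invented for illustration):

```go
package main

import "fmt"

func main() {
	// RFC 5424 layout: <PRI>VERSION TIMESTAMP HOSTNAME APP-NAME PROCID MSGID SD MSG.
	// Here TIMESTAMP is the nil value "-": VictoriaLogs now substitutes the
	// ingestion time for it, the same as for "" and "0".
	const frame = `<165>1 - router1 fw 1234 ID47 - link flap detected`
	fmt.Println(frame)
}
```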
@@ -1,7 +1,6 @@
 package internalinsert

 import (
-	"flag"
 	"fmt"
 	"net/http"
 	"time"

@@ -9,7 +8,6 @@ import (
 	"github.com/VictoriaMetrics/metrics"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netinsert"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"

@@ -18,17 +16,11 @@ import (
 )

 var (
-	disableInsert  = flag.Bool("internalinsert.disable", false, "Whether to disable /internal/insert HTTP endpoint")
 	maxRequestSize = flagutil.NewBytes("internalinsert.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single request, which can be accepted at /internal/insert HTTP endpoint")
 )

 // RequestHandler processes /internal/insert requests.
 func RequestHandler(w http.ResponseWriter, r *http.Request) {
-	if *disableInsert {
-		httpserver.Errorf(w, r, "requests to /internal/insert are disabled with -internalinsert.disable command-line flag")
-		return
-	}
-
 	startTime := time.Now()
 	if r.Method != "POST" {
 		w.WriteHeader(http.StatusMethodNotAllowed)

@@ -47,7 +39,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
 		httpserver.Errorf(w, r, "%s", err)
 		return
 	}
-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
 		httpserver.Errorf(w, r, "%s", err)
 		return
 	}

@@ -92,5 +84,5 @@ var (
 	requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/internal/insert"}`)
 	errorsTotal   = metrics.NewCounter(`vl_http_errors_total{path="/internal/insert"}`)

-	requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/internal/insert"}`)
+	requestDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/internal/insert"}`)
 )
@@ -3,29 +3,30 @@ package journald
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"flag"
 	"fmt"
+	"io"
 	"net/http"
-	"regexp"
 	"slices"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
 	"github.com/VictoriaMetrics/metrics"
 )

 // See https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
-const journaldEntryMaxNameLen = 64
-
-var allowedJournaldEntryNameChars = regexp.MustCompile(`^[A-Z_][A-Z0-9_]*`)
+const maxFieldNameLen = 64

 var (
 	journaldStreamFields = flagutil.NewArrayString("journald.streamFields", "Comma-separated list of fields to use as log stream fields for logs ingested over journald protocol. "+

@@ -36,9 +37,7 @@ var (
 		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/#time-field")
 	journaldTenantID = flag.String("journald.tenantID", "0:0", "TenantID for logs ingested via the Journald endpoint. "+
 		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/#multitenancy")
-	journaldIncludeEntryMetadata = flag.Bool("journald.includeEntryMetadata", false, "Include journal entry fields, which with double underscores.")
-
-	maxRequestSize = flagutil.NewBytes("journald.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single journald request")
+	journaldIncludeEntryMetadata = flag.Bool("journald.includeEntryMetadata", false, "Include Journald fields with double underscore prefixes")
 )

 func getCommonParams(r *http.Request) (*insertutil.CommonParams, error) {

@@ -53,11 +52,12 @@ func getCommonParams(r *http.Request) (*insertutil.CommonParams, error) {
 		}
 		cp.TenantID = tenantID
 	}
-	if len(cp.TimeFields) == 0 {
+
+	if !cp.IsTimeFieldSet {
 		cp.TimeFields = []string{*journaldTimeField}
 	}
 	if len(cp.StreamFields) == 0 {
-		cp.StreamFields = *journaldStreamFields
+		cp.StreamFields = getStreamFields()
 	}
 	if len(cp.IgnoreFields) == 0 {
 		cp.IgnoreFields = *journaldIgnoreFields

@@ -66,10 +66,23 @@ func getCommonParams(r *http.Request) (*insertutil.CommonParams, error) {
 	return cp, nil
 }

+func getStreamFields() []string {
+	if len(*journaldStreamFields) > 0 {
+		return *journaldStreamFields
+	}
+	return defaultStreamFields
+}
+
+var defaultStreamFields = []string{
+	"_MACHINE_ID",
+	"_HOSTNAME",
+	"_SYSTEMD_UNIT",
+}
+
 // RequestHandler processes Journald Export insert requests
 func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 	switch path {
-	case "/upload":
+	case "/insert/journald/upload":
 		if r.Header.Get("Content-Type") != "application/vnd.fdo.journal" {
 			httpserver.Errorf(w, r, "only application/vnd.fdo.journal encoding is supported for Journald")
 			return true

@@ -84,7 +97,7 @@
 // handleJournald parses Journal binary entries
 func handleJournald(r *http.Request, w http.ResponseWriter) {
 	startTime := time.Now()
-	requestsJournaldTotal.Inc()
+	requestsTotal.Inc()

 	cp, err := getCommonParams(r)
 	if err != nil {

@@ -93,19 +106,25 @@
 		return
 	}

-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
 		errorsTotal.Inc()
 		httpserver.Errorf(w, r, "%s", err)
 		return
 	}

 	encoding := r.Header.Get("Content-Encoding")
-	err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
-		lmp := cp.NewLogMessageProcessor("journald", false)
-		err := parseJournaldRequest(data, lmp, cp)
-		lmp.MustClose()
-		return err
-	})
+	reader, err := protoparserutil.GetUncompressedReader(r.Body, encoding)
+	if err != nil {
+		errorsTotal.Inc()
+		logger.Errorf("cannot decode journald request: %s", err)
+		return
+	}
+
+	lmp := cp.NewLogMessageProcessor("journald", true)
+	streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
+	err = processStreamInternal(streamName, reader, lmp, cp)
+	protoparserutil.PutUncompressedReader(reader)
+	lmp.MustClose()
 	if err != nil {
 		errorsTotal.Inc()
 		httpserver.Errorf(w, r, "cannot read journald protocol data: %s", err)

@@ -117,102 +136,185 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
 	// See https://github.com/systemd/systemd/pull/34822
 	w.Header().Set("Accept-Encoding", "zstd")

-	// update requestJournaldDuration only for successfully parsed requests
-	// There is no need in updating requestJournaldDuration for request errors,
+	// update requestDuration only for successfully parsed requests
+	// There is no need in updating requestDuration for request errors,
 	// since their timings are usually much smaller than the timing for successful request parsing.
-	requestJournaldDuration.UpdateDuration(startTime)
+	requestDuration.UpdateDuration(startTime)
 }

 var (
-	requestsJournaldTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload"}`)
-	errorsTotal             = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload"}`)
-
-	requestJournaldDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/journald/upload"}`)
+	requestsTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload"}`)
+	errorsTotal     = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload"}`)
+	requestDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/journald/upload"}`)
 )

+func processStreamInternal(streamName string, r io.Reader, lmp insertutil.LogMessageProcessor, cp *insertutil.CommonParams) error {
+	wcr := writeconcurrencylimiter.GetReader(r)
+	defer writeconcurrencylimiter.PutReader(wcr)
+
+	lr := insertutil.NewLineReader("journald", wcr)
+
+	for {
+		err := readJournaldLogEntry(streamName, lr, lmp, cp)
+		wcr.DecConcurrency()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return nil
+			}
+			return fmt.Errorf("%s: %w", streamName, err)
+		}
+	}
+}
+
+type fieldsBuf struct {
+	fields []logstorage.Field
+
+	buf   []byte
+	name  []byte
+	value []byte
+}
+
+func (fb *fieldsBuf) reset() {
+	fb.fields = fb.fields[:0]
+	fb.buf = fb.buf[:0]
+	fb.name = fb.name[:0]
+	fb.value = fb.value[:0]
+}
+
+func (fb *fieldsBuf) addField(name, value string) {
+	bufLen := len(fb.buf)
+	fb.buf = append(fb.buf, name...)
+	nameCopy := bytesutil.ToUnsafeString(fb.buf[bufLen:])
+
+	bufLen = len(fb.buf)
+	fb.buf = append(fb.buf, value...)
+	valueCopy := bytesutil.ToUnsafeString(fb.buf[bufLen:])
+
+	fb.fields = append(fb.fields, logstorage.Field{
+		Name:  nameCopy,
+		Value: valueCopy,
+	})
+}
+
+func (fb *fieldsBuf) appendNextLineToValue(lr *insertutil.LineReader) error {
+	if !lr.NextLine() {
+		if err := lr.Err(); err != nil {
+			return err
+		}
+		return fmt.Errorf("unexpected end of stream")
+	}
+	fb.value = append(fb.value, lr.Line...)
+	fb.value = append(fb.value, '\n')
+	return nil
+}
+
+func getFieldsBuf() *fieldsBuf {
+	fb := fieldsBufPool.Get()
+	if fb == nil {
+		return &fieldsBuf{}
+	}
+	return fb.(*fieldsBuf)
+}
+
+func putFieldsBuf(fb *fieldsBuf) {
+	fb.reset()
+	fieldsBufPool.Put(fb)
+}
+
+var fieldsBufPool sync.Pool
+
+// readJournaldLogEntry reads a single log entry in Journald format.
+//
+// See https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
-func parseJournaldRequest(data []byte, lmp insertutil.LogMessageProcessor, cp *insertutil.CommonParams) error {
-	var fields []logstorage.Field
+func readJournaldLogEntry(streamName string, lr *insertutil.LineReader, lmp insertutil.LogMessageProcessor, cp *insertutil.CommonParams) error {
 	var ts int64
-	var size uint64
 	var name, value string
-	var line []byte

-	currentTimestamp := time.Now().UnixNano()
+	fb := getFieldsBuf()
+	defer putFieldsBuf(fb)

-	for len(data) > 0 {
-		idx := bytes.IndexByte(data, '\n')
-		switch {
-		case idx > 0:
-			// process fields
-			line = data[:idx]
-			data = data[idx+1:]
-		case idx == 0:
-			// next message or end of file
-			// double new line is a separator for the next message
-			if len(fields) > 0 {
+	if !lr.NextLine() {
+		if err := lr.Err(); err != nil {
+			return fmt.Errorf("cannot read the first field: %w", err)
+		}
+		return io.EOF
+	}
+
+	for {
+		line := lr.Line
+		if len(line) == 0 {
+			// The end of a single log entry. Write it to the storage
+			if len(fb.fields) > 0 {
 				if ts == 0 {
-					ts = currentTimestamp
+					ts = time.Now().UnixNano()
 				}
-				lmp.AddRow(ts, fields, nil)
-				fields = fields[:0]
+				lmp.AddRow(ts, fb.fields, nil)
 			}
-			// skip newline separator
-			data = data[1:]
-			continue
-		case idx < 0:
-			return fmt.Errorf("missing new line separator, unread data left=%d", len(data))
+			return nil
 		}

-		idx = bytes.IndexByte(line, '=')
-		// could b either e key=value\n pair
-		// or just key\n
-		// with binary data at the buffer
-		if idx > 0 {
-			name = bytesutil.ToUnsafeString(line[:idx])
-			value = bytesutil.ToUnsafeString(line[idx+1:])
+		// line could be either "key=value" or "key"
+		// according to https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
+		if n := bytes.IndexByte(line, '='); n >= 0 {
+			// line = "key=value"
+			fb.name = append(fb.name[:0], line[:n]...)
+			name = bytesutil.ToUnsafeString(fb.name)
+
+			fb.value = append(fb.value[:0], line[n+1:]...)
+			value = bytesutil.ToUnsafeString(fb.value)
 		} else {
-			name = bytesutil.ToUnsafeString(line)
-			if len(data) == 0 {
-				return fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
+			// line = "key"
+			// Parse the binary-encoded value from the next line according to "key\n<little_endian_size_64>value\n" format
+			fb.name = append(fb.name[:0], line...)
+			name = bytesutil.ToUnsafeString(fb.name)
+
+			fb.value = fb.value[:0]
+			for len(fb.value) < 8 {
+				if err := fb.appendNextLineToValue(lr); err != nil {
+					return fmt.Errorf("cannot read value size: %w", err)
+				}
 			}
-			// size of binary data encoded as le i64 at the begging
-			idx, err := binary.Decode(data, binary.LittleEndian, &size)
-			if err != nil {
-				return fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
+			size := binary.LittleEndian.Uint64(fb.value[:8])
+
+			// Read the value until its lenth exceeds the given size - the last char in the read value will always be '\n'
+			// because it is appended by appendNextLineToValue().
+			for uint64(len(fb.value[8:])) <= size {
+				if err := fb.appendNextLineToValue(lr); err != nil {
+					return fmt.Errorf("cannot read %q value with size %d bytes; read only %d bytes: %w", fb.name, size, len(fb.value[8:]), err)
+				}
 			}
-			// skip binary data size
-			data = data[idx:]
-			if size == 0 {
-				return fmt.Errorf("unexpected zero binary data size decoded %d", size)
+			value = bytesutil.ToUnsafeString(fb.value[8 : len(fb.value)-1])
+			if uint64(len(value)) != size {
+				return fmt.Errorf("unexpected %q value size; got %d bytes; want %d bytes; value: %q", fb.name, len(value), size, value)
 			}
-			if int(size) > len(data) {
-				return fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
-			}
-			value = bytesutil.ToUnsafeString(data[:size])
-			data = data[int(size):]
-			// binary data must has new line separator for the new line or next field
-			if len(data) == 0 {
-				return fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
-			}
-			lastB := data[0]
-			if lastB != '\n' {
-				return fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
-			}
-			data = data[1:]
 		}
-		if len(name) > journaldEntryMaxNameLen {
-			return fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
+
+		if !lr.NextLine() {
+			if err := lr.Err(); err != nil {
+				return fmt.Errorf("cannot read the next log field: %w", err)
+			}
+			// add the last log field below before the return
 		}
-		if !allowedJournaldEntryNameChars.MatchString(name) {
-			return fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
+
+		if len(name) > maxFieldNameLen {
+			logger.Errorf("%s: field name size should not exceed %d bytes; got %d bytes: %q; skipping this field", streamName, maxFieldNameLen, len(name), name)
+			continue
+		}
+		if !isValidFieldName(name) {
+			logger.Errorf("%s: invalid field name %q; it must consist of `A-Z0-9_` chars and must start from non-digit char; skipping this field", streamName, name)
+			continue
 		}

 		if slices.Contains(cp.TimeFields, name) {
-			n, err := strconv.ParseInt(value, 10, 64)
+			t, err := strconv.ParseInt(value, 10, 64)
 			if err != nil {
-				return fmt.Errorf("failed to parse Journald timestamp, %w", err)
+				logger.Errorf("%s: cannot parse timestamp from the field %q: %w; using the current timestamp", streamName, name, err)
+				ts = 0
+			} else {
+				// Convert journald microsecond timestamp to nanoseconds
+				ts = t * 1e3
 			}
-			ts = n * 1e3
 			continue
 		}

@@ -220,18 +322,56 @@ func parseJournaldRequest(data []byte, lmp insertutil.LogMessageProcessor, cp *i
 			name = "_msg"
 		}

-		if *journaldIncludeEntryMetadata || !strings.HasPrefix(name, "__") {
-			fields = append(fields, logstorage.Field{
-				Name:  name,
-				Value: value,
-			})
+		if name == "PRIORITY" {
+			priority := journaldPriorityToLevel(value)
+			fb.addField("level", priority)
+		}
+
+		if !strings.HasPrefix(name, "__") || *journaldIncludeEntryMetadata {
+			fb.addField(name, value)
 		}
 	}
-	if len(fields) > 0 {
-		if ts == 0 {
-			ts = currentTimestamp
-		}
-		lmp.AddRow(ts, fields, nil)
-	}
-	return nil
 }

+func journaldPriorityToLevel(priority string) string {
+	// See https://wiki.archlinux.org/title/Systemd/Journal#Priority_level
+	// and https://grafana.com/docs/grafana/latest/explore/logs-integration/#log-level
+	switch priority {
+	case "0":
+		return "emerg"
+	case "1":
+		return "alert"
+	case "2":
+		return "critical"
+	case "3":
+		return "error"
+	case "4":
+		return "warning"
+	case "5":
+		return "notice"
+	case "6":
+		return "info"
+	case "7":
+		return "debug"
+	default:
+		return priority
+	}
+}
+
+func isValidFieldName(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	c := s[0]
+	if !(c >= 'A' && c <= 'Z' || c == '_') {
+		return false
+	}
+
+	for i := 1; i < len(s); i++ {
+		c := s[i]
+		if !(c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '_') {
+			return false
+		}
+	}
+	return true
+}
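The rewritten parser consumes the systemd Journal Export Format as a stream instead of a fully buffered request body. A small sketch that assembles one entry in that wire format — plain `KEY=value` lines, a length-prefixed binary `MESSAGE`, and the blank-line terminator — mirroring the test inputs below:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One Journal Export Format entry, as consumed by readJournaldLogEntry:
	// plain fields are "KEY=value\n" lines; a binary field is "KEY\n"
	// followed by a little-endian uint64 length, the raw value (which may
	// itself contain '\n'), and a trailing '\n'. A blank line ends the entry.
	msg := []byte("foo\nbar")
	var size [8]byte
	binary.LittleEndian.PutUint64(size[:], uint64(len(msg)))

	entry := []byte("__REALTIME_TIMESTAMP=1729698775704404\n")
	entry = append(entry, "PRIORITY=3\n"...) // mapped to level="error"
	entry = append(entry, "MESSAGE\n"...)    // binary field header
	entry = append(entry, size[:]...)        // value length
	entry = append(entry, msg...)            // raw value with embedded '\n'
	entry = append(entry, '\n')              // terminates the binary value
	entry = append(entry, '\n')              // blank line terminates the entry

	fmt.Printf("%q\n", entry)
}
```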
@@ -1,20 +1,81 @@
 package journald

 import (
+	"bytes"
+	"net/http"
 	"testing"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
 )

-func TestPushJournaldOk(t *testing.T) {
+func TestIsValidFieldName(t *testing.T) {
+	f := func(name string, resultExpected bool) {
+		t.Helper()
+
+		result := isValidFieldName(name)
+		if result != resultExpected {
+			t.Fatalf("unexpected result for isValidJournaldFieldName(%q); got %v; want %v", name, result, resultExpected)
+		}
+	}
+
+	f("", false)
+	f("a", false)
+	f("1", false)
+	f("_", true)
+	f("X", true)
+	f("Xa", false)
+	f("X_343", true)
+	f("X_0123456789_AZ", true)
+	f("SDDFD sdf", false)
+}
+
+func TestGetCommonParams_TimeField(t *testing.T) {
+	f := func(timeFieldHeader, expectedTimeField string) {
+		t.Helper()
+
+		req, err := http.NewRequest("POST", "/insert/journald/upload", nil)
+		if err != nil {
+			t.Fatalf("unexpected error creating request: %s", err)
+		}
+
+		if timeFieldHeader != "" {
+			req.Header.Set("VL-Time-Field", timeFieldHeader)
+		}
+
+		cp, err := getCommonParams(req)
+		if err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+
+		if len(cp.TimeFields) != 1 || cp.TimeFields[0] != expectedTimeField {
+			t.Fatalf("unexpected TimeFields; got %v; want [%s]", cp.TimeFields, expectedTimeField)
+		}
+	}
+
+	// Test default behavior - when no custom time field is specified, journald uses __REALTIME_TIMESTAMP
+	f("", "__REALTIME_TIMESTAMP")
+
+	// Test custom time field - when a custom time field is specified via HTTP header, it's respected
+	f("custom_time", "custom_time")
+}
+
+func TestPushJournald_Success(t *testing.T) {
 	f := func(src string, timestampsExpected []int64, resultExpected string) {
 		t.Helper()

 		tlp := &insertutil.TestLogMessageProcessor{}
-		cp := &insertutil.CommonParams{
-			TimeFields: []string{"__REALTIME_TIMESTAMP"},
-			MsgFields:  []string{"MESSAGE"},
-		}
-		if err := parseJournaldRequest([]byte(src), tlp, cp); err != nil {
+
+		r, err := http.NewRequest("GET", "https://foo.bar/baz", nil)
+		if err != nil {
+			t.Fatalf("cannot create request: %s", err)
+		}
+		cp, err := getCommonParams(r)
+		if err != nil {
+			t.Fatalf("cannot create commonParams: %s", err)
+		}
+
+		buf := bytes.NewBufferString(src)
+		if err := processStreamInternal("test", buf, tlp, cp); err != nil {
 			t.Fatalf("unexpected error: %s", err)
 		}

@@ -22,16 +83,17 @@ func TestPushJournaldOk(t *testing.T) {
 			t.Fatal(err)
 		}
 	}
+
 	// Single event
-	f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n",
+	f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n\n",
 		[]int64{91723819283000},
 		"{\"_msg\":\"Test message\"}",
 	)

 	// Multiple events
-	f("__REALTIME_TIMESTAMP=91723819283\nMESSAGE=Test message\n\n__REALTIME_TIMESTAMP=91723819284\nMESSAGE=Test message2\n",
+	f("__REALTIME_TIMESTAMP=91723819283\nPRIORITY=3\nMESSAGE=Test message\n\n__REALTIME_TIMESTAMP=91723819284\nMESSAGE=Test message2\n",
 		[]int64{91723819283000, 91723819284000},
-		"{\"_msg\":\"Test message\"}\n{\"_msg\":\"Test message2\"}",
+		"{\"level\":\"error\",\"PRIORITY\":\"3\",\"_msg\":\"Test message\"}\n{\"_msg\":\"Test message2\"}",
 	)

 	// Parse binary data
@@ -39,30 +101,66 @@ func TestPushJournaldOk(t *testing.T) {
 		[]int64{1729698775704404000},
 		"{\"E\":\"JobStateChanged\",\"_BOOT_ID\":\"f778b6e2f7584a77b991a2366612a7b5\",\"_UID\":\"0\",\"_GID\":\"0\",\"_MACHINE_ID\":\"a4a970370c30a925df02a13c67167847\",\"_HOSTNAME\":\"ecd5e4555787\",\"_RUNTIME_SCOPE\":\"system\",\"_TRANSPORT\":\"journal\",\"_CAP_EFFECTIVE\":\"1ffffffffff\",\"_SYSTEMD_CGROUP\":\"/init.scope\",\"_SYSTEMD_UNIT\":\"init.scope\",\"_SYSTEMD_SLICE\":\"-.slice\",\"CODE_FILE\":\"\\u003cstdin>\",\"CODE_LINE\":\"1\",\"CODE_FUNC\":\"\\u003cmodule>\",\"SYSLOG_IDENTIFIER\":\"python3\",\"_COMM\":\"python3\",\"_EXE\":\"/usr/bin/python3.12\",\"_CMDLINE\":\"python3\",\"_msg\":\"foo\\nbar\\n\\n\\nasda\\nasda\",\"_PID\":\"2763\",\"_SOURCE_REALTIME_TIMESTAMP\":\"1729698775704375\"}",
 	)

+	// Parse binary data with trailing newline
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x14\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasda\nasda\n\n_PID=2763\n\n",
+		[]int64{1729698775704404000},
+		`{"_CMDLINE":"python3","_msg":"foo\nbar\n\n\nasda\nasda\n","_PID":"2763"}`,
+	)
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x00\x00\x00\x00\x00\x00\x00\x00\n_PID=2763\n\n",
+		[]int64{1729698775704404000},
+		`{"_CMDLINE":"python3","_PID":"2763"}`,
+	)
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x0A\x00\x00\x00\x00\x00\x00\x00123456789\n\n_PID=2763\n\n",
+		[]int64{1729698775704404000},
+		`{"_CMDLINE":"python3","_msg":"123456789\n","_PID":"2763"}`,
+	)
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x0A\x00\x00\x00\x00\x00\x00\x001234567890\n_PID=2763\n\n",
+		[]int64{1729698775704404000},
+		`{"_CMDLINE":"python3","_msg":"1234567890","_PID":"2763"}`,
+	)
+
+	// Empty field name must be ignored
+	f("__REALTIME_TIMESTAMP=91723819283\na=b\n=Test message", nil, "")
+	f("__REALTIME_TIMESTAMP=91723819284\nMESSAGE=Test message2\n\n__REALTIME_TIMESTAMP=91723819283\n=Test message\n", []int64{91723819284000}, `{"_msg":"Test message2"}`)
+
+	// field name starting with number must be ignored
+	f("__REALTIME_TIMESTAMP=91723819283\n1incorrect=Test message\n\n__REALTIME_TIMESTAMP=91723819284\nMESSAGE=Test message2\n\n", []int64{91723819284000}, `{"_msg":"Test message2"}`)
+
+	// field name exceeding 64 bytes limit must be ignored
+	f("__REALTIME_TIMESTAMP=91723819283\ntoolooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongcorrecooooooooooooong=Test message\n", nil, "")
+
+	// field name with invalid chars must be ignored
+	f("__REALTIME_TIMESTAMP=91723819283\nbadC!@$!@$as=Test message\n", nil, "")
 }

 func TestPushJournald_Failure(t *testing.T) {
 	f := func(data string) {
 		t.Helper()

 		tlp := &insertutil.TestLogMessageProcessor{}
-		cp := &insertutil.CommonParams{
-			TimeFields: []string{"__REALTIME_TIMESTAMP"},
-			MsgFields:  []string{"MESSAGE"},
-		}
-		if err := parseJournaldRequest([]byte(data), tlp, cp); err == nil {
-			t.Fatalf("expected non nil error")
+
+		r, err := http.NewRequest("GET", "https://foo.bar/baz", nil)
+		if err != nil {
+			t.Fatalf("cannot create request: %s", err)
+		}
+		cp, err := getCommonParams(r)
+		if err != nil {
+			t.Fatalf("cannot create commonParams: %s", err)
+		}
+
+		buf := bytes.NewBufferString(data)
+		if err := processStreamInternal("test", buf, tlp, cp); err == nil {
+			t.Fatalf("expecting non-nil error")
 		}
 	}
-	// missing new line terminator for binary encoded message
-	f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasdaasda2")
-	// missing new line terminator
-	f("__REALTIME_TIMESTAMP=91723819283\n=Test message")
-	// empty field name
-	f("__REALTIME_TIMESTAMP=91723819283\n=Test message\n")
-	// field name starting with number
-	f("__REALTIME_TIMESTAMP=91723819283\n1incorrect=Test message\n")
-	// field name exceeds 64 limit
-	f("__REALTIME_TIMESTAMP=91723819283\ntoolooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooongcorrecooooooooooooong=Test message\n")
-	// Only allow A-Z0-9 and '_'
-	f("__REALTIME_TIMESTAMP=91723819283\nbadC!@$!@$as=Test message\n")

+	// too short binary encoded message
+	f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasdaasd")
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x00\x00\x00\x00\x00\x00\x00\x00_PID=2763\n\n")
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x0A\x00\x00\x00\x00\x00\x00\x001234567890_PID=2763\n\n")
+	f("__REALTIME_TIMESTAMP=1729698775704404\n_CMDLINE=python3\nMESSAGE\n\x0A\x00\x00\x00\x00\x00\x00\x00123456789\n_PID=2763\n\n")

 	// too long binary encoded message
 	f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasdaasdakljlsfd")
 }
82  app/vlinsert/journald/journald_timing_test.go (new file)

@@ -0,0 +1,82 @@
+package journald
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
+)
+
+func BenchmarkIsValidFieldName(b *testing.B) {
+	b.ReportAllocs()
+	b.SetBytes(int64(len(benchmarkFields)))
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			for _, field := range benchmarkFields {
+				if !isValidFieldName(field) {
+					panic(fmt.Errorf("cannot validate field %q", field))
+				}
+			}
+		}
+	})
+}
+
+var benchmarkFields = strings.Split(
+	"E,_BOOT_ID,_UID,_GID,_MACHINE_ID,_HOSTNAME,_RUNTIME_SCOPE,_TRANSPORT,_CAP_EFFECTIVE,_SYSTEMD_CGROUP,_SYSTEMD_UNIT,"+
+		"_SYSTEMD_SLICE,CODE_FILE,CODE_LINE,CODE_FUNC,SYSLOG_IDENTIFIER,_COMM,_EXE,_CMDLINE,MESSAGE,_PID,_SOURCE_REALTIME_TIMESTAMP,_REALTIME_TIMESTAMP",
+	",")
+
+func BenchmarkPushJournaldPerformance(b *testing.B) {
+	cp := &insertutil.CommonParams{
+		TimeFields: []string{"__REALTIME_TIMESTAMP"},
+		MsgFields:  []string{"MESSAGE"},
+	}
+	const dataChunkSize = 1024 * 1024
+
+	data := generateJournaldData(dataChunkSize)
+
+	b.ReportAllocs()
+	b.SetBytes(int64(len(data)))
+	b.RunParallel(func(pb *testing.PB) {
+		r := &bytes.Reader{}
+		blp := &insertutil.BenchmarkLogMessageProcessor{}
+		for pb.Next() {
+			r.Reset(data)
+			if err := processStreamInternal("performance_test", r, blp, cp); err != nil {
+				panic(fmt.Errorf("unexpected error: %w", err))
+			}
+		}
+	})
+}
+
+func generateJournaldData(size int) []byte {
+	var buf []byte
+	timestamp := time.Now().UnixMicro()
+	binaryMsg := []byte("binary message data for performance test")
+	var sizeBuf [8]byte
+
+	for len(buf) < size {
+		timestamp++
+
+		var entry string
+		// Generate a mix of simple and binary messages
+		if timestamp%10 == 0 {
+			// Generate binary message
+			binary.LittleEndian.PutUint64(sizeBuf[:], uint64(len(binaryMsg)))
+			entry = fmt.Sprintf("__REALTIME_TIMESTAMP=%d\nMESSAGE\n%s%s\n\n",
+				timestamp,
+				sizeBuf[:],
+				binaryMsg,
+			)
+		} else {
+			// Generate simple message
+			entry = fmt.Sprintf("__REALTIME_TIMESTAMP=%d\nMESSAGE=Performance test message %d\n\n", timestamp, timestamp)
+		}
+		buf = append(buf, entry...)
+	}
+	return buf
+}
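The new timing test can be exercised locally with `go test -bench=PushJournaldPerformance ./app/vlinsert/journald/`, which reports throughput against the ~1 MiB generated corpus of mixed plain and binary entries.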
@@ -7,7 +7,6 @@ import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"

@@ -33,7 +32,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

@@ -120,5 +119,5 @@ var (
	requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
	errorsTotal   = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)

-	requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
+	requestDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
)
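The Histogram-to-Summary switch (repeated for the Loki and OpenTelemetry handlers below) changes how `vl_http_request_duration_seconds` is exported: a summary publishes client-side quantiles instead of histogram buckets. A minimal sketch of how such a summary is fed, assuming the github.com/VictoriaMetrics/metrics package these files already use (the metric name here is hypothetical):

package main

import (
	"time"

	"github.com/VictoriaMetrics/metrics"
)

// example_request_duration_seconds is an illustrative metric name only.
var requestDuration = metrics.NewSummary(`example_request_duration_seconds{path="/demo"}`)

func handle() {
	startTime := time.Now()
	// ... handle the request ...
	requestDuration.UpdateDuration(startTime) // records time.Since(startTime)
}

func main() { handle() }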
@@ -16,10 +16,10 @@ var disableMessageParsing = flag.Bool("loki.disableMessageParsing", false, "Whet

// RequestHandler processes Loki insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
	switch path {
-	case "/api/v1/push":
+	case "/insert/loki/api/v1/push":
		handleInsert(r, w)
		return true
-	case "/ready":
+	case "/insert/loki/ready":
		// See https://grafana.com/docs/loki/latest/api/#identify-ready-loki-instance
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ready"))
@@ -9,7 +9,6 @@ import (
	"github.com/valyala/fastjson"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"

@@ -30,7 +29,7 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}
-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

@@ -59,7 +58,7 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {

var (
	requestsJSONTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
-	requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
+	requestJSONDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
)

func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields, parseMessage bool) error {
@@ -9,7 +9,6 @@ import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"

@@ -29,7 +28,7 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}
-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

@@ -63,7 +62,7 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {

var (
	requestsProtobufTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
-	requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
+	requestProtobufDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
)

func parseProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields, parseMessage bool) error {
@@ -1,6 +1,7 @@
package vlinsert

import (
	"flag"
	"fmt"
	"net/http"
	"strings"

@@ -13,6 +14,12 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/opentelemetry"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/syslog"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
)

+var (
+	disableInsert   = flag.Bool("insert.disable", false, "Whether to disable /insert/* HTTP endpoints")
+	disableInternal = flag.Bool("internalinsert.disable", false, "Whether to disable /internal/insert HTTP endpoint")
+)

// Init initializes vlinsert
@@ -27,49 +34,55 @@ func Stop() {

// RequestHandler handles insert requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
-	path := r.URL.Path
+	path := strings.ReplaceAll(r.URL.Path, "//", "/")

+	if strings.HasPrefix(path, "/insert/") {
+		if *disableInsert {
+			httpserver.Errorf(w, r, "requests to /insert/* are disabled with -insert.disable command-line flag")
+			return true
+		}
+
+		return insertHandler(w, r, path)
+	}
+
+	if path == "/internal/insert" {
+		if *disableInternal || *disableInsert {
+			httpserver.Errorf(w, r, "requests to /internal/insert are disabled with -internalinsert.disable or -insert.disable command-line flag")
+			return true
+		}
+		internalinsert.RequestHandler(w, r)
+		return true
+	}

-	if !strings.HasPrefix(path, "/insert/") {
-		// Skip requests, which do not start with /insert/, since these aren't our requests.
-		return false
-	}
-	path = strings.TrimPrefix(path, "/insert")
-	path = strings.ReplaceAll(path, "//", "/")
+	return false
}

+func insertHandler(w http.ResponseWriter, r *http.Request, path string) bool {
	switch path {
-	case "/jsonline":
+	case "/insert/jsonline":
		jsonline.RequestHandler(w, r)
		return true
-	case "/ready":
+	case "/insert/ready":
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(200)
		fmt.Fprintf(w, `{"status":"ok"}`)
		return true
	}
	switch {
-	case strings.HasPrefix(path, "/elasticsearch"):
-		// some clients may omit trailing slash
-		// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8353
-		path = strings.TrimPrefix(path, "/elasticsearch")
+	// some clients may omit trailing slash at elasticsearch protocol.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8353
+	case strings.HasPrefix(path, "/insert/elasticsearch"):
		return elasticsearch.RequestHandler(path, w, r)
-	case strings.HasPrefix(path, "/loki/"):
-		path = strings.TrimPrefix(path, "/loki")
+	case strings.HasPrefix(path, "/insert/loki/"):
		return loki.RequestHandler(path, w, r)
-	case strings.HasPrefix(path, "/opentelemetry/"):
-		path = strings.TrimPrefix(path, "/opentelemetry")
+	case strings.HasPrefix(path, "/insert/opentelemetry/"):
		return opentelemetry.RequestHandler(path, w, r)
-	case strings.HasPrefix(path, "/journald/"):
-		path = strings.TrimPrefix(path, "/journald")
+	case strings.HasPrefix(path, "/insert/journald/"):
		return journald.RequestHandler(path, w, r)
-	case strings.HasPrefix(path, "/datadog/"):
-		path = strings.TrimPrefix(path, "/datadog")
+	case strings.HasPrefix(path, "/insert/datadog/"):
		return datadog.RequestHandler(path, w, r)
	default:
		return false
	}

-	return false
}
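After this refactoring the protocol handlers match full `/insert/...` paths instead of stripped suffixes, and duplicate slashes are collapsed up front. Ingestion requests should be unaffected; for example, a line can presumably still be pushed to a local VictoriaLogs instance (default port 9428) with `curl -X POST http://localhost:9428/insert/jsonline -d '{"_msg":"hello","_time":"2024-01-01T00:00:00Z"}'`.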
@@ -6,7 +6,6 @@ import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"

@@ -22,7 +21,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
	switch path {
	// use the same path as opentelemetry collector
	// https://opentelemetry.io/docs/specs/otlp/#otlphttp-request
-	case "/v1/logs":
+	case "/insert/opentelemetry/v1/logs":
		if r.Header.Get("Content-Type") == "application/json" {
			httpserver.Errorf(w, r, "json encoding isn't supported for opentelemetry format. Use protobuf encoding")
			return true

@@ -43,7 +42,7 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}
-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

@@ -71,7 +70,7 @@ var (
	requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
	errorsTotal           = metrics.NewCounter(`vl_http_errors_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)

-	requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
+	requestProtobufDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
)

func pushProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields bool) error {
@@ -17,7 +17,6 @@ import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"

@@ -385,7 +384,7 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, encoding string, us

// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
func processStream(protocol string, r io.Reader, encoding string, useLocalTimestamp bool, cp *insertutil.CommonParams) error {
-	if err := vlstorage.CanWriteData(); err != nil {
+	if err := insertutil.CanWriteData(); err != nil {
		return err
	}
@@ -101,8 +101,8 @@ func TestProcessStreamInternal_Success(t *testing.T) {
	currentYear := 2023
	timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
	resultExpected := `{"format":"rfc3164","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
-{"priority":"165","facility":"20","severity":"5","format":"rfc3164","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
-{"priority":"123","facility":"15","severity":"3","format":"rfc5424","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
+{"priority":"165","facility_keyword":"local4","level":"notice","facility":"20","severity":"5","format":"rfc3164","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
+{"priority":"123","facility_keyword":"solaris-cron","level":"error","facility":"15","severity":"3","format":"rfc5424","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
	f(data, currentYear, timestampsExpected, resultExpected)
}
@@ -2,7 +2,6 @@ package internalselect

import (
	"context"
-	"flag"
	"fmt"
	"net/http"
	"strconv"

@@ -22,15 +21,8 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)

-var disableSelect = flag.Bool("internalselect.disable", false, "Whether to disable /internal/select/* HTTP endpoints")
-
// RequestHandler processes requests to /internal/select/*
func RequestHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
-	if *disableSelect {
-		httpserver.Errorf(w, r, "requests to /internal/select/* are disabled with -internalselect.disable command-line flag")
-		return
-	}
-
	startTime := time.Now()

	path := r.URL.Path
@@ -55,7 +55,10 @@ func ProcessFacetsRequest(ctx context.Context, w http.ResponseWriter, r *http.Re
	}
	keepConstFields := httputil.GetBool(r, "keep_const_fields")

+	// Pipes must be dropped, since it is expected facets are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()

	q.AddFacetsPipe(limit, maxValuesPerField, maxValueLen, keepConstFields)

	var mLock sync.Mutex

@@ -156,8 +159,10 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
		fieldsLimit = 0
	}

-	// Prepare the query for hits count.
+	// Pipes must be dropped, since it is expected hits are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()

	q.AddCountByTimePipe(int64(step), int64(offset), fields)

	var mLock sync.Mutex

@@ -290,6 +295,10 @@ func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *htt
		return
	}

+	// Pipes must be dropped, since it is expected field names are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()
+
	// Obtain field names for the given query
	fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
	if err != nil {

@@ -329,6 +338,10 @@ func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *ht
		limit = 0
	}

+	// Pipes must be dropped, since it is expected field values are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()
+
	// Obtain unique values for the given field
	values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {

@@ -351,6 +364,10 @@ func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter,
		return
	}

+	// Pipes must be dropped, since it is expected stream field names are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()
+
	// Obtain stream field names for the given query
	names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
	if err != nil {

@@ -389,6 +406,10 @@ func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter,
		limit = 0
	}

+	// Pipes must be dropped, since it is expected stream field values are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()
+
	// Obtain stream field values for the given query and the given fieldName
	values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {

@@ -420,6 +441,10 @@ func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http
		limit = 0
	}

+	// Pipes must be dropped, since it is expected stream ids are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()
+
	// Obtain streamIDs for the given query
	streamIDs, err := vlstorage.GetStreamIDs(ctx, tenantIDs, q, uint64(limit))
	if err != nil {

@@ -451,6 +476,10 @@ func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.R
		limit = 0
	}

+	// Pipes must be dropped, since it is expected streams are obtained
+	// from the real logs stored in the database.
+	q.DropAllPipes()
+
	// Obtain streams for the given query
	streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
	if err != nil {
@@ -551,7 +580,7 @@ var liveTailRequests = metrics.NewCounter(`vl_live_tailing_requests`)
const tailOffsetNsecs = 5e9

type logRow struct {
-	timestamp string
+	timestamp int64
	fields    []logstorage.Field
}

@@ -567,7 +596,7 @@ type tailProcessor struct {
	mu sync.Mutex

	perStreamRows  map[string][]logRow
-	lastTimestamps map[string]string
+	lastTimestamps map[string]int64

	err error
}

@@ -577,7 +606,7 @@ func newTailProcessor(cancel func()) *tailProcessor {
		cancel: cancel,

		perStreamRows:  make(map[string][]logRow),
-		lastTimestamps: make(map[string]string),
+		lastTimestamps: make(map[string]int64),
	}
}

@@ -594,7 +623,7 @@ func (tp *tailProcessor) writeBlock(_ uint, db *logstorage.DataBlock) {
	}

	// Make sure columns contain _time field, since it is needed for proper tail work.
-	timestamps, ok := db.GetTimestamps()
+	timestamps, ok := db.GetTimestamps(nil)
	if !ok {
		tp.err = fmt.Errorf("missing _time field")
		tp.cancel()

@@ -1043,9 +1072,7 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
}

func getLastNRows(rows []logRow, limit int) []logRow {
-	sort.Slice(rows, func(i, j int) bool {
-		return rows[i].timestamp < rows[j].timestamp
-	})
+	sortLogRows(rows)
	if len(rows) > limit {
		rows = rows[len(rows)-limit:]
	}

@@ -1070,7 +1097,7 @@ func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.Tenant
		clonedColumnNames[i] = strings.Clone(c.Name)
	}

-	timestamps, ok := db.GetTimestamps()
+	timestamps, ok := db.GetTimestamps(nil)
	if !ok {
		missingTimeColumn.Store(true)
		cancel()
@@ -25,6 +25,9 @@ var (
	maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the search request waits for execution when -search.maxConcurrentRequests "+
		"limit is reached; see also -search.maxQueryDuration")
	maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution. It can be overridden to a smaller value on a per-query basis via 'timeout' query arg")
+
+	disableSelect   = flag.Bool("select.disable", false, "Whether to disable /select/* HTTP endpoints")
+	disableInternal = flag.Bool("internalselect.disable", false, "Whether to disable /internal/select/* HTTP endpoints")
)

func getDefaultMaxConcurrentRequests() int {

@@ -71,13 +74,31 @@ var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))

// RequestHandler handles select requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
-	path := r.URL.Path
+	path := strings.ReplaceAll(r.URL.Path, "//", "/")

-	if !strings.HasPrefix(path, "/select/") && !strings.HasPrefix(path, "/internal/select/") {
-		// Skip requests, which do not start with /select/, since these aren't our requests.
-		return false
+	if strings.HasPrefix(path, "/select/") {
+		if *disableSelect {
+			httpserver.Errorf(w, r, "requests to /select/* are disabled with -select.disable command-line flag")
+			return true
+		}
+
+		return selectHandler(w, r, path)
	}
-	path = strings.ReplaceAll(path, "//", "/")

+	if strings.HasPrefix(path, "/internal/select/") {
+		if *disableInternal || *disableSelect {
+			httpserver.Errorf(w, r, "requests to /internal/select/* are disabled with -internalselect.disable or -select.disable command-line flag")
+			return true
+		}
+		internalselect.RequestHandler(r.Context(), w, r)
+		return true
+	}

	return false
}

+func selectHandler(w http.ResponseWriter, r *http.Request, path string) bool {
+	ctx := r.Context()

	if path == "/select/vmui" {
		// VMUI access via incomplete url without `/` in the end. Redirect to complete url.

@@ -100,7 +121,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		return true
	}

-	ctx := r.Context()
	if path == "/select/logsql/tail" {
		logsqlTailRequests.Inc()
		// Process live tailing request without timeout, since it is OK to run live tailing requests for very long time.

@@ -120,13 +140,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	}
	defer decRequestConcurrency()

-	if strings.HasPrefix(path, "/internal/select/") {
-		// Process internal request from vlselect without timeout (e.g. use ctx instead of ctxWithTimeout),
-		// since the timeout must be controlled by the vlselect.
-		internalselect.RequestHandler(ctx, w, r)
-		return true
-	}
-
	ok := processSelectRequest(ctxWithTimeout, w, r, path)
	if !ok {
		return false
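These flags mirror the insert-side flags added above: starting the binary with `-select.disable` rejects all `/select/*` requests, while `-internalselect.disable` blocks only the `/internal/select/*` endpoints used for node-to-node queries; `-select.disable` implies both, as the combined check in the handler shows.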
@@ -66,8 +66,8 @@ or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/victo
The list of MetricsQL features on top of PromQL:

* Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax.
-  See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite#selecting-graphite-metrics).
-  VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite#graphite-api-usage) for details.
+  See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#selecting-graphite-metrics).
+  VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#graphite-api-usage) for details.
  See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
  depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#range-query)

@@ -742,7 +742,23 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k

This function is supported by PromQL.

-See also [irate](#irate) and [rollup_rate](#rollup_rate).
+See also [irate](#irate), [rollup_rate](#rollup_rate) and [rate_prometheus](#rate_prometheus).
+
+#### rate_prometheus
+
+`rate_prometheus(series_selector[d])` {{% available_from "#" %}} is a [rollup function](#rollup-functions), which calculates the average per-second
+increase rate over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#filtering).
+The resulting calculation is equivalent to `increase_prometheus(series_selector[d]) / d`.
+
+It doesn't take into account the last sample before the given lookbehind window `d` when calculating the result in the same way as Prometheus does.
+See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
+
+Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.
+
+This function is usually applied to [counters](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#counter).
+
+See also [increase_prometheus](#increase_prometheus) and [rate](#rate).

#### rate_over_sum
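To illustrate the difference with a hypothetical counter `http_requests_total`: `rate(http_requests_total[1m])` may also use the last sample just before the 1-minute window when computing the per-second increase, while `rate_prometheus(http_requests_total[1m])` uses only the samples strictly inside the window, reproducing Prometheus results at the cost of potentially missing the increase between the window boundary and the first in-window sample.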
208 app/vlselect/vmui/assets/index-721xTF8u.js (new file)
File diff suppressed because one or more lines are too long

67 app/vlselect/vmui/assets/vendor-V4vnRsM-.js (new file)
File diff suppressed because one or more lines are too long
@@ -35,10 +35,10 @@
	<meta property="og:title" content="UI for VictoriaLogs">
	<meta property="og:url" content="https://victoriametrics.com/products/victorialogs/">
	<meta property="og:description" content="Explore your log data with VictoriaLogs UI">
-	<script type="module" crossorigin src="./assets/index-DLp5TlUn.js"></script>
-	<link rel="modulepreload" crossorigin href="./assets/vendor-D8IJGiEn.js">
+	<script type="module" crossorigin src="./assets/index-721xTF8u.js"></script>
+	<link rel="modulepreload" crossorigin href="./assets/vendor-V4vnRsM-.js">
	<link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
-	<link rel="stylesheet" crossorigin href="./assets/index-C85_NB5q.css">
+	<link rel="stylesheet" crossorigin href="./assets/index-C36SC0pJ.css">
  </head>
  <body>
	<noscript>You need to enable JavaScript to run this app.</noscript>
@@ -253,8 +253,11 @@ func processForceFlush(w http.ResponseWriter, r *http.Request) bool {
	return true
}

+// Storage implements insertutil.LogRowsStorage interface
+type Storage struct{}
+
// CanWriteData returns non-nil error if it cannot write data to vlstorage
-func CanWriteData() error {
+func (*Storage) CanWriteData() error {
	if localStorage == nil {
		// The data can be always written in non-local mode.
		return nil

@@ -273,7 +276,7 @@ func CanWriteData() error {
// MustAddRows adds lr to vlstorage
//
// It is advised to call CanWriteData() before calling MustAddRows()
-func MustAddRows(lr *logstorage.LogRows) {
+func (*Storage) MustAddRows(lr *logstorage.LogRows) {
	if localStorage != nil {
		// Store lr in the local storage.
		localStorage.MustAddRows(lr)
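The package-level functions becoming methods on an empty `Storage` struct suggests the ingestion path is now wired through an interface rather than direct package calls. A sketch of the interface implied by the methods above (the actual definition lives in app/vlinsert/insertutil and may differ in detail):

package insertutil

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"

// LogRowsStorage is the presumed contract between vlinsert and its backing storage.
type LogRowsStorage interface {
	// CanWriteData returns a non-nil error if data cannot currently be written.
	CanWriteData() error
	// MustAddRows ingests the given log rows.
	MustAddRows(lr *logstorage.LogRows)
}

This indirection lets the `insertutil.CanWriteData()` calls seen throughout this diff dispatch to whichever storage implementation was registered, instead of importing app/vlstorage directly.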
@@ -248,6 +248,9 @@ func (sn *storageNode) executeRequestAt(ctx context.Context, path string, args u
	if err != nil {
		logger.Panicf("BUG: unexpected error when creating a request: %s", err)
	}
+	if err := sn.ac.SetHeaders(req, true); err != nil {
+		return nil, fmt.Errorf("cannot set auth headers for %q: %w", reqURL, err)
+	}

	// send the request to the storage node
	resp, err := sn.c.Do(req)
@@ -89,15 +89,18 @@ func (t *Type) ValidateExpr(expr string) error {
	return nil
}

+// SupportedType is true if given datasource type is supported
+func SupportedType(dsType string) bool {
+	return dsType == "graphite" || dsType == "prometheus" || dsType == "vlogs"
+}
+
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (t *Type) UnmarshalYAML(unmarshal func(any) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
-	switch s {
-	case "graphite", "prometheus", "vlogs":
-	default:
+	if !SupportedType(s) {
		return fmt.Errorf("unknown datasource type=%q, want prometheus, graphite or vlogs", s)
	}
	t.Name = s
@@ -148,9 +148,13 @@ func main() {
	if err != nil {
		logger.Fatalf("failed to init datasource: %s", err)
	}
-	if err := replay(groupsCfg, q, rw); err != nil {
+	totalRows, droppedRows, err := replay(groupsCfg, q, rw)
+	if err != nil {
		logger.Fatalf("replay failed: %s", err)
	}
+	if droppedRows > 0 {
+		logger.Fatalf("failed to push all generated samples to remote write url, dropped %d samples out of %d", droppedRows, totalRows)
+	}
	logger.Infof("replay succeed!")
	return
}
@@ -216,7 +216,7 @@ var (
)

// GetDroppedRows returns value of droppedRows metric
-func GetDroppedRows() int64 { return int64(droppedRows.Get()) }
+func GetDroppedRows() int { return int(droppedRows.Get()) }

// flush is a blocking function that marshals WriteRequest and sends
// it to remote-write endpoint. Flush performs limited amount of retries
@@ -20,9 +20,8 @@ var (
		"The time filter in RFC3339 format to finish the replay by. E.g. '2020-01-01T20:07:00Z'. "+
		"By default, is set to the current time.")
	replayRulesDelay = flag.Duration("replay.rulesDelay", time.Second,
-		"Delay between rules evaluation within the group. Could be important if there are chained rules inside the group "+
-			"and processing need to wait for previous rule results to be persisted by remote storage before evaluating the next rule."+
-			"Keep it equal or bigger than -remoteWrite.flushInterval.")
+		"Delay before evaluating the next rule within the group. Is important for chained rules. "+
+			"Keep it equal or bigger than -remoteWrite.flushInterval. When set to >0, replay ignores group's concurrency setting.")
	replayMaxDatapoints = flag.Int("replay.maxDatapointsPerQuery", 1e3,
		"Max number of data points expected in one request. It affects the max time range for every '/query_range' request during the replay. The higher the value, the less requests will be made during replay.")
	replayRuleRetryAttempts = flag.Int("replay.ruleRetryAttempts", 5,

@@ -31,13 +30,13 @@ var (
		"Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.")
)

-func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewrite.RWClient) error {
+func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewrite.RWClient) (totalRows, droppedRows int, err error) {
	if *replayMaxDatapoints < 1 {
-		return fmt.Errorf("replay.maxDatapointsPerQuery can't be lower than 1")
+		return 0, 0, fmt.Errorf("replay.maxDatapointsPerQuery can't be lower than 1")
	}
	tFrom, err := time.Parse(time.RFC3339, *replayFrom)
	if err != nil {
-		return fmt.Errorf("failed to parse replay.timeFrom=%q: %w", *replayFrom, err)
+		return 0, 0, fmt.Errorf("failed to parse replay.timeFrom=%q: %w", *replayFrom, err)
	}

	// use tFrom location for default value, otherwise filters could have different locations

@@ -45,12 +44,12 @@ func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewri
	if *replayTo != "" {
		tTo, err = time.Parse(time.RFC3339, *replayTo)
		if err != nil {
-			return fmt.Errorf("failed to parse replay.timeTo=%q: %w", *replayTo, err)
+			return 0, 0, fmt.Errorf("failed to parse replay.timeTo=%q: %w", *replayTo, err)
		}
	}

	if !tTo.After(tFrom) {
-		return fmt.Errorf("replay.timeTo=%v must be bigger than replay.timeFrom=%v", tTo, tFrom)
+		return 0, 0, fmt.Errorf("replay.timeTo=%v must be bigger than replay.timeFrom=%v", tTo, tFrom)
	}
	labels := make(map[string]string)
	for _, s := range *externalLabels {

@@ -59,7 +58,7 @@ func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewri
	}
	n := strings.IndexByte(s, '=')
	if n < 0 {
-		return fmt.Errorf("missing '=' in `-label`. It must contain label in the form `name=value`; got %q", s)
+		return 0, 0, fmt.Errorf("missing '=' in `-label`. It must contain label in the form `name=value`; got %q", s)
	}
	labels[s[:n]] = s[n+1:]
	}

@@ -70,18 +69,14 @@ func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewri
		"\nmax data points per request: %d\n",
		tFrom, tTo, *replayMaxDatapoints)

-	var total int
	for _, cfg := range groupsCfg {
		ng := rule.NewGroup(cfg, qb, *evaluationInterval, labels)
-		total += ng.Replay(tFrom, tTo, rw, *replayMaxDatapoints, *replayRuleRetryAttempts, *replayRulesDelay, *disableProgressBar)
+		totalRows += ng.Replay(tFrom, tTo, rw, *replayMaxDatapoints, *replayRuleRetryAttempts, *replayRulesDelay, *disableProgressBar)
	}
-	logger.Infof("replay evaluation finished, generated %d samples", total)
+	logger.Infof("replay evaluation finished, generated %d samples", totalRows)
	if err := rw.Close(); err != nil {
-		return err
+		return 0, 0, err
	}
-	droppedRows := remotewrite.GetDroppedRows()
-	if droppedRows > 0 {
-		return fmt.Errorf("failed to push all generated samples to remote write url, dropped %d samples out of %d", droppedRows, total)
-	}
-	return nil
+	droppedRows = remotewrite.GetDroppedRows()
+	return totalRows, droppedRows, nil
}
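With this change, `replay` reports totals and dropped rows to the caller instead of deciding fatality itself, which makes it directly testable (see the updated tests below). The flags referenced above are invoked as usual, presumably along the lines of `vmalert -rule=rules.yml -datasource.url=<read endpoint> -remoteWrite.url=<write endpoint> -replay.timeFrom=2021-01-01T12:00:00Z -replay.timeTo=2021-01-01T12:02:30Z` (endpoint URLs here are placeholders).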
@@ -8,38 +8,45 @@ import (

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
)

type fakeReplayQuerier struct {
	datasource.FakeQuerier
-	registry map[string]map[string]struct{}
+	registry map[string]map[string][]datasource.Metric
}

func (fr *fakeReplayQuerier) BuildWithParams(_ datasource.QuerierParams) datasource.Querier {
	return fr
}

+type fakeRWClient struct{}
+
+func (fc *fakeRWClient) Push(_ prompbmarshal.TimeSeries) error {
+	return nil
+}
+
+func (fc *fakeRWClient) Close() error {
+	return nil
+}
+
func (fr *fakeReplayQuerier) QueryRange(_ context.Context, q string, from, to time.Time) (res datasource.Result, err error) {
	key := fmt.Sprintf("%s+%s", from.Format("15:04:05"), to.Format("15:04:05"))
	dps, ok := fr.registry[q]
	if !ok {
		return res, fmt.Errorf("unexpected query received: %q", q)
	}
-	_, ok = dps[key]
+	metrics, ok := dps[key]
	if !ok {
		return res, fmt.Errorf("unexpected time range received: %q", key)
	}
	delete(dps, key)
	if len(fr.registry[q]) < 1 {
		delete(fr.registry, q)
	}
+	res.Data = metrics
	return res, nil
}

func TestReplay(t *testing.T) {
-	f := func(from, to string, maxDP int, cfg []config.Group, qb *fakeReplayQuerier) {
+	f := func(from, to string, maxDP int, ruleDelay time.Duration, cfg []config.Group, qb *fakeReplayQuerier, expectTotalRows int) {
		t.Helper()

		fromOrig, toOrig, maxDatapointsOrig := *replayFrom, *replayTo, *replayMaxDatapoints

@@ -51,90 +58,172 @@ func TestReplay(t *testing.T) {
		}()

		*replayRuleRetryAttempts = 1
-		*replayRulesDelay = time.Millisecond
-		rwb := &remotewrite.DebugClient{}
+		*replayRulesDelay = ruleDelay
+		rwb := &fakeRWClient{}
		*replayFrom = from
		*replayTo = to
		*replayMaxDatapoints = maxDP
-		if err := replay(cfg, qb, rwb); err != nil {
+		totalRows, _, err := replay(cfg, qb, rwb)
+		if err != nil {
			t.Fatalf("replay failed: %s", err)
		}
-		if len(qb.registry) > 0 {
-			t.Fatalf("not all requests were sent: %#v", qb.registry)
+		if totalRows != expectTotalRows {
+			t.Fatalf("unexpected total rows count: got %d, want %d", totalRows, expectTotalRows)
		}
	}

	// one rule + one response
-	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:00.000Z", 10, []config.Group{
+	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:00.000Z", 10, time.Millisecond, []config.Group{
		{Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
	}, &fakeReplayQuerier{
-		registry: map[string]map[string]struct{}{
-			"sum(up)": {"12:00:00+12:02:00": {}},
+		registry: map[string]map[string][]datasource.Metric{
+			"sum(up)": {"12:00:00+12:02:00": {
+				{
+					Timestamps: []int64{1},
+					Values:     []float64{1},
+				},
+			}},
		},
-	})
+	}, 1)

	// one rule + multiple responses
-	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, []config.Group{
+	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, time.Millisecond, []config.Group{
		{Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
	}, &fakeReplayQuerier{
-		registry: map[string]map[string]struct{}{
+		registry: map[string]map[string][]datasource.Metric{
			"sum(up)": {
-				"12:00:00+12:01:00": {},
+				"12:00:00+12:01:00": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
				"12:01:00+12:02:00": {},
-				"12:02:00+12:02:30": {},
+				"12:02:00+12:02:30": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
			},
		},
-	})
+	}, 2)

	// datapoints per step
-	f("2021-01-01T12:00:00.000Z", "2021-01-01T15:02:30.000Z", 60, []config.Group{
+	f("2021-01-01T12:00:00.000Z", "2021-01-01T15:02:30.000Z", 60, time.Millisecond, []config.Group{
		{Interval: promutil.NewDuration(time.Minute), Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
	}, &fakeReplayQuerier{
-		registry: map[string]map[string]struct{}{
+		registry: map[string]map[string][]datasource.Metric{
			"sum(up)": {
-				"12:00:00+13:00:00": {},
-				"13:00:00+14:00:00": {},
+				"12:00:00+13:00:00": {
+					{
+						Timestamps: []int64{1, 2},
+						Values:     []float64{1, 2},
+					},
+				},
+				"13:00:00+14:00:00": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
				"14:00:00+15:00:00": {},
				"15:00:00+15:02:30": {},
			},
		},
-	})
+	}, 3)

	// multiple recording rules + multiple responses
-	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, []config.Group{
+	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, time.Millisecond, []config.Group{
		{Rules: []config.Rule{{Record: "foo", Expr: "sum(up)"}}},
		{Rules: []config.Rule{{Record: "bar", Expr: "max(up)"}}},
	}, &fakeReplayQuerier{
-		registry: map[string]map[string]struct{}{
+		registry: map[string]map[string][]datasource.Metric{
			"sum(up)": {
-				"12:00:00+12:01:00": {},
+				"12:00:00+12:01:00": {
+					{
+						Timestamps: []int64{1, 2},
+						Values:     []float64{1, 2},
+					},
+				},
				"12:01:00+12:02:00": {},
				"12:02:00+12:02:30": {},
			},
			"max(up)": {
				"12:00:00+12:01:00": {},
-				"12:01:00+12:02:00": {},
+				"12:01:00+12:02:00": {
+					{
+						Timestamps: []int64{1, 2},
+						Values:     []float64{1, 2},
+					},
+				},
				"12:02:00+12:02:30": {},
			},
		},
-	})
+	}, 4)

	// multiple alerting rules + multiple responses
-	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, []config.Group{
+	// alerting rule generates two series `ALERTS` and `ALERTS_FOR_STATE` when triggered
+	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, time.Millisecond, []config.Group{
		{Rules: []config.Rule{{Alert: "foo", Expr: "sum(up) > 1"}}},
		{Rules: []config.Rule{{Alert: "bar", Expr: "max(up) < 1"}}},
	}, &fakeReplayQuerier{
-		registry: map[string]map[string]struct{}{
+		registry: map[string]map[string][]datasource.Metric{
			"sum(up) > 1": {
-				"12:00:00+12:01:00": {},
+				"12:00:00+12:01:00": {
+					{
+						Timestamps: []int64{1, 2},
+						Values:     []float64{1, 2},
+					},
+				},
				"12:01:00+12:02:00": {},
				"12:02:00+12:02:30": {},
			},
			"max(up) < 1": {
-				"12:00:00+12:01:00": {},
+				"12:00:00+12:01:00": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
				"12:01:00+12:02:00": {},
				"12:02:00+12:02:30": {},
			},
		},
-	})
+	}, 6)

+	// multiple recording rules in one group + multiple responses + concurrency
+	f("2021-01-01T12:00:00.000Z", "2021-01-01T12:02:30.000Z", 1, 0, []config.Group{
+		{Rules: []config.Rule{{Record: "foo", Expr: "sum(up) > 1"}, {Record: "bar", Expr: "max(up) < 1"}}, Concurrency: 2}}, &fakeReplayQuerier{
+		registry: map[string]map[string][]datasource.Metric{
+			"sum(up) > 1": {
+				"12:00:00+12:01:00": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
+				"12:01:00+12:02:00": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
+				"12:02:00+12:02:30": {
+					{
+						Timestamps: []int64{1},
+						Values:     []float64{1},
+					},
+				},
+			},
+			"max(up) < 1": {
+				"12:00:00+12:01:00": {},
+				"12:01:00+12:02:00": {{
+					Timestamps: []int64{1},
+					Values:     []float64{1},
+				}},
+				"12:02:00+12:02:30": {},
+			},
+		},
+	}, 4)
}
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"hash/fnv"
+	"math"
	"sort"
	"strings"
	"sync"

@@ -335,7 +336,9 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
	var result []prompbmarshal.TimeSeries
	holdAlertState := make(map[uint64]*notifier.Alert)
	qFn := func(_ string) ([]datasource.Metric, error) {
-		return nil, fmt.Errorf("`query` template isn't supported in replay mode")
+		logger.Warnf("`query` template isn't supported in replay mode, mocked data is used")
+		// mock query results to allow common used template {{ query <$expr> | first | value }}
+		return []datasource.Metric{{Timestamps: []int64{0}, Values: []float64{math.NaN()}}}, nil
	}
	for _, s := range res.Data {
		ls, as, err := ar.expandTemplates(s, qFn, time.Time{})

@@ -413,7 +416,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
		return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
	}

-	ar.logDebugf(ts, nil, "query returned %d samples (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))
+	ar.logDebugf(ts, nil, "query returned %d series (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))
	qFn := func(query string) ([]datasource.Metric, error) {
		res, _, err := ar.q.Query(ctx, query, ts)
		return res.Data, err
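The effect of the mocked `qFn` is that replay no longer aborts on rules whose annotations use the query template. An annotation like `{{ query "up" | first | value }}` (the expression here is only an example) now expands against a single NaN sample during replay, with a warning logged, instead of failing the whole rule.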
@@ -445,11 +445,17 @@ func (g *Group) Start(ctx context.Context, nts func() []notifier.Notifier, rw re

		g.infof("re-started")
	case <-t.C:
-		missed := (time.Since(evalTS) / g.Interval) - 1
+		// calculate the real wall clock offset by stripping the monotonic clock first,
+		// then evalTS can be corrected when wall clock is adjusted.
+		// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8790#issuecomment-2986541829
+		offset := time.Now().Round(0).Sub(evalTS.Round(0))
+		missed := (offset / g.Interval) - 1
		if missed < 0 {
			// missed can become < 0 due to irregular delays during evaluation
-			// which can result in time.Since(evalTS) < g.Interval
+			// which can result in time.Since(evalTS) < g.Interval;
+			// or the system wall clock was changed backward
			missed = 0
+			evalTS = time.Now()
		}
		if missed > 0 {
			g.metrics.iterationMissed.Inc()
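The `Round(0)` trick relies on a documented property of Go's time package: `time.Now()` carries both a wall-clock and a monotonic reading, `Sub` prefers the monotonic one, and `Round(0)` strips the monotonic reading so comparisons follow the system clock. A minimal standalone illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now() // carries a monotonic clock reading
	w := t.Round(0) // Round(0) strips the monotonic reading
	time.Sleep(10 * time.Millisecond)
	// With monotonic readings, Sub measures elapsed monotonic time.
	// After Round(0), Sub compares wall-clock values, which track
	// system clock adjustments (e.g. NTP corrections).
	fmt.Println(time.Now().Round(0).Sub(w))
}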
@@ -514,36 +520,84 @@ func (g *Group) Replay(start, end time.Time, rw remotewrite.RWClient, maxDataPoi
	iterations := int(end.Sub(start)/step) + 1
	fmt.Printf("\nGroup %q"+
		"\ninterval: \t%v"+
-		"\nrequests to make: \t%d"+
+		"\nconcurrency: \t %d"+
+		"\nrequests to make per rule: \t%d"+
		"\nmax range per request: \t%v\n",
-		g.Name, g.Interval, iterations, step)
+		g.Name, g.Interval, g.Concurrency, iterations, step)
	if g.Limit > 0 {
-		fmt.Printf("\nPlease note, `limit: %d` param has no effect during replay.\n",
+		fmt.Printf("\nWarning: `limit: %d` param has no effect during replay.\n",
			g.Limit)
	}
-	for _, rule := range g.Rules {
-		fmt.Printf("> Rule %q (ID: %d)\n", rule, rule.ID())
-		var bar *pb.ProgressBar
-		if !disableProgressBar {
-			bar = pb.StartNew(iterations)
-		}
-		ri.reset()
-		for ri.next() {
-			n, err := replayRule(rule, ri.s, ri.e, rw, replayRuleRetryAttempts)
-			if err != nil {
-				logger.Fatalf("rule %q: %s", rule, err)
+	concurrency := g.Concurrency
+	if g.Concurrency > 1 && replayDelay > 0 {
+		fmt.Printf("\nWarning: group concurrency %d will be ignored since `-replay.rulesDelay` is %.3f seconds."+
+			" Set -replay.rulesDelay=0 to enable concurrency for replay.\n", g.Concurrency, replayDelay.Seconds())
+		concurrency = 1
+	}
+
+	if concurrency == 1 {
+		for _, rule := range g.Rules {
+			var bar *pb.ProgressBar
+			if !disableProgressBar {
+				bar = pb.StartNew(iterations)
			}
-			total += n
+			// pass ri as a copy, so it can be modified within the replayRuleRange
+			total += replayRuleRange(rule, ri, bar, rw, replayRuleRetryAttempts)
			if bar != nil {
-				bar.Increment()
+				bar.Finish()
			}
-			// sleep to let remote storage to flush data on-disk
-			// so chained rules could be calculated correctly
-			time.Sleep(replayDelay)
		}
		return total
	}

+	sem := make(chan struct{}, g.Concurrency)
+	res := make(chan int, len(g.Rules)*iterations)
+	wg := sync.WaitGroup{}
+	var bar *pb.ProgressBar
+	if !disableProgressBar {
+		bar = pb.StartNew(iterations * len(g.Rules))
+	}
+	for _, r := range g.Rules {
+		sem <- struct{}{}
+		wg.Add(1)
+		go func(r Rule, ri rangeIterator) {
+			// pass ri as a copy, so it can be modified within the replayRuleRange
+			res <- replayRuleRange(r, ri, bar, rw, replayRuleRetryAttempts)
+			<-sem
+			wg.Done()
+		}(r, ri)
+	}
+
+	wg.Wait()
+	close(res)
+	close(sem)
+
+	if bar != nil {
+		bar.Finish()
+	}
+
+	total = 0
+	for n := range res {
+		total += n
+	}
	return total
}

+func replayRuleRange(r Rule, ri rangeIterator, bar *pb.ProgressBar, rw remotewrite.RWClient, replayRuleRetryAttempts int) int {
+	fmt.Printf("> Rule %q (ID: %d)\n", r, r.ID())
+	total := 0
+	for ri.next() {
+		n, err := replayRule(r, ri.s, ri.e, rw, replayRuleRetryAttempts)
+		if err != nil {
+			logger.Fatalf("rule %q: %s", r, err)
+		}
+		if bar != nil {
+			bar.Increment()
+		}
+		// sleep to let remote storage to flush data on-disk
+		// so chained rules could be calculated correctly
+		time.Sleep(replayDelay)
+		total += n
+	}
+	return total
+}

@@ -570,11 +624,10 @@ type rangeIterator struct {
	s, e time.Time
}

-func (ri *rangeIterator) reset() {
-	ri.iter = 0
-	ri.s, ri.e = time.Time{}, time.Time{}
-}
-
// next iterates with given step between start and end
// by modifying iter, s and e.
// Returns true until it reaches end.
+// next modifies ri and isn't thread-safe.
func (ri *rangeIterator) next() bool {
	ri.s = ri.start.Add(ri.step * time.Duration(ri.iter))
	if !ri.end.After(ri.s) {
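Two details of this refactoring are worth spelling out. First, `rangeIterator` is now passed to `replayRuleRange` by value, so each rule gets its own iteration state; with start 12:00:00, end 12:02:30 and a 1-minute step it yields the ranges 12:00-12:01, 12:01-12:02 and 12:02-12:02:30, which is exactly what the test registry keys above encode. Second, the concurrent branch uses a buffered channel as a counting semaphore, a standard Go pattern. A minimal self-contained sketch of that pattern (the workload is a stand-in for replaying one rule):

package main

import (
	"fmt"
	"sync"
)

func main() {
	work := []int{1, 2, 3, 4, 5}
	concurrency := 2
	sem := make(chan struct{}, concurrency) // at most `concurrency` goroutines run at once
	res := make(chan int, len(work))        // buffered so workers never block on send
	var wg sync.WaitGroup
	for _, w := range work {
		sem <- struct{}{} // acquire a slot before spawning
		wg.Add(1)
		go func(w int) {
			defer wg.Done()
			res <- w * w // stand-in for replaying one rule
			<-sem        // release the slot
		}(w)
	}
	wg.Wait()
	close(res)
	total := 0
	for n := range res {
		total += n
	}
	fmt.Println(total) // 55
}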
@@ -9,6 +9,7 @@ import (
	"strconv"
	"strings"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/rule"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"

@@ -27,6 +28,7 @@ var (
	// such as Grafana, and proxied via vmselect.
	{"api/v1/rules", "list all loaded groups and rules"},
	{"api/v1/alerts", "list all active alerts"},
+	{"api/v1/notifiers", "list all notifiers"},
	{fmt.Sprintf("api/v1/alert?%s=<int>&%s=<int>", paramGroupID, paramAlertID), "get alert status by group and alert ID"},
}
systemLinks = [][2]string{

@@ -42,6 +44,10 @@ var (
	{Name: "Notifiers", URL: "notifiers"},
	{Name: "Docs", URL: "https://docs.victoriametrics.com/victoriametrics/vmalert/"},
}
+ruleTypeMap = map[string]string{
+	"alert":  ruleTypeAlerting,
+	"record": ruleTypeRecording,
+}
)

type requestHandler struct {
@@ -89,10 +95,13 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
		WriteRuleDetails(w, r, rule)
		return true
	case "/vmalert/groups":
-		filter := r.URL.Query().Get("filter")
-		rf := extractRulesFilter(r, filter)
+		rf, err := newRulesFilter(r)
+		if err != nil {
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
		data := rh.groups(rf)
-		WriteListGroups(w, r, data, filter)
+		WriteListGroups(w, r, data, rf.filter)
		return true
	case "/vmalert/notifiers":
		WriteListTargets(w, r, notifier.GetTargets())

@@ -102,23 +111,35 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
	// served without `vmalert` prefix:
	case "/rules":
		// Grafana makes an extra request to `/rules`
-		// handler in addition to `/api/v1/rules` calls in alerts UI,
-		var data []apiGroup
-		filter := r.URL.Query().Get("filter")
-		rf := extractRulesFilter(r, filter)
+		// handler in addition to `/api/v1/rules` calls in alerts UI
+		var data []*apiGroup
+		rf, err := newRulesFilter(r)
+		if err != nil {
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
		data = rh.groups(rf)
-		WriteListGroups(w, r, data, filter)
+		WriteListGroups(w, r, data, rf.filter)
		return true

+	case "/vmalert/api/v1/notifiers", "/api/v1/notifiers":
+		data, err := rh.listNotifiers()
+		if err != nil {
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
+		w.Header().Set("Content-Type", "application/json")
+		w.Write(data)
+		return true
	case "/vmalert/api/v1/rules", "/api/v1/rules":
		// path used by Grafana for ng alerting
		var data []byte
		var err error

-		filter := r.URL.Query().Get("filter")
-		rf := extractRulesFilter(r, filter)
+		rf, err := newRulesFilter(r)
+		if err != nil {
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
		data, err = rh.listGroups(rf)
		if err != nil {
			httpserver.Errorf(w, r, "%s", err)
			return true

@@ -129,7 +150,12 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {

	case "/vmalert/api/v1/alerts", "/api/v1/alerts":
		// path used by Grafana for ng alerting
-		data, err := rh.listAlerts()
+		rf, err := newRulesFilter(r)
+		if err != nil {
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
+		data, err := rh.listAlerts(rf)
		if err != nil {
			httpserver.Errorf(w, r, "%s", err)
			return true

@@ -218,7 +244,7 @@ func (rh *requestHandler) getAlert(r *http.Request) (*apiAlert, error) {
type listGroupsResponse struct {
	Status string `json:"status"`
	Data   struct {
-		Groups []apiGroup `json:"groups"`
+		Groups []*apiGroup `json:"groups"`
	} `json:"data"`
}
@@ -229,82 +255,102 @@ type rulesFilter struct {
	ruleNames     []string
	ruleType      string
	excludeAlerts bool
-	onlyUnhealthy bool
-	onlyNoMatch   bool
+	filter        string
+	dsType        config.Type
}

-func extractRulesFilter(r *http.Request, filter string) rulesFilter {
-	rf := rulesFilter{}
+func newRulesFilter(r *http.Request) (*rulesFilter, error) {
+	rf := &rulesFilter{}
+	query := r.URL.Query()

-	var ruleType string
-	ruleTypeParam := r.URL.Query().Get("type")
-	// for some reason, `type` in filter doesn't match `type` in response,
-	// so we use this matching here
-	if ruleTypeParam == "alert" {
-		ruleType = ruleTypeAlerting
-	} else if ruleTypeParam == "record" {
-		ruleType = ruleTypeRecording
+	ruleTypeParam := query.Get("type")
+	if len(ruleTypeParam) > 0 {
+		if ruleType, ok := ruleTypeMap[ruleTypeParam]; ok {
+			rf.ruleType = ruleType
+		} else {
+			return nil, errResponse(fmt.Errorf(`invalid parameter "type": not supported value %q`, ruleTypeParam), http.StatusBadRequest)
+		}
	}

+	dsType := query.Get("datasource_type")
+	if len(dsType) > 0 {
+		if config.SupportedType(dsType) {
+			rf.dsType = config.NewRawType(dsType)
+		} else {
+			return nil, errResponse(fmt.Errorf(`invalid parameter "datasource_type": not supported value %q`, dsType), http.StatusBadRequest)
+		}
+	}
+
+	filter := strings.ToLower(query.Get("filter"))
+	if len(filter) > 0 {
+		if filter == "nomatch" || filter == "unhealthy" {
+			rf.filter = filter
+		} else {
+			return nil, errResponse(fmt.Errorf(`invalid parameter "filter": not supported value %q`, filter), http.StatusBadRequest)
+		}
+	}
-	rf.ruleType = ruleType

	rf.excludeAlerts = httputil.GetBool(r, "exclude_alerts")
	rf.ruleNames = append([]string{}, r.Form["rule_name[]"]...)
	rf.groupNames = append([]string{}, r.Form["rule_group[]"]...)
	rf.files = append([]string{}, r.Form["file[]"]...)
-	switch filter {
-	case "unhealthy":
-		rf.onlyUnhealthy = true
-	case "noMatch":
-		rf.onlyNoMatch = true
-	}
-	return rf
+	return rf, nil
}
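Based on the parsing code above, the filters compose in a single request, for example `GET /api/v1/rules?type=alert&datasource_type=prometheus&filter=unhealthy&exclude_alerts=1`. Unsupported values for `type`, `datasource_type` or `filter` now produce an HTTP 400 response instead of being silently ignored, which is the behavioral change the switch from `extractRulesFilter` to `newRulesFilter` introduces.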
-func (rh *requestHandler) groups(rf rulesFilter) []apiGroup {
+func (rf *rulesFilter) matchesGroup(group *rule.Group) bool {
+	if len(rf.groupNames) > 0 && !slices.Contains(rf.groupNames, group.Name) {
+		return false
+	}
+	if len(rf.files) > 0 && !slices.Contains(rf.files, group.File) {
+		return false
+	}
+	if len(rf.dsType.Name) > 0 && rf.dsType.String() != group.Type.String() {
+		return false
+	}
+	return true
+}
+
+func (rh *requestHandler) groups(rf *rulesFilter) []*apiGroup {
	rh.m.groupsMu.RLock()
	defer rh.m.groupsMu.RUnlock()

-	groups := make([]apiGroup, 0)
+	groups := make([]*apiGroup, 0)
	for _, group := range rh.m.groups {
-		if len(rf.groupNames) > 0 && !slices.Contains(rf.groupNames, group.Name) {
+		if !rf.matchesGroup(group) {
			continue
		}
-		if len(rf.files) > 0 && !slices.Contains(rf.files, group.File) {
-			continue
-		}

		g := groupToAPI(group)
		// the returned list should always be non-nil
		// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4221
		filteredRules := make([]apiRule, 0)
-		for _, r := range g.Rules {
-			if rf.ruleType != "" && rf.ruleType != r.Type {
+		for _, rule := range g.Rules {
+			if rf.ruleType != "" && rf.ruleType != rule.Type {
				continue
			}
-			if len(rf.ruleNames) > 0 && !slices.Contains(rf.ruleNames, r.Name) {
+			if len(rf.ruleNames) > 0 && !slices.Contains(rf.ruleNames, rule.Name) {
				continue
			}
+			if (rule.LastError == "" && rf.filter == "unhealthy") || (!isNoMatch(rule) && rf.filter == "nomatch") {
+				continue
+			}
			if rf.excludeAlerts {
-				r.Alerts = nil
+				rule.Alerts = nil
			}
-			if (r.LastError == "" && rf.onlyUnhealthy) || (!isNoMatch(r) && rf.onlyNoMatch) {
-				continue
-			}
-			if r.LastError != "" {
+			if rule.LastError != "" {
				g.Unhealthy++
			} else {
				g.Healthy++
			}
-			if isNoMatch(r) {
+			if isNoMatch(rule) {
				g.NoMatch++
			}
-			filteredRules = append(filteredRules, r)
+			filteredRules = append(filteredRules, rule)
		}
		g.Rules = filteredRules
		groups = append(groups, g)
	}
	// sort list of groups for deterministic output
-	slices.SortFunc(groups, func(a, b apiGroup) int {
+	slices.SortFunc(groups, func(a, b *apiGroup) int {
		if a.Name != b.Name {
			return strings.Compare(a.Name, b.Name)
		}

@@ -313,7 +359,7 @@ func (rh *requestHandler) groups(rf rulesFilter) []apiGroup {
	return groups
}

-func (rh *requestHandler) listGroups(rf rulesFilter) ([]byte, error) {
+func (rh *requestHandler) listGroups(rf *rulesFilter) ([]byte, error) {
	lr := listGroupsResponse{Status: "success"}
	lr.Data.Groups = rh.groups(rf)
	b, err := json.Marshal(lr)

@@ -360,14 +406,17 @@ func (rh *requestHandler) groupAlerts() []groupAlerts {
	return gAlerts
}

-func (rh *requestHandler) listAlerts() ([]byte, error) {
+func (rh *requestHandler) listAlerts(rf *rulesFilter) ([]byte, error) {
	rh.m.groupsMu.RLock()
	defer rh.m.groupsMu.RUnlock()

	lr := listAlertsResponse{Status: "success"}
	lr.Data.Alerts = make([]*apiAlert, 0)
-	for _, g := range rh.m.groups {
-		for _, r := range g.Rules {
+	for _, group := range rh.m.groups {
+		if !rf.matchesGroup(group) {
+			continue
+		}
+		for _, r := range group.Rules {
			a, ok := r.(*rule.AlertingRule)
			if !ok {
				continue

@@ -391,6 +440,42 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
	return b, nil
}

+type listNotifiersResponse struct {
+	Status string `json:"status"`
+	Data   struct {
+		Notifiers []*apiNotifier `json:"notifiers"`
+	} `json:"data"`
+}
+
+func (rh *requestHandler) listNotifiers() ([]byte, error) {
+	targets := notifier.GetTargets()
+
+	lr := listNotifiersResponse{Status: "success"}
+	lr.Data.Notifiers = make([]*apiNotifier, 0)
+	for protoName, protoTargets := range targets {
+		notifier := &apiNotifier{
+			Kind:    string(protoName),
+			Targets: make([]*apiTarget, 0, len(protoTargets)),
|
||||
}
|
||||
for _, target := range protoTargets {
|
||||
notifier.Targets = append(notifier.Targets, &apiTarget{
|
||||
Address: target.Notifier.Addr(),
|
||||
Labels: target.Labels.ToMap(),
|
||||
})
|
||||
}
|
||||
lr.Data.Notifiers = append(lr.Data.Notifiers, notifier)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(lr)
|
||||
if err != nil {
|
||||
return nil, &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf(`error encoding list of notifiers: %w`, err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
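
For readers tracking the new /api/v1/notifiers endpoint, here is a self-contained sketch of the JSON shape implied by listNotifiersResponse, apiNotifier and apiTarget above; the struct definitions are mirrored locally and every field value is invented for illustration:

// Hedged sketch: mirror types and made-up values, only to visualize the
// response shape produced by listNotifiers above.
package main

import (
	"encoding/json"
	"fmt"
)

type apiTarget struct {
	Address string            `json:"address"`
	Labels  map[string]string `json:"labels"`
}

type apiNotifier struct {
	Kind    string       `json:"kind"`
	Targets []*apiTarget `json:"targets"`
}

func main() {
	var resp struct {
		Status string `json:"status"`
		Data   struct {
			Notifiers []*apiNotifier `json:"notifiers"`
		} `json:"data"`
	}
	resp.Status = "success"
	resp.Data.Notifiers = []*apiNotifier{{
		Kind: "alertmanager", // hypothetical notifier kind
		Targets: []*apiTarget{{
			Address: "http://127.0.0.1:9093/api/v2/alerts", // invented address
			Labels:  map[string]string{"env": "test"},
		}},
	}}
	b, _ := json.MarshalIndent(&resp, "", "  ")
	fmt.Println(string(b))
}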

func errResponse(err error, sc int) *httpserver.ErrorWithStatusCode {
return &httpserver.ErrorWithStatusCode{
Err: err,

@@ -93,18 +93,18 @@
{%= tpl.Footer(r) %}
{% endfunc %}

{% func ListGroups(r *http.Request, groups []apiGroup, filter string) %}
{% func ListGroups(r *http.Request, groups []*apiGroup, filter string) %}
{%code
prefix := vmalertutil.Prefix(r.URL.Path)
filters := map[string]string{
"": "All",
"unhealthy": "Unhealthy",
"noMatch": "No Match",
"nomatch": "No Match",
}
icons := map[string]string{
"": "all",
"unhealthy": "unhealthy",
"noMatch": "nomatch",
"nomatch": "nomatch",
}
currentText := filters[filter]
currentIcon := icons[filter]
@@ -162,7 +162,7 @@
<thead>
<tr>
<th scope="col" style="width: 60%">Rule</th>
<th scope="col" style="width: 20%" class="text-center" title="How many samples were produced by the rule">Samples</th>
<th scope="col" style="width: 20%" class="text-center" title="How many series were produced by the rule">Series</th>
<th scope="col" style="width: 20%" class="text-center" title="How many seconds ago rule was executed">Updated</th>
</tr>
</thead>
@@ -594,7 +594,7 @@
<thead>
<tr>
<th scope="col" title="The time when event was created">Updated at</th>
<th scope="col" style="width: 10%" class="text-center" title="How many samples were returned">Samples</th>
<th scope="col" style="width: 10%" class="text-center" title="How many series expression returns. Each series will represent an alert.">Series returned</th>
{% if seriesFetchedEnabled %}<th scope="col" style="width: 10%" class="text-center" title="How many series were scanned by datasource during the evaluation">Series fetched</th>{% endif %}
<th scope="col" style="width: 10%" class="text-center" title="How many seconds request took">Duration</th>
<th scope="col" class="text-center" title="Time used for rule execution">Executed at</th>

@@ -316,7 +316,7 @@ func Welcome(r *http.Request) string {
}

//line app/vmalert/web.qtpl:96
func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []apiGroup, filter string) {
func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []*apiGroup, filter string) {
//line app/vmalert/web.qtpl:96
qw422016.N().S(`
`)
@@ -325,12 +325,12 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []apiGr
filters := map[string]string{
"": "All",
"unhealthy": "Unhealthy",
"noMatch": "No Match",
"nomatch": "No Match",
}
icons := map[string]string{
"": "all",
"unhealthy": "unhealthy",
"noMatch": "nomatch",
"nomatch": "nomatch",
}
currentText := filters[filter]
currentIcon := icons[filter]
@@ -524,7 +524,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []apiGr
<thead>
<tr>
<th scope="col" style="width: 60%">Rule</th>
<th scope="col" style="width: 20%" class="text-center" title="How many samples were produced by the rule">Samples</th>
<th scope="col" style="width: 20%" class="text-center" title="How many series were produced by the rule">Series</th>
<th scope="col" style="width: 20%" class="text-center" title="How many seconds ago rule was executed">Updated</th>
</tr>
</thead>
@@ -722,7 +722,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, groups []apiGr
}

//line app/vmalert/web.qtpl:222
func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []apiGroup, filter string) {
func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []*apiGroup, filter string) {
//line app/vmalert/web.qtpl:222
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:222
@@ -733,7 +733,7 @@ func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, groups []apiGr
}

//line app/vmalert/web.qtpl:222
func ListGroups(r *http.Request, groups []apiGroup, filter string) string {
func ListGroups(r *http.Request, groups []*apiGroup, filter string) string {
//line app/vmalert/web.qtpl:222
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:222
@@ -1697,7 +1697,7 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule apiRule)
<thead>
<tr>
<th scope="col" title="The time when event was created">Updated at</th>
<th scope="col" style="width: 10%" class="text-center" title="How many samples were returned">Samples</th>
<th scope="col" style="width: 10%" class="text-center" title="How many series expression returns. Each series will represent an alert.">Series returned</th>
`)
//line app/vmalert/web.qtpl:598
if seriesFetchedEnabled {

@@ -19,25 +19,34 @@ import (
func TestHandler(t *testing.T) {
fq := &datasource.FakeQuerier{}
fq.Add(datasource.Metric{
Values: []float64{1}, Timestamps: []int64{0},
Values: []float64{1},
Timestamps: []int64{0},
})
g := rule.NewGroup(config.Group{
Name: "group",
File: "rules.yaml",
Concurrency: 1,
Rules: []config.Rule{
{ID: 0, Alert: "alert"},
{ID: 1, Record: "record"},
},
}, fq, 1*time.Minute, nil)
ar := g.Rules[0].(*rule.AlertingRule)
rr := g.Rules[1].(*rule.RecordingRule)

g.ExecOnce(context.Background(), func() []notifier.Notifier { return nil }, nil, time.Time{})

m := &manager{groups: map[uint64]*rule.Group{
g.CreateID(): g,
}}
m := &manager{groups: map[uint64]*rule.Group{}}
var ar *rule.AlertingRule
var rr *rule.RecordingRule
for _, dsType := range []string{"prometheus", "", "graphite"} {
g := rule.NewGroup(config.Group{
Name: "group",
File: "rules.yaml",
Type: config.NewRawType(dsType),
Concurrency: 1,
Rules: []config.Rule{
{
ID: 0,
Alert: "alert",
},
{
ID: 1,
Record: "record",
},
},
}, fq, 1*time.Minute, nil)
ar = g.Rules[0].(*rule.AlertingRule)
rr = g.Rules[1].(*rule.RecordingRule)
g.ExecOnce(context.Background(), func() []notifier.Notifier { return nil }, nil, time.Time{})
m.groups[g.CreateID()] = g
}
rh := &requestHandler{m: m}

getResp := func(t *testing.T, url string, to any, code int) {
@@ -54,7 +63,7 @@ func TestHandler(t *testing.T) {
t.Fatalf("err closing body %s", err)
}
}()
if to != nil {
if to != nil && code < 300 {
if err = json.NewDecoder(resp.Body).Decode(to); err != nil {
t.Fatalf("unexpected err %s", err)
}
@@ -95,14 +104,23 @@ func TestHandler(t *testing.T) {
t.Run("/api/v1/alerts", func(t *testing.T) {
lr := listAlertsResponse{}
getResp(t, ts.URL+"/api/v1/alerts", &lr, 200)
if length := len(lr.Data.Alerts); length != 1 {
t.Fatalf("expected 1 alert got %d", length)
if length := len(lr.Data.Alerts); length != 3 {
t.Fatalf("expected 3 alert got %d", length)
}

lr = listAlertsResponse{}
getResp(t, ts.URL+"/vmalert/api/v1/alerts", &lr, 200)
if length := len(lr.Data.Alerts); length != 1 {
t.Fatalf("expected 1 alert got %d", length)
if length := len(lr.Data.Alerts); length != 3 {
t.Fatalf("expected 3 alert got %d", length)
}

lr = listAlertsResponse{}
getResp(t, ts.URL+"/api/v1/alerts?datasource_type=test", &lr, 400)

lr = listAlertsResponse{}
getResp(t, ts.URL+"/api/v1/alerts?datasource_type=prometheus", &lr, 200)
if length := len(lr.Data.Alerts); length != 2 {
t.Fatalf("expected 2 alert got %d", length)
}
})
t.Run("/api/v1/alert?alertID&groupID", func(t *testing.T) {
@@ -138,14 +156,14 @@ func TestHandler(t *testing.T) {
t.Run("/api/v1/rules", func(t *testing.T) {
lr := listGroupsResponse{}
getResp(t, ts.URL+"/api/v1/rules", &lr, 200)
if length := len(lr.Data.Groups); length != 1 {
t.Fatalf("expected 1 group got %d", length)
if length := len(lr.Data.Groups); length != 3 {
t.Fatalf("expected 3 group got %d", length)
}

lr = listGroupsResponse{}
getResp(t, ts.URL+"/vmalert/api/v1/rules", &lr, 200)
if length := len(lr.Data.Groups); length != 1 {
t.Fatalf("expected 1 group got %d", length)
if length := len(lr.Data.Groups); length != 3 {
t.Fatalf("expected 3 group got %d", length)
}
})
t.Run("/api/v1/rule?ruleID&groupID", func(t *testing.T) {
@@ -172,10 +190,10 @@ func TestHandler(t *testing.T) {
})

t.Run("/api/v1/rules&filters", func(t *testing.T) {
check := func(url string, expGroups, expRules int) {
check := func(url string, statusCode, expGroups, expRules int) {
t.Helper()
lr := listGroupsResponse{}
getResp(t, ts.URL+url, &lr, 200)
getResp(t, ts.URL+url, &lr, statusCode)
if length := len(lr.Data.Groups); length != expGroups {
t.Fatalf("expected %d groups got %d", expGroups, length)
}
@@ -191,25 +209,31 @@ func TestHandler(t *testing.T) {
}
}

check("/api/v1/rules?type=alert", 1, 1)
check("/api/v1/rules?type=record", 1, 1)
check("/api/v1/rules?type=alert", 200, 3, 3)
check("/api/v1/rules?type=record", 200, 3, 3)
check("/api/v1/rules?type=records", 400, 0, 0)

check("/vmalert/api/v1/rules?type=alert", 1, 1)
check("/vmalert/api/v1/rules?type=record", 1, 1)
check("/vmalert/api/v1/rules?type=alert", 200, 3, 3)
check("/vmalert/api/v1/rules?type=record", 200, 3, 3)
check("/vmalert/api/v1/rules?type=recording", 400, 0, 0)

check("/vmalert/api/v1/rules?datasource_type=prometheus", 200, 2, 4)
check("/vmalert/api/v1/rules?datasource_type=graphite", 200, 1, 2)
check("/vmalert/api/v1/rules?datasource_type=graphiti", 400, 0, 0)

// no filtering expected due to bad params
check("/api/v1/rules?type=badParam", 1, 2)
check("/api/v1/rules?foo=bar", 1, 2)
check("/api/v1/rules?type=badParam", 400, 0, 0)
check("/api/v1/rules?foo=bar", 200, 3, 6)

check("/api/v1/rules?rule_group[]=foo&rule_group[]=bar", 0, 0)
check("/api/v1/rules?rule_group[]=foo&rule_group[]=group&rule_group[]=bar", 1, 2)
check("/api/v1/rules?rule_group[]=foo&rule_group[]=bar", 200, 0, 0)
check("/api/v1/rules?rule_group[]=foo&rule_group[]=group&rule_group[]=bar", 200, 3, 6)

check("/api/v1/rules?rule_group[]=group&file[]=foo", 0, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml", 1, 2)
check("/api/v1/rules?rule_group[]=group&file[]=foo", 200, 0, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml", 200, 3, 6)

check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 1, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert", 1, 1)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert&rule_name[]=record", 1, 2)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=foo", 200, 3, 0)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert", 200, 3, 3)
check("/api/v1/rules?rule_group[]=group&file[]=rules.yaml&rule_name[]=alert&rule_name[]=record", 200, 3, 6)
})
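
For orientation: the reworked TestHandler above registers three groups, one per datasource type ("prometheus", "", "graphite"), each with one alerting and one recording rule. That is why an unfiltered listing now yields 3 groups and 6 rules, a valid type filter keeps all 3 groups with the 3 matching rules, and datasource_type=prometheus matches 2 groups with 4 rules (judging by the expected counts, a group with an empty type is treated as prometheus).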
t.Run("/api/v1/rules&exclude_alerts=true", func(t *testing.T) {
// check if response returns active alerts by default
@@ -259,7 +283,7 @@ func TestEmptyResponse(t *testing.T) {
t.Fatalf("err closing body %s", err)
}
}()
if to != nil {
if to != nil && code < 300 {
if err = json.NewDecoder(resp.Body).Decode(to); err != nil {
t.Fatalf("unexpected err %s", err)
}

@@ -20,6 +20,16 @@ const (
paramRuleID = "rule_id"
)

type apiNotifier struct {
Kind string `json:"kind"`
Targets []*apiTarget `json:"targets"`
}

type apiTarget struct {
Address string `json:"address"`
Labels map[string]string `json:"labels"`
}

// apiAlert represents a notifier.AlertingRule state
// for WEB view
// https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md#get-apiv1rules
@@ -108,7 +118,7 @@ type apiGroup struct {

// groupAlerts represents a group of alerts for WEB view
type groupAlerts struct {
Group apiGroup
Group *apiGroup
Alerts []*apiAlert
}

@@ -327,7 +337,7 @@ func newAlertAPI(ar *rule.AlertingRule, a *notifier.Alert) *apiAlert {
return aa
}

func groupToAPI(g *rule.Group) apiGroup {
func groupToAPI(g *rule.Group) *apiGroup {
g = g.DeepCopy()
ag := apiGroup{
// encode as string to avoid rounding
@@ -353,7 +363,7 @@ func groupToAPI(g *rule.Group) apiGroup {
for _, r := range g.Rules {
ag.Rules = append(ag.Rules, ruleToAPI(r))
}
return ag
return &ag
}

func urlValuesToStrings(values url.Values) []string {

@@ -120,6 +120,9 @@ func normalizeURL(uOrig *url.URL) *url.URL {
u := *uOrig
// Prevent from attacks with using `..` in r.URL.Path
u.Path = path.Clean(u.Path)
if u.Path == "." {
u.Path = "/"
}
if !strings.HasSuffix(u.Path, "/") && strings.HasSuffix(uOrig.Path, "/") {
// The path.Clean() removes trailing slash.
// Return it back if needed.
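
The vmauth test changes below were updated to match this normalization. Here is a standalone sketch of the same path-cleaning logic using only the standard library; normalizeURL itself is internal to vmauth, so the helper below is an illustrative stand-in:

// Hedged sketch of the `..`-sanitizing behavior described above.
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func normalize(raw string) string {
	u, _ := url.Parse(raw)
	p := path.Clean(u.Path) // collapses "..", "//" and "/." segments
	if p == "." {
		p = "/" // path.Clean("") yields "."; map it back to the root
	}
	// path.Clean drops a trailing slash; restore it when the input had one.
	if !strings.HasSuffix(p, "/") && strings.HasSuffix(u.Path, "/") {
		p += "/"
	}
	return p
}

func main() {
	fmt.Println(normalize("http://foo.bar/a/../../etc/passwd")) // "/etc/passwd": ".." cannot escape the root
	fmt.Println(normalize("http://foo.bar/x//y/"))              // "/x/y/": duplicate slashes collapsed, trailing slash kept
	fmt.Println(normalize("http://foo.bar"))                    // "/": empty path normalized to the root
}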

@@ -128,7 +128,40 @@ func TestCreateTargetURLSuccess(t *testing.T) {
// Simple routing with `url_prefix`
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "", "http://foo.bar/.", "", "", nil, "least_loaded", 0)
}, "", "http://foo.bar", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "/", "http://foo.bar", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "http://aaa///", "http://foo.bar", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/"),
}, "/", "http://foo.bar/", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/"),
}, "/x", "http://foo.bar/x", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/"),
}, "/x/", "http://foo.bar/x/", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/"),
}, "http://abc///x/", "http://foo.bar/x/", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/"),
}, "http://foo//x", "http://foo.bar/x", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/baz"),
}, "", "http://foo.bar/baz", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/baz"),
}, "/", "http://foo.bar/baz", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/x/"),
}, "/abc", "http://foo.bar/x/abc", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/x/"),
}, "/abc/", "http://foo.bar/x/abc/", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
HeadersConf: HeadersConf{
@@ -149,6 +182,12 @@
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "a/b?c=d", "http://foo.bar/a/b?c=d", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "/a/b?c=d", "http://foo.bar/a/b?c=d", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/"),
}, "/a/b?c=d", "http://foo.bar/a/b?c=d", "", "", nil, "least_loaded", 0)
f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/z", "https://sss:3894/x/y/z", "", "", nil, "least_loaded", 0)

@@ -70,7 +70,7 @@ var (
Usage: "VictoriaMetrics address to perform import requests. \n" +
"Should be the same as --httpListenAddr value for single-node version or vminsert component. \n" +
"When importing into the clustered version do not forget to set additionally --vm-account-id flag. \n" +
"Please note, that `vmctl` performs initial readiness check for the given address by checking `/health` endpoint.",
"Please note, that vmctl performs initial readiness check for the given address by checking /health endpoint.",
},
&cli.StringFlag{
Name: vmUser,
@@ -514,27 +514,27 @@ var (
},
&cli.StringFlag{
Name: vmNativeSrcBearerToken,
Usage: "Optional bearer auth token to use for the corresponding `--vm-native-src-addr`",
Usage: "Optional bearer auth token to use for the corresponding --vm-native-src-addr",
},
&cli.StringFlag{
Name: vmNativeSrcCertFile,
Usage: "Optional path to client-side TLS certificate file to use when connecting to `--vm-native-src-addr`",
Usage: "Optional path to client-side TLS certificate file to use when connecting to --vm-native-src-addr",
},
&cli.StringFlag{
Name: vmNativeSrcKeyFile,
Usage: "Optional path to client-side TLS key to use when connecting to `--vm-native-src-addr`",
Usage: "Optional path to client-side TLS key to use when connecting to --vm-native-src-addr",
},
&cli.StringFlag{
Name: vmNativeSrcCAFile,
Usage: "Optional path to TLS CA file to use for verifying connections to `--vm-native-src-addr`. By default, system CA is used",
Usage: "Optional path to TLS CA file to use for verifying connections to --vm-native-src-addr. By default, system CA is used",
},
&cli.StringFlag{
Name: vmNativeSrcServerName,
Usage: "Optional TLS server name to use for connections to `--vm-native-src-addr`. By default, the server name from `--vm-native-src-addr` is used",
Usage: "Optional TLS server name to use for connections to --vm-native-src-addr. By default, the server name from --vm-native-src-addr is used",
},
&cli.BoolFlag{
Name: vmNativeSrcInsecureSkipVerify,
Usage: "Whether to skip TLS certificate verification when connecting to `--vm-native-src-addr`",
Usage: "Whether to skip TLS certificate verification when connecting to --vm-native-src-addr",
Value: false,
},

@@ -563,27 +563,27 @@ var (
},
&cli.StringFlag{
Name: vmNativeDstBearerToken,
Usage: "Optional bearer auth token to use for the corresponding `--vm-native-dst-addr`",
Usage: "Optional bearer auth token to use for the corresponding --vm-native-dst-addr",
},
&cli.StringFlag{
Name: vmNativeDstCertFile,
Usage: "Optional path to client-side TLS certificate file to use when connecting to `--vm-native-dst-addr`",
Usage: "Optional path to client-side TLS certificate file to use when connecting to --vm-native-dst-addr",
},
&cli.StringFlag{
Name: vmNativeDstKeyFile,
Usage: "Optional path to client-side TLS key to use when connecting to `--vm-native-dst-addr`",
Usage: "Optional path to client-side TLS key to use when connecting to --vm-native-dst-addr",
},
&cli.StringFlag{
Name: vmNativeDstCAFile,
Usage: "Optional path to TLS CA file to use for verifying connections to `--vm-native-dst-addr`. By default, system CA is used",
Usage: "Optional path to TLS CA file to use for verifying connections to --vm-native-dst-addr. By default, system CA is used",
},
&cli.StringFlag{
Name: vmNativeDstServerName,
Usage: "Optional TLS server name to use for connections to `--vm-native-dst-addr`. By default, the server name from `--vm-native-dst-addr` is used",
Usage: "Optional TLS server name to use for connections to --vm-native-dst-addr. By default, the server name from --vm-native-dst-addr is used",
},
&cli.BoolFlag{
Name: vmNativeDstInsecureSkipVerify,
Usage: "Whether to skip TLS certificate verification when connecting to `--vm-native-dst-addr`",
Usage: "Whether to skip TLS certificate verification when connecting to --vm-native-dst-addr",
Value: false,
},

@@ -597,7 +597,7 @@ var (
Name: vmRateLimit,
Usage: "Optional data transfer rate limit in bytes per second.\n" +
"By default, the rate limit is disabled. It can be useful for limiting load on source or destination databases. \n" +
"Rate limit is applied per worker, see `--vm-concurrency`.",
"Rate limit is applied per worker, see --vm-concurrency.",
},
&cli.BoolFlag{
Name: vmInterCluster,

@@ -19,6 +19,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
@@ -44,6 +45,7 @@ func main() {
if c.Bool(globalDisableProgressBar) {
barpool.Disable(true)
}
netutil.EnableIPv6()
return nil
}
app := &cli.App{

@@ -1,215 +0,0 @@
package main

import (
"context"
"fmt"
"log"
"os"
"testing"
"time"

"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
remote_read_integration "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
)

const (
testSnapshot = "./testdata/snapshots/20250118T124506Z-59d1b952d7eaf547"
blockData = "./testdata/snapshots/20250118T124506Z-59d1b952d7eaf547/01JHWQ445Y2P1TDYB05AEKD6MC"
)

// This test simulates close process if user abort it
func TestPrometheusProcessorRun(t *testing.T) {

f := func(startStr, endStr string, numOfSeries int, resultExpected []vm.TimeSeries) {
t.Helper()

dst := remote_read_integration.NewRemoteWriteServer(t)

defer func() {
dst.Close()
}()

dst.Series(resultExpected)
dst.ExpectedSeries(resultExpected)

if err := fillStorage(resultExpected); err != nil {
t.Fatalf("cannot fill storage: %s", err)
}

isSilent = true
defer func() { isSilent = false }()

bf, err := backoff.New(1, 1.8, time.Second*2)
if err != nil {
t.Fatalf("cannot create backoff: %s", err)
}

importerCfg := vm.Config{
Addr: dst.URL(),
Transport: nil,
Concurrency: 1,
Backoff: bf,
}

ctx := context.Background()
importer, err := vm.NewImporter(ctx, importerCfg)
if err != nil {
t.Fatalf("cannot create importer: %s", err)
}
defer importer.Close()

matchName := "__name__"
matchValue := ".*"
filter := prometheus.Filter{
TimeMin: startStr,
TimeMax: endStr,
Label: matchName,
LabelValue: matchValue,
}

runner, err := prometheus.NewClient(prometheus.Config{
Snapshot: testSnapshot,
Filter: filter,
})
if err != nil {
t.Fatalf("cannot create prometheus client: %s", err)
}
p := &prometheusProcessor{
cl: runner,
im: importer,
cc: 1,
}

if err := p.run(); err != nil {
t.Fatalf("run() error: %s", err)
}

collectedTs := dst.GetCollectedTimeSeries()
t.Logf("collected timeseries: %d; expected timeseries: %d", len(collectedTs), len(resultExpected))
if len(collectedTs) != len(resultExpected) {
t.Fatalf("unexpected number of collected time series; got %d; want %d", len(collectedTs), numOfSeries)
}

deleted, err := deleteSeries(matchName, matchValue)
if err != nil {
t.Fatalf("cannot delete series: %s", err)
}
if deleted != numOfSeries {
t.Fatalf("unexpected number of deleted series; got %d; want %d", deleted, numOfSeries)
}
}

processFlags()
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
defer func() {
vmstorage.Stop()
if err := os.RemoveAll(storagePath); err != nil {
log.Fatalf("cannot remove %q: %s", storagePath, err)
}
}()

barpool.Disable(true)
defer func() {
barpool.Disable(false)
}()

b, err := tsdb.OpenBlock(nil, blockData, nil, nil)
if err != nil {
t.Fatalf("cannot open block: %s", err)
}
// timestamp is equal to minTime and maxTime from meta.json
ss, err := readBlock(b, 1737204082361, 1737204302539)
if err != nil {
t.Fatalf("cannot read block: %s", err)
}

resultExpected, err := prepareExpectedData(ss)
if err != nil {
t.Fatalf("cannot prepare expected data: %s", err)
}

f("2025-01-18T12:40:00Z", "2025-01-18T12:46:00Z", 2792, resultExpected)
}

func readBlock(b tsdb.BlockReader, timeMin int64, timeMax int64) (storage.SeriesSet, error) {
minTime, maxTime := b.Meta().MinTime, b.Meta().MaxTime

if timeMin != 0 {
minTime = timeMin
}
if timeMax != 0 {
maxTime = timeMax
}

q, err := tsdb.NewBlockQuerier(b, minTime, maxTime)
if err != nil {
return nil, err
}
matchName := "__name__"
matchValue := ".*"
ctx := context.Background()
ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, matchName, matchValue))
return ss, nil
}

func prepareExpectedData(ss storage.SeriesSet) ([]vm.TimeSeries, error) {
var expectedSeriesSet []vm.TimeSeries
var it chunkenc.Iterator
for ss.Next() {
var name string
var labelPairs []vm.LabelPair
series := ss.At()

for _, label := range series.Labels() {
if label.Name == "__name__" {
name = label.Value
continue
}
labelPairs = append(labelPairs, vm.LabelPair{
Name: label.Name,
Value: label.Value,
})
}
if name == "" {
return nil, fmt.Errorf("failed to find `__name__` label in labelset for block")
}

var timestamps []int64
var values []float64
it = series.Iterator(it)
for {
typ := it.Next()
if typ == chunkenc.ValNone {
break
}
if typ != chunkenc.ValFloat {
// Skip unsupported values
continue
}
t, v := it.At()
timestamps = append(timestamps, t)
values = append(values, v)
}
if err := it.Err(); err != nil {
return nil, err
}
ts := vm.TimeSeries{
Name: name,
LabelPairs: labelPairs,
Timestamps: timestamps,
Values: values,
}
expectedSeriesSet = append(expectedSeriesSet, ts)
}
return expectedSeriesSet, nil
}
@@ -1,351 +0,0 @@
package main

import (
"context"
"testing"
"time"

"github.com/prometheus/prometheus/prompb"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
)

func TestRemoteRead(t *testing.T) {
barpool.Disable(true)
defer func() {
barpool.Disable(false)
}()
defer func() { isSilent = false }()

var testCases = []struct {
name string
remoteReadConfig remoteread.Config
vmCfg vm.Config
start string
end string
numOfSamples int64
numOfSeries int64
rrp remoteReadProcessor
chunk string
remoteReadSeries func(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries
expectedSeries []vm.TimeSeries
}{
{
name: "step minute on minute time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
vmCfg: vm.Config{Addr: "", Concurrency: 1},
start: "2022-11-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMinute,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{0, 0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{100, 100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{200, 200},
},
},
},
{
name: "step month on month time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
vmCfg: vm.Config{
Addr: "",
Concurrency: 1,
Transport: httputil.NewTransport(false, "vmctl_test_read"),
},
start: "2022-09-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMonth,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1664184185000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1664184185000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1664184185000},
Values: []float64{200},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1666819415000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1666819415000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1666819415000},
Values: []float64{200}},
},
},
}

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
remoteReadServer := remote_read_integration.NewRemoteReadServer(t)
defer remoteReadServer.Close()
remoteWriteServer := remote_read_integration.NewRemoteWriteServer(t)
defer remoteWriteServer.Close()

tt.remoteReadConfig.Addr = remoteReadServer.URL()

rr, err := remoteread.NewClient(tt.remoteReadConfig)
if err != nil {
t.Fatalf("error create remote read client: %s", err)
}

start, err := time.Parse(time.RFC3339, tt.start)
if err != nil {
t.Fatalf("Error parse start time: %s", err)
}

end, err := time.Parse(time.RFC3339, tt.end)
if err != nil {
t.Fatalf("Error parse end time: %s", err)
}

rrs := tt.remoteReadSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)

remoteReadServer.SetRemoteReadSeries(rrs)
remoteWriteServer.ExpectedSeries(tt.expectedSeries)

tt.vmCfg.Addr = remoteWriteServer.URL()

b, err := backoff.New(10, 1.8, time.Second*2)
if err != nil {
t.Fatalf("failed to create backoff: %s", err)
}
tt.vmCfg.Backoff = b

importer, err := vm.NewImporter(ctx, tt.vmCfg)
if err != nil {
t.Fatalf("failed to create VM importer: %s", err)
}
defer importer.Close()

rmp := remoteReadProcessor{
src: rr,
dst: importer,
filter: remoteReadFilter{
timeStart: &start,
timeEnd: &end,
chunk: tt.chunk,
},
cc: 1,
isVerbose: false,
}

err = rmp.run(ctx)
if err != nil {
t.Fatalf("failed to run remote read processor: %s", err)
}
})
}
}

func TestSteamRemoteRead(t *testing.T) {
barpool.Disable(true)
defer func() {
barpool.Disable(false)
}()
defer func() { isSilent = false }()

var testCases = []struct {
name string
remoteReadConfig remoteread.Config
vmCfg vm.Config
start string
end string
numOfSamples int64
numOfSeries int64
rrp remoteReadProcessor
chunk string
remoteReadSeries func(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries
expectedSeries []vm.TimeSeries
}{
{
name: "step minute on minute time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*", UseStream: true},
vmCfg: vm.Config{Addr: "", Concurrency: 1},
start: "2022-11-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMinute,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{0, 0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{100, 100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{200, 200},
},
},
},
{
name: "step month on month time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*", UseStream: true},
vmCfg: vm.Config{Addr: "", Concurrency: 1},
start: "2022-09-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMonth,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1664184185000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1664184185000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1664184185000},
Values: []float64{200},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1666819415000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1666819415000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1666819415000},
Values: []float64{200}},
},
},
}

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
remoteReadServer := remote_read_integration.NewRemoteReadStreamServer(t)
defer remoteReadServer.Close()
remoteWriteServer := remote_read_integration.NewRemoteWriteServer(t)
defer remoteWriteServer.Close()

tt.remoteReadConfig.Addr = remoteReadServer.URL()

rr, err := remoteread.NewClient(tt.remoteReadConfig)
if err != nil {
t.Fatalf("error create remote read client: %s", err)
}

start, err := time.Parse(time.RFC3339, tt.start)
if err != nil {
t.Fatalf("Error parse start time: %s", err)
}

end, err := time.Parse(time.RFC3339, tt.end)
if err != nil {
t.Fatalf("Error parse end time: %s", err)
}

rrs := tt.remoteReadSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)

remoteReadServer.InitMockStorage(rrs)
remoteWriteServer.ExpectedSeries(tt.expectedSeries)

tt.vmCfg.Addr = remoteWriteServer.URL()

b, err := backoff.New(10, 1.8, time.Second*2)
if err != nil {
t.Fatalf("failed to create backoff: %s", err)
}

tt.vmCfg.Backoff = b
importer, err := vm.NewImporter(ctx, tt.vmCfg)
if err != nil {
t.Fatalf("failed to create VM importer: %s", err)
}
defer importer.Close()

rmp := remoteReadProcessor{
src: rr,
dst: importer,
filter: remoteReadFilter{
timeStart: &start,
timeEnd: &end,
chunk: tt.chunk,
},
cc: 1,
isVerbose: false,
}

err = rmp.run(ctx)
if err != nil {
t.Fatalf("failed to run remote read processor: %s", err)
}
})
}
}
@@ -1,306 +0,0 @@
package remote_read_integration

import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"net/http/httptest"
"reflect"
"sort"
"strconv"
"sync"
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
)

// LabelValues represents series from api/v1/series response
type LabelValues map[string]string

// Response represents response from api/v1/series
type Response struct {
Status string `json:"status"`
Series []LabelValues `json:"data"`
}

type MetricNamesResponse struct {
Status string `json:"status"`
Data []string `json:"data"`
}

// RemoteWriteServer represents fake remote write server with database
type RemoteWriteServer struct {
server *httptest.Server
series []vm.TimeSeries
expectedSeries []vm.TimeSeries
tss []vm.TimeSeries
}

// NewRemoteWriteServer prepares test remote write server
func NewRemoteWriteServer(t *testing.T) *RemoteWriteServer {
rws := &RemoteWriteServer{series: make([]vm.TimeSeries, 0)}
mux := http.NewServeMux()

mux.Handle("/api/v1/import", rws.getWriteHandler(t))
mux.Handle("/health", rws.handlePing())
mux.Handle("/api/v1/series", rws.seriesHandler())
mux.Handle("/api/v1/label/__name__/values", rws.valuesHandler())
mux.Handle("/api/v1/export/native", rws.exportNativeHandler())
mux.Handle("/api/v1/import/native", rws.importNativeHandler(t))
rws.server = httptest.NewServer(mux)
return rws
}

// Close closes the server
func (rws *RemoteWriteServer) Close() {
rws.server.Close()
}

// Series saves generated series for fake database
func (rws *RemoteWriteServer) Series(series []vm.TimeSeries) {
rws.series = append(rws.series, series...)
}

// ExpectedSeries saves expected results to check in the handler
func (rws *RemoteWriteServer) ExpectedSeries(series []vm.TimeSeries) {
rws.expectedSeries = append(rws.expectedSeries, series...)
}

func (rws *RemoteWriteServer) GetCollectedTimeSeries() []vm.TimeSeries {
return rws.tss
}

// URL returns server url
func (rws *RemoteWriteServer) URL() string {
return rws.server.URL
}

func (rws *RemoteWriteServer) getWriteHandler(t *testing.T) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
scanner := bufio.NewScanner(r.Body)
var rows parser.Rows
for scanner.Scan() {

rows.Unmarshal(scanner.Text())
for _, row := range rows.Rows {
var labelPairs []vm.LabelPair
var ts vm.TimeSeries
nameValue := ""
for _, tag := range row.Tags {
if string(tag.Key) == "__name__" {
nameValue = string(tag.Value)
continue
}
labelPairs = append(labelPairs, vm.LabelPair{Name: string(tag.Key), Value: string(tag.Value)})
}

ts.Values = append(ts.Values, row.Values...)
ts.Timestamps = append(ts.Timestamps, row.Timestamps...)
ts.Name = nameValue
ts.LabelPairs = labelPairs
rws.tss = append(rws.tss, ts)
}
rows.Reset()
}

w.WriteHeader(http.StatusNoContent)
return
})
}

func (rws *RemoteWriteServer) handlePing() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("OK"))
})
}

func (rws *RemoteWriteServer) seriesHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var labelValues []LabelValues
for _, ser := range rws.series {
metricNames := make(LabelValues)
if ser.Name != "" {
metricNames["__name__"] = ser.Name
}
for _, p := range ser.LabelPairs {
metricNames[p.Name] = p.Value
}
labelValues = append(labelValues, metricNames)
}

resp := Response{
Status: "success",
Series: labelValues,
}

err := json.NewEncoder(w).Encode(resp)
if err != nil {
log.Printf("error send series: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
})
}

func (rws *RemoteWriteServer) valuesHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
labelNames := make(map[string]struct{})
for _, ser := range rws.series {
if ser.Name != "" {
labelNames[ser.Name] = struct{}{}
}
}

metricNames := make([]string, 0, len(labelNames))
for k := range labelNames {
metricNames = append(metricNames, k)
}
resp := MetricNamesResponse{
Status: "success",
Data: metricNames,
}

buf := bytes.NewBuffer(nil)
err := json.NewEncoder(buf).Encode(resp)
if err != nil {
log.Printf("error send series: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}

w.WriteHeader(http.StatusOK)
_, err = w.Write(buf.Bytes())
if err != nil {
log.Printf("error send series: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
return
})
}

func (rws *RemoteWriteServer) exportNativeHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
err := prometheus.ExportNativeHandler(now, w, r)
if err != nil {
log.Printf("error export series via native protocol: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}

return
})
}

func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
protoparserutil.StartUnmarshalWorkers()
defer protoparserutil.StopUnmarshalWorkers()

var gotTimeSeries []vm.TimeSeries
var mx sync.RWMutex

err := stream.Parse(r.Body, "", func(block *stream.Block) error {
mn := &block.MetricName
var timeseries vm.TimeSeries
timeseries.Name = string(mn.MetricGroup)
timeseries.Timestamps = append(timeseries.Timestamps, block.Timestamps...)
timeseries.Values = append(timeseries.Values, block.Values...)

for i := range mn.Tags {
tag := &mn.Tags[i]
timeseries.LabelPairs = append(timeseries.LabelPairs, vm.LabelPair{
Name: string(tag.Key),
Value: string(tag.Value),
})
}

mx.Lock()
gotTimeSeries = append(gotTimeSeries, timeseries)
mx.Unlock()

return nil
})
if err != nil {
log.Printf("error parse stream blocks: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}

// got timeseries should be sorted
// because they are processed independently
sort.SliceStable(gotTimeSeries, func(i, j int) bool {
iv, jv := gotTimeSeries[i], gotTimeSeries[j]
switch {
case iv.Values[0] != jv.Values[0]:
return iv.Values[0] < jv.Values[0]
case iv.Timestamps[0] != jv.Timestamps[0]:
return iv.Timestamps[0] < jv.Timestamps[0]
default:
return iv.Name < jv.Name
}
})

if !reflect.DeepEqual(gotTimeSeries, rws.expectedSeries) {
w.WriteHeader(http.StatusInternalServerError)
t.Fatalf("datasets not equal, expected: %#v;\n got: %#v", rws.expectedSeries, gotTimeSeries)
}

w.WriteHeader(http.StatusNoContent)
return
})
}

// GenerateVNSeries generates test timeseries
func GenerateVNSeries(start, end, numOfSeries, numOfSamples int64) []vm.TimeSeries {
var ts []vm.TimeSeries
j := 0
for i := 0; i < int(numOfSeries); i++ {
if i%3 == 0 {
j++
}

timeSeries := vm.TimeSeries{
Name: fmt.Sprintf("vm_metric_%d", j),
LabelPairs: []vm.LabelPair{
{Name: "job", Value: strconv.Itoa(i)},
},
}

ts = append(ts, timeSeries)
}

for i := range ts {
t, v := generateTimeStampsAndValues(i, start, end, numOfSamples)
ts[i].Timestamps = t
ts[i].Values = v
}

return ts
}

func generateTimeStampsAndValues(idx int, startTime, endTime, numOfSamples int64) ([]int64, []float64) {
delta := (endTime - startTime) / numOfSamples

var timestamps []int64
var values []float64
t := startTime
for t != endTime {
v := 100 * int64(idx)
timestamps = append(timestamps, t*1000)
values = append(values, float64(v))
t = t + delta
}

return timestamps, values
}
Binary file not shown.
Binary file not shown.
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"ulid": "01JHWQ445Y2P1TDYB05AEKD6MC",
|
||||
"minTime": 1737204082361,
|
||||
"maxTime": 1737204302539,
|
||||
"stats": {
|
||||
"numSamples": 60275,
|
||||
"numSeries": 2792,
|
||||
"numChunks": 2792
|
||||
},
|
||||
"compaction": {
|
||||
"level": 1,
|
||||
"sources": [
|
||||
"01JHWQ445Y2P1TDYB05AEKD6MC"
|
||||
]
|
||||
},
|
||||
"version": 1
|
||||
}
|
||||
@@ -1,268 +1,9 @@
package main

import (
	"context"
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
	remote_read_integration "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

const (
	storagePath       = "TestStorage"
	retentionPeriod   = "100y"
	deleteSeriesLimit = 3e3
)

func TestVMNativeProcessorRun(t *testing.T) {
	f := func(startStr, endStr string, numOfSeries, numOfSamples int, resultExpected []vm.TimeSeries) {
		t.Helper()

		src := remote_read_integration.NewRemoteWriteServer(t)
		dst := remote_read_integration.NewRemoteWriteServer(t)

		defer func() {
			src.Close()
			dst.Close()
		}()

		start, err := time.Parse(time.RFC3339, startStr)
		if err != nil {
			t.Fatalf("cannot parse start time: %s", err)
		}

		end, err := time.Parse(time.RFC3339, endStr)
		if err != nil {
			t.Fatalf("cannot parse end time: %s", err)
		}

		matchName := "__name__"
		matchValue := ".*"
		filter := native.Filter{
			Match:     fmt.Sprintf("{%s=~%q}", matchName, matchValue),
			TimeStart: startStr,
			TimeEnd:   endStr,
		}

		rws := remote_read_integration.GenerateVNSeries(start.Unix(), end.Unix(), int64(numOfSeries), int64(numOfSamples))

		src.Series(rws)
		dst.ExpectedSeries(resultExpected)

		if err := fillStorage(rws); err != nil {
			t.Fatalf("cannot add series to storage: %s", err)
		}

		tr := httputil.NewTransport(false, "test_client")
		tr.DisableKeepAlives = false

		srcClient := &native.Client{
			AuthCfg:     nil,
			Addr:        src.URL(),
			ExtraLabels: []string{},
			HTTPClient: &http.Client{
				Transport: tr,
			},
		}
		dstClient := &native.Client{
			AuthCfg:     nil,
			Addr:        dst.URL(),
			ExtraLabels: []string{},
			HTTPClient: &http.Client{
				Transport: tr,
			},
		}

		isSilent = true
		defer func() { isSilent = false }()

		bf, err := backoff.New(10, 1.8, time.Second*2)
		if err != nil {
			t.Fatalf("cannot create backoff: %s", err)
		}

		p := &vmNativeProcessor{
			filter:   filter,
			dst:      dstClient,
			src:      srcClient,
			backoff:  bf,
			cc:       1,
			isNative: true,
		}

		ctx := context.Background()
		if err := p.run(ctx); err != nil {
			t.Fatalf("run() error: %s", err)
		}
		deleted, err := deleteSeries(matchName, matchValue)
		if err != nil {
			t.Fatalf("cannot delete series: %s", err)
		}
		if deleted != numOfSeries {
			t.Fatalf("unexpected number of deleted series; got %d; want %d", deleted, numOfSeries)
		}
	}

	processFlags()
	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
	defer func() {
		vmstorage.Stop()
		if err := os.RemoveAll(storagePath); err != nil {
			log.Fatalf("cannot remove %q: %s", storagePath, err)
		}
	}()

	barpool.Disable(true)
	defer func() {
		barpool.Disable(false)
	}()

	// step minute on minute time range
	start := "2022-11-25T11:23:05+02:00"
	end := "2022-11-27T11:24:05+02:00"
	numOfSeries := 3
	numOfSamples := 2
	resultExpected := []vm.TimeSeries{
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
			Timestamps: []int64{1669368185000, 1669454615000},
			Values:     []float64{0, 0},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
			Timestamps: []int64{1669368185000, 1669454615000},
			Values:     []float64{100, 100},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
			Timestamps: []int64{1669368185000, 1669454615000},
			Values:     []float64{200, 200},
		},
	}
	f(start, end, numOfSeries, numOfSamples, resultExpected)

	// step month on month time range
	start = "2022-09-26T11:23:05+02:00"
	end = "2022-11-26T11:24:05+02:00"
	numOfSeries = 3
	numOfSamples = 2
	resultExpected = []vm.TimeSeries{
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
			Timestamps: []int64{1664184185000},
			Values:     []float64{0},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
			Timestamps: []int64{1666819415000},
			Values:     []float64{0},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
			Timestamps: []int64{1664184185000},
			Values:     []float64{100},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
			Timestamps: []int64{1666819415000},
			Values:     []float64{100},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
			Timestamps: []int64{1664184185000},
			Values:     []float64{200},
		},
		{
			Name:       "vm_metric_1",
			LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
			Timestamps: []int64{1666819415000},
			Values:     []float64{200},
		},
	}
	f(start, end, numOfSeries, numOfSamples, resultExpected)
}

func processFlags() {
	flag.Parse()
	for _, fv := range []struct {
		flag  string
		value string
	}{
		{flag: "storageDataPath", value: storagePath},
		{flag: "retentionPeriod", value: retentionPeriod},
	} {
		// panics if flag doesn't exist
		if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
			log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
		}
	}
}

func fillStorage(series []vm.TimeSeries) error {
	var mrs []storage.MetricRow
	for _, series := range series {
		var labels []prompbmarshal.Label
		for _, lp := range series.LabelPairs {
			labels = append(labels, prompbmarshal.Label{
				Name:  lp.Name,
				Value: lp.Value,
			})
		}
		if series.Name != "" {
			labels = append(labels, prompbmarshal.Label{
				Name:  "__name__",
				Value: series.Name,
			})
		}
		mr := storage.MetricRow{}
		mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels)

		timestamps := series.Timestamps
		values := series.Values
		for i, value := range values {
			mr.Timestamp = timestamps[i]
			mr.Value = value
			mrs = append(mrs, mr)
		}
	}

	if err := vmstorage.AddRows(mrs); err != nil {
		return fmt.Errorf("unexpected error in AddRows: %s", err)
	}
	vmstorage.Storage.DebugFlush()
	return nil
}

func deleteSeries(name, value string) (int, error) {
	tfs := storage.NewTagFilters()
	// the (false, true) args select a non-negative regexp match on the label value
	if err := tfs.Add([]byte(name), []byte(value), false, true); err != nil {
		return 0, fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
	}
	return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs}, deleteSeriesLimit)
}

func TestBuildMatchWithFilter_Failure(t *testing.T) {
	f := func(filter, metricName string) {
		t.Helper()
@@ -8,8 +8,6 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/csvimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/datadogsketches"
@@ -36,6 +34,7 @@ import (
	influxserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/influx"
	opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb"
	opentsdbhttpserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
@@ -43,6 +42,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
	"github.com/VictoriaMetrics/metrics"
)

var (
@@ -70,6 +70,7 @@ var (
	maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 40, "The maximum number of labels per time series to be accepted. Series with superfluous labels are ignored. In this case the vm_rows_ignored_total{reason=\"too_many_labels\"} metric at /metrics page is incremented")
	maxLabelNameLen = flag.Int("maxLabelNameLen", 256, "The maximum length of label name in the accepted time series. Series with longer label name are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_name\"} metric at /metrics page is incremented")
	maxLabelValueLen = flag.Int("maxLabelValueLen", 4*1024, "The maximum length of label values in the accepted time series. Series with longer label value are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_value\"} metric at /metrics page is incremented")
	maxMemoryUsage = flag.Int("insert.circuitBreakMemoryUsage", 90, "Reject insert requests when memory usage exceeds a certain percentage. 0 means no circuit breaking. An integer value from 1-100 represents 1%-100%.")
)

var (
@@ -131,6 +132,13 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	startTime := time.Now()
	defer requestDuration.UpdateDuration(startTime)

	if *maxMemoryUsage >= 1 && *maxMemoryUsage <= 100 {
		if memory.CurrentPercentage() > *maxMemoryUsage {
			httpserver.Errorf(w, r, "server overloaded, request rejected by circuit breaker")
			return true
		}
	}

	path := strings.Replace(r.URL.Path, "//", "/", -1)
	if strings.HasPrefix(path, "/static") {
		staticServer.ServeHTTP(w, r)
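The `-insert.circuitBreakMemoryUsage` change above boils down to a guard at the top of the request handler. A minimal standalone sketch of the same pattern as generic middleware; `currentMemoryPercentage` is a hypothetical stand-in for the `memory.CurrentPercentage()` call in the hunk, and the 503 status code is a choice of this sketch (the hunk uses `httpserver.Errorf`):

```go
package main

import "net/http"

// currentMemoryPercentage stands in for lib/memory.CurrentPercentage above:
// it must report current memory usage as an integer percentage in 0..100.
var currentMemoryPercentage = func() int { return 42 }

// circuitBreaker rejects requests while memory usage exceeds maxMemoryUsage.
// A value outside 1..100 disables the breaker, mirroring the flag semantics.
func circuitBreaker(maxMemoryUsage int, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if maxMemoryUsage >= 1 && maxMemoryUsage <= 100 && currentMemoryPercentage() > maxMemoryUsage {
			http.Error(w, "server overloaded, request rejected by circuit breaker", http.StatusServiceUnavailable)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	http.ListenAndServe(":8080", circuitBreaker(90, http.DefaultServeMux))
}
```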
@@ -15,7 +15,7 @@ import (
)

var maxGraphiteSeries = flag.Int("search.maxGraphiteSeries", 300e3, "The maximum number of time series, which can be scanned during queries to Graphite Render API. "+
	"See https://docs.victoriametrics.com/victoriametrics/integrations/graphite#render-api")
	"See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#render-api")

type evalConfig struct {
	startTime int64
@@ -22,9 +22,9 @@ import (

var (
	maxGraphiteTagKeysPerSearch = flag.Int("search.maxGraphiteTagKeys", 100e3, "The maximum number of tag keys returned from Graphite API, which returns tags. "+
		"See https://docs.victoriametrics.com/victoriametrics/integrations/graphite#tags-api")
		"See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#tags-api")
	maxGraphiteTagValuesPerSearch = flag.Int("search.maxGraphiteTagValues", 100e3, "The maximum number of tag values returned from Graphite API, which returns tag values. "+
		"See https://docs.victoriametrics.com/victoriametrics/integrations/graphite#tags-api")
		"See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#tags-api")
)

// TagsDelSeriesHandler implements /tags/delSeries handler.

@@ -818,6 +818,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
		LookbackDelta:       lookbackDelta,
		RoundDigits:         getRoundDigits(r),
		EnforcedTagFilterss: etfs,
		CacheTagFilters:     etfs,
		GetRequestURI: func() string {
			return httpserver.GetRequestURI(r)
		},
@@ -927,6 +928,7 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
		LookbackDelta:       lookbackDelta,
		RoundDigits:         getRoundDigits(r),
		EnforcedTagFilterss: etfs,
		CacheTagFilters:     etfs,
		GetRequestURI: func() string {
			return httpserver.GetRequestURI(r)
		},

@@ -5,8 +5,10 @@ import (
	"strings"
	"unsafe"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/metricsql"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/atomicutil"
)

// callbacks for optimized incremental calculations for aggregate functions
@@ -66,9 +68,8 @@ var incrementalAggrFuncCallbacksMap = map[string]*incrementalAggrFuncCallbacks{
type incrementalAggrContextMap struct {
	m map[string]*incrementalAggrContext

	// The padding prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	_ [128 - unsafe.Sizeof(map[string]*incrementalAggrContext{})%128]byte
	// The padding prevents false sharing
	_ [atomicutil.CacheLineSize - unsafe.Sizeof(map[string]*incrementalAggrContext{})%atomicutil.CacheLineSize]byte
}

type incrementalAggrFuncContext struct {
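The swap above replaces a hard-coded 128-byte pad with `atomicutil.CacheLineSize`. The arithmetic deserves a note: the anonymous `_ [N]byte` field rounds the struct size up to the next cache-line multiple, so adjacent elements of an array of these structs never share a cache line. A standalone sketch of the same trick (the 64-byte line size is illustrative; the real constant is `atomicutil.CacheLineSize`):

```go
package main

import (
	"fmt"
	"unsafe"
)

const cacheLineSize = 64 // illustrative stand-in for atomicutil.CacheLineSize

type paddedCounter struct {
	n uint64

	// Pad the struct to a multiple of the cache line size, so per-worker
	// counters in an array don't false-share a line. If the payload already
	// fills a whole number of lines, a full extra line is added, exactly as
	// in the expression used in the hunk above.
	_ [cacheLineSize - unsafe.Sizeof(uint64(0))%cacheLineSize]byte
}

func main() {
	var workers [4]paddedCounter
	fmt.Println(unsafe.Sizeof(workers[0]), unsafe.Sizeof(workers)) // 64 256
}
```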
@@ -17,6 +17,7 @@ import (

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/atomicutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
@@ -139,6 +140,13 @@ type EvalConfig struct {
	// EnforcedTagFilterss may contain additional label filters to use in the query.
	EnforcedTagFilterss [][]storage.TagFilter

	// CacheTagFilters stores the original tag-filter sets and extra_label from the request.
	// The slice is never modified after creation and is used only to build
	// the query-cache key.
	//
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9001
	CacheTagFilters [][]storage.TagFilter

	// The callback, which returns the request URI during logging.
	// The request URI isn't stored here because its construction may take non-trivial amounts of CPU.
	GetRequestURI func() string
@@ -165,6 +173,7 @@ func copyEvalConfig(src *EvalConfig) *EvalConfig {
	ec.LookbackDelta = src.LookbackDelta
	ec.RoundDigits = src.RoundDigits
	ec.EnforcedTagFilterss = src.EnforcedTagFilterss
	ec.CacheTagFilters = src.CacheTagFilters
	ec.GetRequestURI = src.GetRequestURI
	ec.QueryStats = src.QueryStats

@@ -1885,9 +1894,8 @@ func doRollupForTimeseries(funcName string, keepMetricNames bool, rc *rollupConf
type timeseriesWithPadding struct {
	tss []*timeseries

	// The padding prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	_ [128 - unsafe.Sizeof([]*timeseries{})%128]byte
	// The padding prevents false sharing
	_ [atomicutil.CacheLineSize - unsafe.Sizeof([]*timeseries{})%atomicutil.CacheLineSize]byte
}

type timeseriesByWorkerID struct {
@@ -1966,11 +1974,14 @@ func sumNoOverflow(a, b int64) int64 {
}

func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
	if *noStaleMarkers || funcName == "default_rollup" || funcName == "stale_samples_over_time" {
	if *noStaleMarkers || funcName == "stale_samples_over_time" ||
		funcName == "default_rollup" || funcName == "increase" || funcName == "rate" {
		// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
		// since it uses them for Prometheus-style staleness detection.
		// Do not drop staleness marks for stale_samples_over_time() function, since it needs
		// to calculate the number of staleness markers.
		// Do not drop staleness marks for increase() and rate() functions, so they can stop
		// returning results for stale series. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8891
		return values, timestamps
	}
	// Remove Prometheus staleness marks, so non-default rollup functions don't hit NaN values.
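For orientation, dropping staleness markers from a parallel (values, timestamps) pair is an in-place filter that keeps both slices aligned. A standalone sketch; the bit pattern below is the Prometheus stale-NaN encoding, while the real code relies on `decimal.IsStaleNaN` as shown above:

```go
package main

import (
	"fmt"
	"math"
)

const staleNaNBits = 0x7ff0000000000002 // Prometheus staleness-marker bit pattern

func isStaleNaN(v float64) bool { return math.Float64bits(v) == staleNaNBits }

// dropStaleNaNs filters both slices in place, keeping values and timestamps aligned.
func dropStaleNaNs(values []float64, timestamps []int64) ([]float64, []int64) {
	k := 0
	for i, v := range values {
		if isStaleNaN(v) {
			continue
		}
		values[k] = v
		timestamps[k] = timestamps[i]
		k++
	}
	return values[:k], timestamps[:k]
}

func main() {
	stale := math.Float64frombits(staleNaNBits)
	vs, ts := dropStaleNaNs([]float64{1, stale, 3}, []int64{10, 20, 30})
	fmt.Println(vs, ts) // [1 3] [10 30]
}
```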
@@ -71,7 +71,8 @@ var rollupFuncs = map[string]newRollupFunc{
	"quantile_over_time":  newRollupQuantile,
	"quantiles_over_time": newRollupQuantiles,
	"range_over_time":     newRollupFuncOneArg(rollupRange),
	"rate":                newRollupFuncOneArg(rollupDerivFast), // + rollupFuncsRemoveCounterResets
	"rate":                newRollupFuncOneArg(rollupDerivFast),           // + rollupFuncsRemoveCounterResets
	"rate_prometheus":     newRollupFuncOneArg(rollupDerivFastPrometheus), // + rollupFuncsRemoveCounterResets
	"rate_over_sum":       newRollupFuncOneArg(rollupRateOverSum),
	"resets":              newRollupFuncOneArg(rollupResets),
	"rollup":              newRollupFuncOneOrTwoArgs(rollupFake),
@@ -195,7 +196,7 @@ var rollupAggrFuncs = map[string]rollupFunc{
	"zscore_over_time": rollupZScoreOverTime,
}

// VictoriaMetrics can extends lookbehind window for these functions
// VictoriaMetrics can extend lookbehind window for these functions
// in order to make sure it contains enough points for returning non-empty results.
//
// This is needed for returning the expected non-empty graphs when zooming in the graph in Grafana,
@@ -225,6 +226,7 @@ var rollupFuncsRemoveCounterResets = map[string]bool{
	"increase_pure":   true,
	"irate":           true,
	"rate":            true,
	"rate_prometheus": true,
	"rollup_increase": true,
	"rollup_rate":     true,
}
@@ -252,6 +254,7 @@ var rollupFuncsSamplesScannedPerCall = map[string]int{
	"lifetime":          2,
	"present_over_time": 1,
	"rate":              2,
	"rate_prometheus":   2,
	"scrape_interval":   2,
	"tfirst_over_time":  1,
	"timestamp":         1,
@@ -529,7 +532,7 @@ type rollupFuncArg struct {
	timestamps []int64

	// Real value preceding values.
	// Is populated if preceding value is within the -search.maxStalenessInterval (rc.LookbackDelta).
	// Is populated if preceding value is within the rc.LookbackDelta.
	realPrevValue float64

	// Real value which goes after values.
@@ -776,13 +779,18 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
	rfa.realPrevValue = nan
	if i > 0 {
		prevValue, prevTimestamp := values[i-1], timestamps[i-1]
		// set realPrevValue if rc.LookbackDelta == 0
		// or if distance between datapoint in prev interval and beginning of this interval
		// set realPrevValue if rc.LookbackDelta == 0 or
		// if distance between datapoint in prev interval and first datapoint in this interval
		// doesn't exceed LookbackDelta.
		// https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1381
		// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/894
		// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8045
		if rc.LookbackDelta == 0 || (tStart-prevTimestamp) < rc.LookbackDelta {
		// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8935
		currTimestamp := tStart
		if len(rfa.timestamps) > 0 {
			currTimestamp = rfa.timestamps[0]
		}
		if rc.LookbackDelta == 0 || (currTimestamp-prevTimestamp) < rc.LookbackDelta {
			rfa.realPrevValue = prevValue
		}
	}
@@ -908,15 +916,18 @@ func getMaxPrevInterval(scrapeInterval int64) int64 {
	return scrapeInterval + scrapeInterval/8
}

// removeCounterResets removes resets for rollup functions over counters - see rollupFuncsRemoveCounterResets.
// It doesn't remove resets between samples with staleNaNs, or samples that exceed maxStalenessInterval.
func removeCounterResets(values []float64, timestamps []int64, maxStalenessInterval int64) {
	// There is no need in handling NaNs here, since they are impossible
	// on values from vmstorage.
	if len(values) == 0 {
		return
	}
	var correction float64
	prevValue := values[0]
	for i, v := range values {
		if decimal.IsStaleNaN(v) {
			continue
		}
		d := v - prevValue
		if d < 0 {
			if (-d * 8) < prevValue {
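A self-contained sketch of the reset correction shown above, with the new stale-NaN skip: when a counter value drops, the previous value is folded into a running correction so the output stays monotonic. The partial-reset heuristic behind `(-d * 8) < prevValue` is elided here; the expected output matches the `{2, 4, 2} -> {2, 4, 6}` case added to `TestRemoveCounterResets` further down:

```go
package main

import "fmt"

// isStaleNaN is a stub standing in for decimal.IsStaleNaN from the code above.
func isStaleNaN(v float64) bool { return false }

// removeCounterResets folds counter resets into a running correction,
// skipping staleness markers so a marker is never mistaken for a reset.
func removeCounterResets(values []float64) {
	if len(values) == 0 {
		return
	}
	var correction float64
	prevValue := values[0]
	for i, v := range values {
		if isStaleNaN(v) {
			continue // leave the marker untouched
		}
		if v < prevValue {
			correction += prevValue // counter reset: carry the old level forward
		}
		prevValue = v
		values[i] = v + correction
	}
}

func main() {
	vs := []float64{2, 4, 2}
	removeCounterResets(vs)
	fmt.Println(vs) // [2 4 6]
}
```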
@@ -1826,14 +1837,18 @@ func rollupIncreasePure(rfa *rollupFuncArg) float64 {
	// There is no need in handling NaNs here, since they must be cleaned up
	// before calling rollup funcs.
	values := rfa.values
	// restore to the real value because of potential staleness reset
	prevValue := rfa.realPrevValue
	prevValue := rfa.prevValue
	if math.IsNaN(prevValue) {
		if len(values) == 0 {
			return nan
		}
		// Assume the counter starts from 0.
		prevValue = 0
		if !math.IsNaN(rfa.realPrevValue) {
			// Assume that the value didn't change during the current gap
			// if realPrevValue exists.
			prevValue = rfa.realPrevValue
		}
	}
	if len(values) == 0 {
		// Assume the counter didn't change since prevValue.
@@ -1844,8 +1859,13 @@ func rollupIncreasePure(rfa *rollupFuncArg) float64 {

func rollupDelta(rfa *rollupFuncArg) float64 {
	// There is no need in handling NaNs here, since they must be cleaned up
	// before calling rollup funcs.
	// before calling rollup funcs. Only StaleNaNs could remain in values - see dropStaleNaNs().
	values := rfa.values
	if len(values) > 0 && decimal.IsStaleNaN(values[len(values)-1]) {
		// if last sample on interval is staleness marker then the selected series is expected
		// to stop rendering immediately. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8891
		return nan
	}
	prevValue := rfa.prevValue
	if math.IsNaN(prevValue) {
		if len(values) == 0 {
@@ -1929,10 +1949,23 @@ func rollupDerivSlow(rfa *rollupFuncArg) float64 {
	return k
}

func rollupDerivFastPrometheus(rfa *rollupFuncArg) float64 {
	delta := rollupDeltaPrometheus(rfa)
	if math.IsNaN(delta) || rfa.window == 0 {
		return nan
	}
	return delta / (float64(rfa.window) / 1e3)
}

func rollupDerivFast(rfa *rollupFuncArg) float64 {
	// There is no need in handling NaNs here, since they must be cleaned up
	// before calling rollup funcs.
	// before calling rollup funcs. Only StaleNaNs could remain in values - see dropStaleNaNs().
	values := rfa.values
	if len(values) > 0 && decimal.IsStaleNaN(values[len(values)-1]) {
		// if last sample on interval is staleness marker then the selected series is expected
		// to stop rendering immediately. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8891
		return nan
	}
	timestamps := rfa.timestamps
	prevValue := rfa.prevValue
	prevTimestamp := rfa.prevTimestamp
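The `rollupDerivFastPrometheus` helper above divides the Prometheus-style delta by the window length in seconds (`rfa.window` is in milliseconds, hence the division by `1e3`). As a worked example: for values `{0, 20}` over a `10e3` ms window, the delta is 20 and the rate is `20 / 10 = 2` per second, matching the `f([]float64{0, 20}, 10e3, 2)` case in the new `TestRollupDerivFastPrometheus` test further down.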
@@ -291,7 +291,7 @@ func (rrc *rollupResultCache) GetSeries(qt *querytracer.Tracer, ec *EvalConfig,
	bb := bbPool.Get()
	defer bbPool.Put(bb)

	bb.B = marshalRollupResultCacheKeyForSeries(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
	bb.B = marshalRollupResultCacheKeyForSeries(bb.B[:0], expr, window, ec.Step, ec.CacheTagFilters)
	metainfoBuf := rrc.c.Get(nil, bb.B)
	if len(metainfoBuf) == 0 {
		qt.Printf("nothing found")
@@ -313,7 +313,7 @@ func (rrc *rollupResultCache) GetSeries(qt *querytracer.Tracer, ec *EvalConfig,
	if !ok {
		mi.RemoveKey(key)
		metainfoBuf = mi.Marshal(metainfoBuf[:0])
		bb.B = marshalRollupResultCacheKeyForSeries(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
		bb.B = marshalRollupResultCacheKeyForSeries(bb.B[:0], expr, window, ec.Step, ec.CacheTagFilters)
		rrc.c.Set(bb.B, metainfoBuf)
		return nil, ec.Start
	}
@@ -419,7 +419,7 @@ func (rrc *rollupResultCache) PutSeries(qt *querytracer.Tracer, ec *EvalConfig,
	metainfoBuf := bbPool.Get()
	defer bbPool.Put(metainfoBuf)

	metainfoKey.B = marshalRollupResultCacheKeyForSeries(metainfoKey.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
	metainfoKey.B = marshalRollupResultCacheKeyForSeries(metainfoKey.B[:0], expr, window, ec.Step, ec.CacheTagFilters)
	metainfoBuf.B = rrc.c.Get(metainfoBuf.B[:0], metainfoKey.B)
	var mi rollupResultCacheMetainfo
	if len(metainfoBuf.B) > 0 {
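Note the common thread in these three hunks: every place that builds a rollup-result cache key now passes `ec.CacheTagFilters` instead of `ec.EnforcedTagFilterss`. Per the `EvalConfig` comment earlier, `CacheTagFilters` is never modified after creation, so the cache key stays stable for the lifetime of the request; this is the fix for issue 9001 referenced above.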
@@ -156,6 +156,14 @@ func TestRemoveCounterResets(t *testing.T) {
	removeCounterResets(values, timestamps, 10)
	testRowsEqual(t, values, timestamps, valuesExpected, timestamps)

	// verify that staleNaNs are respected;
	// it is important to have a counter reset in the values below to trigger the correction logic
	values = []float64{2, 4, 2, decimal.StaleNaN}
	timestamps = []int64{10, 20, 30, 40}
	valuesExpected = []float64{2, 4, 6, decimal.StaleNaN}
	removeCounterResets(values, timestamps, 10)
	testRowsEqual(t, values, timestamps, valuesExpected, timestamps)

	// verify results always increase monotonically with possible float operations precision error
	values = []float64{34.094223, 2.7518, 2.140669, 0.044878, 1.887095, 2.546569, 2.490149, 0.045, 0.035684, 0.062454, 0.058296}
	timestampsExpected = []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
@@ -648,6 +656,7 @@ func TestRollupNewRollupFuncSuccess(t *testing.T) {
	f("irate", 0)
	f("outlier_iqr_over_time", nan)
	f("rate", 2200)
	f("rate_prometheus", 2200)
	f("resets", 5)
	f("range_over_time", 111)
	f("avg_over_time", 47.083333333333336)
@@ -1525,16 +1534,31 @@ func testRowsEqual(t *testing.T, values []float64, timestamps []int64, valuesExp
			i, ts, tsExpected, timestamps, timestampsExpected)
	}
	vExpected := valuesExpected[i]
	if decimal.IsStaleNaN(v) {
		if !decimal.IsStaleNaN(vExpected) {
			t.Fatalf("unexpected stale NaN value at values[%d]; want %f\nvalues=\n%v\nvaluesExpected=\n%v",
				i, vExpected, values, valuesExpected)
		}
		continue
	}
	// staleNaNBits == math.NaN(), but decimal.IsStaleNaN(math.NaN()) == false,
	// so we check for decimal.IsStaleNaN first.
	if decimal.IsStaleNaN(vExpected) {
		if !decimal.IsStaleNaN(v) {
			t.Fatalf("unexpected value at values[%d]; got %f; want stale NaN\nvalues=\n%v\nvaluesExpected=\n%v",
				i, v, values, valuesExpected)
		}
	}
	if math.IsNaN(v) {
		if !math.IsNaN(vExpected) {
			t.Fatalf("unexpected nan value at values[%d]; want %f\nvalues=\n%v\nvaluesExpected=\n%v",
			t.Fatalf("unexpected NaN value at values[%d]; want %f\nvalues=\n%v\nvaluesExpected=\n%v",
				i, vExpected, values, valuesExpected)
		}
		continue
	}
	if math.IsNaN(vExpected) {
		if !math.IsNaN(v) {
			t.Fatalf("unexpected value at values[%d]; got %f; want nan\nvalues=\n%v\nvaluesExpected=\n%v",
			t.Fatalf("unexpected value at values[%d]; got %f; want NaN\nvalues=\n%v\nvaluesExpected=\n%v",
				i, v, values, valuesExpected)
		}
		continue
@@ -1608,6 +1632,33 @@ func TestRollupDelta(t *testing.T) {
	f(100, nan, nan, nil, 0)
}

func TestRollupDerivFastPrometheus(t *testing.T) {
	f := func(values []float64, window int64, resultExpected float64) {
		t.Helper()
		rfa := &rollupFuncArg{
			values: values,
			window: window,
		}
		result := rollupDerivFastPrometheus(rfa)
		if math.IsNaN(result) {
			if !math.IsNaN(resultExpected) {
				t.Fatalf("unexpected result; got %v; want %v", result, resultExpected)
			}
			return
		}
		if result != resultExpected {
			t.Fatalf("unexpected result; got %v; want %v", result, resultExpected)
		}
	}
	f(nil, 0, nan)
	f(nil, 10, nan)
	f([]float64{0, 10}, 0, nan)
	f([]float64{10}, 10, nan)

	f([]float64{0, 20}, 10e3, 2)
	f([]float64{0, 10, 20}, 10e3, 2)
}

func TestRollupDeltaWithStaleness(t *testing.T) {
	// there is a gap between samples in the dataset below
	timestamps := []int64{0, 15000, 30000, 70000}
@@ -1719,6 +1770,55 @@ func TestRollupDeltaWithStaleness(t *testing.T) {
		timestampsExpected := []int64{0, 10e3, 20e3, 30e3, 40e3}
		testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
	})

	t.Run("issue-8935", func(t *testing.T) {
		// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8935
		// The dataset below has a gap that exceeds LookbackDelta.
		// The step is picked in a way that on the [60e3, 90e3] window
		// the prevValue will be NaN, but 60e3-55e3 still matches
		// timestamp=10e3 and stores its value as realPrevValue.
		// This results in an incorrect increase of delta=1-50=-49.
		// The fix deducts LookbackDelta not from the window start
		// but from the first captured data point in the window, so it becomes 70e3-55e3=15e3,
		// and realPrevValue becomes NaN due to staleness detection.
		timestamps = []int64{0, 10000, 70000, 80000}
		values = []float64{50, 50, 1, 1}
		rc := rollupConfig{
			Func:               rollupDelta,
			Start:              0,
			End:                90e3,
			Step:               30e3,
			LookbackDelta:      55e3,
			MaxPointsPerSeries: 1e4,
		}
		rc.Timestamps = rc.getTimestamps()
		gotValues, _ := rc.Do(nil, values, timestamps)
		valuesExpected := []float64{0, 0, 0, 1}
		timestampsExpected := []int64{0, 30e3, 60e3, 90e3}
		testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
	})

	// the last sample is a stale NaN
	timestamps = []int64{0, 10000, 20000, 30000, 40000}
	values = []float64{0, 0, 0, 10, decimal.StaleNaN}
	t.Run("last point is stale nan", func(t *testing.T) {
		rc := rollupConfig{
			Func:               rollupDelta,
			Start:              40001,
			End:                40001,
			Step:               50000,
			Window:             0,
			MaxPointsPerSeries: 1e4,
		}
		rc.Timestamps = rc.getTimestamps()
		gotValues, samplesScanned := rc.Do(nil, values, timestamps)
		if samplesScanned != 10 {
			t.Fatalf("expecting 10 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
		}
		valuesExpected := []float64{nan}
		timestampsExpected := []int64{40001}
		testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
	})
}

func TestRollupIncreasePureWithStaleness(t *testing.T) {
@@ -1833,3 +1933,48 @@ func TestRollupIncreasePureWithStaleness(t *testing.T) {
		testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
	})
}

func TestRollupDerivFastWithStaleness(t *testing.T) {
	timestamps := []int64{0, 10000, 20000, 30000, 40000}
	values := []float64{0, 0, 0, 0, 10}
	t.Run("no stale marker", func(t *testing.T) {
		rc := rollupConfig{
			Func:               rollupDerivFast,
			Start:              40001,
			End:                40001,
			Step:               50000,
			Window:             0,
			MaxPointsPerSeries: 1e4,
		}
		rc.Timestamps = rc.getTimestamps()
		gotValues, samplesScanned := rc.Do(nil, values, timestamps)
		if samplesScanned != 10 {
			t.Fatalf("expecting 10 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
		}
		valuesExpected := []float64{0.25}
		timestampsExpected := []int64{40001}
		testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
	})

	// the last sample is a stale NaN
	timestamps = []int64{0, 10000, 20000, 30000, 40000}
	values = []float64{0, 0, 0, 10, decimal.StaleNaN}
	t.Run("last point is stale nan", func(t *testing.T) {
		rc := rollupConfig{
			Func:               rollupDerivFast,
			Start:              40001,
			End:                40001,
			Step:               50000,
			Window:             0,
			MaxPointsPerSeries: 1e4,
		}
		rc.Timestamps = rc.getTimestamps()
		gotValues, samplesScanned := rc.Do(nil, values, timestamps)
		if samplesScanned != 10 {
			t.Fatalf("expecting 10 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
		}
		valuesExpected := []float64{nan}
		timestampsExpected := []int64{40001}
		testRowsEqual(t, gotValues, rc.Timestamps, valuesExpected, timestampsExpected)
	})
}

@@ -66,8 +66,8 @@ or at your own [VictoriaMetrics instance](https://docs.victoriametrics.com/victo
The list of MetricsQL features on top of PromQL:

* Graphite-compatible filters can be passed via `{__graphite__="foo.*.bar"}` syntax.
  See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite#selecting-graphite-metrics).
  VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite#graphite-api-usage) for details.
  See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#selecting-graphite-metrics).
  VictoriaMetrics can be used as Graphite datasource in Grafana. See [these docs](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#graphite-api-usage) for details.
  See also [label_graphite_group](#label_graphite_group) function, which can be used for extracting the given groups from Graphite metric name.
* Lookbehind window in square brackets for [rollup functions](#rollup-functions) may be omitted. VictoriaMetrics automatically selects the lookbehind window
  depending on the `step` query arg passed to [/api/v1/query_range](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#range-query)
@@ -742,7 +742,23 @@ Metric names are stripped from the resulting rollups. Add [keep_metric_names](#k

This function is supported by PromQL.

See also [irate](#irate) and [rollup_rate](#rollup_rate).
See also [irate](#irate), [rollup_rate](#rollup_rate) and [rate_prometheus](#rate_prometheus).

#### rate_prometheus

`rate_prometheus(series_selector[d])` {{% available_from "#" %}} is a [rollup function](#rollup-functions), which calculates the average per-second
increase rate over the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#filtering).
The resulting calculation is equivalent to `increase_prometheus(series_selector[d]) / d`.

It doesn't take into account the last sample before the given lookbehind window `d` when calculating the result, in the same way as Prometheus does.
See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.

Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

This function is usually applied to [counters](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#counter).

See also [increase_prometheus](#increase_prometheus) and [rate](#rate).
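For instance, with a 5-minute lookbehind window, `rate_prometheus(http_requests_total[5m])` is equivalent to `increase_prometheus(http_requests_total[5m]) / 300`, since `d` is 300 seconds here (`http_requests_total` is just an illustrative counter name).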
#### rate_over_sum
209 app/vmselect/vmui/assets/index-BiQY-19a.js (new file)
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -36,10 +36,10 @@
  <meta property="og:title" content="UI for VictoriaMetrics">
  <meta property="og:url" content="https://victoriametrics.com/">
  <meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data">
  <script type="module" crossorigin src="./assets/index-xmjGcv4-.js"></script>
  <script type="module" crossorigin src="./assets/index-BiQY-19a.js"></script>
  <link rel="modulepreload" crossorigin href="./assets/vendor-D8IJGiEn.js">
  <link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
  <link rel="stylesheet" crossorigin href="./assets/index-C85_NB5q.css">
  <link rel="stylesheet" crossorigin href="./assets/index-ojCMu5lE.css">
</head>
<body>
  <noscript>You need to enable JavaScript to run this app.</noscript>
@@ -63,6 +63,8 @@ var (

	cacheSizeStorageTSID = flagutil.NewBytes("storage.cacheSizeStorageTSID", 0, "Overrides max size for storage/tsid cache. "+
		"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#cache-tuning")
	cacheSizeStorageMetricName = flagutil.NewBytes("storage.cacheSizeStorageMetricName", 0, "Overrides max size for storage/metricName cache. "+
		"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#cache-tuning")
	cacheSizeIndexDBIndexBlocks = flagutil.NewBytes("storage.cacheSizeIndexDBIndexBlocks", 0, "Overrides max size for indexdb/indexBlocks cache. "+
		"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#cache-tuning")
	cacheSizeIndexDBDataBlocks = flagutil.NewBytes("storage.cacheSizeIndexDBDataBlocks", 0, "Overrides max size for indexdb/dataBlocks cache. "+
@@ -111,6 +113,7 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
	storage.SetTSIDCacheSize(cacheSizeStorageTSID.IntN())
	storage.SetTagFiltersCacheSize(cacheSizeIndexDBTagFilters.IntN())
	storage.SetMetricNamesStatsCacheSize(cacheSizeMetricNamesStats.IntN())
	storage.SetMetricNameCacheSize(cacheSizeStorageMetricName.IntN())
	mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.IntN())
	mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.IntN())
	mergeset.SetDataBlocksSparseCacheSize(cacheSizeIndexDBDataBlocksSparse.IntN())
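Assuming the new flag behaves like the neighbouring `storage.cacheSize*` flags (a `flagutil.NewBytes` size flag whose default of 0 means "keep the built-in default"), the storage/metricName cache cap could be overridden with something like `-storage.cacheSizeStorageMetricName=512MiB`; the exact size-suffix syntax follows the other cache-tuning flags.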
@@ -1,4 +1,4 @@
FROM golang:1.24.3 AS build-web-stage
FROM golang:1.24.4 AS build-web-stage
COPY build /build

WORKDIR /build
@@ -6,7 +6,7 @@ COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
	GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/

FROM alpine:3.21.3
FROM alpine:3.22.0
USER root

COPY --from=build-web-stage /build/web-amd64 /app/web
@@ -2,9 +2,9 @@
<html lang="en">
<head>
  <meta charset="utf-8"/>
  <link rel="icon" href="/favicon.svg" />
  <link rel="apple-touch-icon" href="/favicon.svg" />
  <link rel="mask-icon" href="/favicon.svg" color="#000000">
  <link rel="icon" href="/favicon.victorialogs.svg" />
  <link rel="apple-touch-icon" href="/favicon.victorialogs.svg" />
  <link rel="mask-icon" href="/favicon.victorialogs.svg" color="#000000">

  <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=5"/>
  <meta name="theme-color" content="#000000"/>
34 app/vmui/packages/vmui/package-lock.json (generated)
@@ -10,6 +10,8 @@
  "dependencies": {
    "@types/lodash.debounce": "^4.0.9",
    "@types/lodash.get": "^4.4.9",
    "@types/lodash.orderBy": "^4.6.9",
    "@types/lodash.throttle": "^4.1.9",
    "@types/qs": "^6.9.18",
    "@types/react": "^19.1.2",
    "@types/react-input-mask": "^3.0.6",
@@ -18,6 +20,8 @@
    "dayjs": "^1.11.13",
    "lodash.debounce": "^4.0.8",
    "lodash.get": "^4.4.2",
    "lodash.orderBy": "^4.6.0",
    "lodash.throttle": "^4.1.1",
    "marked": "^15.0.8",
    "marked-emoji": "^2.0.0",
    "preact": "^10.26.5",
@@ -2191,6 +2195,24 @@
        "@types/lodash": "*"
      }
    },
    "node_modules/@types/lodash.orderBy": {
      "version": "4.6.9",
      "resolved": "https://registry.npmjs.org/@types/lodash.orderby/-/lodash.orderby-4.6.9.tgz",
      "integrity": "sha512-T9o2wkIJOmxXwVTPTmwJ59W6eTi2FseiLR369fxszG649Po/xe9vqFNhf/MtnvT5jrbDiyWKxPFPZbpSVK0SVQ==",
      "license": "MIT",
      "dependencies": {
        "@types/lodash": "*"
      }
    },
    "node_modules/@types/lodash.throttle": {
      "version": "4.1.9",
      "resolved": "https://registry.npmjs.org/@types/lodash.throttle/-/lodash.throttle-4.1.9.tgz",
      "integrity": "sha512-PCPVfpfueguWZQB7pJQK890F2scYKoDUL3iM522AptHWn7d5NQmeS/LTEHIcLr5PaTzl3dK2Z0xSUHHTHwaL5g==",
      "license": "MIT",
      "dependencies": {
        "@types/lodash": "*"
      }
    },
    "node_modules/@types/node": {
      "version": "22.14.1",
      "resolved": "https://registry.npmjs.org/@types/node/-/node-22.14.1.tgz",
@@ -5742,6 +5764,18 @@
      "dev": true,
      "license": "MIT"
    },
    "node_modules/lodash.orderBy": {
      "version": "4.6.0",
      "resolved": "https://registry.npmjs.org/lodash.orderby/-/lodash.orderby-4.6.0.tgz",
      "integrity": "sha512-T0rZxKmghOOf5YPnn8EY5iLYeWCpZq8G41FfqoVHH5QDTAFaghJRmAdLiadEDq+ztgM2q5PjA+Z1fOwGrLgmtg==",
      "license": "MIT"
    },
    "node_modules/lodash.throttle": {
      "version": "4.1.1",
      "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz",
      "integrity": "sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==",
      "license": "MIT"
    },
    "node_modules/loose-envify": {
      "version": "1.4.0",
      "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",

@@ -7,6 +7,8 @@
  "dependencies": {
    "@types/lodash.debounce": "^4.0.9",
    "@types/lodash.get": "^4.4.9",
    "@types/lodash.orderBy": "^4.6.9",
    "@types/lodash.throttle": "^4.1.9",
    "@types/qs": "^6.9.18",
    "@types/react": "^19.1.2",
    "@types/react-input-mask": "^3.0.6",
@@ -15,6 +17,8 @@
    "dayjs": "^1.11.13",
    "lodash.debounce": "^4.0.8",
    "lodash.get": "^4.4.2",
    "lodash.orderBy": "^4.6.0",
    "lodash.throttle": "^4.1.1",
    "marked": "^15.0.8",
    "marked-emoji": "^2.0.0",
    "preact": "^10.26.5",
5 app/vmui/packages/vmui/public/favicon.victorialogs.svg (new file)
@@ -0,0 +1,5 @@
<svg width="48" height="48" fill="#e94600" xmlns="http://www.w3.org/2000/svg">
  <path d="M24.5475 0C10.3246.0265251 1.11379 3.06365 4.40623 6.10077c0 0 12.32997 11.23333 16.58217 14.84083.8131.6896 2.1728 1.1936 3.5191 1.2201h.1199c1.3463-.0265 2.706-.5305 3.5191-1.2201 4.2522-3.5942 16.5422-14.84083 16.5422-14.84083C48.0478 3.06365 38.8636.0265251 24.6674 0"/>
  <path d="M28.1579 27.0159c-.8131.6896-2.1728 1.1936-3.5191 1.2201h-.12c-1.3463-.0265-2.7059-.5305-3.519-1.2201-2.9725-2.5067-13.35639-11.87-17.26201-15.3979v5.4112c0 .5968.22661 1.3793.6265 1.7506C7.00358 21.1936 17.2675 30.5437 20.9731 33.6737c.8132.6896 2.1728 1.1936 3.5191 1.2201h.12c1.3463-.0265 2.7059-.5305 3.519-1.2201 3.679-3.13 13.9429-12.4536 16.6089-14.8939.4132-.3713.6265-1.1538.6265-1.7506V11.618c-3.9323 3.5411-14.3162 12.931-17.2354 15.3979h.0267Z"/>
  <path d="M28.1579 39.748c-.8131.6897-2.1728 1.1937-3.5191 1.2202h-.12c-1.3463-.0265-2.7059-.5305-3.519-1.2202-2.9725-2.4933-13.35639-11.8567-17.26201-15.3978v5.4111c0 .5969.22661 1.3793.6265 1.7507C7.00358 33.9258 17.2675 43.2759 20.9731 46.4058c.8132.6897 2.1728 1.1937 3.5191 1.2202h.12c1.3463-.0265 2.7059-.5305 3.519-1.2202 3.679-3.1299 13.9429-12.4535 16.6089-14.8938.4132-.3714.6265-1.1538.6265-1.7507v-5.4111c-3.9323 3.5411-14.3162 12.931-17.2354 15.3978h.0267Z"/>
</svg>
@@ -33,8 +33,12 @@ const LogsQueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
  const part = logicalParts.find(p => caretPosition[0] >= p.position[0] && caretPosition[0] <= p.position[1]);
  if (!part) return;
  const cursorStartPosition = caretPosition[0] - part.position[0];
  const prevPart = logicalParts.find(p => p.id === part.id - 1);
  const queryBeforeIncompleteFilter = prevPart ? value.substring(0, prevPart.position[1] + 1) : undefined;
  return {
    ...part,
    queryBeforeIncompleteFilter,
    query: value,
    ...getContextData(part, cursorStartPosition)
  };
}, [logicalParts, caretPosition]);
@@ -50,6 +54,8 @@ const LogsQueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
      return fieldValues;
    case ContextType.PipeName:
      return pipeList;
    case ContextType.FilterOrPipeName:
      return [...fieldNames, ...pipeList];
    default:
      return [];
  }
@@ -58,7 +64,7 @@ const LogsQueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
const getUpdatedValue = (insertValue: string, logicalParts: LogicalPart[], id?: number) => {
  return logicalParts.reduce((acc, part) => {
    const value = part.id === id ? insertValue : part.value;
    const separator = part.type === LogicalPartType.Pipe ? " | " : " ";
    const separator = part.separator === "|" ? " | " : " ";
    return `${acc}${separator}${value}`;
  }, "").trim();
};
@@ -70,7 +76,7 @@ const LogsQueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
    modifiedInsert += ":";
  } else if (contextType === ContextType.FilterValue) {
    const insertWithQuotes = value.startsWith("_stream:") ? modifiedInsert : `${JSON.stringify(modifiedInsert)}`;
    modifiedInsert = `${contextData?.filterName || ""}:${insertWithQuotes}`;
    modifiedInsert = `${contextData?.filterName || ""}${contextData?.operator || ":"}${insertWithQuotes}`;
  }

  return modifiedInsert;
@@ -86,7 +92,13 @@ const LogsQueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({

  const insertValue = getModifyInsert(insert, contextType, value, item.type);
  const newValue = getUpdatedValue(insertValue, logicalParts, id);
  const updatedPosition = (position[0] || 1) + insertValue.length + (item.type === ContextType.PipeName ? 1 : 0);
  const logicalPart = logicalParts.find(p => p.id === id);
  const getPositionCorrection = () => {
    if (logicalPart?.type === LogicalPartType.FilterOrPipe) return 1;
    if (item.type === ContextType.PipeName) return 1;
    return 0;
  };
  const updatedPosition = (position[0] || 1) + insertValue.length + getPositionCorrection();

  onSelect(newValue, updatedPosition);
}, [contextData, logicalParts]);

@@ -9,6 +9,7 @@ export const splitLogicalParts = (expr: string) => {
  const input = expr; //.replace(/\s*:\s*/g, ":");
  const parts: LogicalPart[] = [];
  let currentPart = "";
  let separator: undefined | " " | "|" = undefined;
  let isPipePart = false;

  const quotes = ["'", "\"", "`"];
@@ -43,8 +44,9 @@ export const splitLogicalParts = (expr: string) => {
      isPipePart = true;
      const countStartSpaces = currentPart.match(/^ */)?.[0].length || 0;
      const countEndSpaces = currentPart.match(/ *$/)?.[0].length || 0;
      pushPart(currentPart, true, [startIndex + countStartSpaces, i - countEndSpaces - 1], parts);
      pushPart(currentPart, true, [startIndex + countStartSpaces, i - countEndSpaces - 1], parts, separator);
      currentPart = "";
      separator = "|";
      startIndex = i + 1;
      continue;
    }
@@ -54,7 +56,8 @@ export const splitLogicalParts = (expr: string) => {
      const nextStr = input.slice(i).replace(/^\s*/, "");
      const prevStr = input.slice(0, i).replace(/\s*$/, "");
      if (!nextStr.startsWith(":") && !prevStr.endsWith(":")) {
        pushPart(currentPart, false, [startIndex, i - 1], parts);
        pushPart(currentPart, false, [startIndex, i - 1], parts, separator);
        separator = " ";
        currentPart = "";
        startIndex = i + 1;
        continue;
@@ -65,26 +68,35 @@ export const splitLogicalParts = (expr: string) => {
  }

  // push the last part
  pushPart(currentPart, isPipePart, [startIndex, input.length], parts);
  pushPart(currentPart, isPipePart, [startIndex, input.length], parts, separator);

  return parts;
};

const pushPart = (currentPart: string, isPipePart: boolean, position: LogicalPartPosition, parts: LogicalPart[]) => {
const pushPart = (currentPart: string, isPipePart: boolean, position: LogicalPartPosition, parts: LogicalPart[], separator: LogicalPart["separator"]) => {
  const trimmedPart = currentPart.trim();
  if (!trimmedPart) return;
  const isOperator = BUILDER_OPERATORS.includes(trimmedPart.toUpperCase());
  const pipesTypes = [LogicalPartType.Pipe, LogicalPartType.FilterOrPipe];
  const isPreviousPartPipe = parts.length > 0 && pipesTypes.includes(parts[parts.length - 1].type);

  const getType = () => {
    if (isPreviousPartPipe) return LogicalPartType.FilterOrPipe;
    if (isPipePart) return LogicalPartType.Pipe;
    if (isOperator) return LogicalPartType.Operator;
    return LogicalPartType.Filter;
  };

  parts.push({
    id: parts.length,
    value: trimmedPart,
    position,
    type: isPipePart
      ? LogicalPartType.Pipe
      : isOperator ? LogicalPartType.Operator : LogicalPartType.Filter,
    type: getType(),
    separator,
  });
};

export const getContextData = (part: LogicalPart, cursorPos: number) => {
export const getContextData = (part: LogicalPart, cursorPos: number): ContextData => {
  const valueBeforeCursor = part.value.substring(0, cursorPos);
  const valueAfterCursor = part.value.substring(cursorPos);
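Threading `separator` through `pushPart` means each `LogicalPart` now records which delimiter preceded it. For a hypothetical query such as `error | stats count()`, the `stats count()` part carries `separator: "|"` while space-delimited filter parts carry `" "`, which is exactly what the rewritten `getUpdatedValue` above inspects when deciding whether to re-join parts with `" | "` or a plain space.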
@@ -95,23 +95,91 @@ export const getContextData = (part: LogicalPart, cursorPos: number) => {
    contextType: ContextType.Unknown,
  };

  if (part.type === LogicalPartType.Filter) {
    const noColon = !valueBeforeCursor.includes(":") && !valueAfterCursor.includes(":");
    if (noColon) {
      metaData.contextType = ContextType.FilterUnknown;
    } else if (valueBeforeCursor.includes(":")) {
      const [filterName, filterValue] = valueBeforeCursor.split(":");
      metaData.contextType = ContextType.FilterValue;
      metaData.filterName = filterName;
      metaData.valueContext = filterValue;
    } else {
      metaData.contextType = ContextType.FilterName;
    }
  } else if (part.type === LogicalPartType.Pipe) {
    const valueStartWithPipe = PIPE_NAMES.some(p => part.value.startsWith(p));
    metaData.contextType = valueStartWithPipe ? ContextType.PipeValue : ContextType.PipeName;
  }
  // Determine context type based on logical part type
  determineContextType(part, valueBeforeCursor, valueAfterCursor, metaData);

  // Clean up quotes in valueContext
  metaData.valueContext = metaData.valueContext.replace(/^["']|["']$/g, "");

  return metaData;
};

/** Helper function to determine if a string starts with any of the pipe names */
const startsWithPipe = (value: string): boolean => {
  return PIPE_NAMES.some(p => value.startsWith(p));
};

/** Helper function to check for colon presence */
const hasNoColon = (before: string, after: string): boolean => {
  return !before.includes(":") && !after.includes(":");
};

/** Helper function to extract filter name and update metadata for filter values */
const handleFilterValue = (valueBeforeCursor: string, metaData: ContextData): void => {
  const [filterName, ...filterValue] = valueBeforeCursor.split(":");
  metaData.contextType = ContextType.FilterValue;
  metaData.filterName = filterName;
  const enhanceOperators = ["=", "-", "!", "~", "<", ">", "<=", ">="] as const;
  const enhanceOperator = enhanceOperators.find(op => op === filterValue[0]);
  if (enhanceOperator) {
    metaData.valueContext = filterValue.slice(1).join(":");
    metaData.operator = `:${enhanceOperator}`;
  } else {
    metaData.valueContext = filterValue.join(":");
    metaData.operator = ":";
  }
};

/** Function to determine context type based on part type and value */
const determineContextType = (
  part: LogicalPart,
  valueBeforeCursor: string,
  valueAfterCursor: string,
  metaData: ContextData
): void => {
  switch (part.type) {
    case LogicalPartType.Filter:
      handleFilterType(valueBeforeCursor, valueAfterCursor, metaData);
      break;

    case LogicalPartType.Pipe:
      metaData.contextType = startsWithPipe(part.value)
        ? ContextType.PipeValue
        : ContextType.PipeName;
      break;

    case LogicalPartType.FilterOrPipe:
      handleFilterOrPipeType(part.value, valueBeforeCursor, metaData);
      break;
  }
};

/** Handle filter type context determination */
const handleFilterType = (
  valueBeforeCursor: string,
  valueAfterCursor: string,
  metaData: ContextData
): void => {
  if (hasNoColon(valueBeforeCursor, valueAfterCursor)) {
    metaData.contextType = ContextType.FilterUnknown;
  } else if (valueBeforeCursor.includes(":")) {
    handleFilterValue(valueBeforeCursor, metaData);
  } else {
    metaData.contextType = ContextType.FilterName;
  }
};

/** Handle FilterOrPipeType context determination */
const handleFilterOrPipeType = (
  value: string,
  valueBeforeCursor: string,
  metaData: ContextData
): void => {
  if (startsWithPipe(value)) {
    metaData.contextType = ContextType.PipeValue;
  } else if (valueBeforeCursor.includes(":")) {
    handleFilterValue(valueBeforeCursor, metaData);
  } else {
    metaData.contextType = ContextType.FilterOrPipeName;
  }
};
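As a worked example of `handleFilterValue` above: with the caret right after the operator in a hypothetical `price:>=` input, `valueBeforeCursor.split(":")` yields `["price", ">="]`, so the operator is detected as `:>=` and `valueContext` is empty; `getModifyInsert` then reassembles the completion as `price:>="..."` instead of the previously hard-coded `price:"..."`. If more text follows the operator (e.g. `price:>=1`), the segment no longer equals a bare operator and the default `:` is kept, with `valueContext` set to `>=1`.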
@@ -2,15 +2,19 @@ export enum LogicalPartType {
|
||||
Filter = "Filter",
|
||||
Pipe = "Pipe",
|
||||
Operator = "Operator",
|
||||
FilterOrPipe = "FilterOrPipe",
|
||||
}
|
||||
|
||||
export type LogicalPartPosition = [start: number, end: number];
|
||||
|
||||
export type LogicalPartSeparator = " " | "|";
|
||||
|
||||
export interface LogicalPart {
|
||||
id: number;
|
||||
value: string;
|
||||
type: LogicalPartType;
|
||||
position: LogicalPartPosition;
|
||||
separator?: LogicalPartSeparator;
|
||||
}
|
||||
|
||||
export interface ContextData {
|
||||
@@ -19,6 +23,10 @@ export interface ContextData {
|
||||
contextType: ContextType;
|
||||
valueContext: string;
|
||||
filterName?: string;
|
||||
query?: string;
|
||||
queryBeforeIncompleteFilter?: string;
|
||||
separator?: LogicalPartSeparator;
|
||||
operator?: ":" | ":!" | ":-" | ":=" | ":~" | ":<" | ":>" | ":<=" | ":>=";
|
||||
}
|
||||
|
||||
export enum ContextType {
|
||||
@@ -28,4 +36,5 @@ export enum ContextType {
|
||||
PipeName = "Pipes",
|
||||
PipeValue = "PipeValue",
|
||||
Unknown = "Unknown",
|
||||
FilterOrPipeName = "FilterOrPipeName",
|
||||
}
|
||||
|
||||
@@ -10,11 +10,11 @@ import { AUTOCOMPLETE_LIMITS } from "../../../../constants/queryAutocomplete";
 import { LogsFiledValues } from "../../../../api/types";
 import { useLogsDispatch, useLogsState } from "../../../../state/logsPanel/LogsStateContext";
 import { useTenant } from "../../../../hooks/useTenant";
+import { generateQuery } from "./utils";

 type FetchDataArgs = {
   urlSuffix: string;
-  setter: Dispatch<SetStateAction<AutocompleteOptions[]>>
-  type: ContextType;
+  setter: (value: LogsFiledValues[]) => void;
   params?: URLSearchParams;
 }

@@ -24,7 +24,8 @@ const icons = {
   [ContextType.FilterValue]: <ValueIcon/>,
   [ContextType.PipeName]: <FunctionIcon/>,
   [ContextType.PipeValue]: <LabelIcon/>,
-  [ContextType.Unknown]: <ValueIcon/>
+  [ContextType.Unknown]: <ValueIcon/>,
+  [ContextType.FilterOrPipeName]: <FunctionIcon/>
 };

 export const useFetchLogsQLOptions = (contextData?: ContextData) => {
@@ -61,7 +62,7 @@ export const useFetchLogsQLOptions = (contextData?: ContextData) => {
     }));
   };

-  const fetchData = async ({ urlSuffix, setter, type, params }: FetchDataArgs) => {
+  const fetchData = async ({ urlSuffix, setter, params }: FetchDataArgs) => {
     abortControllerRef.current.abort();
     abortControllerRef.current = new AbortController();
     const { signal } = abortControllerRef.current;
@@ -73,7 +74,7 @@ export const useFetchLogsQLOptions = (contextData?: ContextData) => {
     try {
       const cachedData = autocompleteCache.get(key);
       if (cachedData) {
-        setter(processData(cachedData, type));
+        setter(cachedData);
         setLoading(false);
         return;
       }
@@ -86,7 +87,7 @@ export const useFetchLogsQLOptions = (contextData?: ContextData) => {
       if (response.ok) {
         const data = await response.json();
         const value = (data?.values || []) as LogsFiledValues[];
-        setter(value ? processData(value, type) : []);
+        setter(value || []);
         dispatch({ type: "SET_AUTOCOMPLETE_CACHE", payload: { key, value } });
       }
       setLoading(false);
@@ -101,7 +102,7 @@ export const useFetchLogsQLOptions = (contextData?: ContextData) => {

   // fetch field names
   useEffect(() => {
-    const validContexts = [ContextType.FilterName, ContextType.FilterUnknown];
+    const validContexts = [ContextType.FilterName, ContextType.FilterUnknown, ContextType.FilterOrPipeName];
     const isInvalidContext = !validContexts.includes(contextData?.contextType || ContextType.Unknown);
     if (!serverUrl || isInvalidContext) {
       return;
@@ -109,11 +110,14 @@ export const useFetchLogsQLOptions = (contextData?: ContextData) => {

     setFieldNames([]);

+    const setter = (filterNames: LogsFiledValues[]) => {
+      setFieldNames(processData(filterNames, ContextType.FilterName));
+    };
+
     fetchData({
       urlSuffix: "field_names",
-      setter: setFieldNames,
-      type: ContextType.FilterName,
-      params: getQueryParams({ query: "*" })
+      setter: setter,
+      params: getQueryParams({ query: contextData?.queryBeforeIncompleteFilter || "*" })
     });

     return () => abortControllerRef.current?.abort();
@@ -128,11 +132,14 @@ export const useFetchLogsQLOptions = (contextData?: ContextData) => {

     setFieldValues([]);

+    const setter = (filterValues: LogsFiledValues[]) => {
+      setFieldValues(processData(filterValues, ContextType.FilterValue));
+    };
+
     fetchData({
       urlSuffix: "field_values",
-      setter: setFieldValues,
-      type: ContextType.FilterValue,
-      params: getQueryParams({ query: "*", field: contextData.filterName })
+      setter: setter,
+      params: getQueryParams({ query: generateQuery(contextData), field: contextData.filterName })
     });

     return () => abortControllerRef.current?.abort();
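Distilled, fetchData above is a cache-first lookup guarded by one shared AbortController, so a newer keystroke cancels the previous request before it can overwrite fresher suggestions. A minimal standalone sketch of that pattern (hypothetical names, not the hook's actual code):

// Sketch: cache-first fetch with stale-request cancellation (hypothetical helper).
const cache = new Map<string, unknown>();
let controller = new AbortController();

async function fetchOptions(url: string): Promise<unknown> {
  controller.abort();                  // cancel any previous in-flight request
  controller = new AbortController();
  const cached = cache.get(url);
  if (cached) return cached;           // cache hit: no network round-trip
  const response = await fetch(url, { signal: controller.signal });
  const data = await response.json();
  cache.set(url, data);
  return data;
}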
@@ -0,0 +1,131 @@
import { expect } from "vitest";
import { generateQuery } from "./utils";
import { ContextType } from "./types";

describe("utils", () => {
  describe("_time", () => {
    it("should return the trimmed value by `-`", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "_stream:{type=\"WatchEvent\"}",
        contextType: ContextType.FilterValue,
        filterName: "_time",
        query: "_stream:{type=\"WatchEvent\"} _time:2025-04-1",
        valueAfterCursor: "",
        valueBeforeCursor: "_time=2025-04-1",
        valueContext: "2025-04-1"
      })).toStrictEqual("_stream:{type=\"WatchEvent\"} _time:2025-04");
    });

    it("should return the trimmed value by `:` if char `-` also exists in the query", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "_stream:{type=\"WatchEvent\"}",
        contextType: ContextType.FilterValue,
        filterName: "_time",
        query: "_stream:{type=\"WatchEvent\"} _time:2025-04-10T23:45:5",
        valueAfterCursor: "",
        valueBeforeCursor: "_time=2025-04-10T23:45:5",
        valueContext: "2025-04-10T23:45:5"
      })).toStrictEqual("_stream:{type=\"WatchEvent\"} _time:2025-04-10T23:45");
    });

    it("should return default `*` instead of an incomplete _time filter", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "_stream:{type=\"WatchEvent\"}",
        contextType: ContextType.FilterValue,
        filterName: "_time",
        query: "_stream:{type=\"WatchEvent\"} _time:202",
        valueAfterCursor: "",
        valueBeforeCursor: "_time=202",
        valueContext: "202"
      })).toStrictEqual("_stream:{type=\"WatchEvent\"} *");
    });
  });

  describe("_stream", () => {
    it("should add regexp to filter value", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "",
        contextType: ContextType.FilterValue,
        filterName: "_stream",
        query: "_stream:{type=\"WatchEve",
        valueAfterCursor: "",
        valueBeforeCursor: "_stream:{type=\"WatchEve",
        valueContext: "{type=\"WatchEve"
      })).toStrictEqual("_stream:{type=~\"WatchEve.*\"}");
    });

    it("should add regexp to filter value if cursor is in the middle of the value", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "",
        contextType: ContextType.FilterValue,
        filterName: "_stream",
        query: "_stream:{type=\"WatchEve\"}",
        valueAfterCursor: "",
        valueBeforeCursor: "_stream:{type=\"WatchEve",
        valueContext: "{type=\"WatchEve"
      })).toStrictEqual("_stream:{type=~\"WatchEve.*\"}");
    });

    it("should return * if there is no value after =", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "",
        contextType: ContextType.FilterValue,
        filterName: "_stream",
        query: "_stream:{type=",
        valueAfterCursor: "",
        valueBeforeCursor: "_stream:{type=",
        valueContext: "{type="
      })).toStrictEqual("*");
    });
  });

  it("_msg", () => {
    expect(generateQuery({
      queryBeforeIncompleteFilter: "_stream:{type=\"WatchEvent\"}",
      contextType: ContextType.FilterValue,
      filterName: "_msg",
      query: "_stream:{type=\"WatchEvent\"} _msg:453",
      valueAfterCursor: "",
      valueBeforeCursor: "_msg:453",
      valueContext: "453"
    })).toStrictEqual("_stream:{type=\"WatchEvent\"} *");
  });

  it("_stream_id", () => {
    expect(generateQuery({
      queryBeforeIncompleteFilter: "_stream:{type=\"WatchEvent\"}",
      contextType: ContextType.FilterValue,
      filterName: "_stream_id",
      query: "_stream:{type=\"WatchEvent\"} _stream_id:453",
      valueAfterCursor: "",
      valueBeforeCursor: "_stream_id:453",
      valueContext: "453"
    })).toStrictEqual("_stream:{type=\"WatchEvent\"} *");
  });

  describe("other fields", () => {
    it("should add prefix filter to other types of field names", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "",
        contextType: ContextType.FilterValue,
        filterName: "repo.name",
        query: "repo.name:Victori",
        valueAfterCursor: "",
        valueBeforeCursor: "repo.name:Victori",
        valueContext: "Victori"
      })).toStrictEqual("repo.name:Victori*");
    });

    it("should add prefix filter to values escaped via double quotes", () => {
      expect(generateQuery({
        queryBeforeIncompleteFilter: "",
        contextType: ContextType.FilterValue,
        filterName: "repo.name",
        query: "repo.name:\"Victori",
        valueAfterCursor: "",
        valueBeforeCursor: "repo.name:\"Victori",
        valueContext: "Victori"
      })).toStrictEqual("repo.name:Victori*");
    });
  });
});
@@ -0,0 +1,61 @@
import { ContextData } from "./types";

const getStreamFieldQuery = (valueContext: string) => {
  if (valueContext.includes("=")) {
    const [fieldName, fieldValue] = valueContext.split("=");
    if (fieldValue) {
      return `_stream:${fieldName}=~${fieldValue}.*"}`;
    }
  }

  return "*";
};

const getLastPartUntilDelimiter = (value: string, delimiter: string) => {
  const lastIndexOfDelimiter = value.lastIndexOf(delimiter);
  return lastIndexOfDelimiter !== -1 ? value.slice(0, lastIndexOfDelimiter) : "";
};

const getDateQuery = (contextData: ContextData) => {
  let fieldValue = "";
  if (contextData.valueContext.includes(":")) {
    fieldValue = getLastPartUntilDelimiter(contextData.valueContext, ":");
  } else if (contextData.valueContext.includes("-")) {
    fieldValue = getLastPartUntilDelimiter(contextData.valueContext, "-");
  }
  return fieldValue ? `${contextData.filterName}:${fieldValue}` : "*";
};

/**
 * Generates a query string based on the provided context data.
 *
 * The function processes the input based on the `filterName` property:
 *
 * - If `filterName` is `_msg` or `_stream_id`, the query cannot be narrowed down,
 *   so a wildcard query (`"*"`) is returned.
 *
 * - If `filterName` is `_stream`, the query is generated using a regexp (`{type=~"value.*"}`).
 *
 * - If `filterName` is `_time`, a simplified query is created by trimming the value up
 *   to the last occurrence of a delimiter such as `-` or `:`.
 *
 * - For all other values of `filterName`, a prefix query is built from the filter name
 *   and the current value with a `*` appended (e.g., `"name:value*"`).
 *
 * @param {ContextData} contextData - The context object containing query parameters and metadata.
 * @returns {string} The generated query string.
 */
export const generateQuery = (contextData: ContextData): string => {
  let fieldQuery = "";
  if (!contextData.filterName || !contextData.query || ["_msg", "_stream_id"].includes(contextData.filterName)) {
    fieldQuery = "*";
  } else if ("_stream" === contextData.filterName) {
    fieldQuery = getStreamFieldQuery(contextData.valueContext);
  } else if ("_time" === contextData.filterName) {
    fieldQuery = getDateQuery(contextData);
  } else {
    fieldQuery = `${contextData.filterName}:${contextData.valueContext}*`;
  }

  return contextData.queryBeforeIncompleteFilter ? `${contextData.queryBeforeIncompleteFilter}${contextData.separator ?? " "}${fieldQuery}` : fieldQuery;
};
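One detail worth calling out: the final line of generateQuery stitches queryBeforeIncompleteFilter, the part separator, and the generated field query back together. A sketch with hypothetical values (not one of the unit tests above):

generateQuery({
  contextType: ContextType.FilterValue,
  filterName: "host",
  query: "level:error | host:eu-",
  queryBeforeIncompleteFilter: "level:error",
  separator: "|",
  valueAfterCursor: "",
  valueBeforeCursor: "host:eu-",
  valueContext: "eu-"
}); // -> "level:error|host:eu-*"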
@@ -0,0 +1,43 @@
import { hasSortPipe } from "./sort";

describe("hasSortPipe()", () => {
  /** Queries that MUST be recognised as containing a sort/order pipe. */
  const positive: string[] = [
    // ───── basic usage ─────
    "sort by (_time)",
    "| sort by (_time)",
    "|sort(_time) desc",
    "| order by (foo desc)",
    "_time:5m | sort by (_stream, _time)",

    // ───── documented options ─────
    "_time:1h | sort by (request_duration desc) limit 10",
    "_time:1h | sort by (request_duration desc) partition by (host) limit 3",
    "_time:5m | sort by (_time) rank as position",

    // ───── whitespace / tabs ─────
    "|\t sort\tby (host)",

    // ───── no space after the pipe ─────
    "foo|sort by (_time)",
  ];

  /** Queries that MUST **not** be recognised (false positives). */
  const negative: string[] = [
    "", // empty
    "error | sample 100", // no sort
    "|sorted(field)", // 'sorted' ≠ 'sort'
    "|sorter(field)", // 'sorter' ≠ 'sort'
    "my_sort(field)", // function name
    "| sorta by (field)", // 'sorta'
    "foo | orderliness by (bar)", // 'orderliness' ≠ 'order'
  ];

  it.each(positive)("detects pipe in ➜ %s", query => {
    expect(hasSortPipe(query)).toBe(true);
  });

  it.each(negative)("does NOT detect pipe in ➜ %s", query => {
    expect(hasSortPipe(query)).toBe(false);
  });
});

@@ -0,0 +1,5 @@
const hasSortPipeRe = /(?:^|\|)\s*(?:sort|order)\b/i;

export function hasSortPipe(query: string): boolean {
  return hasSortPipeRe.test(query);
}
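Each part of that regex maps to one class of test case above; an annotated restatement (commentary only, the pattern itself is unchanged):

// (?:^|\|)        only at the start of the query or right after a "|"
//                 (which is why "my_sort(field)" is rejected)
// \s*             tolerate spaces and tabs between the pipe and the keyword
// (?:sort|order)  either pipe name
// \b              word boundary, so "sorted", "sorter" and "sorta" are rejected
// i flag          case-insensitive match
const hasSortPipeRe = /(?:^|\|)\s*(?:sort|order)\b/i;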
@@ -1,4 +1,4 @@
-import React, { FC, useMemo, useState } from "preact/compat";
+import { FC, useMemo, useState } from "preact/compat";
 import useBoolean from "../../../hooks/useBoolean";
 import { RestartIcon, SettingsIcon } from "../../Main/Icons";
 import Button from "../../Main/Button/Button";
@@ -19,8 +19,8 @@ import {
   LOGS_URL_PARAMS,
   WITHOUT_GROUPING
 } from "../../../constants/logs";
-import { getFromStorage, saveToStorage } from "../../../utils/storage";
 import LogParsingSwitches from "../../Configurators/LogsSettings/LogParsingSwitches";
+import { useLocalStorageBoolean } from "../../../hooks/useLocalStorageBoolean";

 const {
   GROUP_BY,
@@ -48,7 +48,7 @@ const GroupLogsConfigurators: FC<Props> = ({ logs }) => {
   const [dateFormat, setDateFormat] = useState(searchParams.get(DATE_FORMAT) || LOGS_DATE_FORMAT);
   const [errorFormat, setErrorFormat] = useState("");

-  const [disabledHovers, setDisabledHovers] = useState(!!getFromStorage("LOGS_DISABLED_HOVERS"));
+  const [disabledHovers, handleSetDisabledHovers] = useLocalStorageBoolean("LOGS_DISABLED_HOVERS");

   const isGroupChanged = groupBy !== LOGS_GROUP_BY;
   const isDisplayFieldsChanged = displayFields.length !== 1 || displayFields[0] !== LOGS_DISPLAY_FIELDS;
@@ -62,7 +62,8 @@ const GroupLogsConfigurators: FC<Props> = ({ logs }) => {
   ].some(Boolean);

   const logsKeys = useMemo(() => {
-    return Array.from(new Set(logs.map(l => Object.keys(l)).flat()));
+    const uniqueKeys = new Set(logs.map(l => Object.keys(l)).flat());
+    return Array.from(uniqueKeys).sort((a, b) => a.localeCompare(b));
   }, [logs]);

   const {
@@ -116,11 +117,6 @@ const GroupLogsConfigurators: FC<Props> = ({ logs }) => {
     handleClose();
   };

-  const handleSetDisabledHovers = (value: boolean) => {
-    setDisabledHovers(value);
-    saveToStorage("LOGS_DISABLED_HOVERS", value);
-  };
-
   const tooltipContent = () => {
     if (!hasChanges) return title;
     return (
@@ -1,4 +1,4 @@
-import React, { FC } from "preact/compat";
+import { FC } from "preact/compat";
 import classNames from "classnames";
 import { MouseEvent as ReactMouseEvent, ReactNode } from "react";
 import "./style.scss";
@@ -14,6 +14,7 @@ interface ButtonProps {
   disabled?: boolean
   children?: ReactNode
   className?: string
+  "data-id"?: string
   onClick?: (e: ReactMouseEvent<HTMLButtonElement>) => void
   onMouseDown?: (e: ReactMouseEvent<HTMLButtonElement>) => void
 }
@@ -31,6 +32,7 @@ const Button: FC<ButtonProps> = ({
   disabled,
   onClick,
   onMouseDown,
+  "data-id": dataId
 }) => {

   const classesButton = classNames({
@@ -50,6 +52,7 @@ const Button: FC<ButtonProps> = ({
       aria-label={ariaLabel}
       onClick={onClick}
       onMouseDown={onMouseDown}
+      data-id={dataId}
     >
       {startIcon}{children}{endIcon}
     </button>
@@ -1,4 +1,3 @@
-import React from "react";
 import { getCssVariable } from "../../../utils/theme";

 export const LogoIcon = () => (
@@ -643,3 +642,47 @@ export const PauseIcon = () => (
     <path d="M6 19h4V5H6v14zm8-14v14h4V5h-4z" />
   </svg>
 );
+
+export const ScrollToTopIcon = () => (
+  <svg
+    viewBox="0 0 24 24"
+    fill="currentColor"
+  >
+    <path
+      d="M8 12l4-4 4 4m-4-4v12"
+      strokeWidth="2"
+      stroke="currentColor"
+      fill="none"
+    />
+  </svg>
+);
+
+export const SortIcon = () => (
+  <svg
+    viewBox="0 0 24 24"
+    fill="currentColor"
+  >
+    <path d="M4 3 L4 15 L1.5 15 L5.5 21 L9.5 15 L7 15 L7 3 Z"/>
+    <path d="M13 21 L13 9 L10.5 9 L14.5 3 L18.5 9 L16 9 L16 21 Z"/>
+  </svg>
+);
+
+export const SortArrowDownIcon = () => (
+  <svg
+    viewBox="0 0 24 24"
+    fill="currentColor"
+  >
+    <path d="M10.5 3 L10.5 15 L8 15 L12 21 L16 15 L13.5 15 L13.5 3 Z"/>
+  </svg>
+);
+
+export const SortArrowUpIcon = () => (
+  <svg
+    viewBox="0 0 24 24"
+    fill="currentColor"
+  >
+    <path d="M10.5 21 L10.5 9 L8 9 L12 3 L16 9 L13.5 9 L13.5 21 Z"/>
+  </svg>
+);
@@ -0,0 +1,59 @@
import { FC, useEffect, useState } from "preact/compat";
import Button from "../Main/Button/Button";
import Tooltip from "../Main/Tooltip/Tooltip";
import { ScrollToTopIcon } from "../Main/Icons";
import classNames from "classnames";
import "./style.scss";
import { useCallback } from "react";

interface ScrollToTopButtonProps {
  className?: string;
}

const ScrollToTopButton: FC<ScrollToTopButtonProps> = ({ className }) => {
  const [isVisible, setIsVisible] = useState(false);

  const checkScrollPosition = () => {
    const scrollPosition = window.pageYOffset || document.documentElement.scrollTop;
    const visibleHeightThreshold = window.innerHeight;

    setIsVisible(scrollPosition > visibleHeightThreshold);
  };

  const scrollToTop = useCallback(() => {
    window.scrollTo({
      top: 0,
      behavior: "smooth"
    });
  }, []);

  useEffect(() => {
    window.addEventListener("scroll", checkScrollPosition);
    checkScrollPosition();

    return () => {
      window.removeEventListener("scroll", checkScrollPosition);
    };
  }, []);

  return (
    <div
      className={classNames({
        "vm-scroll-to-top-button": true,
        "vm-scroll-to-top-button_visible": isVisible
      }, className)}
    >
      <Tooltip title="Scroll to top">
        <Button
          variant="contained"
          color="primary"
          onClick={scrollToTop}
          ariaLabel="Scroll to top"
          startIcon={<ScrollToTopIcon />}
        />
      </Tooltip>
    </div>
  );
};

export default ScrollToTopButton;
@@ -0,0 +1,26 @@
@use "src/styles/variables" as *;

.vm-scroll-to-top-button {
  position: fixed;
  bottom: 20px;
  right: 20px;
  z-index: 4;
  opacity: 0;
  visibility: hidden;
  transition: opacity 0.3s, visibility 0.3s;

  &_visible {
    opacity: 1;
    visibility: visible;
  }

  .vm-button {
    border-radius: 50%;
    width: 40px;
    height: 40px;
    display: flex;
    align-items: center;
    justify-content: center;
    box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
  }
}
@@ -1,4 +1,4 @@
-import React, { FC, useEffect, useRef, useMemo } from "preact/compat";
+import { FC, useEffect, useRef, useMemo } from "preact/compat";
 import Button from "../../Main/Button/Button";
 import { SearchIcon, SettingsIcon } from "../../Main/Icons";
 import "./style.scss";
@@ -18,8 +18,8 @@ const title = "Table settings";
 interface TableSettingsProps {
   columns: string[];
   selectedColumns?: string[];
-  tableCompact: boolean;
-  toggleTableCompact: () => void;
+  tableCompact?: boolean;
+  toggleTableCompact?: () => void;
   onChangeColumns: (arr: string[]) => void
 }

@@ -49,8 +49,8 @@ const TableSettings: FC<TableSettingsProps> = ({

   const filteredColumns = useMemo(() => {
     const allColumns = customColumns.concat(columns);
-    if (!searchColumn) return allColumns;
-    return allColumns.filter(col => col.includes(searchColumn));
+    const result = searchColumn ? allColumns.filter(col => col.includes(searchColumn)) : allColumns;
+    return result.sort((a, b) => a.localeCompare(b));
   }, [columns, customColumns, searchColumn]);

   const isAllChecked = useMemo(() => {
@@ -195,18 +195,20 @@ const TableSettings: FC<TableSettingsProps> = ({
         </div>
       </div>
     </div>
-    <div className="vm-table-settings-modal-section">
-      <div className="vm-table-settings-modal-section__title">
+    {toggleTableCompact && tableCompact !== undefined && (
+      <div className="vm-table-settings-modal-section">
+        <div className="vm-table-settings-modal-section__title">
         Table view
       </div>
-      <div className="vm-table-settings-modal-columns-list__item">
-        <Switch
-          label={"Compact view"}
-          value={tableCompact}
-          onChange={toggleTableCompact}
-        />
-      </div>
-    </div>
+        <div className="vm-table-settings-modal-columns-list__item">
+          <Switch
+            label={"Compact view"}
+            value={tableCompact}
+            onChange={toggleTableCompact}
+          />
+        </div>
+      </div>
+    )}
   </Modal>)}
 </div>
);
@@ -0,0 +1,70 @@
import { act, renderHook } from "@testing-library/preact";
import { useLocalStorageBoolean } from "./useLocalStorageBoolean";
import * as storageUtils from "../utils/storage";
import { Mock } from "vitest";
import { StorageKeys } from "../utils/storage";

vi.mock("../utils/storage");

const testStorageKey = "TEST_STORAGE_KEY" as StorageKeys;

describe("useLocalStorageBoolean", () => {
  const { getFromStorage, saveToStorage } = storageUtils;

  beforeEach(() => {
    vi.clearAllMocks();
  });

  it("initializes with the value from localStorage", () => {
    const mockGetFromStorage = getFromStorage as Mock;
    mockGetFromStorage.mockReturnValueOnce(true);

    const { result } = renderHook(() => useLocalStorageBoolean(testStorageKey));

    expect(result.current[0]).toBe(true);
    expect(getFromStorage).toHaveBeenCalledWith(testStorageKey);
  });

  it("updates localStorage when the setter is called", () => {
    const mockGetFromStorage = getFromStorage as Mock;
    mockGetFromStorage.mockReturnValueOnce(false);

    const { result } = renderHook(() => useLocalStorageBoolean(testStorageKey));

    act(() => {
      result.current[1](true);
    });

    expect(saveToStorage).toHaveBeenCalledWith(testStorageKey, true);
    expect(result.current[0]).toBe(false);
  });

  it("reacts to changes in localStorage via storage events", () => {
    const mockGetFromStorage = getFromStorage as Mock;
    mockGetFromStorage.mockReturnValueOnce(false);

    const { result } = renderHook(() => useLocalStorageBoolean(testStorageKey));

    // Simulate a storage event
    act(() => {
      mockGetFromStorage.mockReturnValueOnce(true);
      window.dispatchEvent(new StorageEvent("storage", { key: testStorageKey, newValue: "true" }));
    });

    expect(result.current[0]).toBe(true);
  });

  it("does not update state if the localStorage value remains the same", () => {
    const mockGetFromStorage = getFromStorage as Mock;
    mockGetFromStorage.mockReturnValueOnce(false);

    const { result } = renderHook(() => useLocalStorageBoolean(testStorageKey));

    act(() => {
      mockGetFromStorage.mockReturnValueOnce(false);
      window.dispatchEvent(new StorageEvent("storage", { key: testStorageKey, newValue: "false" }));
    });

    expect(result.current[0]).toBe(false);
  });
});
app/vmui/packages/vmui/src/hooks/useLocalStorageBoolean.ts (new file, 31 lines)
@@ -0,0 +1,31 @@
import { useMemo, useState } from "preact/compat";
import { getFromStorage, saveToStorage, StorageKeys } from "../utils/storage";
import useEventListener from "./useEventListener";
import { useCallback } from "react";

/**
 * A custom hook that synchronizes a boolean state with a value stored in localStorage.
 *
 * @param {StorageKeys} key - The key used to access the corresponding value in localStorage.
 * @returns {[boolean, function]} A tuple containing the current boolean value from localStorage and a setter function to update the value in localStorage.
 *
 * The hook listens to the "storage" event to automatically update the state when the localStorage value changes.
 */
export const useLocalStorageBoolean = (key: StorageKeys): [boolean, (value: boolean) => void] => {
  const [value, setValue] = useState(!!getFromStorage(key));

  const handleUpdateStorage = useCallback(() => {
    const newValue = !!getFromStorage(key);
    if (newValue !== value) {
      setValue(newValue);
    }
  }, [key, value]);

  const setNewValue = useCallback((newValue: boolean) => {
    saveToStorage(key, newValue);
  }, [key]);

  useEventListener("storage", handleUpdateStorage);

  return useMemo(() => [value, setNewValue], [value, setNewValue]);
};
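In practice a component just destructures the hook, as GroupLogsConfigurators does above. A minimal usage sketch (the call site is hypothetical; "LOGS_DISABLED_HOVERS" is a real StorageKeys value used in this diff):

const [disabledHovers, setDisabledHovers] = useLocalStorageBoolean("LOGS_DISABLED_HOVERS");

// Reading: `disabledHovers` mirrors localStorage and follows cross-tab "storage" events.
// Writing: the setter persists straight to localStorage; local state catches up
// once the corresponding "storage" event is observed.
setDisabledHovers(true);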
@@ -1,4 +1,4 @@
-import React, { FC, useEffect, useMemo, useState } from "preact/compat";
+import { FC, useEffect, useMemo, useState } from "preact/compat";
 import ExploreLogsBody from "./ExploreLogsBody/ExploreLogsBody";
 import useStateSearchParams from "../../hooks/useStateSearchParams";
 import useSearchParamsFromObject from "../../hooks/useSearchParamsFromObject";
@@ -18,6 +18,7 @@ import { useSearchParams } from "react-router-dom";
 import { useQueryDispatch, useQueryState } from "../../state/query/QueryStateContext";
 import { getUpdatedHistory } from "../../components/QueryHistory/utils";
 import { useDebounceCallback } from "../../hooks/useDebounceCallback";
+import usePrevious from "../../hooks/usePrevious";

 const storageLimit = Number(getFromStorage("LOGS_LIMIT"));
 const defaultLimit = isNaN(storageLimit) ? LOGS_ENTRIES_LIMIT : storageLimit;
@@ -30,6 +31,7 @@ const ExploreLogs: FC = () => {
   const { setSearchParamsFromKeys } = useSearchParamsFromObject();
   const [searchParams] = useSearchParams();
   const hideChart = useMemo(() => searchParams.get("hide_chart"), [searchParams]);
+  const prevHideChart = usePrevious(hideChart);

   const [limit, setLimit] = useStateSearchParams(defaultLimit, "limit");
   const [query, setQuery] = useStateSearchParams("*", "query");
@@ -118,11 +120,10 @@ const ExploreLogs: FC = () => {
   }, [query, isUpdatingQuery]);

   useEffect(() => {
     if (!hideChart) debouncedFetchLogs(period, true);
-    return () => {
-      debouncedFetchLogs.cancel?.();
-    };
-  }, [hideChart, period]);
+    if (!hideChart && prevHideChart) {
+      fetchLogHits(period);
+    }
+  }, [hideChart, prevHideChart, period]);

   return (
     <div className="vm-explore-logs">

@@ -5,7 +5,7 @@

 &-header {
   background-color: $color-background-block;
-  z-index: 1;
+  z-index: 3;
   margin: -$padding-medium 0-$padding-medium 0;
   position: sticky;
   top: 0;
@@ -1,21 +1,67 @@
-import React, { FC } from "preact/compat";
+import React, { FC, useMemo, useCallback, createPortal } from "preact/compat";
 import DownloadLogsButton from "../../../DownloadLogsButton/DownloadLogsButton";
-import { createPortal } from "preact/compat";
 import JsonViewComponent from "../../../../../components/Views/JsonView/JsonView";
 import { ViewProps } from "../../types";
 import EmptyLogs from "../components/EmptyLogs/EmptyLogs";
-import { useCallback } from "react";
+import JsonViewSettings from "./JsonViewSettings/JsonViewSettings";
+import { useSearchParams } from "react-router-dom";
+import orderBy from "lodash.orderby";
 import "./style.scss";
+import { Logs } from "../../../../../api/types";
+import { SortDirection } from "./types";

 const MemoizedJsonView = React.memo(JsonViewComponent);

+const jsonQuerySortParam = "json_sort";
+const fieldSortQueryParamName = "json_field_sort";
+
 const JsonView: FC<ViewProps> = ({ data, settingsRef }) => {
   const getLogs = useCallback(() => data, [data]);

+  const [searchParams] = useSearchParams();
+  const sortParam = searchParams.get(jsonQuerySortParam);
+  const fieldSortParam = searchParams.get(fieldSortQueryParamName) as SortDirection;
+
+  const [sortField, sortDirection] = useMemo(() => {
+    const [sortField, sortDirection] = sortParam?.split(":").map(decodeURIComponent) || [];
+    return [sortField, sortDirection as "asc" | "desc" | undefined];
+  }, [sortParam]);
+
+  const fields = useMemo(() => {
+    const keys = new Set(data.flatMap(Object.keys));
+    return Array.from(keys);
+  }, [data]);
+
+  const orderedFieldsData = useMemo(() => {
+    if (!fieldSortParam) return data;
+    const orderedFields = fields.toSorted((a, b) => fieldSortParam === "asc" ? a.localeCompare(b) : b.localeCompare(a));
+    return data.map((item) => {
+      return orderedFields.reduce((acc, field) => {
+        if (item[field]) acc[field] = item[field];
+        return acc;
+      }, {} as Logs);
+    });
+  }, [fields, fieldSortParam, data]);
+
+  const sortedData = useMemo(() => {
+    if (!sortField || !sortDirection) return orderedFieldsData;
+    return orderBy(orderedFieldsData, [sortField], [sortDirection]);
+  }, [orderedFieldsData, sortField, sortDirection]);
+
   const renderSettings = () => {
     if (!settingsRef.current) return null;

     return createPortal(
-      data.length > 0 && <DownloadLogsButton getLogs={getLogs} />,
+      data.length > 0 && (
+        <div className="vm-json-view__settings-container">
+          <DownloadLogsButton getLogs={getLogs} />
+          <JsonViewSettings
+            fields={fields}
+            sortQueryParamName={jsonQuerySortParam}
+            fieldSortQueryParamName={fieldSortQueryParamName}
+          />
+        </div>
+      ),
       settingsRef.current
     );
   };
@@ -25,9 +71,11 @@ const JsonView: FC<ViewProps> = ({ data, settingsRef }) => {
   return (
     <>
       {renderSettings()}
-      <MemoizedJsonView data={data} />
+      <MemoizedJsonView
+        data={sortedData}
+      />
     </>
   );
 };

-export default JsonView;
+export default JsonView;
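Two URL parameters drive the sorting above: json_sort carries a "field:direction" pair whose segments are decoded with decodeURIComponent, and json_field_sort carries a bare "asc"/"desc". A round-trip sketch (hypothetical values; note the component itself assumes field names without a ":" in them):

const params = new URLSearchParams();
params.set("json_sort", `${encodeURIComponent("_time")}:${encodeURIComponent("desc")}`);
// serialized as "json_sort=_time%3Adesc"

const [field, direction] = (params.get("json_sort") ?? "")
  .split(":")
  .map(decodeURIComponent);
// field === "_time", direction === "desc"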
@@ -0,0 +1,185 @@
import { FC, useMemo, useRef } from "preact/compat";
import Button from "../../../../../../components/Main/Button/Button";
import { SettingsIcon, SortArrowDownIcon, SortArrowUpIcon, SortIcon } from "../../../../../../components/Main/Icons";
import Tooltip from "../../../../../../components/Main/Tooltip/Tooltip";
import Select from "../../../../../../components/Main/Select/Select";
import useBoolean from "../../../../../../hooks/useBoolean";
import { useState, useEffect, useCallback } from "react";
import Modal from "../../../../../../components/Main/Modal/Modal";
import { useSearchParams } from "react-router-dom";
import "./style.scss";
import { SortDirection } from "../types";

const title = "JSON settings";
const directionList = ["asc", "desc"];

interface JsonSettingsProps {
  fields: string[];
  sortQueryParamName: string;
  fieldSortQueryParamName: string;
}

const JsonViewSettings: FC<JsonSettingsProps> = ({
  fields,
  sortQueryParamName,
  fieldSortQueryParamName
}) => {
  const [searchParams, setSearchParams] = useSearchParams();
  const buttonRef = useRef<HTMLDivElement>(null);
  const [fieldSortDirection, setFieldSortDirection] = useState<SortDirection>(null);

  const {
    value: openSettings,
    toggle: toggleOpenSettings,
    setFalse: handleClose,
  } = useBoolean(false);

  const [sortField, setSortField] = useState<string | null>(null);
  const [sortDirection, setSortDirection] = useState<SortDirection>(null);

  useEffect(() => {
    const sortParam = searchParams.get(sortQueryParamName);
    const isSortDirection = (value: string): value is Exclude<SortDirection, null> => directionList.includes(value);
    if (sortParam) {
      const [field, direction] = sortParam.split(":").map(decodeURIComponent);
      if (field && isSortDirection(direction)) {
        setSortField(field);
        setSortDirection(direction);
      }
    }

    const fieldSortParam = searchParams.get(fieldSortQueryParamName);
    if (fieldSortParam === "asc" || fieldSortParam === "desc") {
      setFieldSortDirection(fieldSortParam);
    }
  }, [searchParams, sortQueryParamName, fieldSortQueryParamName, setSortField, setSortDirection, setFieldSortDirection]);

  const updateSortParams = useCallback((field: string | null, direction: SortDirection) => {
    const updatedParams = new URLSearchParams(searchParams.toString());

    if (!field || !direction) {
      updatedParams.delete(sortQueryParamName);
    } else {
      updatedParams.set(sortQueryParamName, `${field}:${direction || ""}`);
    }

    setSearchParams(updatedParams);
  }, [searchParams, sortQueryParamName]);

  const handleSort = (field: string) => {
    const newDirection: SortDirection = sortDirection || "asc";
    setSortField(field);
    setSortDirection(newDirection);
    updateSortParams(field, newDirection);
  };

  const resetSort = () => {
    setSortField(null);
    setSortDirection(null);
    updateSortParams(null, null);
  };

  const changeFieldSortDirection = useCallback(() => {
    let newFieldSortDirection: SortDirection = null;
    if (fieldSortDirection === null) {
      newFieldSortDirection = "asc";
    } else if (fieldSortDirection === "asc") {
      newFieldSortDirection = "desc";
    }
    setFieldSortDirection(newFieldSortDirection);
    const updatedParams = new URLSearchParams(searchParams.toString());

    if (!newFieldSortDirection) {
      updatedParams.delete(fieldSortQueryParamName);
    } else {
      updatedParams.set(fieldSortQueryParamName, encodeURIComponent(newFieldSortDirection));
    }

    setSearchParams(updatedParams);
  }, [fieldSortDirection, searchParams, fieldSortQueryParamName]);

  const handleChangeSortDirection = (direction: string) => {
    const field = sortField || fields[0];
    setSortField(field);
    setSortDirection(direction as SortDirection);
    updateSortParams(field, direction as SortDirection);
  };

  const fieldSortMeta = useMemo(() => ({
    default: {
      title: "Set field sort order. Click to sort in ascending order",
      icon: <SortIcon />
    },
    asc: {
      title: "Fields sorted ascending. Click to sort in descending order",
      icon: <SortArrowDownIcon />
    },
    desc: {
      title: "Fields sorted descending. Click to reset sort",
      icon: <SortArrowUpIcon />
    },
  }), []);

  const fieldSortButton = useMemo(() => {
    const { title, icon } = fieldSortMeta[fieldSortDirection ?? "default"];
    return <Tooltip title={title}>
      <Button
        variant="text"
        startIcon={icon}
        onClick={changeFieldSortDirection}
        ariaLabel={title}
      />
    </Tooltip>;
  }, [fieldSortDirection, toggleOpenSettings, changeFieldSortDirection, fieldSortMeta]);

  return (
    <div className="vm-json-settings">
      {fieldSortButton}
      <Tooltip title={title}>
        <div ref={buttonRef}>
          <Button
            variant="text"
            startIcon={<SettingsIcon/>}
            onClick={toggleOpenSettings}
            ariaLabel={title}
          />
        </div>
      </Tooltip>
      {openSettings && (
        <Modal
          title={title}
          className="vm-json-settings-modal"
          onClose={handleClose}
        >
          <div className="vm-json-settings-modal-section">
            <div className="vm-json-settings-modal-section__sort-settings-container">
              <Select
                value={sortField || ""}
                onChange={handleSort}
                list={fields}
                label="Select field"
              />
              <Select
                value={sortDirection || ""}
                onChange={handleChangeSortDirection}
                list={directionList}
                label="Sort direction"
              />
              {(sortField || sortDirection) && (
                <Button
                  variant="outlined"
                  color="error"
                  onClick={resetSort}
                >
                  Reset sort
                </Button>
              )}
            </div>
          </div>
        </Modal>)}
    </div>
  );
};

export default JsonViewSettings;
@@ -0,0 +1,34 @@
@use "src/styles/variables" as *;

.vm-json-settings {
  display: flex;
  flex-direction: row;

  &-modal {
    .vm-modal-content-body {
      min-width: clamp(300px, 600px, 90vw);
      padding: 0;
    }

    &-section {
      padding-block: $padding-global;
      border-top: $border-divider;

      &:first-child {
        padding-top: 0;
        border-top: none;
      }

      &__sort-settings-container {
        display: grid;
        padding: $padding-medium;
        grid-template-columns: 1fr 1fr 80px;
        gap: $padding-medium;

        @media (max-width: 500px) {
          grid-template-columns: 1fr;
        }
      }
    }
  }
}
Some files were not shown because too many files have changed in this diff.