Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 08:36:55 +03:00)
Compare commits: 195 commits (issue-8584 ... v1.110.6)
Commits (SHA1):
a6d68f859f, b911f721b3, 3f39bd08cf, cddf36af43, 0e555b421d, ec6f33f526, e316ab878c, 4aa0f93085, 764fc6010d, 3ec70a4b6d,
3b7039679f, f3197dc5d7, 9d82de9385, c34f6db5c0, 8ad81220d3, 7455e6c0a5, 22c85a149e, 1e4238ede7, 1d218f6aeb, ba4c9133ee,
dcadc65681, ae9126fae7, bf3b98954e, 814a37e57e, 0afa51a156, b89cce6bd6, fdfa68bc2d, 1572e1e5c3, 55d75b911c, f9e312f35a,
0cfe28c2fc, 1c39853164, 7390f99897, 2d279750a9, e9c577132d, 899d70a93e, 1d13718fcf, fa4708178f, e0ff58ce5e, a9f03605d3,
19025d5a19, 1f8c115428, 1863c5b95b, 7756046865, 7f2d194948, f224b60410, 46d32af89a, 3a7ea02d41, 84d2922f5e, 39b27cb397,
b1523f650d, 297bc28e3d, 54bb834c16, edee80f215, 5491d54c11, 14561a7ed3, cd4ec0e739, 8e76b41081, 9751ea1098, 787e6a9b24,
73a69bd655, ec6ed3cb48, faf95b4a58, a7106040de, 792d0f8bb8, 8628f90e6b, 89237a04b9, d6c156727b, f1fdc8610d, 611450a188,
3b1a41c014, cd9bb6b315, e58d3f03c7, 3e4c38c56c, c3becae96b, 84ee713dc6, 7dc58894d1, 30376722c1, 6674691a58, 55e3ccb5f0,
2c97c8841c, f38736343d, 33315f1ece, 680cbef0cb, 2d3e048f59, 024a40a799, 225c6b6f52, 0fee22e91a, 96200a9ec4, 06c26315df,
231810fe49, 60ef715c79, 8071dabe58, 3d9b160fce, 5fa40ee631, 53814b1ea6, 9ca2a246a9, 4517425da8, 953ed46680, 795d3fe722,
536b40c06c, a113516588, b68f9ea9e3, fa6a32a39d, 477635e00f, 8e92cd3b2d, 2ab53acce4, 2f4680d8f3, 1f1afeb06e, b2c075191e,
1191c6453b, 5d61122fd5, e9c04879ce, 5f4205a050, 7a46af3920, ff967a8e65, 3108376d95, 494fe4403a, 303b425fa3, 8f3efde55d,
f16938bba9, 65ff04bc09, e2715f94af, 582160f566, 638f9839d5, 3f5bf4bd03, 038419663b, 123f373537, 57121c828f, aa5edbc706,
f9d8c86b0a, b733fc5b83, f2eaad62dc, adae788b18, a65d10fcce, 298f862fc0, aff1580a1d, d4c0a42c1b, a9736a5bfb, 2e4beeefb1,
635c5c9feb, 4e1260e189, ca3910748f, 2f0796ff40, cdba6dbc0e, f18daaeac5, a9f124388f, 4b2276608b, b352470ae1, b3261a1b87,
6d5973dcb0, 74f17bb67e, bf024d3dce, 5b87aff830, 34d35869fa, b1d1f1f461, 98f1e32e39, edac875179, 0ff1a3b154, bbe58cc37b,
78dca6ee6e, 12f26668a6, 6c90843aab, 4aad4c64bb, 5630c0108e, 732a549cff, af637bc2a2, 6600916344, e9cb95c5d4, 8617faa160,
fe888be58c, a38de1c242, 8dc0f6cfab, 346db8a606, b277a62e94, e2535fcb28, 66e7b908ec, a2ba37be68, 004e5ff38b, 4160fbecc0,
44844b7fbe, f60458d5fa, e242dd5bf2, f863b331c1, a98779770c, bcbcae2309, 5b8c10d08e, 588fa4d90d, b9c777a578, d8fe739aba,
e5ddf475b8, 3c3c8668d6, 22d1b916bf, 35b31f904d, 7c05ec42fe
2 .github/pull_request_template.md vendored

@@ -6,4 +6,4 @@ Please provide a brief description of the changes you made. Be as specific as po
 
 The following checks are **mandatory**:
 
-- [ ] My change adheres [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
+- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
1 .gitignore vendored

@@ -27,3 +27,4 @@ _site
 coverage.txt
 cspell.json
 *~
+deployment/docker/provisioning/plugins/
20 Makefile

@@ -504,7 +504,7 @@ fmt:
 	gofmt -l -w -s ./apptest
 
 vet:
-	go vet ./lib/...
+	GOEXPERIMENT=synctest go vet ./lib/...
 	go vet ./app/...
 	go vet ./apptest/...
 
@@ -513,35 +513,35 @@ check-all: fmt vet golangci-lint govulncheck
 clean-checkers: remove-golangci-lint remove-govulncheck
 
 test:
-	go test ./lib/... ./app/...
+	GOEXPERIMENT=synctest go test ./lib/... ./app/...
 
 test-race:
-	go test -race ./lib/... ./app/...
+	GOEXPERIMENT=synctest go test -race ./lib/... ./app/...
 
 test-pure:
-	CGO_ENABLED=0 go test ./lib/... ./app/...
+	GOEXPERIMENT=synctest CGO_ENABLED=0 go test ./lib/... ./app/...
 
 test-full:
-	go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	GOEXPERIMENT=synctest go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
 
 test-full-386:
-	GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	GOEXPERIMENT=synctest GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
 
 integration-test: victoria-metrics vmagent vmalert vmauth
 	go test ./apptest/... -skip="^TestCluster.*"
 
 benchmark:
-	go test -bench=. ./lib/...
+	GOEXPERIMENT=synctest go test -bench=. ./lib/...
 	go test -bench=. ./app/...
 
 benchmark-pure:
-	CGO_ENABLED=0 go test -bench=. ./lib/...
+	GOEXPERIMENT=synctest CGO_ENABLED=0 go test -bench=. ./lib/...
 	CGO_ENABLED=0 go test -bench=. ./app/...
 
 vendor-update:
 	go get -u ./lib/...
 	go get -u ./app/...
-	go mod tidy -compat=1.23
+	go mod tidy -compat=1.24
 	go mod vendor
 
 app-local:

@@ -564,7 +564,7 @@ install-qtc:
 
 golangci-lint: install-golangci-lint
-	golangci-lint run
+	GOEXPERIMENT=synctest golangci-lint run
 
 install-golangci-lint:
 	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.64.7
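For context on the GOEXPERIMENT=synctest knob added throughout the Makefile above: Go 1.24 ships an experimental testing/synctest package (available only under GOEXPERIMENT=synctest) whose Run executes goroutines inside a "bubble" with a fake clock. A minimal sketch of the kind of test that needs it (not from this repo; assumes Go 1.24 with the experiment enabled):

package example

import (
	"testing"
	"testing/synctest"
	"time"
)

// TestTimerWithSynctest shows why the experiment flag is needed:
// time.Sleep inside synctest.Run advances a fake clock instead of waiting,
// so the test completes instantly yet observes exactly 5s of elapsed time.
func TestTimerWithSynctest(t *testing.T) {
	synctest.Run(func() {
		start := time.Now()
		time.Sleep(5 * time.Second) // returns immediately under the fake clock
		if elapsed := time.Since(start); elapsed != 5*time.Second {
			t.Fatalf("unexpected elapsed time: %s", elapsed)
		}
	})
}

Running GOEXPERIMENT=synctest go test on a package containing such a file is exactly what the updated test targets do.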
@@ -219,6 +219,42 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []l
 	}
 }
 
+// InsertRowProcessor is used by native data ingestion protocol parser.
+type InsertRowProcessor interface {
+	// AddInsertRow must add r to the underlying storage.
+	AddInsertRow(r *logstorage.InsertRow)
+}
+
+// AddInsertRow adds r to lmp.
+func (lmp *logMessageProcessor) AddInsertRow(r *logstorage.InsertRow) {
+	lmp.rowsIngestedTotal.Inc()
+	n := logstorage.EstimatedJSONRowLen(r.Fields)
+	lmp.bytesIngestedTotal.Add(n)
+
+	if len(r.Fields) > *MaxFieldsPerLine {
+		line := logstorage.MarshalFieldsToJSON(nil, r.Fields)
+		logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(r.Fields), *MaxFieldsPerLine, line)
+		rowsDroppedTotalTooManyFields.Inc()
+		return
+	}
+
+	lmp.mu.Lock()
+	defer lmp.mu.Unlock()
+
+	lmp.lr.MustAddInsertRow(r)
+
+	if lmp.cp.Debug {
+		s := lmp.lr.GetRowString(0)
+		lmp.lr.ResetKeepSettings()
+		logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
+		rowsDroppedTotalDebug.Inc()
+		return
+	}
+	if lmp.lr.NeedFlush() {
+		lmp.flushLocked()
+	}
+}
+
 // flushLocked must be called under locked lmp.mu.
 func (lmp *logMessageProcessor) flushLocked() {
 	lmp.lastFlushTime = time.Now()
96 app/vlinsert/internalinsert/internalinsert.go (new file)

package internalinsert

import (
	"flag"
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netinsert"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
)

var (
	disableInsert  = flag.Bool("internalinsert.disable", false, "Whether to disable /internal/insert HTTP endpoint")
	maxRequestSize = flagutil.NewBytes("internalinsert.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single request, which can be accepted at /internal/insert HTTP endpoint")
)

// RequestHandler processes /internal/insert requests.
func RequestHandler(w http.ResponseWriter, r *http.Request) {
	if *disableInsert {
		httpserver.Errorf(w, r, "requests to /internal/insert are disabled with -internalinsert.disable command-line flag")
		return
	}

	startTime := time.Now()
	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	version := r.FormValue("version")
	if version != netinsert.ProtocolVersion {
		httpserver.Errorf(w, r, "unsupported protocol version=%q; want %q", version, netinsert.ProtocolVersion)
		return
	}

	requestsTotal.Inc()

	cp, err := insertutil.GetCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if err := vlstorage.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	encoding := r.Header.Get("Content-Encoding")
	err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
		lmp := cp.NewLogMessageProcessor("internalinsert", false)
		irp := lmp.(insertutil.InsertRowProcessor)
		err := parseData(irp, data)
		lmp.MustClose()
		return err
	})
	if err != nil {
		errorsTotal.Inc()
		httpserver.Errorf(w, r, "cannot parse internal insert request: %s", err)
		return
	}

	requestDuration.UpdateDuration(startTime)
}

func parseData(irp insertutil.InsertRowProcessor, data []byte) error {
	r := logstorage.GetInsertRow()
	src := data
	i := 0
	for len(src) > 0 {
		tail, err := r.UnmarshalInplace(src)
		if err != nil {
			return fmt.Errorf("cannot parse row #%d: %s", i, err)
		}
		src = tail
		i++

		irp.AddInsertRow(r)
	}
	logstorage.PutInsertRow(r)

	return nil
}

var (
	requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/internal/insert"}`)
	errorsTotal   = metrics.NewCounter(`vl_http_errors_total{path="/internal/insert"}`)

	requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/internal/insert"}`)
)
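parseData above drains the request body by repeatedly unmarshaling one row from the front of the buffer and continuing with the returned tail. A self-contained sketch of that consume-the-tail pattern (hypothetical fixed-width record type; the repo's InsertRow wire format is not shown in this diff):

package main

import (
	"encoding/binary"
	"fmt"
)

type record struct{ value uint32 }

// unmarshalInplace parses one 4-byte big-endian record and returns the remaining bytes.
func (r *record) unmarshalInplace(src []byte) ([]byte, error) {
	if len(src) < 4 {
		return nil, fmt.Errorf("need 4 bytes, got %d", len(src))
	}
	r.value = binary.BigEndian.Uint32(src)
	return src[4:], nil
}

func main() {
	data := []byte{0, 0, 0, 1, 0, 0, 0, 2} // two concatenated records
	var r record
	for i := 0; len(data) > 0; i++ {
		tail, err := r.unmarshalInplace(data)
		if err != nil {
			fmt.Printf("cannot parse record #%d: %s\n", i, err)
			return
		}
		data = tail
		fmt.Printf("record #%d = %d\n", i, r.value)
	}
}

Reusing a single record value across iterations mirrors parseData's GetInsertRow/PutInsertRow pooling: the row object is recycled instead of allocated per row.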
@@ -7,6 +7,7 @@ import (
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/datadog"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/internalinsert"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/journald"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
@@ -28,6 +29,11 @@ func Stop() {
 func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	path := r.URL.Path
 
+	if path == "/internal/insert" {
+		internalinsert.RequestHandler(w, r)
+		return true
+	}
+
 	if !strings.HasPrefix(path, "/insert/") {
 		// Skip requests, which do not start with /insert/, since these aren't our requests.
 		return false
@@ -41,6 +41,26 @@ func TestPushProtoOk(t *testing.T) {
 		`{"_msg":"log-line-message","severity":"Trace"}`,
 	)
 
+	// severities mapping
+	f([]pb.ResourceLogs{
+		{
+			ScopeLogs: []pb.ScopeLogs{
+				{
+					LogRecords: []pb.LogRecord{
+						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
+						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 13, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
+						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 24, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
+					},
+				},
+			},
+		},
+	},
+		[]int64{1234, 1234, 1234},
+		`{"_msg":"log-line-message","severity":"Trace"}
+{"_msg":"log-line-message","severity":"Warn"}
+{"_msg":"log-line-message","severity":"Fatal4"}`,
+	)
+
 	// multi-line with resource attributes
 	f([]pb.ResourceLogs{
 		{
@@ -60,7 +80,7 @@ func TestPushProtoOk(t *testing.T) {
 				{
 					LogRecords: []pb.LogRecord{
 						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
-						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1235, SeverityNumber: 21, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
+						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1235, SeverityNumber: 25, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
 						{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1236, SeverityNumber: -1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
 					},
 				},
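The severity expectations in the test above (1 -> Trace, 13 -> Warn, 24 -> Fatal4) follow the OpenTelemetry log data model, which defines severity numbers 1-24 in six groups of four. A sketch of that mapping (illustration only, not the repo's converter):

package main

import "fmt"

var severityGroups = []string{"Trace", "Debug", "Info", "Warn", "Error", "Fatal"}

// severityName maps an OTLP SeverityNumber (1..24) to its text form:
// the first value of each group is bare ("Warn"), the rest are suffixed ("Warn2".."Warn4").
func severityName(n int) string {
	if n < 1 || n > 24 {
		return "Unspecified"
	}
	group := severityGroups[(n-1)/4]
	if offset := (n-1)%4 + 1; offset > 1 {
		return fmt.Sprintf("%s%d", group, offset)
	}
	return group
}

func main() {
	fmt.Println(severityName(1), severityName(13), severityName(24)) // Trace Warn Fatal4
}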
324 app/vlselect/internalselect/internalselect.go (new file)

package internalselect

import (
	"context"
	"flag"
	"fmt"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/atomicutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)

var disableSelect = flag.Bool("internalselect.disable", false, "Whether to disable /internal/select/* HTTP endpoints")

// RequestHandler processes requests to /internal/select/*
func RequestHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	if *disableSelect {
		httpserver.Errorf(w, r, "requests to /internal/select/* are disabled with -internalselect.disable command-line flag")
		return
	}

	startTime := time.Now()

	path := r.URL.Path
	rh := requestHandlers[path]
	if rh == nil {
		httpserver.Errorf(w, r, "unsupported endpoint requested: %s", path)
		return
	}

	metrics.GetOrCreateCounter(fmt.Sprintf(`vl_http_requests_total{path=%q}`, path)).Inc()
	if err := rh(ctx, w, r); err != nil && !netutil.IsTrivialNetworkError(err) {
		metrics.GetOrCreateCounter(fmt.Sprintf(`vl_http_request_errors_total{path=%q}`, path)).Inc()
		httpserver.Errorf(w, r, "%s", err)
		// The return is skipped intentionally in order to track the duration of failed queries.
	}
	metrics.GetOrCreateSummary(fmt.Sprintf(`vl_http_request_duration_seconds{path=%q}`, path)).UpdateDuration(startTime)
}

var requestHandlers = map[string]func(ctx context.Context, w http.ResponseWriter, r *http.Request) error{
	"/internal/select/query":               processQueryRequest,
	"/internal/select/field_names":         processFieldNamesRequest,
	"/internal/select/field_values":        processFieldValuesRequest,
	"/internal/select/stream_field_names":  processStreamFieldNamesRequest,
	"/internal/select/stream_field_values": processStreamFieldValuesRequest,
	"/internal/select/streams":             processStreamsRequest,
	"/internal/select/stream_ids":          processStreamIDsRequest,
}

func processQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.QueryProtocolVersion)
	if err != nil {
		return err
	}

	w.Header().Set("Content-Type", "application/octet-stream")

	var wLock sync.Mutex
	var dataLenBuf []byte

	sendBuf := func(bb *bytesutil.ByteBuffer) error {
		if len(bb.B) == 0 {
			return nil
		}

		data := bb.B
		if !cp.DisableCompression {
			bufLen := len(bb.B)
			bb.B = zstd.CompressLevel(bb.B, bb.B, 1)
			data = bb.B[bufLen:]
		}

		wLock.Lock()
		dataLenBuf = encoding.MarshalUint64(dataLenBuf[:0], uint64(len(data)))
		_, err := w.Write(dataLenBuf)
		if err == nil {
			_, err = w.Write(data)
		}
		wLock.Unlock()

		// Reset the sent buf
		bb.Reset()

		return err
	}

	var bufs atomicutil.Slice[bytesutil.ByteBuffer]

	var errGlobalLock sync.Mutex
	var errGlobal error

	writeBlock := func(workerID uint, db *logstorage.DataBlock) {
		if errGlobal != nil {
			return
		}

		bb := bufs.Get(workerID)

		bb.B = db.Marshal(bb.B)

		if len(bb.B) < 1024*1024 {
			// Fast path - the bb is too small to be sent to the client yet.
			return
		}

		// Slow path - the bb must be sent to the client.
		if err := sendBuf(bb); err != nil {
			errGlobalLock.Lock()
			if errGlobal != nil {
				errGlobal = err
			}
			errGlobalLock.Unlock()
		}
	}

	if err := vlstorage.RunQuery(ctx, cp.TenantIDs, cp.Query, writeBlock); err != nil {
		return err
	}
	if errGlobal != nil {
		return errGlobal
	}

	// Send the remaining data
	for _, bb := range bufs.GetSlice() {
		if err := sendBuf(bb); err != nil {
			return err
		}
	}

	return nil
}

func processFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.FieldNamesProtocolVersion)
	if err != nil {
		return err
	}

	fieldNames, err := vlstorage.GetFieldNames(ctx, cp.TenantIDs, cp.Query)
	if err != nil {
		return fmt.Errorf("cannot obtain field names: %w", err)
	}

	return writeValuesWithHits(w, fieldNames, cp.DisableCompression)
}

func processFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.FieldValuesProtocolVersion)
	if err != nil {
		return err
	}

	fieldName := r.FormValue("field")

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	fieldValues, err := vlstorage.GetFieldValues(ctx, cp.TenantIDs, cp.Query, fieldName, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain field values: %w", err)
	}

	return writeValuesWithHits(w, fieldValues, cp.DisableCompression)
}

func processStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamFieldNamesProtocolVersion)
	if err != nil {
		return err
	}

	fieldNames, err := vlstorage.GetStreamFieldNames(ctx, cp.TenantIDs, cp.Query)
	if err != nil {
		return fmt.Errorf("cannot obtain stream field names: %w", err)
	}

	return writeValuesWithHits(w, fieldNames, cp.DisableCompression)
}

func processStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamFieldValuesProtocolVersion)
	if err != nil {
		return err
	}

	fieldName := r.FormValue("field")

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	fieldValues, err := vlstorage.GetStreamFieldValues(ctx, cp.TenantIDs, cp.Query, fieldName, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain stream field values: %w", err)
	}

	return writeValuesWithHits(w, fieldValues, cp.DisableCompression)
}

func processStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamsProtocolVersion)
	if err != nil {
		return err
	}

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	streams, err := vlstorage.GetStreams(ctx, cp.TenantIDs, cp.Query, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain streams: %w", err)
	}

	return writeValuesWithHits(w, streams, cp.DisableCompression)
}

func processStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamIDsProtocolVersion)
	if err != nil {
		return err
	}

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	streamIDs, err := vlstorage.GetStreamIDs(ctx, cp.TenantIDs, cp.Query, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain streams: %w", err)
	}

	return writeValuesWithHits(w, streamIDs, cp.DisableCompression)
}

type commonParams struct {
	TenantIDs []logstorage.TenantID
	Query     *logstorage.Query

	DisableCompression bool
}

func getCommonParams(r *http.Request, expectedProtocolVersion string) (*commonParams, error) {
	version := r.FormValue("version")
	if version != expectedProtocolVersion {
		return nil, fmt.Errorf("unexpected version=%q; want %q", version, expectedProtocolVersion)
	}

	tenantIDsStr := r.FormValue("tenant_ids")
	tenantIDs, err := logstorage.UnmarshalTenantIDs([]byte(tenantIDsStr))
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal tenant_ids=%q: %w", tenantIDsStr, err)
	}

	timestamp, err := getInt64FromRequest(r, "timestamp")
	if err != nil {
		return nil, err
	}

	qStr := r.FormValue("query")
	q, err := logstorage.ParseQueryAtTimestamp(qStr, timestamp)
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal query=%q: %w", qStr, err)
	}

	s := r.FormValue("disable_compression")
	disableCompression, err := strconv.ParseBool(s)
	if err != nil {
		return nil, fmt.Errorf("cannot parse disable_compression=%q: %w", s, err)
	}

	cp := &commonParams{
		TenantIDs: tenantIDs,
		Query:     q,

		DisableCompression: disableCompression,
	}
	return cp, nil
}

func writeValuesWithHits(w http.ResponseWriter, vhs []logstorage.ValueWithHits, disableCompression bool) error {
	var b []byte
	for i := range vhs {
		b = vhs[i].Marshal(b)
	}

	if !disableCompression {
		b = zstd.CompressLevel(nil, b, 1)
	}

	w.Header().Set("Content-Type", "application/octet-stream")

	if _, err := w.Write(b); err != nil {
		return fmt.Errorf("cannot send response to the client: %w", err)
	}

	return nil
}

func getInt64FromRequest(r *http.Request, argName string) (int64, error) {
	s := r.FormValue(argName)
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("cannot parse %s=%q: %w", argName, s, err)
	}
	return n, nil
}
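processQueryRequest streams its response as a sequence of frames: an 8-byte length produced by encoding.MarshalUint64, followed by that many bytes of (optionally zstd-compressed) marshaled DataBlock. A client-side sketch of reading that framing (the big-endian byte order and the lack of a terminator frame are assumptions based on lib/encoding; the real reader lives in netselect, which is not shown in this diff):

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// readFrames reads length-prefixed frames from r until a clean EOF.
// A real implementation would bound n before allocating.
func readFrames(r io.Reader, onFrame func(data []byte) error) error {
	var lenBuf [8]byte
	for {
		if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // clean end of stream
			}
			return fmt.Errorf("cannot read frame length: %w", err)
		}
		n := binary.BigEndian.Uint64(lenBuf[:])
		data := make([]byte, n)
		if _, err := io.ReadFull(r, data); err != nil {
			return fmt.Errorf("cannot read %d-byte frame: %w", n, err)
		}
		if err := onFrame(data); err != nil {
			return err
		}
	}
}

func main() {
	// Build one example frame: length prefix + payload.
	payload := []byte("example block")
	frame := binary.BigEndian.AppendUint64(nil, uint64(len(payload)))
	frame = append(frame, payload...)

	err := readFrames(bytes.NewReader(frame), func(data []byte) error {
		fmt.Printf("got frame: %q\n", data)
		return nil
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}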
Deleted file (47 lines, package logsql):

@@ -1,47 +0,0 @@
-package logsql
-
-import (
-	"bufio"
-	"io"
-	"sync"
-)
-
-func getBufferedWriter(w io.Writer) *bufferedWriter {
-	v := bufferedWriterPool.Get()
-	if v == nil {
-		return &bufferedWriter{
-			bw: bufio.NewWriter(w),
-		}
-	}
-	bw := v.(*bufferedWriter)
-	bw.bw.Reset(w)
-	return bw
-}
-
-func putBufferedWriter(bw *bufferedWriter) {
-	bw.reset()
-	bufferedWriterPool.Put(bw)
-}
-
-var bufferedWriterPool sync.Pool
-
-type bufferedWriter struct {
-	mu sync.Mutex
-	bw *bufio.Writer
-}
-
-func (bw *bufferedWriter) reset() {
-	// nothing to do
-}
-
-func (bw *bufferedWriter) WriteIgnoreErrors(p []byte) {
-	bw.mu.Lock()
-	_, _ = bw.bw.Write(p)
-	bw.mu.Unlock()
-}
-
-func (bw *bufferedWriter) FlushIgnoreErrors() {
-	bw.mu.Lock()
-	_ = bw.bw.Flush()
-	bw.mu.Unlock()
-}
@@ -3,6 +3,7 @@ package logsql
 import (
 	"context"
 	"fmt"
+	"io"
 	"math"
 	"net/http"
 	"regexp"
@@ -11,12 +12,14 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/valyala/fastjson"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/atomicutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
@@ -57,10 +60,13 @@ func ProcessFacetsRequest(ctx context.Context, w http.ResponseWriter, r *http.Re
 
 	var mLock sync.Mutex
 	m := make(map[string][]facetEntry)
-	writeBlock := func(_ uint, _ []int64, columns []logstorage.BlockColumn) {
-		if len(columns) == 0 || len(columns[0].Values) == 0 {
+	writeBlock := func(_ uint, db *logstorage.DataBlock) {
+		rowsCount := db.RowsCount()
+		if rowsCount == 0 {
 			return
 		}
 
+		columns := db.Columns
 		if len(columns) != 3 {
 			logger.Panicf("BUG: expecting 3 columns; got %d columns", len(columns))
 		}
@@ -156,17 +162,19 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
 
 	var mLock sync.Mutex
 	m := make(map[string]*hitsSeries)
-	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
-		if len(columns) == 0 || len(columns[0].Values) == 0 {
+	writeBlock := func(_ uint, db *logstorage.DataBlock) {
+		rowsCount := db.RowsCount()
+		if rowsCount == 0 {
 			return
 		}
 
+		columns := db.Columns
 		timestampValues := columns[0].Values
 		hitsValues := columns[len(columns)-1].Values
 		columns = columns[1 : len(columns)-1]
 
 		bb := blockResultPool.Get()
-		for i := range timestamps {
+		for i := 0; i < rowsCount; i++ {
 			timestampStr := strings.Clone(timestampValues[i])
 			hitsStr := strings.Clone(hitsValues[i])
 			hits, err := strconv.ParseUint(hitsStr, 10, 64)
@@ -205,6 +213,8 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
 	WriteHitsSeries(w, m)
 }
 
+var blockResultPool bytesutil.ByteBufferPool
+
 func getTopHitsSeries(m map[string]*hitsSeries, fieldsLimit int) map[string]*hitsSeries {
 	if fieldsLimit <= 0 || fieldsLimit >= len(m) {
 		return m
@@ -536,7 +546,7 @@ var liveTailRequests = metrics.NewCounter(`vl_live_tailing_requests`)
 const tailOffsetNsecs = 5e9
 
 type logRow struct {
-	timestamp int64
+	timestamp string
 	fields    []logstorage.Field
 }
 
@@ -552,7 +562,7 @@ type tailProcessor struct {
 	mu sync.Mutex
 
 	perStreamRows  map[string][]logRow
-	lastTimestamps map[string]int64
+	lastTimestamps map[string]string
 
 	err error
 }
@@ -562,12 +572,12 @@ func newTailProcessor(cancel func()) *tailProcessor {
 		cancel: cancel,
 
 		perStreamRows:  make(map[string][]logRow),
-		lastTimestamps: make(map[string]int64),
+		lastTimestamps: make(map[string]string),
 	}
 }
 
-func (tp *tailProcessor) writeBlock(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
-	if len(timestamps) == 0 {
+func (tp *tailProcessor) writeBlock(_ uint, db *logstorage.DataBlock) {
+	if db.RowsCount() == 0 {
 		return
 	}
 
@@ -579,14 +589,8 @@ func (tp *tailProcessor) writeBlock(_ uint, timestamps []int64, columns []logsto
 	}
 
 	// Make sure columns contain _time field, since it is needed for proper tail work.
-	hasTime := false
-	for _, c := range columns {
-		if c.Name == "_time" {
-			hasTime = true
-			break
-		}
-	}
-	if !hasTime {
+	timestamps, ok := db.GetTimestamps()
+	if !ok {
 		tp.err = fmt.Errorf("missing _time field")
 		tp.cancel()
 		return
@@ -595,8 +599,8 @@
 	// Copy block rows to tp.perStreamRows
 	for i, timestamp := range timestamps {
 		streamID := ""
-		fields := make([]logstorage.Field, len(columns))
-		for j, c := range columns {
+		fields := make([]logstorage.Field, len(db.Columns))
+		for j, c := range db.Columns {
 			name := strings.Clone(c.Name)
 			value := strings.Clone(c.Values[i])
 
@@ -688,12 +692,15 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
 	m := make(map[string]*statsSeries)
 	var mLock sync.Mutex
 
-	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
+	writeBlock := func(_ uint, db *logstorage.DataBlock) {
+		rowsCount := db.RowsCount()
+
+		columns := db.Columns
 		clonedColumnNames := make([]string, len(columns))
 		for i, c := range columns {
 			clonedColumnNames[i] = strings.Clone(c.Name)
 		}
-		for i := range timestamps {
+		for i := 0; i < rowsCount; i++ {
 			// Do not move q.GetTimestamp() outside writeBlock, since ts
 			// must be initialized to query timestamp for every processed log row.
 			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8312
@@ -802,12 +809,14 @@ func ProcessStatsQueryRequest(ctx context.Context, w http.ResponseWriter, r *htt
 	var rowsLock sync.Mutex
 
 	timestamp := q.GetTimestamp()
-	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
+	writeBlock := func(_ uint, db *logstorage.DataBlock) {
+		rowsCount := db.RowsCount()
+		columns := db.Columns
 		clonedColumnNames := make([]string, len(columns))
 		for i, c := range columns {
 			clonedColumnNames[i] = strings.Clone(c.Name)
 		}
-		for i := range timestamps {
+		for i := 0; i < rowsCount; i++ {
 			labels := make([]logstorage.Field, 0, len(byFields))
 			for j, c := range columns {
 				if slices.Contains(byFields, c.Name) {
@@ -869,11 +878,21 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 		return
 	}
 
-	bw := getBufferedWriter(w)
+	sw := &syncWriter{
+		w: w,
+	}
+
+	var bwShards atomicutil.Slice[bufferedWriter]
+	bwShards.Init = func(shard *bufferedWriter) {
+		shard.sw = sw
+	}
 	defer func() {
-		bw.FlushIgnoreErrors()
-		putBufferedWriter(bw)
+		shards := bwShards.GetSlice()
+		for _, shard := range shards {
+			shard.FlushIgnoreErrors()
+		}
 	}()
 
 	w.Header().Set("Content-Type", "application/stream+json")
 
 	if limit > 0 {
@@ -883,32 +902,34 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 			httpserver.Errorf(w, r, "%s", err)
 			return
 		}
-		bb := blockResultPool.Get()
-		b := bb.B
+		bw := bwShards.Get(0)
 		for i := range rows {
-			b = logstorage.MarshalFieldsToJSON(b[:0], rows[i].fields)
-			b = append(b, '\n')
-			bw.WriteIgnoreErrors(b)
+			bw.buf = logstorage.MarshalFieldsToJSON(bw.buf, rows[i].fields)
+			bw.buf = append(bw.buf, '\n')
+			if len(bw.buf) > 16*1024 {
+				bw.FlushIgnoreErrors()
+			}
 		}
-		bb.B = b
-		blockResultPool.Put(bb)
 		return
 	}
 
 	q.AddPipeLimit(uint64(limit))
 
-	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
-		if len(columns) == 0 || len(columns[0].Values) == 0 {
+	writeBlock := func(workerID uint, db *logstorage.DataBlock) {
+		rowsCount := db.RowsCount()
+		if rowsCount == 0 {
 			return
 		}
+		columns := db.Columns
 
-		bb := blockResultPool.Get()
-		for i := range timestamps {
-			WriteJSONRow(bb, columns, i)
+		bw := bwShards.Get(workerID)
+		for i := 0; i < rowsCount; i++ {
+			WriteJSONRow(bw, columns, i)
+			if len(bw.buf) > 16*1024 {
+				bw.FlushIgnoreErrors()
+			}
 		}
-		bw.WriteIgnoreErrors(bb.B)
-		blockResultPool.Put(bb)
 	}
 
 	if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
@@ -917,14 +938,37 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 	}
 }
 
-var blockResultPool bytesutil.ByteBufferPool
-
-type row struct {
-	timestamp int64
-	fields    []logstorage.Field
+type syncWriter struct {
+	mu sync.Mutex
+	w  io.Writer
 }
 
-func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
+func (sw *syncWriter) Write(p []byte) (int, error) {
+	sw.mu.Lock()
+	n, err := sw.w.Write(p)
+	sw.mu.Unlock()
+	return n, err
+}
+
+type bufferedWriter struct {
+	buf []byte
+	sw  *syncWriter
+}
+
+func (bw *bufferedWriter) Write(p []byte) (int, error) {
+	bw.buf = append(bw.buf, p...)
+
+	// Do not send bw.buf to bw.sw here, since the data at bw.buf may be incomplete (it must end with '\n')
+
+	return len(p), nil
+}
+
+func (bw *bufferedWriter) FlushIgnoreErrors() {
+	_, _ = bw.sw.Write(bw.buf)
+	bw.buf = bw.buf[:0]
+}
+
+func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]logRow, error) {
 	limitUpper := 2 * limit
 	q.AddPipeLimit(uint64(limitUpper))
 
@@ -993,7 +1037,7 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
 	}
 }
 
-func getLastNRows(rows []row, limit int) []row {
+func getLastNRows(rows []logRow, limit int) []logRow {
 	sort.Slice(rows, func(i, j int) bool {
 		return rows[i].timestamp < rows[j].timestamp
 	})
@@ -1003,18 +1047,31 @@ func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.Tenant
 	return rows
 }
 
-func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
+func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]logRow, error) {
 	ctxWithCancel, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	var rows []row
+	var missingTimeColumn atomic.Bool
+	var rows []logRow
 	var rowsLock sync.Mutex
-	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
+	writeBlock := func(_ uint, db *logstorage.DataBlock) {
+		if missingTimeColumn.Load() {
+			return
+		}
+
+		columns := db.Columns
+		clonedColumnNames := make([]string, len(columns))
+		for i, c := range columns {
+			clonedColumnNames[i] = strings.Clone(c.Name)
+		}
+
+		timestamps, ok := db.GetTimestamps()
+		if !ok {
+			missingTimeColumn.Store(true)
+			cancel()
+			return
+		}
+
 		for i, timestamp := range timestamps {
 			fields := make([]logstorage.Field, len(columns))
 			for j := range columns {
@@ -1024,7 +1081,7 @@ func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.Tenant
 			}
 
 			rowsLock.Lock()
-			rows = append(rows, row{
+			rows = append(rows, logRow{
 				timestamp: timestamp,
 				fields:    fields,
 			})
@@ -1035,11 +1092,13 @@ func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.Tenant
 			cancel()
 		}
 	}
-	if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock); err != nil {
-		return nil, err
+	err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock)
+
+	if missingTimeColumn.Load() {
+		return nil, fmt.Errorf("missing _time column in the result for the query [%s]", q)
 	}
 
-	return rows, nil
+	return rows, err
 }
 
 func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
@@ -1098,20 +1157,22 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
 	}
 
 	// Parse optional extra_filters
-	extraFiltersStr := r.FormValue("extra_filters")
-	extraFilters, err := parseExtraFilters(extraFiltersStr)
-	if err != nil {
-		return nil, nil, err
+	for _, extraFiltersStr := range r.Form["extra_filters"] {
+		extraFilters, err := parseExtraFilters(extraFiltersStr)
+		if err != nil {
+			return nil, nil, err
+		}
+		q.AddExtraFilters(extraFilters)
 	}
-	q.AddExtraFilters(extraFilters)
 
 	// Parse optional extra_stream_filters
-	extraStreamFiltersStr := r.FormValue("extra_stream_filters")
-	extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
-	if err != nil {
-		return nil, nil, err
+	for _, extraStreamFiltersStr := range r.Form["extra_stream_filters"] {
+		extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
+		if err != nil {
+			return nil, nil, err
+		}
+		q.AddExtraFilters(extraStreamFilters)
 	}
-	q.AddExtraFilters(extraStreamFilters)
 
 	return q, tenantIDs, nil
 }
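One user-visible effect of the parseCommonArgs change above: the HTTP API now accepts repeated extra_filters and extra_stream_filters query args and ANDs each of them into the query, where previously only a single value was read. A hypothetical invocation (host, port, and filter values made up; see the VictoriaLogs querying docs for the exact filter syntax):

curl http://localhost:9428/select/logsql/query \
  -d 'query=_time:5m error' \
  -d 'extra_filters={"namespace":"prod"}' \
  -d 'extra_filters={"app":"nginx"}'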
@@ -9,6 +9,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect/internalselect"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect/logsql"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
@@ -72,7 +73,7 @@ var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))
 func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	path := r.URL.Path
 
-	if !strings.HasPrefix(path, "/select/") {
+	if !strings.HasPrefix(path, "/select/") && !strings.HasPrefix(path, "/internal/select/") {
 		// Skip requests, which do not start with /select/, since these aren't our requests.
 		return false
 	}
@@ -99,9 +100,17 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		return true
 	}
 
+	ctx := r.Context()
+	if path == "/select/logsql/tail" {
+		logsqlTailRequests.Inc()
+		// Process live tailing request without timeout, since it is OK to run live tailing requests for very long time.
+		// Also do not apply concurrency limit to tail requests, since these limits are intended for non-tail requests.
+		logsql.ProcessLiveTailRequest(ctx, w, r)
+		return true
+	}
+
 	// Limit the number of concurrent queries, which can consume big amounts of CPU time.
 	startTime := time.Now()
-	ctx := r.Context()
 	d := getMaxQueryDuration(r)
 	ctxWithTimeout, cancel := context.WithTimeout(ctx, d)
 	defer cancel()
@@ -111,11 +120,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	}
 	defer decRequestConcurrency()
 
-	if path == "/select/logsql/tail" {
-		logsqlTailRequests.Inc()
-		// Process live tailing request without timeout (e.g. use ctx instead of ctxWithTimeout),
-		// since it is OK to run live tailing requests for very long time.
-		logsql.ProcessLiveTailRequest(ctx, w, r)
+	if strings.HasPrefix(path, "/internal/select/") {
+		// Process internal request from vlselect without timeout (e.g. use ctx instead of ctxWithTimeout),
+		// since the timeout must be controlled by the vlselect.
+		internalselect.RequestHandler(ctx, w, r)
 		return true
 	}
 
@@ -124,15 +132,17 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		return false
 	}
 
-	err := ctxWithTimeout.Err()
+	logRequestErrorIfNeeded(ctxWithTimeout, w, r, startTime)
+	return true
+}
+
+func logRequestErrorIfNeeded(ctx context.Context, w http.ResponseWriter, r *http.Request, startTime time.Time) {
+	err := ctx.Err()
 	switch err {
 	case nil:
 		// nothing to do
 	case context.Canceled:
-		remoteAddr := httpserver.GetQuotedRemoteAddr(r)
-		requestURI := httpserver.GetRequestURI(r)
-		logger.Infof("client has canceled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
-			time.Since(startTime).Seconds(), remoteAddr, requestURI)
+		// do not log canceled requests, since they are expected and legal.
	case context.DeadlineExceeded:
 		err = &httpserver.ErrorWithStatusCode{
 			Err: fmt.Errorf("the request couldn't be executed in %.3f seconds; possible solutions: "+
@@ -143,8 +153,6 @@
 	default:
 		httpserver.Errorf(w, r, "unexpected error: %s", err)
 	}
-
-	return true
 }
 
 func incRequestConcurrency(ctx context.Context, w http.ResponseWriter, r *http.Request) bool {
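The RequestHandler reshuffle above moves live-tail handling ahead of the concurrency limiter, so tail requests now bypass both the limiter and the per-request timeout, while /internal/select/* requests skip only the timeout (the calling vlselect controls it). A generic sketch of that pattern (not the repo's code; channel size and timeout are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"
)

var concurrencyCh = make(chan struct{}, 8) // cap on concurrent queries

func handleQuery(ctx context.Context, isTail bool) error {
	if isTail {
		// Tail requests run without timeout and without the concurrency cap.
		return runQuery(ctx)
	}
	select {
	case concurrencyCh <- struct{}{}:
		defer func() { <-concurrencyCh }()
	default:
		return fmt.Errorf("too many concurrent queries")
	}
	ctxWithTimeout, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	return runQuery(ctxWithTimeout)
}

func runQuery(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	fmt.Println(handleQuery(context.Background(), false)) // <nil>
}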
@@ -5,9 +5,13 @@ menu:
   docs:
     parent: 'victoriametrics'
     weight: 23
+tags:
+  - metrics
 aliases:
 - /ExtendedPromQL.html
 - /MetricsQL.html
+- /metricsql/index.html
+- /metricsql/
 ---
 [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) implements MetricsQL -
 query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/).
Several minified files under app/vlselect/vmui/assets changed; their diffs are suppressed because one or more lines are too long. New files:
app/vlselect/vmui/assets/index-Brup_hCI.css (1 line)
app/vlselect/vmui/assets/index-Wi3UrT0H.js (206 lines)
app/vlselect/vmui/assets/vendor-C-vZmbyg.js (76 lines)
@@ -35,10 +35,10 @@
     <meta property="og:title" content="UI for VictoriaLogs">
     <meta property="og:url" content="https://victoriametrics.com/products/victorialogs/">
     <meta property="og:description" content="Explore your log data with VictoriaLogs UI">
-    <script type="module" crossorigin src="./assets/index-BgdvCSTM.js"></script>
-    <link rel="modulepreload" crossorigin href="./assets/vendor-DojlIpLz.js">
+    <script type="module" crossorigin src="./assets/index-Wi3UrT0H.js"></script>
+    <link rel="modulepreload" crossorigin href="./assets/vendor-C-vZmbyg.js">
     <link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
-    <link rel="stylesheet" crossorigin href="./assets/index-u4IOGr0E.css">
+    <link rel="stylesheet" crossorigin href="./assets/index-Brup_hCI.css">
   </head>
   <body>
     <noscript>You need to enable JavaScript to run this app.</noscript>
@@ -10,11 +10,14 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netinsert"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -38,15 +41,59 @@ var (
|
||||
minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which "+
|
||||
"the storage stops accepting new data")
|
||||
|
||||
forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages. It overrides -httpAuth.*")
|
||||
forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge . It overrides -httpAuth.* . "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/#forced-merge")
|
||||
forceFlushAuthKey = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush . It overrides -httpAuth.* . "+
|
||||
"See https://docs.victoriametrics.com/victorialogs/#forced-flush")
|
||||
|
||||
storageNodeAddrs = flagutil.NewArrayString("storageNode", "Comma-separated list of TCP addresses for storage nodes to route the ingested logs to and to send select queries to. "+
|
||||
"If the list is empty, then the ingested logs are stored and queried locally from -storageDataPath")
|
||||
insertConcurrency = flag.Int("insert.concurrency", 2, "The average number of concurrent data ingestion requests, which can be sent to every -storageNode")
|
||||
insertDisableCompression = flag.Bool("insert.disableCompression", false, "Whether to disable compression when sending the ingested data to -storageNode nodes. "+
|
||||
"Disabled compression reduces CPU usage at the cost of higher network usage")
|
||||
selectDisableCompression = flag.Bool("select.disableCompression", false, "Whether to disable compression for select query responses received from -storageNode nodes. "+
|
||||
"Disabled compression reduces CPU usage at the cost of higher network usage")
|
||||
|
||||
storageNodeUsername = flagutil.NewArrayString("storageNode.username", "Optional basic auth username to use for the corresponding -storageNode")
|
||||
storageNodePassword = flagutil.NewArrayString("storageNode.password", "Optional basic auth password to use for the corresponding -storageNode")
|
||||
storageNodePasswordFile = flagutil.NewArrayString("storageNode.passwordFile", "Optional path to basic auth password to use for the corresponding -storageNode. "+
|
||||
"The file is re-read every second")
|
||||
storageNodeBearerToken = flagutil.NewArrayString("storageNode.bearerToken", "Optional bearer auth token to use for the corresponding -storageNode")
|
||||
storageNodeBearerTokenFile = flagutil.NewArrayString("storageNode.bearerTokenFile", "Optional path to bearer token file to use for the corresponding -storageNode. "+
|
||||
"The token is re-read from the file every second")
|
||||
|
||||
storageNodeTLS = flagutil.NewArrayBool("storageNode.tls", "Whether to use TLS (HTTPS) protocol for communicating with the corresponding -storageNode. "+
|
||||
"By default communication is performed via HTTP")
|
||||
storageNodeTLSCAFile = flagutil.NewArrayString("storageNode.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to the corresponding -storageNode. "+
|
||||
"By default, system CA is used")
|
||||
storageNodeTLSCertFile = flagutil.NewArrayString("storageNode.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting "+
|
||||
"to the corresponding -storageNode")
|
||||
storageNodeTLSKeyFile = flagutil.NewArrayString("storageNode.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to the corresponding -storageNode")
|
||||
storageNodeTLSServerName = flagutil.NewArrayString("storageNode.tlsServerName", "Optional TLS server name to use for connections to the corresponding -storageNode. "+
|
||||
"By default, the server name from -storageNode is used")
|
||||
storageNodeTLSInsecureSkipVerify = flagutil.NewArrayBool("storageNode.tlsInsecureSkipVerify", "Whether to skip tls verification when connecting to the corresponding -storageNode")
|
||||
)
|
||||
|
||||
var localStorage *logstorage.Storage
|
||||
var localStorageMetrics *metrics.Set
|
||||
|
||||
var netstorageInsert *netinsert.Storage
|
||||
var netstorageSelect *netselect.Storage
|
||||
|
||||
// Init initializes vlstorage.
|
||||
//
|
||||
// Stop must be called when vlstorage is no longer needed
|
||||
func Init() {
|
||||
if strg != nil {
|
||||
logger.Panicf("BUG: Init() has been already called")
|
||||
if len(*storageNodeAddrs) == 0 {
|
||||
initLocalStorage()
|
||||
} else {
|
||||
initNetworkStorage()
|
||||
}
|
||||
}
|
||||
|
||||
func initLocalStorage() {
|
||||
if localStorage != nil {
|
||||
logger.Panicf("BUG: initLocalStorage() has been already called")
|
||||
}
|
||||
|
||||
if retentionPeriod.Duration() < 24*time.Hour {
|
||||
@@ -63,60 +110,157 @@ func Init() {
|
||||
}
|
||||
logger.Infof("opening storage at -storageDataPath=%s", *storageDataPath)
|
||||
startTime := time.Now()
|
||||
strg = logstorage.MustOpenStorage(*storageDataPath, cfg)
|
||||
localStorage = logstorage.MustOpenStorage(*storageDataPath, cfg)
|
||||
|
||||
var ss logstorage.StorageStats
|
||||
strg.UpdateStats(&ss)
|
||||
localStorage.UpdateStats(&ss)
|
||||
logger.Infof("successfully opened storage in %.3f seconds; smallParts: %d; bigParts: %d; smallPartBlocks: %d; bigPartBlocks: %d; smallPartRows: %d; bigPartRows: %d; "+
|
||||
"smallPartSize: %d bytes; bigPartSize: %d bytes",
|
||||
time.Since(startTime).Seconds(), ss.SmallParts, ss.BigParts, ss.SmallPartBlocks, ss.BigPartBlocks, ss.SmallPartRowsCount, ss.BigPartRowsCount,
|
||||
ss.CompressedSmallPartSize, ss.CompressedBigPartSize)
|
||||
|
||||
// register storage metrics
|
||||
storageMetrics = metrics.NewSet()
|
||||
storageMetrics.RegisterMetricsWriter(func(w io.Writer) {
|
||||
writeStorageMetrics(w, strg)
|
||||
// register local storage metrics
|
||||
localStorageMetrics = metrics.NewSet()
|
||||
localStorageMetrics.RegisterMetricsWriter(func(w io.Writer) {
|
||||
writeStorageMetrics(w, localStorage)
|
||||
})
|
||||
metrics.RegisterSet(storageMetrics)
|
||||
metrics.RegisterSet(localStorageMetrics)
|
||||
}
|
||||
|
||||
func initNetworkStorage() {
|
||||
if netstorageInsert != nil || netstorageSelect != nil {
|
||||
logger.Panicf("BUG: initNetworkStorage() has been already called")
|
||||
}
|
||||
|
||||
authCfgs := make([]*promauth.Config, len(*storageNodeAddrs))
|
||||
isTLSs := make([]bool, len(*storageNodeAddrs))
|
||||
for i := range authCfgs {
|
||||
authCfgs[i] = newAuthConfigForStorageNode(i)
|
||||
isTLSs[i] = storageNodeTLS.GetOptionalArg(i)
|
||||
}
|
||||
|
||||
logger.Infof("starting insert service for nodes %s", *storageNodeAddrs)
|
||||
netstorageInsert = netinsert.NewStorage(*storageNodeAddrs, authCfgs, isTLSs, *insertConcurrency, *insertDisableCompression)
|
||||
|
||||
logger.Infof("initializing select service for nodes %s", *storageNodeAddrs)
|
||||
netstorageSelect = netselect.NewStorage(*storageNodeAddrs, authCfgs, isTLSs, *selectDisableCompression)
|
||||
|
||||
logger.Infof("initialized all the network services")
|
||||
}
|
||||
|
||||
func newAuthConfigForStorageNode(argIdx int) *promauth.Config {
|
||||
username := storageNodeUsername.GetOptionalArg(argIdx)
|
||||
password := storageNodePassword.GetOptionalArg(argIdx)
|
||||
passwordFile := storageNodePasswordFile.GetOptionalArg(argIdx)
|
||||
var basicAuthCfg *promauth.BasicAuthConfig
|
||||
if username != "" || password != "" || passwordFile != "" {
|
||||
basicAuthCfg = &promauth.BasicAuthConfig{
|
||||
Username: username,
|
||||
Password: promauth.NewSecret(password),
|
||||
PasswordFile: passwordFile,
|
||||
}
|
||||
}
|
||||
|
||||
token := storageNodeBearerToken.GetOptionalArg(argIdx)
|
||||
tokenFile := storageNodeBearerTokenFile.GetOptionalArg(argIdx)
|
||||
|
||||
tlsCfg := &promauth.TLSConfig{
|
||||
CAFile: storageNodeTLSCAFile.GetOptionalArg(argIdx),
|
||||
CertFile: storageNodeTLSCertFile.GetOptionalArg(argIdx),
|
||||
KeyFile: storageNodeTLSKeyFile.GetOptionalArg(argIdx),
|
||||
ServerName: storageNodeTLSServerName.GetOptionalArg(argIdx),
|
||||
InsecureSkipVerify: storageNodeTLSInsecureSkipVerify.GetOptionalArg(argIdx),
|
||||
}
|
||||
|
||||
opts := &promauth.Options{
|
||||
BasicAuth: basicAuthCfg,
|
||||
BearerToken: token,
|
||||
BearerTokenFile: tokenFile,
|
||||
TLSConfig: tlsCfg,
|
||||
}
|
||||
ac, err := opts.NewConfig()
|
||||
if err != nil {
|
||||
logger.Panicf("FATAL: cannot populate auth config for storage node #%d: %s", argIdx, err)
|
||||
}
|
||||
|
||||
return ac
|
||||
}
|
||||
|
||||
// Stop stops vlstorage.
|
||||
func Stop() {
|
||||
metrics.UnregisterSet(storageMetrics, true)
|
||||
storageMetrics = nil
|
||||
if localStorage != nil {
|
||||
metrics.UnregisterSet(localStorageMetrics, true)
|
||||
localStorageMetrics = nil
|
||||
|
||||
strg.MustClose()
|
||||
strg = nil
|
||||
localStorage.MustClose()
|
||||
localStorage = nil
|
||||
} else {
|
||||
netstorageInsert.MustStop()
|
||||
netstorageInsert = nil
|
||||
|
||||
netstorageSelect.MustStop()
|
||||
netstorageSelect = nil
|
||||
}
|
||||
}
|
||||
|
||||
// RequestHandler is a storage request handler.
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	path := r.URL.Path
	switch path {
	case "/internal/force_merge":
		return processForceMerge(w, r)
	case "/internal/force_flush":
		return processForceFlush(w, r)
	}
	return false
}

func processForceMerge(w http.ResponseWriter, r *http.Request) bool {
	if localStorage == nil {
		// Force merge isn't supported by non-local storage
		return false
	}

	if !httpserver.CheckAuthFlag(w, r, forceMergeAuthKey) {
		return true
	}

	// Run force merge in background
	partitionNamePrefix := r.FormValue("partition_prefix")
	go func() {
		activeForceMerges.Inc()
		defer activeForceMerges.Dec()
		logger.Infof("forced merge for partition_prefix=%q has been started", partitionNamePrefix)
		startTime := time.Now()
		localStorage.MustForceMerge(partitionNamePrefix)
		logger.Infof("forced merge for partition_prefix=%q has been successfully finished in %.3f seconds", partitionNamePrefix, time.Since(startTime).Seconds())
	}()
	return true
}

func processForceFlush(w http.ResponseWriter, r *http.Request) bool {
	if localStorage == nil {
		// Force flush isn't supported by non-local storage
		return false
	}

	if !httpserver.CheckAuthFlag(w, r, forceFlushAuthKey) {
		return true
	}

	logger.Infof("flushing storage to make pending data available for reading")
	localStorage.DebugFlush()
	return true
}

// CanWriteData returns non-nil error if it cannot write data to vlstorage
func CanWriteData() error {
	if localStorage == nil {
		// The data can be always written in non-local mode.
		return nil
	}

	if localStorage.IsReadOnly() {
		return &httpserver.ErrorWithStatusCode{
			Err: fmt.Errorf("cannot add rows into storage in read-only mode; the storage can be in read-only mode "+
				"because of lack of free disk space at -storageDataPath=%s", *storageDataPath),

@@ -130,50 +274,77 @@ func CanWriteData() error {
//
// It is advised to call CanWriteData() before calling MustAddRows()
func MustAddRows(lr *logstorage.LogRows) {
	if localStorage != nil {
		// Store lr in the local storage.
		localStorage.MustAddRows(lr)
	} else {
		// Store lr across the remote storage nodes.
		lr.ForEachRow(netstorageInsert.AddRow)
	}
}

// RunQuery runs the given q and calls writeBlock for the returned data blocks
func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteDataBlockFunc) error {
	if localStorage != nil {
		return localStorage.RunQuery(ctx, tenantIDs, q, writeBlock)
	}
	return netstorageSelect.RunQuery(ctx, tenantIDs, q, writeBlock)
}

// GetFieldNames executes q and returns field names seen in results.
func GetFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	if localStorage != nil {
		return localStorage.GetFieldNames(ctx, tenantIDs, q)
	}
	return netstorageSelect.GetFieldNames(ctx, tenantIDs, q)
}

// GetFieldValues executes q and returns unique values for the fieldName seen in results.
//
// If limit > 0, then up to limit unique values are returned.
func GetFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	if localStorage != nil {
		return localStorage.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
	}
	return netstorageSelect.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
}

// GetStreamFieldNames executes q and returns stream field names seen in results.
func GetStreamFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	if localStorage != nil {
		return localStorage.GetStreamFieldNames(ctx, tenantIDs, q)
	}
	return netstorageSelect.GetStreamFieldNames(ctx, tenantIDs, q)
}

// GetStreamFieldValues executes q and returns stream field values for the given fieldName seen in results.
//
// If limit > 0, then up to limit unique stream field values are returned.
func GetStreamFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	if localStorage != nil {
		return localStorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
	}
	return netstorageSelect.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
}

// GetStreams executes q and returns streams seen in query results.
//
// If limit > 0, then up to limit unique streams are returned.
func GetStreams(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	if localStorage != nil {
		return localStorage.GetStreams(ctx, tenantIDs, q, limit)
	}
	return netstorageSelect.GetStreams(ctx, tenantIDs, q, limit)
}

// GetStreamIDs executes q and returns streamIDs seen in query results.
//
// If limit > 0, then up to limit unique streamIDs are returned.
func GetStreamIDs(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	if localStorage != nil {
		return localStorage.GetStreamIDs(ctx, tenantIDs, q, limit)
	}
	return netstorageSelect.GetStreamIDs(ctx, tenantIDs, q, limit)
}

func writeStorageMetrics(w io.Writer, strg *logstorage.Storage) {

app/vlstorage/netinsert/netinsert.go (new file, 369 lines)
@@ -0,0 +1,369 @@
package netinsert

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sync"
	"sync/atomic"
	"time"

	"github.com/valyala/fastrand"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/contextutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
)

// the maximum size of a single data block sent to storage node.
const maxInsertBlockSize = 2 * 1024 * 1024

// ProtocolVersion is the version of the data ingestion protocol.
//
// It must be changed every time the data encoding at /internal/insert HTTP endpoint is changed.
const ProtocolVersion = "v1"

// Storage is a network storage for sending data to remote storage nodes in the cluster.
type Storage struct {
	sns []*storageNode

	disableCompression bool

	srt *streamRowsTracker

	pendingDataBuffers chan *bytesutil.ByteBuffer

	stopCh chan struct{}
	wg     sync.WaitGroup
}

type storageNode struct {
	// scheme is http or https scheme to communicate with addr
	scheme string

	// addr is TCP address of storage node to send the ingested data to
	addr string

	// s is a storage, which holds the given storageNode
	s *Storage

	// c is an http client used for sending data blocks to addr.
	c *http.Client

	// ac is auth config used for setting request headers such as Authorization and Host.
	ac *promauth.Config

	// pendingData contains pending data, which must be sent to the storage node at the addr.
	pendingDataMu        sync.Mutex
	pendingData          *bytesutil.ByteBuffer
	pendingDataLastFlush time.Time

	// the unix timestamp until the storageNode is disabled for data writing.
	disabledUntil atomic.Uint64
}

func newStorageNode(s *Storage, addr string, ac *promauth.Config, isTLS bool) *storageNode {
	tr := httputil.NewTransport(false, "vlinsert_backend")
	tr.TLSHandshakeTimeout = 20 * time.Second
	tr.DisableCompression = true

	scheme := "http"
	if isTLS {
		scheme = "https"
	}

	sn := &storageNode{
		scheme: scheme,
		addr:   addr,
		s:      s,
		c: &http.Client{
			Transport: ac.NewRoundTripper(tr),
		},
		ac: ac,

		pendingData: &bytesutil.ByteBuffer{},
	}

	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		sn.backgroundFlusher()
	}()

	return sn
}

func (sn *storageNode) backgroundFlusher() {
	t := time.NewTicker(time.Second)
	defer t.Stop()

	for {
		select {
		case <-sn.s.stopCh:
			return
		case <-t.C:
			sn.flushPendingData()
		}
	}
}

func (sn *storageNode) flushPendingData() {
	sn.pendingDataMu.Lock()
	if time.Since(sn.pendingDataLastFlush) < time.Second {
		// nothing to flush
		sn.pendingDataMu.Unlock()
		return
	}

	pendingData := sn.grabPendingDataForFlushLocked()
	sn.pendingDataMu.Unlock()

	sn.mustSendInsertRequest(pendingData)
}

func (sn *storageNode) addRow(r *logstorage.InsertRow) {
	bb := bbPool.Get()
	b := bb.B

	b = r.Marshal(b)

	if len(b) > maxInsertBlockSize {
		logger.Warnf("skipping too long log entry, since its length exceeds %d bytes; the actual log entry length is %d bytes; log entry contents: %s", maxInsertBlockSize, len(b), b)
		return
	}

	var pendingData *bytesutil.ByteBuffer
	sn.pendingDataMu.Lock()
	if sn.pendingData.Len()+len(b) > maxInsertBlockSize {
		pendingData = sn.grabPendingDataForFlushLocked()
	}
	sn.pendingData.MustWrite(b)
	sn.pendingDataMu.Unlock()

	bb.B = b
	bbPool.Put(bb)

	if pendingData != nil {
		sn.mustSendInsertRequest(pendingData)
	}
}

var bbPool bytesutil.ByteBufferPool

func (sn *storageNode) grabPendingDataForFlushLocked() *bytesutil.ByteBuffer {
	sn.pendingDataLastFlush = time.Now()
	pendingData := sn.pendingData
	sn.pendingData = <-sn.s.pendingDataBuffers

	return pendingData
}

func (sn *storageNode) mustSendInsertRequest(pendingData *bytesutil.ByteBuffer) {
	defer func() {
		pendingData.Reset()
		sn.s.pendingDataBuffers <- pendingData
	}()

	err := sn.sendInsertRequest(pendingData)
	if err == nil {
		return
	}

	if !errors.Is(err, errTemporarilyDisabled) {
		logger.Warnf("%s; re-routing the data block to the remaining nodes", err)
	}
	for !sn.s.sendInsertRequestToAnyNode(pendingData) {
		logger.Errorf("cannot send pending data to all storage nodes, since all of them are unavailable; re-trying to send the data in a second")

		t := timerpool.Get(time.Second)
		select {
		case <-sn.s.stopCh:
			timerpool.Put(t)
			logger.Errorf("dropping %d bytes of data, since there are no available storage nodes", pendingData.Len())
			return
		case <-t.C:
			timerpool.Put(t)
		}
	}
}

func (sn *storageNode) sendInsertRequest(pendingData *bytesutil.ByteBuffer) error {
	dataLen := pendingData.Len()
	if dataLen == 0 {
		// Nothing to send.
		return nil
	}

	if sn.disabledUntil.Load() > fasttime.UnixTimestamp() {
		return errTemporarilyDisabled
	}

	ctx, cancel := contextutil.NewStopChanContext(sn.s.stopCh)
	defer cancel()

	var body io.Reader
	if !sn.s.disableCompression {
		bb := zstdBufPool.Get()
		defer zstdBufPool.Put(bb)

		bb.B = zstd.CompressLevel(bb.B[:0], pendingData.B, 1)
		body = bb.NewReader()
	} else {
		body = pendingData.NewReader()
	}

	reqURL := sn.getRequestURL("/internal/insert")
	req, err := http.NewRequestWithContext(ctx, "POST", reqURL, body)
	if err != nil {
		logger.Panicf("BUG: unexpected error when creating an http request: %s", err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	if !sn.s.disableCompression {
		req.Header.Set("Content-Encoding", "zstd")
	}
	if err := sn.ac.SetHeaders(req, true); err != nil {
		return fmt.Errorf("cannot set auth headers for %q: %w", reqURL, err)
	}

	resp, err := sn.c.Do(req)
	if err != nil {
		// Disable sn for data writing for 10 seconds.
		sn.disabledUntil.Store(fasttime.UnixTimestamp() + 10)

		return fmt.Errorf("cannot send data block with the length %d to %q: %s", pendingData.Len(), reqURL, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode/100 == 2 {
		return nil
	}

	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		respBody = []byte(fmt.Sprintf("%s", err))
	}

	// Disable sn for data writing for 10 seconds.
	sn.disabledUntil.Store(fasttime.UnixTimestamp() + 10)

	return fmt.Errorf("unexpected status code returned when sending data block to %q: %d; want 2xx; response body: %q", reqURL, resp.StatusCode, respBody)
}

func (sn *storageNode) getRequestURL(path string) string {
	return fmt.Sprintf("%s://%s%s?version=%s", sn.scheme, sn.addr, path, url.QueryEscape(ProtocolVersion))
}

var zstdBufPool bytesutil.ByteBufferPool

// NewStorage returns new Storage for the given addrs with the given authCfgs.
//
// The concurrency is the average number of concurrent connections per every addr.
//
// If disableCompression is set, then the data is sent uncompressed to the remote storage.
//
// Call MustStop on the returned storage when it is no longer needed.
func NewStorage(addrs []string, authCfgs []*promauth.Config, isTLSs []bool, concurrency int, disableCompression bool) *Storage {
	pendingDataBuffers := make(chan *bytesutil.ByteBuffer, concurrency*len(addrs))
	for i := 0; i < cap(pendingDataBuffers); i++ {
		pendingDataBuffers <- &bytesutil.ByteBuffer{}
	}

	s := &Storage{
		disableCompression: disableCompression,
		pendingDataBuffers: pendingDataBuffers,
		stopCh:             make(chan struct{}),
	}

	sns := make([]*storageNode, len(addrs))
	for i, addr := range addrs {
		sns[i] = newStorageNode(s, addr, authCfgs[i], isTLSs[i])
	}
	s.sns = sns

	s.srt = newStreamRowsTracker(len(sns))

	return s
}

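// Editor's note: an illustrative sketch, not part of the diff above. pendingDataBuffers
// acts as a fixed-size buffer pool built from a buffered channel:
// grabPendingDataForFlushLocked receives a spare buffer from the channel and
// mustSendInsertRequest returns the flushed one. A minimal sketch of the pattern
// (n is an assumed pool size, not a value from the source):
//
//	pool := make(chan *bytesutil.ByteBuffer, n)
//	for i := 0; i < n; i++ {
//		pool <- &bytesutil.ByteBuffer{}
//	}
//	bb := <-pool // acquire; blocks when all n buffers are in flight
//	// ... fill and send bb ...
//	bb.Reset()
//	pool <- bb // release
//
// This bounds the number of in-flight blocks to concurrency*len(addrs) and applies
// natural backpressure to ingestion when all buffers are busy sending.
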
// MustStop stops the s.
func (s *Storage) MustStop() {
	close(s.stopCh)
	s.wg.Wait()
	s.sns = nil
}

// AddRow adds the given log row into s.
func (s *Storage) AddRow(streamHash uint64, r *logstorage.InsertRow) {
	idx := s.srt.getNodeIdx(streamHash)
	sn := s.sns[idx]
	sn.addRow(r)
}

func (s *Storage) sendInsertRequestToAnyNode(pendingData *bytesutil.ByteBuffer) bool {
	startIdx := int(fastrand.Uint32n(uint32(len(s.sns))))
	for i := range s.sns {
		idx := (startIdx + i) % len(s.sns)
		sn := s.sns[idx]
		err := sn.sendInsertRequest(pendingData)
		if err == nil {
			return true
		}
		if !errors.Is(err, errTemporarilyDisabled) {
			logger.Warnf("cannot send pending data to the storage node %q: %s; trying to send it to another storage node", sn.addr, err)
		}
	}
	return false
}

var errTemporarilyDisabled = fmt.Errorf("writing to the node is temporarily disabled")

type streamRowsTracker struct {
	mu sync.Mutex

	nodesCount    int64
	rowsPerStream map[uint64]uint64
}

func newStreamRowsTracker(nodesCount int) *streamRowsTracker {
	return &streamRowsTracker{
		nodesCount:    int64(nodesCount),
		rowsPerStream: make(map[uint64]uint64),
	}
}

func (srt *streamRowsTracker) getNodeIdx(streamHash uint64) uint64 {
	if srt.nodesCount == 1 {
		// Fast path for a single node.
		return 0
	}

	srt.mu.Lock()
	defer srt.mu.Unlock()

	streamRows := srt.rowsPerStream[streamHash] + 1
	srt.rowsPerStream[streamHash] = streamRows

	if streamRows <= 1000 {
		// Write the initial rows for the stream to a single storage node for better locality.
		// This should work great for log streams containing a small number of logs: such streams
		// are distributed evenly among the available storage nodes, since they have different streamHash.
		return streamHash % uint64(srt.nodesCount)
	}

	// The log stream contains more than 1000 rows. Distribute them among storage nodes at random
	// in order to improve query performance over this stream (the data for the log stream
	// can be processed in parallel on all the storage nodes).
	//
	// The random distribution is preferred over round-robin distribution in order to avoid possible
	// dependency between the order of the ingested logs and the number of storage nodes,
	// which may lead to non-uniform distribution of logs among storage nodes.
	return uint64(fastrand.Uint32n(uint32(srt.nodesCount)))
}

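// Editor's note: an illustrative sketch, not part of the diff above. The routing
// policy in getNodeIdx can be exercised directly: rows of a cold stream stick to
// one node, while a hot stream (more than 1000 rows) spreads across all nodes.
// The node count below is an assumption for the example.
//
//	srt := newStreamRowsTracker(3)
//	streamHash := uint64(12345)
//	first := srt.getNodeIdx(streamHash) // streamHash % 3 for the initial rows
//	for i := 0; i < 999; i++ {
//		if srt.getNodeIdx(streamHash) != first {
//			panic("unexpected: the first 1000 rows of a stream must stay on one node")
//		}
//	}
//	// from the 1001st row onwards the node index is picked at random
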
app/vlstorage/netinsert/netinsert_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package netinsert

import (
	"fmt"
	"math"
	"math/rand"
	"testing"

	"github.com/cespare/xxhash/v2"
)

func TestStreamRowsTracker(t *testing.T) {
	f := func(rowsCount, streamsCount, nodesCount int) {
		t.Helper()

		// generate stream hashes
		streamHashes := make([]uint64, streamsCount)
		for i := range streamHashes {
			streamHashes[i] = xxhash.Sum64([]byte(fmt.Sprintf("stream %d.", i)))
		}

		srt := newStreamRowsTracker(nodesCount)

		rng := rand.New(rand.NewSource(0))
		rowsPerNode := make([]uint64, nodesCount)
		for i := 0; i < rowsCount; i++ {
			streamIdx := rng.Intn(streamsCount)
			h := streamHashes[streamIdx]
			nodeIdx := srt.getNodeIdx(h)
			rowsPerNode[nodeIdx]++
		}

		// Verify that rows are uniformly distributed among nodes.
		expectedRowsPerNode := float64(rowsCount) / float64(nodesCount)
		for nodeIdx, nodeRows := range rowsPerNode {
			if math.Abs(float64(nodeRows)-expectedRowsPerNode)/expectedRowsPerNode > 0.15 {
				t.Fatalf("non-uniform distribution of rows among nodes; node %d has %d rows, while it must have %v rows; rowsPerNode=%d",
					nodeIdx, nodeRows, expectedRowsPerNode, rowsPerNode)
			}
		}
	}

	rowsCount := 10000
	streamsCount := 9
	nodesCount := 2
	f(rowsCount, streamsCount, nodesCount)

	rowsCount = 10000
	streamsCount = 100
	nodesCount = 2
	f(rowsCount, streamsCount, nodesCount)

	rowsCount = 100000
	streamsCount = 1000
	nodesCount = 9
	f(rowsCount, streamsCount, nodesCount)
}

app/vlstorage/netselect/netselect.go (new file, 469 lines)
@@ -0,0 +1,469 @@
package netselect

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/contextutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)

const (
	// FieldNamesProtocolVersion is the version of the protocol used for /internal/select/field_names HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	FieldNamesProtocolVersion = "v1"

	// FieldValuesProtocolVersion is the version of the protocol used for /internal/select/field_values HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	FieldValuesProtocolVersion = "v1"

	// StreamFieldNamesProtocolVersion is the version of the protocol used for /internal/select/stream_field_names HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	StreamFieldNamesProtocolVersion = "v1"

	// StreamFieldValuesProtocolVersion is the version of the protocol used for /internal/select/stream_field_values HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	StreamFieldValuesProtocolVersion = "v1"

	// StreamsProtocolVersion is the version of the protocol used for /internal/select/streams HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	StreamsProtocolVersion = "v1"

	// StreamIDsProtocolVersion is the version of the protocol used for /internal/select/stream_ids HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	StreamIDsProtocolVersion = "v1"

	// QueryProtocolVersion is the version of the protocol used for /internal/select/query HTTP endpoint.
	//
	// It must be updated every time the protocol changes.
	QueryProtocolVersion = "v1"
)

// Storage is a network storage for querying remote storage nodes in the cluster.
type Storage struct {
	sns []*storageNode

	disableCompression bool
}

type storageNode struct {
	// scheme is http or https scheme to communicate with addr
	scheme string

	// addr is TCP address of the storage node to query
	addr string

	// s is a storage, which holds the given storageNode
	s *Storage

	// c is an http client used for querying storage node at addr.
	c *http.Client

	// ac is auth config used for setting request headers such as Authorization and Host.
	ac *promauth.Config
}

func newStorageNode(s *Storage, addr string, ac *promauth.Config, isTLS bool) *storageNode {
	tr := httputil.NewTransport(false, "vlselect_backend")
	tr.TLSHandshakeTimeout = 20 * time.Second
	tr.DisableCompression = true

	scheme := "http"
	if isTLS {
		scheme = "https"
	}

	sn := &storageNode{
		scheme: scheme,
		addr:   addr,
		s:      s,
		c: &http.Client{
			Transport: ac.NewRoundTripper(tr),
		},
		ac: ac,
	}
	return sn
}

func (sn *storageNode) runQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, processBlock func(db *logstorage.DataBlock)) error {
	args := sn.getCommonArgs(QueryProtocolVersion, tenantIDs, q)

	reqURL := sn.getRequestURL("/internal/select/query", args)
	req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil)
	if err != nil {
		logger.Panicf("BUG: unexpected error when creating a request: %s", err)
	}
	if err := sn.ac.SetHeaders(req, true); err != nil {
		return fmt.Errorf("cannot set auth headers for %q: %w", reqURL, err)
	}

	// send the request to the storage node
	resp, err := sn.c.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		responseBody, err := io.ReadAll(resp.Body)
		if err != nil {
			responseBody = []byte(err.Error())
		}
		return fmt.Errorf("unexpected status code for the request to %q: %d; want %d; response: %q", reqURL, resp.StatusCode, http.StatusOK, responseBody)
	}

	// read the response
	var dataLenBuf [8]byte
	var buf []byte
	var db logstorage.DataBlock
	var valuesBuf []string
	for {
		if _, err := io.ReadFull(resp.Body, dataLenBuf[:]); err != nil {
			if errors.Is(err, io.EOF) {
				// The end of response stream
				return nil
			}
			return fmt.Errorf("cannot read block size from %q: %w", reqURL, err)
		}
		blockLen := encoding.UnmarshalUint64(dataLenBuf[:])
		if blockLen > math.MaxInt {
			return fmt.Errorf("too big data block: %d bytes; mustn't exceed %v bytes", blockLen, math.MaxInt)
		}

		buf = slicesutil.SetLength(buf, int(blockLen))
		if _, err := io.ReadFull(resp.Body, buf); err != nil {
			return fmt.Errorf("cannot read block with size of %d bytes from %q: %w", blockLen, reqURL, err)
		}

		src := buf
		if !sn.s.disableCompression {
			bufLen := len(buf)
			var err error
			buf, err = zstd.Decompress(buf, buf)
			if err != nil {
				return fmt.Errorf("cannot decompress data block: %w", err)
			}
			src = buf[bufLen:]
		}

		for len(src) > 0 {
			tail, vb, err := db.UnmarshalInplace(src, valuesBuf[:0])
			if err != nil {
				return fmt.Errorf("cannot unmarshal data block received from %q: %w", reqURL, err)
			}
			valuesBuf = vb
			src = tail

			processBlock(&db)

			clear(valuesBuf)
		}
	}
}

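// Editor's note: an illustrative sketch, not part of the diff above. The loop in
// runQuery consumes a stream of length-prefixed frames: an 8-byte size encoded via
// lib/encoding (the counterpart of encoding.UnmarshalUint64 above), followed by the
// payload, which is zstd-compressed unless compression is disabled. A server could
// emit one frame roughly like this (writeFrame is an assumed name, not from the source):
//
//	func writeFrame(w io.Writer, payload []byte, compress bool) error {
//		if compress {
//			payload = zstd.CompressLevel(nil, payload, 1)
//		}
//		sizeBuf := encoding.MarshalUint64(nil, uint64(len(payload)))
//		if _, err := w.Write(sizeBuf); err != nil {
//			return err
//		}
//		_, err := w.Write(payload)
//		return err
//	}
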
func (sn *storageNode) getFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	args := sn.getCommonArgs(FieldNamesProtocolVersion, tenantIDs, q)

	return sn.getValuesWithHits(ctx, "/internal/select/field_names", args)
}

func (sn *storageNode) getFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	args := sn.getCommonArgs(FieldValuesProtocolVersion, tenantIDs, q)
	args.Set("field", fieldName)
	args.Set("limit", fmt.Sprintf("%d", limit))

	return sn.getValuesWithHits(ctx, "/internal/select/field_values", args)
}

func (sn *storageNode) getStreamFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	args := sn.getCommonArgs(StreamFieldNamesProtocolVersion, tenantIDs, q)

	return sn.getValuesWithHits(ctx, "/internal/select/stream_field_names", args)
}

func (sn *storageNode) getStreamFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	args := sn.getCommonArgs(StreamFieldValuesProtocolVersion, tenantIDs, q)
	args.Set("field", fieldName)
	args.Set("limit", fmt.Sprintf("%d", limit))

	return sn.getValuesWithHits(ctx, "/internal/select/stream_field_values", args)
}

func (sn *storageNode) getStreams(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	args := sn.getCommonArgs(StreamsProtocolVersion, tenantIDs, q)
	args.Set("limit", fmt.Sprintf("%d", limit))

	return sn.getValuesWithHits(ctx, "/internal/select/streams", args)
}

func (sn *storageNode) getStreamIDs(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	args := sn.getCommonArgs(StreamIDsProtocolVersion, tenantIDs, q)
	args.Set("limit", fmt.Sprintf("%d", limit))

	return sn.getValuesWithHits(ctx, "/internal/select/stream_ids", args)
}

func (sn *storageNode) getCommonArgs(version string, tenantIDs []logstorage.TenantID, q *logstorage.Query) url.Values {
	args := url.Values{}
	args.Set("version", version)
	args.Set("tenant_ids", string(logstorage.MarshalTenantIDs(nil, tenantIDs)))
	args.Set("query", q.String())
	args.Set("timestamp", fmt.Sprintf("%d", q.GetTimestamp()))
	args.Set("disable_compression", fmt.Sprintf("%v", sn.s.disableCompression))
	return args
}

func (sn *storageNode) getValuesWithHits(ctx context.Context, path string, args url.Values) ([]logstorage.ValueWithHits, error) {
	data, err := sn.executeRequestAt(ctx, path, args)
	if err != nil {
		return nil, err
	}
	return unmarshalValuesWithHits(data)
}

func (sn *storageNode) executeRequestAt(ctx context.Context, path string, args url.Values) ([]byte, error) {
	reqURL := sn.getRequestURL(path, args)
	req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil)
	if err != nil {
		logger.Panicf("BUG: unexpected error when creating a request: %s", err)
	}

	// send the request to the storage node
	resp, err := sn.c.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		responseBody, err := io.ReadAll(resp.Body)
		if err != nil {
			responseBody = []byte(err.Error())
		}
		return nil, fmt.Errorf("unexpected status code for the request to %q: %d; want %d; response: %q", reqURL, resp.StatusCode, http.StatusOK, responseBody)
	}

	// read the response
	var bb bytesutil.ByteBuffer
	if _, err := bb.ReadFrom(resp.Body); err != nil {
		return nil, fmt.Errorf("cannot read response from %q: %w", reqURL, err)
	}

	if sn.s.disableCompression {
		return bb.B, nil
	}

	bbLen := len(bb.B)
	bb.B, err = zstd.Decompress(bb.B, bb.B)
	if err != nil {
		return nil, err
	}
	return bb.B[bbLen:], nil
}

func (sn *storageNode) getRequestURL(path string, args url.Values) string {
	return fmt.Sprintf("%s://%s%s?%s", sn.scheme, sn.addr, path, args.Encode())
}

// NewStorage returns new Storage for the given addrs and the given authCfgs.
//
// If disableCompression is set, then uncompressed responses are received from storage nodes.
//
// Call MustStop on the returned storage when it is no longer needed.
func NewStorage(addrs []string, authCfgs []*promauth.Config, isTLSs []bool, disableCompression bool) *Storage {
	s := &Storage{
		disableCompression: disableCompression,
	}

	sns := make([]*storageNode, len(addrs))
	for i, addr := range addrs {
		sns[i] = newStorageNode(s, addr, authCfgs[i], isTLSs[i])
	}
	s.sns = sns

	return s
}

// MustStop stops the s.
func (s *Storage) MustStop() {
	s.sns = nil
}

// RunQuery runs the given q and calls writeBlock for the returned data blocks
func (s *Storage) RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteDataBlockFunc) error {
	nqr, err := logstorage.NewNetQueryRunner(ctx, tenantIDs, q, s.RunQuery, writeBlock)
	if err != nil {
		return err
	}

	search := func(stopCh <-chan struct{}, q *logstorage.Query, writeBlock logstorage.WriteDataBlockFunc) error {
		return s.runQuery(stopCh, tenantIDs, q, writeBlock)
	}

	concurrency := q.GetConcurrency()
	return nqr.Run(ctx, concurrency, search)
}

func (s *Storage) runQuery(stopCh <-chan struct{}, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteDataBlockFunc) error {
	ctxWithCancel, cancel := contextutil.NewStopChanContext(stopCh)
	defer cancel()

	errs := make([]error, len(s.sns))

	var wg sync.WaitGroup
	for i := range s.sns {
		wg.Add(1)
		go func(nodeIdx int) {
			defer wg.Done()
			sn := s.sns[nodeIdx]
			err := sn.runQuery(ctxWithCancel, tenantIDs, q, func(db *logstorage.DataBlock) {
				writeBlock(uint(nodeIdx), db)
			})
			if err != nil {
				// Cancel the remaining parallel queries
				cancel()
			}

			errs[nodeIdx] = err
		}(i)
	}
	wg.Wait()

	return getFirstNonCancelError(errs)
}

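// Editor's note: runQuery above is a scatter-gather: the query is sent to every
// storage node in parallel, the shared context is canceled on the first failure,
// and getFirstNonCancelError then reports the root cause instead of the secondary
// context.Canceled errors produced by that cancellation.
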
// GetFieldNames executes q and returns field names seen in results.
func (s *Storage) GetFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	return s.getValuesWithHits(ctx, 0, false, func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error) {
		return sn.getFieldNames(ctx, tenantIDs, q)
	})
}

// GetFieldValues executes q and returns unique values for the fieldName seen in results.
//
// If limit > 0, then up to limit unique values are returned.
func (s *Storage) GetFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	return s.getValuesWithHits(ctx, limit, true, func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error) {
		return sn.getFieldValues(ctx, tenantIDs, q, fieldName, limit)
	})
}

// GetStreamFieldNames executes q and returns stream field names seen in results.
func (s *Storage) GetStreamFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
	return s.getValuesWithHits(ctx, 0, false, func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error) {
		return sn.getStreamFieldNames(ctx, tenantIDs, q)
	})
}

// GetStreamFieldValues executes q and returns stream field values for the given fieldName seen in results.
//
// If limit > 0, then up to limit unique stream field values are returned.
func (s *Storage) GetStreamFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
	return s.getValuesWithHits(ctx, limit, true, func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error) {
		return sn.getStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
	})
}

// GetStreams executes q and returns streams seen in query results.
//
// If limit > 0, then up to limit unique streams are returned.
func (s *Storage) GetStreams(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	return s.getValuesWithHits(ctx, limit, true, func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error) {
		return sn.getStreams(ctx, tenantIDs, q, limit)
	})
}

// GetStreamIDs executes q and returns streamIDs seen in query results.
//
// If limit > 0, then up to limit unique streamIDs are returned.
func (s *Storage) GetStreamIDs(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
	return s.getValuesWithHits(ctx, limit, true, func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error) {
		return sn.getStreamIDs(ctx, tenantIDs, q, limit)
	})
}

func (s *Storage) getValuesWithHits(ctx context.Context, limit uint64, resetHitsOnLimitExceeded bool,
	callback func(ctx context.Context, sn *storageNode) ([]logstorage.ValueWithHits, error)) ([]logstorage.ValueWithHits, error) {

	ctxWithCancel, cancel := context.WithCancel(ctx)
	defer cancel()

	results := make([][]logstorage.ValueWithHits, len(s.sns))
	errs := make([]error, len(s.sns))

	var wg sync.WaitGroup
	for i := range s.sns {
		wg.Add(1)
		go func(nodeIdx int) {
			defer wg.Done()

			sn := s.sns[nodeIdx]
			vhs, err := callback(ctxWithCancel, sn)
			results[nodeIdx] = vhs
			errs[nodeIdx] = err

			if err != nil {
				// Cancel the remaining parallel requests
				cancel()
			}
		}(i)
	}
	wg.Wait()

	if err := getFirstNonCancelError(errs); err != nil {
		return nil, err
	}

	vhs := logstorage.MergeValuesWithHits(results, limit, resetHitsOnLimitExceeded)

	return vhs, nil
}

func getFirstNonCancelError(errs []error) error {
	for _, err := range errs {
		if err != nil && !errors.Is(err, context.Canceled) {
			return err
		}
	}
	return nil
}

func unmarshalValuesWithHits(src []byte) ([]logstorage.ValueWithHits, error) {
	var vhs []logstorage.ValueWithHits
	for len(src) > 0 {
		var vh logstorage.ValueWithHits
		tail, err := vh.UnmarshalInplace(src)
		if err != nil {
			return nil, fmt.Errorf("cannot unmarshal ValueWithHits #%d: %w", len(vhs), err)
		}
		src = tail

		// Clone vh.Value, since it points to src.
		vh.Value = strings.Clone(vh.Value)

		vhs = append(vhs, vh)
	}

	return vhs, nil
}

@@ -1,3 +1,3 @@
See vmagent docs [here](https://docs.victoriametrics.com/victoriametrics/vmagent/).

vmagent docs can be edited at [docs/vmagent.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmagent.md).

@@ -105,7 +105,7 @@ func main() {
	// Some workloads may need increased GOGC values. Then such values can be set via GOGC environment variable.
	// It is recommended to increase GOGC if the go_memstats_gc_cpu_fraction metric exposed at /metrics page
	// exceeds 0.05 for extended periods of time.
	cgroup.SetGOGC(50)

	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)

@@ -443,8 +443,10 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	case "/prometheus/api/v1/targets", "/api/v1/targets":
		promscrapeAPIV1TargetsRequests.Inc()
		w.Header().Set("Content-Type", "application/json")
		// https://prometheus.io/docs/prometheus/latest/querying/api/#targets
		state := r.FormValue("state")
		scrapePool := r.FormValue("scrapePool")
		promscrape.WriteAPIV1Targets(w, state, scrapePool)
		return true
	case "/prometheus/target_response", "/target_response":
		promscrapeTargetResponseRequests.Inc()

@@ -10,20 +10,22 @@ import (
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
	"github.com/VictoriaMetrics/metrics"
	"github.com/golang/snappy"
)

var (

@@ -88,7 +90,8 @@ type client struct {
	remoteWriteURL string

	// Whether to use VictoriaMetrics remote write protocol for sending the data to remoteWriteURL
	useVMProto          atomic.Bool
	canDowngradeVMProto atomic.Bool

	fq *persistentqueue.FastQueue
	hc *http.Client

@@ -126,8 +129,7 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
		logger.Fatalf("cannot initialize AWS Config for -remoteWrite.url=%q: %s", remoteWriteURL, err)
	}

	tr := httputil.NewTransport(false, "vmagent_remotewrite")
	tr.TLSHandshakeTimeout = tlsHandshakeTimeout.GetOptionalArg(argIdx)
	tr.MaxConnsPerHost = 2 * concurrency
	tr.MaxIdleConnsPerHost = 2 * concurrency

@@ -168,17 +170,11 @@ func newHTTPClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persiste
		logger.Fatalf("-remoteWrite.useVMProto and -remoteWrite.usePromProto cannot be set simultaneously for -remoteWrite.url=%s", sanitizedURL)
	}
	if !useVMProto && !usePromProto {
		// The VM protocol could be downgraded later at runtime if unsupported media type response status is received.
		useVMProto = true
		c.canDowngradeVMProto.Store(true)
	}
	c.useVMProto.Store(useVMProto)

	return c
}

@@ -386,7 +382,7 @@ func (c *client) newRequest(url string, body []byte) (*http.Request, error) {
	h := req.Header
	h.Set("User-Agent", "vmagent")
	h.Set("Content-Type", "application/x-protobuf")
	if encoding.IsZstd(body) {
		h.Set("Content-Encoding", "zstd")
		h.Set("X-VictoriaMetrics-Remote-Write-Version", "1")
	} else {

@@ -422,7 +418,7 @@ again:
		if retryDuration > maxRetryDuration {
			retryDuration = maxRetryDuration
		}
		remoteWriteRetryLogger.Warnf("couldn't send a block with size %d bytes to %q: %s; re-sending the block in %.3f seconds",
			len(block), c.sanitizedURL, err, retryDuration.Seconds())
		t := timerpool.Get(retryDuration)
		select {

@@ -435,6 +431,7 @@ again:
		c.retriesCount.Inc()
		goto again
	}

	statusCode := resp.StatusCode
	if statusCode/100 == 2 {
		_ = resp.Body.Close()

@@ -443,24 +440,46 @@ again:
		c.blocksSent.Inc()
		return true
	}

	metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="%d"}`, c.sanitizedURL, statusCode)).Inc()
	if statusCode == 409 {
		logBlockRejected(block, c.sanitizedURL, resp)

		// Just drop block on 409 status code like Prometheus does.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/873
		// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1149
		_ = resp.Body.Close()
		c.packetsDropped.Inc()
		return true

		// - Remote Write v1 specification implicitly expects a `400 Bad Request` when the encoding is not supported.
		// - Remote Write v2 specification explicitly specifies a `415 Unsupported Media Type` for unsupported encodings.
		// - Real-world implementations of v1 use both 400 and 415 status codes.
		// See more in research: https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8462#issuecomment-2786918054
	} else if statusCode == 415 || statusCode == 400 {
		if c.canDowngradeVMProto.Swap(false) {
			logger.Infof("received unsupported media type or bad request from remote storage at %q. Downgrading protocol from VictoriaMetrics to Prometheus remote write for all future requests. "+
				"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol", c.sanitizedURL)
			c.useVMProto.Store(false)
		}

		if encoding.IsZstd(block) {
			logger.Infof("received unsupported media type or bad request from remote storage at %q. Re-packing the block to Prometheus remote write and retrying. "+
				"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol", c.sanitizedURL)

			block = mustRepackBlockFromZstdToSnappy(block)

			c.retriesCount.Inc()
			_ = resp.Body.Close()
			goto again
		}

		// Just drop snappy blocks on 400 or 415 status codes like Prometheus does.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/873
		// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1149
		logBlockRejected(block, c.sanitizedURL, resp)
		_ = resp.Body.Close()
		c.packetsDropped.Inc()
		return true
	}

	// Unexpected status code returned

@@ -490,6 +509,7 @@ again:
}

var remoteWriteRejectedLogger = logger.WithThrottler("remoteWriteRejected", 5*time.Second)
var remoteWriteRetryLogger = logger.WithThrottler("remoteWriteRetry", 5*time.Second)

// getRetryDuration returns retry duration.
// retryAfterDuration has the highest priority.
@@ -512,6 +532,28 @@ func getRetryDuration(retryAfterDuration, retryDuration, maxRetryDuration time.D
	return retryDuration
}

func mustRepackBlockFromZstdToSnappy(zstdBlock []byte) []byte {
	plainBlock := make([]byte, 0, len(zstdBlock)*2)
	plainBlock, err := zstd.Decompress(plainBlock, zstdBlock)
	if err != nil {
		logger.Panicf("FATAL: cannot re-pack block with size %d bytes from Zstd to Snappy: %s", len(zstdBlock), err)
	}

	return snappy.Encode(nil, plainBlock)
}

func logBlockRejected(block []byte, sanitizedURL string, resp *http.Response) {
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		remoteWriteRejectedLogger.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; "+
			"failed to read response body: %s",
			len(block), sanitizedURL, resp.StatusCode, err)
	} else {
		remoteWriteRejectedLogger.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; response body: %s",
			len(block), sanitizedURL, resp.StatusCode, string(body))
	}
}

// parseRetryAfterHeader parses `Retry-After` value retrieved from HTTP response header.
// retryAfterString should be in either HTTP-date or a number of seconds.
// It will return time.Duration(0) if `retryAfterString` does not follow RFC 7231.

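// Editor's note: an illustrative sketch, not part of the diff above. Per RFC 7231 a
// Retry-After value is either a decimal number of seconds or an HTTP-date; a minimal
// parser matching the description above could look like this (the name and exact
// behavior are assumptions, not taken from the source):
//
//	func parseRetryAfter(s string) time.Duration {
//		if seconds, err := strconv.ParseInt(s, 10, 64); err == nil {
//			return time.Duration(seconds) * time.Second
//		}
//		if t, err := time.Parse(http.TimeFormat, s); err == nil {
//			return time.Until(t)
//		}
//		return 0
//	}
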
@@ -5,6 +5,9 @@ import (
	"net/http"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/golang/snappy"
)

func TestCalculateRetryDuration(t *testing.T) {
@@ -97,3 +100,19 @@ func helper(d time.Duration) time.Duration {

	return d + dv
}

func TestRepackBlockFromZstdToSnappy(t *testing.T) {
	expectedPlainBlock := []byte(`foobar`)

	zstdBlock := encoding.CompressZSTDLevel(nil, expectedPlainBlock, 1)
	snappyBlock := mustRepackBlockFromZstdToSnappy(zstdBlock)

	actualPlainBlock, err := snappy.Decode(nil, snappyBlock)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	if string(actualPlainBlock) != string(expectedPlainBlock) {
		t.Fatalf("unexpected plain block; got %q; want %q", actualPlainBlock, expectedPlainBlock)
	}
}

@@ -16,6 +16,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
	"github.com/VictoriaMetrics/metrics"
	"github.com/golang/snappy"
@@ -39,7 +40,7 @@ type pendingSeries struct {
	periodicFlusherWG sync.WaitGroup
}

func newPendingSeries(fq *persistentqueue.FastQueue, isVMRemoteWrite *atomic.Bool, significantFigures, roundDigits int) *pendingSeries {
	var ps pendingSeries
	ps.wr.fq = fq
	ps.wr.isVMRemoteWrite = isVMRemoteWrite

@@ -99,7 +100,7 @@ type writeRequest struct {
	fq *persistentqueue.FastQueue

	// Whether to encode the write request with VictoriaMetrics remote write protocol.
	isVMRemoteWrite *atomic.Bool

	// How many significant figures must be left before sending the writeRequest to fq.
	significantFigures int

@@ -137,7 +138,7 @@ func (wr *writeRequest) reset() {
// This is needed in order to properly save in-memory data to persistent queue on graceful shutdown.
func (wr *writeRequest) mustFlushOnStop() {
	wr.wr.Timeseries = wr.tss
	if !tryPushWriteRequest(&wr.wr, wr.mustWriteBlock, wr.isVMRemoteWrite.Load()) {
		logger.Panicf("BUG: final flush must always return true")
	}
	wr.reset()

@@ -151,7 +152,7 @@ func (wr *writeRequest) mustWriteBlock(block []byte) bool {
func (wr *writeRequest) tryFlush() bool {
	wr.wr.Timeseries = wr.tss
	wr.lastFlushTime.Store(fasttime.UnixTimestamp())
	if !tryPushWriteRequest(&wr.wr, wr.fq.TryWriteBlock, wr.isVMRemoteWrite.Load()) {
		return false
	}
	wr.reset()

@@ -197,28 +198,43 @@ func (wr *writeRequest) tryPush(src []prompbmarshal.TimeSeries) bool {
}

func (wr *writeRequest) copyTimeSeries(dst, src *prompbmarshal.TimeSeries) {
	labelsSrc := src.Labels

	// Pre-allocate memory for labels.
	labelsLen := len(wr.labels)
	wr.labels = slicesutil.SetLength(wr.labels, labelsLen+len(labelsSrc))
	labelsDst := wr.labels[labelsLen:]

	// Pre-allocate memory for byte slice needed for storing label names and values.
	neededBufLen := 0
	for i := range labelsSrc {
		label := &labelsSrc[i]
		neededBufLen += len(label.Name) + len(label.Value)
	}
	bufLen := len(wr.buf)
	wr.buf = slicesutil.SetLength(wr.buf, bufLen+neededBufLen)
	buf := wr.buf[:bufLen]

	// Copy labels
	for i := range labelsSrc {
		dstLabel := &labelsDst[i]
		srcLabel := &labelsSrc[i]

		bufLen := len(buf)
		buf = append(buf, srcLabel.Name...)
		dstLabel.Name = bytesutil.ToUnsafeString(buf[bufLen:])

		bufLen = len(buf)
		buf = append(buf, srcLabel.Value...)
		dstLabel.Value = bytesutil.ToUnsafeString(buf[bufLen:])
	}
	wr.buf = buf
	dst.Labels = labelsDst

	// Copy samples
	samplesLen := len(wr.samples)
	wr.samples = append(wr.samples, src.Samples...)
	dst.Samples = wr.samples[samplesLen:]
}

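// Editor's note on the rewritten copyTimeSeries above: label names and values are
// appended to a single backing byte slice (wr.buf) and exposed via
// bytesutil.ToUnsafeString, so the copy costs one buffer grow instead of one
// allocation per label name and value. The resulting strings alias wr.buf and stay
// valid only until the writeRequest is reset, which the surrounding flush logic
// appears to rely on.
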
// marshalConcurrency limits the maximum number of concurrent workers, which marshal and compress WriteRequest.

@@ -15,6 +15,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/consistenthash"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"

@@ -43,7 +44,7 @@ var (
		"according to https://docs.victoriametrics.com/#how-to-import-time-series-data ."+
		"See https://docs.victoriametrics.com/vmagent/#multitenancy for details")

	shardByURL = flag.Bool("remoteWrite.shardByURL", false, "Whether to shard outgoing series across all the remote storage systems enumerated via -remoteWrite.url. "+
		"By default the data is replicated across all the -remoteWrite.url . See https://docs.victoriametrics.com/vmagent/#sharding-among-remote-storages . "+
		"See also -remoteWrite.shardByURLReplicas")
	shardByURLReplicas = flag.Int("remoteWrite.shardByURLReplicas", 1, "How many copies of data to make among remote storage systems enumerated via -remoteWrite.url "+

@@ -54,7 +55,6 @@ var (
|
||||
shardByURLIgnoreLabels = flagutil.NewArrayString("remoteWrite.shardByURL.ignoreLabels", "Optional list of labels, which must be ignored when sharding outgoing samples "+
|
||||
"among remote storage systems if -remoteWrite.shardByURL command-line flag is set. By default all the labels are used for sharding in order to gain "+
|
||||
"even distribution of series over the specified -remoteWrite.url systems. See also -remoteWrite.shardByURL.labels")
|
||||
|
||||
tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory for storing pending data, which isn't sent to the configured -remoteWrite.url . "+
|
||||
"See also -remoteWrite.maxDiskUsagePerURL and -remoteWrite.disableOnDiskQueue")
|
||||
keepDanglingQueues = flag.Bool("remoteWrite.keepDanglingQueues", false, "Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. "+
|
||||
@@ -96,7 +96,9 @@ var (
|
||||
|
||||
var (
|
||||
// rwctxsGlobal contains statically populated entries when -remoteWrite.url is specified.
|
||||
rwctxsGlobal []*remoteWriteCtx
|
||||
rwctxsGlobal []*remoteWriteCtx
|
||||
rwctxsGlobalIdx []int
|
||||
rwctxConsistentHashGlobal *consistenthash.ConsistentHash
|
||||
|
||||
// ErrQueueFullHTTPRetry must be returned when TryPush() returns false.
|
||||
ErrQueueFullHTTPRetry = &httpserver.ErrorWithStatusCode{
|
||||
@@ -205,7 +207,7 @@ func Init() {
|
||||
|
||||
initStreamAggrConfigGlobal()
|
||||
|
||||
rwctxsGlobal = newRemoteWriteCtxs(*remoteWriteURLs)
|
||||
initRemoteWriteCtxs(*remoteWriteURLs)
|
||||
|
||||
disableOnDiskQueues := []bool(*disableOnDiskQueue)
|
||||
disableOnDiskQueueAny = slices.Contains(disableOnDiskQueues, true)
|
||||
@@ -290,7 +292,7 @@ var (
|
||||
relabelConfigTimestamp = metrics.NewCounter(`vmagent_relabel_config_last_reload_success_timestamp_seconds`)
|
||||
)
|
||||
|
||||
func newRemoteWriteCtxs(urls []string) []*remoteWriteCtx {
|
||||
func initRemoteWriteCtxs(urls []string) {
|
||||
if len(urls) == 0 {
|
||||
logger.Panicf("BUG: urls must be non-empty")
|
||||
}
|
||||
@@ -306,6 +308,7 @@ func newRemoteWriteCtxs(urls []string) []*remoteWriteCtx {
|
||||
maxInmemoryBlocks = 2
|
||||
}
|
||||
rwctxs := make([]*remoteWriteCtx, len(urls))
|
||||
rwctxIdx := make([]int, len(urls))
|
||||
for i, remoteWriteURLRaw := range urls {
|
||||
remoteWriteURL, err := url.Parse(remoteWriteURLRaw)
|
||||
if err != nil {
|
||||
@@ -316,8 +319,19 @@ func newRemoteWriteCtxs(urls []string) []*remoteWriteCtx {
|
||||
sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
|
||||
}
|
||||
rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, maxInmemoryBlocks, sanitizedURL)
|
||||
rwctxIdx[i] = i
|
||||
}
|
||||
return rwctxs
|
||||
|
||||
if *shardByURL {
|
||||
consistentHashNodes := make([]string, 0, len(urls))
|
||||
for i, url := range urls {
|
||||
consistentHashNodes = append(consistentHashNodes, fmt.Sprintf("%d:%s", i+1, url))
|
||||
}
|
||||
rwctxConsistentHashGlobal = consistenthash.NewConsistentHash(consistentHashNodes, 0)
|
||||
}
|
||||
|
||||
rwctxsGlobal = rwctxs
|
||||
rwctxsGlobalIdx = rwctxIdx
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -501,6 +515,10 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnF
|
||||
return true
|
||||
}
|
||||
|
||||
// getEligibleRemoteWriteCtxs checks whether writes to configured remote storage systems are blocked and
|
||||
// returns only the unblocked rwctx.
|
||||
//
|
||||
// calculateHealthyRwctxIdx will rely on the order of rwctx to be in ascending order.
|
||||
func getEligibleRemoteWriteCtxs(tss []prompbmarshal.TimeSeries, forceDropSamplesOnFailure bool) ([]*remoteWriteCtx, bool) {
|
||||
if !disableOnDiskQueueAny {
|
||||
return rwctxsGlobal, true
|
||||
@@ -517,6 +535,12 @@ func getEligibleRemoteWriteCtxs(tss []prompbmarshal.TimeSeries, forceDropSamples
|
||||
return nil, false
|
||||
}
|
||||
rowsCount := getRowsCount(tss)
|
||||
if *shardByURL {
|
||||
// TODO: when shardByURL is enabled, the following metric won't be 100% accurate, because vmagent
// doesn't know yet which rwctx the data will be pushed to. Assume the hashing algorithm is fair
// and distributes the data evenly across all rwctxs.
rowsCount = rowsCount / len(rwctxsGlobal)
}
rwctx.rowsDroppedOnPushFailure.Add(rowsCount)
}
}
@@ -528,6 +552,7 @@ func pushToRemoteStoragesTrackDropped(tss []prompbmarshal.TimeSeries) {
if len(rwctxs) == 0 {
return
}

if !tryPushBlockToRemoteStorages(rwctxs, tss, true) {
logger.Panicf("BUG: tryPushBlockToRemoteStorages() must return true when forceDropSamplesOnFailure=true")
}
@@ -578,42 +603,7 @@ func tryShardingBlockAmongRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []pr
defer putTSSShards(x)

shards := x.shards
tmpLabels := promutil.GetLabels()
for _, ts := range tssBlock {
hashLabels := ts.Labels
if len(shardByURLLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLLabelsMap[label.Name]; ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
} else if len(shardByURLIgnoreLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
}
h := getLabelsHash(hashLabels)
idx := h % uint64(len(shards))
i := 0
for {
shards[idx] = append(shards[idx], ts)
i++
if i >= replicas {
break
}
idx++
if idx >= uint64(len(shards)) {
idx = 0
}
}
}
promutil.PutLabels(tmpLabels)
shardAmountRemoteWriteCtx(tssBlock, shards, rwctxs, replicas)

// Push sharded samples to remote storage systems in parallel in order to reduce
// the time needed for sending the data to multiple remote storage systems.
@@ -636,6 +626,86 @@ func tryShardingBlockAmongRemoteStorages(rwctxs []*remoteWriteCtx, tssBlock []pr
return !anyPushFailed.Load()
}

// calculateHealthyRwctxIdx returns the index of healthyRwctxs in rwctxsGlobal.
// It relies on the order of rwctx in healthyRwctxs, which is appended by getEligibleRemoteWriteCtxs.
func calculateHealthyRwctxIdx(healthyRwctxs []*remoteWriteCtx) ([]int, []int) {
// fast path: all rwctxs are healthy.
if len(healthyRwctxs) == len(rwctxsGlobal) {
return rwctxsGlobalIdx, nil
}

unhealthyIdx := make([]int, 0, len(rwctxsGlobal))
healthyIdx := make([]int, 0, len(rwctxsGlobal))

var i int
for j := range rwctxsGlobal {
if i < len(healthyRwctxs) && rwctxsGlobal[j].idx == healthyRwctxs[i].idx {
healthyIdx = append(healthyIdx, j)
i++
} else {
unhealthyIdx = append(unhealthyIdx, j)
}
}

return healthyIdx, unhealthyIdx
}
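calculateHealthyRwctxIdx above is a two-pointer merge over two ascending index lists. The same logic reduced to plain ints, so it can be run standalone (a sketch, not the vmagent types):

```go
package main

import "fmt"

// splitIdx walks the full ascending index list and an ascending healthy
// subset in lockstep, classifying each global index as healthy or not —
// exactly the loop inside calculateHealthyRwctxIdx.
func splitIdx(global, healthy []int) (healthyIdx, unhealthyIdx []int) {
	i := 0
	for _, j := range global {
		if i < len(healthy) && j == healthy[i] {
			healthyIdx = append(healthyIdx, j)
			i++
		} else {
			unhealthyIdx = append(unhealthyIdx, j)
		}
	}
	return healthyIdx, unhealthyIdx
}

func main() {
	h, u := splitIdx([]int{0, 1, 2, 3, 4}, []int{0, 2, 4})
	fmt.Println(h, u) // [0 2 4] [1 3]
}
```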
// shardAmountRemoteWriteCtx distributes time series among shards by consistent hashing.
func shardAmountRemoteWriteCtx(tssBlock []prompbmarshal.TimeSeries, shards [][]prompbmarshal.TimeSeries, rwctxs []*remoteWriteCtx, replicas int) {
tmpLabels := promutil.GetLabels()
defer promutil.PutLabels(tmpLabels)

healthyIdx, unhealthyIdx := calculateHealthyRwctxIdx(rwctxs)

// shardsIdxMap maps a rwctxs index to its index in shards.
// rwctxConsistentHashGlobal tells which rwctxs idx a time series should be written to,
// and the time series must then be appended to the shard with the matching index.
shardsIdxMap := make(map[int]int, len(healthyIdx))
for idx, rwctxsIdx := range healthyIdx {
shardsIdxMap[rwctxsIdx] = idx
}

for _, ts := range tssBlock {
hashLabels := ts.Labels
if len(shardByURLLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLLabelsMap[label.Name]; ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
} else if len(shardByURLIgnoreLabelsMap) > 0 {
hashLabels = tmpLabels.Labels[:0]
for _, label := range ts.Labels {
if _, ok := shardByURLIgnoreLabelsMap[label.Name]; !ok {
hashLabels = append(hashLabels, label)
}
}
tmpLabels.Labels = hashLabels
}
h := getLabelsHash(hashLabels)

// Get the rwctxIdx through consistent hashing and then map it to the index in shards.
// The rwctxIdx is not always equal to the shardIdx, for example, when some rwctx are not available.
rwctxIdx := rwctxConsistentHashGlobal.GetNodeIdx(h, unhealthyIdx)
shardIdx := shardsIdxMap[rwctxIdx]

replicated := 0
for {
shards[shardIdx] = append(shards[shardIdx], ts)
replicated++
if replicated >= replicas {
break
}
shardIdx++
if shardIdx >= len(shards) {
shardIdx = 0
}
}
}
}
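For intuition about the replication loop above, here is a tiny runnable sketch with hypothetical values (three healthy shards, two replicas): a series whose consistent-hash shard is the last one wraps around to shard 0 for its second copy.

```go
package main

import "fmt"

func main() {
	shards := make([][]string, 3) // one shard per healthy remote-write context
	replicas := 2

	// Suppose consistent hashing picked shard 2 for this series.
	shardIdx := 2
	for replicated := 0; replicated < replicas; replicated++ {
		shards[shardIdx] = append(shards[shardIdx], `up{job="a"}`)
		shardIdx++
		if shardIdx >= len(shards) {
			shardIdx = 0 // wrap around, as in shardAmountRemoteWriteCtx
		}
	}
	fmt.Println(shards) // [[up{job="a"}] [] [up{job="a"}]]
}
```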
type tssShards struct {
shards [][]prompbmarshal.TimeSeries
}
@@ -807,7 +877,7 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL *url.URL, maxInmemoryBlocks in
}
pss := make([]*pendingSeries, pssLen)
for i := range pss {
pss[i] = newPendingSeries(fq, c.useVMProto, sf, rd)
pss[i] = newPendingSeries(fq, &c.useVMProto, sf, rd)
}

rwctx := &remoteWriteCtx{

@@ -4,13 +4,17 @@ import (
"fmt"
"math"
"reflect"
"strconv"
"sync/atomic"
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/consistenthash"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"

"github.com/VictoriaMetrics/metrics"
)

@@ -68,7 +72,9 @@ func TestRemoteWriteContext_TryPush_ImmutableTimeseries(t *testing.T) {
allRelabelConfigs.Store(rcs)

pss := make([]*pendingSeries, 1)
pss[0] = newPendingSeries(nil, true, 0, 100)
isVMProto := &atomic.Bool{}
isVMProto.Store(true)
pss[0] = newPendingSeries(nil, isVMProto, 0, 100)
rwctx := &remoteWriteCtx{
idx: 0,
streamAggrKeepInput: keepInput,
@@ -171,3 +177,173 @@ metric{env="dev"} 15
metric{env="bar"} 25
`)
}

func TestShardAmountRemoteWriteCtx(t *testing.T) {
// 1. distribute 100000 series to n nodes.
// 2. remove the last node from the healthy list.
// 3. distribute the same 100000 series to the remaining (n-1) nodes again.
// 4. check the active time series change rate:
// the change rate must be < (3/total nodes), e.g. +30% if you have 10 nodes.

f := func(remoteWriteCount int, healthyIdx []int, replicas int) {
t.Helper()
defer func() {
rwctxsGlobal = nil
rwctxsGlobalIdx = nil
rwctxConsistentHashGlobal = nil
}()

rwctxsGlobal = make([]*remoteWriteCtx, remoteWriteCount)
rwctxsGlobalIdx = make([]int, remoteWriteCount)
rwctxs := make([]*remoteWriteCtx, 0, len(healthyIdx))

for i := range remoteWriteCount {
rwCtx := &remoteWriteCtx{
idx: i,
}
rwctxsGlobalIdx[i] = i

if i >= len(healthyIdx) {
rwctxsGlobal[i] = rwCtx
continue
}
hIdx := healthyIdx[i]
if hIdx != i {
rwctxs = append(rwctxs, &remoteWriteCtx{
idx: hIdx,
})
} else {
rwctxs = append(rwctxs, rwCtx)
}
rwctxsGlobal[i] = rwCtx
}

seriesCount := 100000
// build 100000 series
tssBlock := make([]prompbmarshal.TimeSeries, 0, seriesCount)
for i := 0; i < seriesCount; i++ {
tssBlock = append(tssBlock, prompbmarshal.TimeSeries{
Labels: []prompbmarshal.Label{
{
Name: "label",
Value: strconv.Itoa(i),
},
},
Samples: []prompbmarshal.Sample{
{
Timestamp: 0,
Value: 0,
},
},
})
}

// build consistent hash for x remote write context
// build active time series set
nodes := make([]string, 0, remoteWriteCount)
activeTimeSeriesByNodes := make([]map[string]struct{}, remoteWriteCount)
for i := 0; i < remoteWriteCount; i++ {
nodes = append(nodes, fmt.Sprintf("node%d", i))
activeTimeSeriesByNodes[i] = make(map[string]struct{})
}
rwctxConsistentHashGlobal = consistenthash.NewConsistentHash(nodes, 0)

// create shards
x := getTSSShards(len(rwctxs))
shards := x.shards

// execute
shardAmountRemoteWriteCtx(tssBlock, shards, rwctxs, replicas)

for i, nodeIdx := range healthyIdx {
for _, ts := range shards[i] {
// add it to node[nodeIdx]'s active time series
activeTimeSeriesByNodes[nodeIdx][prompbmarshal.LabelsToString(ts.Labels)] = struct{}{}
}
}

totalActiveTimeSeries := 0
for _, activeTimeSeries := range activeTimeSeriesByNodes {
totalActiveTimeSeries += len(activeTimeSeries)
}
avgActiveTimeSeries1 := totalActiveTimeSeries / remoteWriteCount
putTSSShards(x)

// remove the last node
rwctxs = rwctxs[:len(rwctxs)-1]
healthyIdx = healthyIdx[:len(healthyIdx)-1]

x = getTSSShards(len(rwctxs))
shards = x.shards

// execute
shardAmountRemoteWriteCtx(tssBlock, shards, rwctxs, replicas)
for i, nodeIdx := range healthyIdx {
for _, ts := range shards[i] {
// add it to node[nodeIdx]'s active time series
activeTimeSeriesByNodes[nodeIdx][prompbmarshal.LabelsToString(ts.Labels)] = struct{}{}
}
}

totalActiveTimeSeries = 0
for _, activeTimeSeries := range activeTimeSeriesByNodes {
totalActiveTimeSeries += len(activeTimeSeries)
}
avgActiveTimeSeries2 := totalActiveTimeSeries / remoteWriteCount

changed := math.Abs(float64(avgActiveTimeSeries2-avgActiveTimeSeries1) / float64(avgActiveTimeSeries1))
threshold := 3 / float64(remoteWriteCount)

if changed >= threshold {
t.Fatalf("average active time series before: %d, after: %d, changed: %.2f. threshold: %.2f", avgActiveTimeSeries1, avgActiveTimeSeries2, changed, threshold)
}
}

f(5, []int{0, 1, 2, 3, 4}, 1)
f(5, []int{0, 1, 2, 3, 4}, 2)
f(10, []int{0, 1, 2, 3, 4, 5, 6, 7, 9}, 1)
f(10, []int{0, 1, 2, 3, 4, 5, 6, 7, 9}, 3)
}

func TestCalculateHealthyRwctxIdx(t *testing.T) {
f := func(total int, healthyIdx []int, unhealthyIdx []int) {
t.Helper()

healthyMap := make(map[int]bool)
for _, idx := range healthyIdx {
healthyMap[idx] = true
}
rwctxsGlobal = make([]*remoteWriteCtx, total)
rwctxsGlobalIdx = make([]int, total)
rwctxs := make([]*remoteWriteCtx, 0, len(healthyIdx))
for i := range rwctxsGlobal {
rwctx := &remoteWriteCtx{idx: i}
rwctxsGlobal[i] = rwctx
if healthyMap[i] {
rwctxs = append(rwctxs, rwctx)
}
rwctxsGlobalIdx[i] = i
}

gotHealthyIdx, gotUnhealthyIdx := calculateHealthyRwctxIdx(rwctxs)
if !reflect.DeepEqual(healthyIdx, gotHealthyIdx) {
t.Errorf("calculateHealthyRwctxIdx want healthyIdx = %v, got %v", healthyIdx, gotHealthyIdx)
}
if !reflect.DeepEqual(unhealthyIdx, gotUnhealthyIdx) {
t.Errorf("calculateHealthyRwctxIdx want unhealthyIdx = %v, got %v", unhealthyIdx, gotUnhealthyIdx)
}
}

f(5, []int{0, 1, 2, 3, 4}, nil)
f(5, []int{0, 1, 2, 4}, []int{3})
f(5, []int{2, 4}, []int{0, 1, 3})
f(5, []int{0, 2, 4}, []int{1, 3})
f(5, []int{}, []int{0, 1, 2, 3, 4})
f(5, []int{4}, []int{0, 1, 2, 3})
f(1, []int{0}, nil)
f(1, []int{}, []int{0})
}

@@ -1,3 +1,3 @@
See vmalert-tool docs [here](https://docs.victoriametrics.com/vmalert-tool.html).
See vmalert-tool docs [here](https://docs.victoriametrics.com/victoriametrics/vmalert-tool/).

vmalert-tool docs can be edited at [docs/vmalert-tool.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmalert-tool.md).
vmalert-tool docs can be edited at [docs/vmalert-tool.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmalert-tool.md).
8
app/vmalert-tool/deployment/Dockerfile
Normal file
@@ -0,0 +1,8 @@
ARG base_image=non-existing
FROM $base_image

EXPOSE 8880

ENTRYPOINT ["/vmalert-tool-prod"]
ARG src_binary=non-existing
COPY $src_binary ./vmalert-tool-prod

@@ -1,3 +1,3 @@
See vmalert docs [here](https://docs.victoriametrics.com/vmalert/).
See vmalert docs [here](https://docs.victoriametrics.com/victoriametrics/vmalert/).

vmalert docs can be edited at [docs/vmalert.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmalert.md).
vmalert docs can be edited at [docs/vmalert.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmalert.md).

@@ -2,7 +2,6 @@ package config

import (
"bytes"
"crypto/md5"
"flag"
"fmt"
"hash/fnv"
@@ -11,11 +10,12 @@ import (
"sort"
"strings"

"gopkg.in/yaml.v2"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/log"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
"gopkg.in/yaml.v2"
)

var (
@@ -67,7 +67,7 @@ func (g *Group) UnmarshalYAML(unmarshal func(any) error) error {
if g.Type.Get() == "" {
g.Type = NewRawType(*defaultRuleType)
}
h := md5.New()
h := fnv.New64a()
h.Write(b)
g.Checksum = fmt.Sprintf("%x", h.Sum(nil))
return nil
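The checksum change above swaps crypto/md5 for hash/fnv: the value only fingerprints the config between reloads, so a fast non-cryptographic hash is sufficient. A minimal sketch of the new path, assuming the input is the marshaled YAML bytes:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// checksum fingerprints a config blob the way the updated UnmarshalYAML
// does: FNV-1a (64-bit) instead of md5. Collision resistance against an
// adversary is not needed here, only change detection.
func checksum(b []byte) string {
	h := fnv.New64a()
	h.Write(b) // fnv's Write never returns an error
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	fmt.Println(checksum([]byte("groups: []")))
}
```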
@@ -35,6 +35,8 @@ type promResponse struct {
Stats struct {
SeriesFetched *string `json:"seriesFetched,omitempty"`
} `json:"stats,omitempty"`
// IsPartial supported by VictoriaMetrics
IsPartial *bool `json:"isPartial,omitempty"`
}

// see https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
@@ -209,7 +211,7 @@ func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result
if err != nil {
return res, err
}
res = Result{Data: ms}
res = Result{Data: ms, IsPartial: r.IsPartial}
if r.Stats.SeriesFetched != nil {
intV, err := strconv.Atoi(*r.Stats.SeriesFetched)
if err != nil {
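The *bool type is doing real work here: it distinguishes "the datasource never reported isPartial" (nil) from an explicit false. A small runnable sketch of that three-state decoding, using a hypothetical struct shaped like promResponse:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type response struct {
	Status    string `json:"status"`
	IsPartial *bool  `json:"isPartial,omitempty"` // nil when the field is absent
}

func main() {
	bodies := []string{
		`{"status":"success"}`,                  // Prometheus: field absent
		`{"status":"success","isPartial":true}`, // VictoriaMetrics: partial result
	}
	for _, body := range bodies {
		var r response
		if err := json.Unmarshal([]byte(body), &r); err != nil {
			panic(err)
		}
		if r.IsPartial == nil {
			fmt.Println("isPartial not reported")
		} else {
			fmt.Println("isPartial =", *r.IsPartial)
		}
	}
}
```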
@@ -72,12 +72,14 @@ func TestVMInstantQuery(t *testing.T) {
w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
case 7:
w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]},"stats":{"seriesFetched": "42"}}`))
case 8:
w.Write([]byte(`{"status":"success", "isPartial":true, "data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
}
})
mux.HandleFunc("/render", func(w http.ResponseWriter, _ *http.Request) {
c++
switch c {
case 8:
case 9:
w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`))
}
})
@@ -100,9 +102,9 @@ func TestVMInstantQuery(t *testing.T) {
t.Fatalf("failed to parse 'time' query param %q: %s", timeParam, err)
}
switch c {
case 9:
w.Write([]byte("[]"))
case 10:
w.Write([]byte("[]"))
case 11:
w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"total","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"total","foo":"baz"},"value":[1583786140,"2000"]}]}}`))
}
})
@@ -203,10 +205,18 @@ func TestVMInstantQuery(t *testing.T) {
*res.SeriesFetched)
}

res, _, err = pq.Query(ctx, vmQuery, ts) // 8
if err != nil {
t.Fatalf("unexpected %s", err)
}
if res.IsPartial != nil && !*res.IsPartial {
t.Fatalf("unexpected metric isPartial want %+v", true)
}

// test graphite
gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})

res, _, err = gq.Query(ctx, queryRender, ts) // 8 - graphite
res, _, err = gq.Query(ctx, queryRender, ts) // 9 - graphite
if err != nil {
t.Fatalf("unexpected %s", err)
}
@@ -226,9 +236,9 @@ func TestVMInstantQuery(t *testing.T) {
vlogs := datasourceVLogs
pq = s.BuildWithParams(QuerierParams{DataSourceType: string(vlogs), EvaluationInterval: 15 * time.Second})

expErr(vlogsQuery, "error parsing response") // 9
expErr(vlogsQuery, "error parsing response") // 10

res, _, err = pq.Query(ctx, vlogsQuery, ts) // 10
res, _, err = pq.Query(ctx, vlogsQuery, ts) // 11
if err != nil {
t.Fatalf("unexpected %s", err)
}

@@ -34,6 +34,9 @@ type Result struct {
// If nil, then this feature is not supported by the datasource.
// SeriesFetched is supported by VictoriaMetrics since v1.90.
SeriesFetched *int
// IsPartial is used by VictoriaMetrics to indicate
// whether response data is partial.
IsPartial *bool
}

// QuerierBuilder builds Querier with given params.

@@ -10,8 +10,9 @@ import (
// FakeQuerier is a mock querier that returns predefined results and error messages
type FakeQuerier struct {
sync.Mutex
metrics []Metric
err error
metrics []Metric
err error
isPartial *bool
}

// SetErr sets query error message
@@ -21,11 +22,19 @@ func (fq *FakeQuerier) SetErr(err error) {
fq.Unlock()
}

// SetPartialResponse marks query response as partial
func (fq *FakeQuerier) SetPartialResponse(partial bool) {
fq.Lock()
fq.isPartial = &partial
fq.Unlock()
}

// Reset resets querier's error message and results
func (fq *FakeQuerier) Reset() {
fq.Lock()
fq.err = nil
fq.metrics = fq.metrics[:0]
fq.isPartial = nil
fq.Unlock()
}

@@ -57,7 +66,7 @@ func (fq *FakeQuerier) Query(_ context.Context, _ string, _ time.Time) (Result,
cp := make([]Metric, len(fq.metrics))
copy(cp, fq.metrics)
req, _ := http.NewRequest(http.MethodPost, "foo.com", nil)
return Result{Data: cp}, req, nil
return Result{Data: cp, IsPartial: fq.isPartial}, req, nil
}

// FakeQuerierWithRegistry can store different results for different query expr

@@ -11,7 +11,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

@@ -83,11 +82,10 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
if err := httputil.CheckURL(*addr); err != nil {
return nil, fmt.Errorf("invalid -datasource.url: %w", err)
}
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify, "vmalert_datasource")
if err != nil {
return nil, fmt.Errorf("failed to create transport for -datasource.url=%q: %w", *addr, err)
}
tr.DialContext = netutil.NewStatDialFunc("vmalert_datasource")
tr.DisableKeepAlives = *disableKeepAlive
tr.MaxIdleConnsPerHost = *maxIdleConnections
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
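Across this commit, every transport constructor gains a trailing component name ("vmalert_datasource", "vmalert_notifier", ...) while the separate netutil.NewStatDialFunc calls disappear, which suggests the name now drives per-component connection metrics inside the constructor. A hedged sketch of the new calling convention only (the labeled behavior itself is not shown in this diff, and newLabeledClient is a hypothetical helper):

```go
package example

import (
	"fmt"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

// newLabeledClient shows the updated signature: the final argument names
// the component so its network stats can be told apart from other clients
// in the same process.
func newLabeledClient(certFile, keyFile, caFile, serverName string, insecureSkipVerify bool) (*http.Client, error) {
	tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify, "vmalert_datasource")
	if err != nil {
		return nil, fmt.Errorf("failed to create transport: %w", err)
	}
	return &http.Client{Transport: tr}, nil
}
```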
@@ -161,7 +161,7 @@ func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg proma
if authCfg.TLSConfig != nil {
tls = authCfg.TLSConfig
}
tr, err := promauth.NewTLSTransport(tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify)
tr, err := promauth.NewTLSTransport(tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify, "vmalert_notifier")
if err != nil {
return nil, fmt.Errorf("failed to create transport for alertmanager URL=%q: %w", alertManagerURL, err)

@@ -198,8 +198,10 @@ func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg proma
argFunc: fn,
authCfg: aCfg,
relabelConfigs: relabelCfg,
client: &http.Client{Transport: tr},
timeout: timeout,
metrics: newNotifierMetrics(alertManagerURL),
client: &http.Client{
Transport: tr,
},
timeout: timeout,
metrics: newNotifierMetrics(alertManagerURL),
}, nil
}

@@ -1,8 +1,8 @@
package notifier

import (
"crypto/md5"
"fmt"
"hash/fnv"
"net/url"
"os"
"path"
@@ -99,7 +99,7 @@ func (cfg *Config) UnmarshalYAML(unmarshal func(any) error) error {
if err != nil {
return fmt.Errorf("failed to marshal configuration for checksum: %w", err)
}
h := md5.New()
h := fnv.New64a()
h.Write(b)
cfg.Checksum = fmt.Sprintf("%x", h.Sum(nil))
return nil

@@ -10,7 +10,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

@@ -70,12 +69,11 @@ func Init() (datasource.QuerierBuilder, error) {
if err := httputil.CheckURL(*addr); err != nil {
return nil, fmt.Errorf("invalid -remoteRead.url: %w", err)
}
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify, "vmalert_remoteread")
if err != nil {
return nil, fmt.Errorf("failed to create transport for -remoteRead.url=%q: %w", *addr, err)
}
tr.IdleConnTimeout = *idleConnectionTimeout
tr.DialContext = netutil.NewStatDialFunc("vmalert_remoteread")

endpointParams, err := flagutil.ParseJSONMap(*oauth2EndpointParams)
if err != nil {

@@ -93,7 +93,7 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
cfg.FlushInterval = defaultFlushInterval
}
if cfg.Transport == nil {
cfg.Transport = httputil.NewTransport(false)
cfg.Transport = httputil.NewTransport(false, "vmalert_remotewrite")
}
cc := defaultConcurrency
if cfg.Concurrency > 0 {
@@ -146,8 +146,13 @@ func (c *Client) Close() error {
return fmt.Errorf("client is already closed")
}
close(c.input)

start := time.Now()
logger.Infof("shutting down remote write client: flushing remained series")
close(c.doneCh)
c.wg.Wait()
logger.Infof("shutting down remote write client: finished in %v", time.Since(start))

return nil
}

@@ -156,21 +161,16 @@ func (c *Client) run(ctx context.Context) {
wr := &prompbmarshal.WriteRequest{}
shutdown := func() {
lastCtx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
logger.Infof("shutting down remote write client and flushing remained series")

shutdownFlushCnt := 0
for ts := range c.input {
wr.Timeseries = append(wr.Timeseries, ts)
if len(wr.Timeseries) >= c.maxBatchSize {
shutdownFlushCnt += len(wr.Timeseries)
c.flush(lastCtx, wr)
}
}
// flush the last batch. `flush` will re-check and avoid flushing empty batch.
shutdownFlushCnt += len(wr.Timeseries)
c.flush(lastCtx, wr)

logger.Infof("shutting down remote write client flushed %d series", shutdownFlushCnt)
cancel()
}
c.wg.Add(1)
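The reworked shutdown path above relies on a standard Go pattern: close the input channel, then let the consumer's range loop drain whatever is still queued before a final flush. A minimal generic sketch of that pattern with ints instead of time series:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	input := make(chan int, 8)
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		var batch []int
		flush := func() {
			if len(batch) > 0 {
				fmt.Println("flushed", batch)
				batch = batch[:0]
			}
		}
		// range exits only once the channel is closed AND drained,
		// which is what guarantees queued items are not lost on shutdown.
		for v := range input {
			batch = append(batch, v)
			if len(batch) >= 3 { // stand-in for maxBatchSize
				flush()
			}
		}
		flush() // final partial batch, like the last c.flush in shutdown
	}()

	for i := 0; i < 5; i++ {
		input <- i
	}
	close(input) // Close(): stop accepting new series first...
	wg.Wait()    // ...then wait until the drain completes
}
```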
@@ -33,7 +33,7 @@ func NewDebugClient() (*DebugClient, error) {
if err := httputil.CheckURL(*addr); err != nil {
return nil, fmt.Errorf("invalid -remoteWrite.url: %w", err)
}
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify, "vmalert_remotewrite_debug")
if err != nil {
return nil, fmt.Errorf("failed to create transport for -remoteWrite.url=%q: %w", *addr, err)
}

@@ -9,7 +9,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

@@ -73,12 +72,11 @@ func Init(ctx context.Context) (*Client, error) {
if err := httputil.CheckURL(*addr); err != nil {
return nil, fmt.Errorf("invalid -remoteWrite.url: %w", err)
}
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
tr, err := promauth.NewTLSTransport(*tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify, "vmalert_remotewrite")
if err != nil {
return nil, fmt.Errorf("failed to create transport for -remoteWrite.url=%q: %w", *addr, err)
}
tr.IdleConnTimeout = *idleConnectionTimeout
tr.DialContext = netutil.NewStatDialFunc("vmalert_remotewrite")

endpointParams, err := flagutil.ParseJSONMap(*oauth2EndpointParams)
if err != nil {

@@ -207,8 +207,8 @@ func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string
if !ar.Debug {
return
}
prefix := fmt.Sprintf("DEBUG rule %q:%q (%d) at %v: ",
ar.GroupName, ar.Name, ar.RuleID, at.Format(time.RFC3339))
prefix := fmt.Sprintf("DEBUG alerting rule %q, %q:%q (%d) at %v: ",
ar.File, ar.GroupName, ar.Name, ar.RuleID, at.Format(time.RFC3339))

if a != nil {
labelKeys := make([]string, len(a.Labels))
@@ -408,8 +408,8 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
if err != nil {
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
}
ar.logDebugf(ts, nil, "query returned %d samples (elapsed: %s)", curState.Samples, curState.Duration)

ar.logDebugf(ts, nil, "query returned %d samples (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))
qFn := func(query string) ([]datasource.Metric, error) {
res, _, err := ar.q.Query(ctx, query, ts)
return res.Data, err
@@ -22,6 +22,32 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
)

func TestNewAlertingRule(t *testing.T) {
f := func(group *Group, rule config.Rule, expectRule *AlertingRule) {
t.Helper()

r := NewAlertingRule(&datasource.FakeQuerier{}, group, rule)
if err := CompareRules(t, expectRule, r); err != nil {
t.Fatalf("unexpected rule mismatch: %s", err)
}
}

f(&Group{Name: "foo"},
config.Rule{
Alert: "health",
Expr: "up == 0",
Labels: map[string]string{
"foo": "bar",
},
}, &AlertingRule{
Name: "health",
Expr: "up == 0",
Labels: map[string]string{
"foo": "bar",
},
})
}

func TestAlertingRuleToTimeSeries(t *testing.T) {
timestamp := time.Now()

@@ -1322,3 +1348,23 @@ func TestAlertingRule_ToLabels(t *testing.T) {
t.Fatalf("processed labels mismatch, got: %v, want: %v", ls.processed, expectedProcessedLabels)
}
}

func TestAlertingRuleExec_Partial(t *testing.T) {
fq := &datasource.FakeQuerier{}
fq.Add(metricWithValueAndLabels(t, 10, "__name__", "bar"))
fq.SetPartialResponse(true)

ar := newTestAlertingRule("test", 0)
ar.Debug = true
ar.Labels = map[string]string{"job": "test"}
ar.q = fq
ar.For = time.Second

fq.Add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "bar"))

ts := time.Now()
_, err := ar.exec(context.TODO(), ts, 0)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
}

@@ -471,6 +471,7 @@ func (g *Group) DeepCopy() *Group {
newG := Group{}
_ = json.Unmarshal(data, &newG)
newG.Rules = g.Rules
newG.id = g.id
return &newG
}

@@ -36,144 +36,178 @@ func TestMain(m *testing.M) {
}

func TestUpdateWith(t *testing.T) {
f := func(currentRules, newRules []config.Rule) {
f := func(oldG, newG config.Group) {
t.Helper()

ns := metrics.NewSet()
g := &Group{
Name: "test",
metrics: &groupMetrics{set: ns},
}
qb := &datasource.FakeQuerier{}
for _, r := range currentRules {
r.ID = config.HashRule(r)
g.Rules = append(g.Rules, g.newRule(qb, r))
for i := range oldG.Rules {
oldG.Rules[i].ID = config.HashRule(oldG.Rules[i])
}
for i := range newG.Rules {
newG.Rules[i].ID = config.HashRule(newG.Rules[i])
}

ng := &Group{
Name: "test",
}
for _, r := range newRules {
r.ID = config.HashRule(r)
ng.Rules = append(ng.Rules, ng.newRule(qb, r))
}
g := NewGroup(oldG, qb, 0, nil)
g.metrics = &groupMetrics{set: ns}
expect := NewGroup(newG, qb, 0, nil)

err := g.updateWith(ng)
err := g.updateWith(expect)
if err != nil {
t.Fatalf("cannot update rule: %s", err)
}

if len(g.Rules) != len(newRules) {
t.Fatalf("expected to have %d rules; got: %d", len(g.Rules), len(newRules))
if len(g.Rules) != len(expect.Rules) {
t.Fatalf("expected to have %d rules; got: %d", len(expect.Rules), len(g.Rules))
}
sort.Slice(g.Rules, func(i, j int) bool {
return g.Rules[i].ID() < g.Rules[j].ID()
})
sort.Slice(ng.Rules, func(i, j int) bool {
return ng.Rules[i].ID() < ng.Rules[j].ID()
sort.Slice(expect.Rules, func(i, j int) bool {
return expect.Rules[i].ID() < expect.Rules[j].ID()
})
for i, r := range g.Rules {
got, want := r, ng.Rules[i]
got, want := r, expect.Rules[i]
if got.ID() != want.ID() {
t.Fatalf("expected to have rule %q; got %q", want, got)
}
if err := CompareRules(t, got, want); err != nil {
t.Fatalf("comparison error: %s", err)
t.Fatalf("comparison1 error: %s", err)
}
}
}

// new rule
f(nil, []config.Rule{
{Alert: "bar"},
})
f(config.Group{}, config.Group{
Rules: []config.Rule{
{Alert: "bar"},
}})

// update alerting rule
f([]config.Rule{
{
Alert: "foo",
Expr: "up > 0",
For: promutil.NewDuration(time.Second),
f(config.Group{
Rules: []config.Rule{
{
Alert: "foo",
Expr: "up > 0",
For: promutil.NewDuration(time.Second),
Labels: map[string]string{
"bar": "baz",
},
Annotations: map[string]string{
"summary": "{{ $value|humanize }}",
"description": "{{$labels}}",
},
},
{
Alert: "bar",
Expr: "up > 0",
For: promutil.NewDuration(time.Second),
Labels: map[string]string{
"bar": "baz",
},
},
}}, config.Group{
Rules: []config.Rule{
{
Alert: "foo",
Expr: "up > 10",
For: promutil.NewDuration(time.Second),
Labels: map[string]string{
"baz": "bar",
},
Annotations: map[string]string{
"summary": "none",
},
},
{
Alert: "bar",
Expr: "up > 0",
For: promutil.NewDuration(2 * time.Second),
KeepFiringFor: promutil.NewDuration(time.Minute),
Labels: map[string]string{
"bar": "baz",
},
},
}})

// update recording rule
f(config.Group{
Rules: []config.Rule{{
Record: "foo",
Expr: "max(up)",
Labels: map[string]string{
"bar": "baz",
},
Annotations: map[string]string{
"summary": "{{ $value|humanize }}",
"description": "{{$labels}}",
},
},
{
Alert: "bar",
Expr: "up > 0",
For: promutil.NewDuration(time.Second),
Labels: map[string]string{
"bar": "baz",
},
},
}, []config.Rule{
{
Alert: "foo",
Expr: "up > 10",
For: promutil.NewDuration(time.Second),
}}}, config.Group{
Rules: []config.Rule{{
Record: "foo",
Expr: "min(up)",
Debug: true,
Labels: map[string]string{
"baz": "bar",
},
Annotations: map[string]string{
"summary": "none",
},
},
{
Alert: "bar",
Expr: "up > 0",
For: promutil.NewDuration(2 * time.Second),
KeepFiringFor: promutil.NewDuration(time.Minute),
Labels: map[string]string{
"bar": "baz",
},
},
})
}}})

// update recording rule
f([]config.Rule{{
Record: "foo",
Expr: "max(up)",
Labels: map[string]string{
"bar": "baz",
},
}}, []config.Rule{{
Record: "foo",
Expr: "min(up)",
Labels: map[string]string{
"baz": "bar",
},
}})
// update debug
f(config.Group{
Rules: []config.Rule{
{
Record: "foo",
Expr: "max(up)",
},
{
Alert: "foo",
Expr: "up > 0",
Debug: true,
For: promutil.NewDuration(time.Second),
},
}}, config.Group{
Rules: []config.Rule{
{
Record: "foo",
Expr: "max(up)",
Debug: true,
},
{
Alert: "foo",
Expr: "up > 0",
For: promutil.NewDuration(time.Second),
},
}})

// empty rule
f([]config.Rule{{Alert: "foo"}, {Record: "bar"}}, nil)
f(config.Group{
Rules: []config.Rule{{Alert: "foo"}, {Record: "bar"}}}, config.Group{})

// multiple rules
f([]config.Rule{
{Alert: "bar"},
{Alert: "baz"},
{Alert: "foo"},
}, []config.Rule{
{Alert: "baz"},
{Record: "foo"},
})
f(config.Group{
Rules: []config.Rule{
{Alert: "bar"},
{Alert: "baz"},
{Alert: "foo"},
}}, config.Group{
Rules: []config.Rule{
{Alert: "baz"},
{Record: "foo"},
}})

// replace rule
f([]config.Rule{{Alert: "foo1"}}, []config.Rule{{Alert: "foo2"}})
f(config.Group{
Rules: []config.Rule{{Alert: "foo1"}}}, config.Group{
Rules: []config.Rule{{Alert: "foo2"}}})

// replace multiple rules
f([]config.Rule{
{Alert: "foo1"},
{Record: "foo2"},
{Alert: "foo3"},
}, []config.Rule{
{Alert: "foo3"},
{Alert: "foo4"},
{Record: "foo5"},
})
f(config.Group{
Rules: []config.Rule{
{Alert: "foo1"},
{Record: "foo2"},
{Alert: "foo3"},
}}, config.Group{
Rules: []config.Rule{
{Alert: "foo3"},
{Alert: "foo4"},
{Record: "foo5"},
}})
}

func TestUpdateDuringRandSleep(t *testing.T) {

@@ -30,6 +30,7 @@ type RecordingRule struct {
GroupID uint64
GroupName string
File string
Debug bool

q datasource.Querier

@@ -91,12 +92,14 @@ func NewRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
GroupID: group.GetID(),
GroupName: group.Name,
File: group.File,
Debug: cfg.Debug,
q: qb.BuildWithParams(datasource.QuerierParams{
DataSourceType: group.Type.String(),
ApplyIntervalAsTimeFilter: setIntervalAsTimeFilter(group.Type.String(), cfg.Expr),
EvaluationInterval: group.Interval,
QueryParams: group.Params,
Headers: group.Headers,
Debug: cfg.Debug,
}),
}

@@ -169,6 +172,8 @@ func (rr *RecordingRule) exec(ctx context.Context, ts time.Time, limit int) ([]p
return nil, curState.Err
}

rr.logDebugf(ts, "query returned %d samples (elapsed: %s, isPartial: %t)", curState.Samples, curState.Duration, isPartialResponse(res))

qMetrics := res.Data
numSeries := len(qMetrics)
if limit > 0 && numSeries > limit {
@@ -202,6 +207,17 @@ func (rr *RecordingRule) exec(ctx context.Context, ts time.Time, limit int) ([]p
return tss, nil
}

func (rr *RecordingRule) logDebugf(at time.Time, format string, args ...any) {
if !rr.Debug {
return
}
prefix := fmt.Sprintf("DEBUG recording rule %q, %q:%q (%d) at %v: ",
rr.File, rr.GroupName, rr.Name, rr.RuleID, at.Format(time.RFC3339))

msg := fmt.Sprintf(format, args...)
logger.Infof("%s", prefix+msg)
}

func stringToLabels(s string) []prompbmarshal.Label {
labels := strings.Split(s, ",")
rLabels := make([]prompbmarshal.Label, 0, len(labels))
@@ -262,6 +278,7 @@ func (rr *RecordingRule) updateWith(r Rule) error {
rr.Expr = nr.Expr
rr.Labels = nr.Labels
rr.q = nr.q
rr.Debug = nr.Debug
return nil
}

@@ -9,11 +9,34 @@ import (

"github.com/VictoriaMetrics/metrics"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

func TestNewRecordingRule(t *testing.T) {
f := func(group *Group, rule config.Rule, expectRule *AlertingRule) {
t.Helper()

r := NewAlertingRule(&datasource.FakeQuerier{}, group, rule)
if err := CompareRules(t, expectRule, r); err != nil {
t.Fatalf("unexpected rule mismatch: %s", err)
}
}

f(&Group{Name: "foo"},
config.Rule{
Alert: "health",
Expr: "up == 0",
Labels: map[string]string{},
}, &AlertingRule{
Name: "health",
Expr: "up == 0",
Labels: map[string]string{},
})
}

func TestRecordingRule_Exec(t *testing.T) {
ts, _ := time.Parse(time.RFC3339, "2024-10-29T00:00:00Z")
const defaultStep = 5 * time.Millisecond
@@ -307,7 +330,8 @@ func TestRecordingRuleLimit_Failure(t *testing.T) {

fq := &datasource.FakeQuerier{}
fq.Add(testMetrics...)
rule := &RecordingRule{Name: "job:foo",
rule := &RecordingRule{
Name: "job:foo",
state: &ruleState{entries: make([]StateEntry, 10)},
Labels: map[string]string{
"source": "test_limit",
@@ -342,7 +366,8 @@ func TestRecordingRuleLimit_Success(t *testing.T) {

fq := &datasource.FakeQuerier{}
fq.Add(testMetrics...)
rule := &RecordingRule{Name: "job:foo",
rule := &RecordingRule{
Name: "job:foo",
state: &ruleState{entries: make([]StateEntry, 10)},
Labels: map[string]string{
"source": "test_limit",
@@ -421,3 +446,36 @@ func TestSetIntervalAsTimeFilter(t *testing.T) {
f(`error AND _time:5m | count()`, "vlogs", false)
f(`* | error AND _time:5m | count()`, "vlogs", false)
}

func TestRecordingRuleExec_Partial(t *testing.T) {
ts, _ := time.Parse(time.RFC3339, "2024-10-29T00:00:00Z")
fq := &datasource.FakeQuerier{}

m := metricWithValueAndLabels(t, 10, "__name__", "bar")
fq.Add(m)
fq.SetPartialResponse(true)
rule := &RecordingRule{
GroupName: "Bar",
Name: "foo",
state: &ruleState{
entries: make([]StateEntry, 10),
},
}
rule.Debug = true
rule.q = fq
got, err := rule.exec(context.TODO(), ts, 0)
want := []prompbmarshal.TimeSeries{
newTimeSeries([]float64{10}, []int64{ts.UnixNano()}, []prompbmarshal.Label{
{
Name: "__name__",
Value: "foo",
},
}),
}
if err != nil {
t.Fatalf("fail to test rule %s: unexpected error: %s", rule.Name, err)
}
if err := compareTimeSeries(t, want, got); err != nil {
t.Fatalf("fail to test rule %s: time series mismatch: %s", rule.Name, err)
}
}

@@ -38,6 +38,9 @@ func compareRecordingRules(t *testing.T, a, b *RecordingRule) error {
if a.Expr != b.Expr {
return fmt.Errorf("expected to have expression %q; got %q", a.Expr, b.Expr)
}
if a.Debug != b.Debug {
return fmt.Errorf("expected to have debug=%t; got %t", a.Debug, b.Debug)
}
if !reflect.DeepEqual(a.Labels, b.Labels) {
return fmt.Errorf("expected to have labels %#v; got %#v", a.Labels, b.Labels)
}
@@ -64,6 +67,9 @@ func compareAlertingRules(t *testing.T, a, b *AlertingRule) error {
if a.Type.String() != b.Type.String() {
return fmt.Errorf("expected to have Type %#v; got %#v", a.Type.String(), b.Type.String())
}
if a.Debug != b.Debug {
return fmt.Errorf("expected to have debug=%t; got %t", a.Debug, b.Debug)
}
return nil
}

@@ -105,3 +105,10 @@ func isSecreteHeader(str string) bool {
}
return false
}

func isPartialResponse(res datasource.Result) bool {
if res.IsPartial != nil && *res.IsPartial {
return true
}
return false
}

@@ -1,3 +1,3 @@
See vmauth docs [here](https://docs.victoriametrics.com/vmauth/).
See vmauth docs [here](https://docs.victoriametrics.com/victoriametrics/vmauth/).

vmauth docs can be edited at [docs/vmauth.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmauth.md).
vmauth docs can be edited at [docs/vmauth.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmauth.md).

@@ -319,7 +319,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
// Timed out request must be counted as errors, since this usually means that the backend is slow.
ui.backendErrors.Inc()
}
return true, false
return false, false
}
if !rtbOK || !rtb.canRetry() {
// Request body cannot be re-sent to another backend. Return the error to the client then.
@@ -508,7 +508,7 @@ func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, i
return nil, fmt.Errorf("cannot initialize promauth.Config: %w", err)
}

tr := httputil.NewTransport(false)
tr := httputil.NewTransport(false, "vmauth_backend")
tr.ResponseHeaderTimeout = *responseTimeout
// Automatic compression must be disabled in order to fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/535
tr.DisableCompression = true
@@ -517,7 +517,6 @@ func newRoundTripper(caFileOpt, certFileOpt, keyFileOpt, serverNameOpt string, i
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
tr.MaxIdleConns = tr.MaxIdleConnsPerHost
}
tr.DialContext = netutil.NewStatDialFunc("vmauth_backend")

rt := cfg.NewRoundTripper(tr)
return rt, nil

@@ -1,3 +1,3 @@
See vmbackup docs [here](https://docs.victoriametrics.com/vmbackup/).
See vmbackup docs [here](https://docs.victoriametrics.com/victoriametrics/vmbackup/).

vmbackup docs can be edited at [docs/vmbackup.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmbackup.md).
vmbackup docs can be edited at [docs/vmbackup.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmbackup.md).

@@ -1,3 +1,3 @@
See vmbackupmanager docs [here](https://docs.victoriametrics.com/vmbackupmanager/).
See vmbackupmanager docs [here](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/).

vmbackupmanager docs can be edited at [docs/vmbackupmanager.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmbackupmanager.md).
vmbackupmanager docs can be edited at [docs/vmbackupmanager.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmbackupmanager.md).

@@ -1,3 +1,3 @@
See vmctl docs [here](https://docs.victoriametrics.com/vmctl/).
See vmctl docs [here](https://docs.victoriametrics.com/victoriametrics/vmctl/).

vmctl docs can be edited at [docs/vmctl.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmctl.md).
vmctl docs can be edited at [docs/vmctl.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmctl.md).

@@ -70,7 +70,7 @@ func main() {
return fmt.Errorf("invalid -%s: %w", otsdbAddr, err)
}

tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify)
tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify, "vmctl_opentsdb")
if err != nil {
return fmt.Errorf("failed to create transport for -%s=%q: %s", otsdbAddr, addr, err)
}
@@ -185,7 +185,7 @@ func main() {
serverName := c.String(remoteReadServerName)
insecureSkipVerify := c.Bool(remoteReadInsecureSkipVerify)

tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify)
tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify, "vmctl_remoteread")
if err != nil {
return fmt.Errorf("failed to create transport for -%s=%q: %s", remoteReadSrcAddr, addr, err)
}
@@ -315,7 +315,7 @@ func main() {
return fmt.Errorf("failed to create TLS Config: %s", err)
}

trSrc := httputil.NewTransport(false)
trSrc := httputil.NewTransport(false, "vmctl_src")
trSrc.DisableKeepAlives = disableKeepAlive
trSrc.TLSClientConfig = srcTC

@@ -345,7 +345,7 @@ func main() {
return fmt.Errorf("failed to create TLS Config: %s", err)
}

trDst := httputil.NewTransport(false)
trDst := httputil.NewTransport(false, "vmctl_dst")
trDst.DisableKeepAlives = disableKeepAlive
trDst.TLSClientConfig = dstTC

@@ -455,7 +455,7 @@ func initConfigVM(c *cli.Context) (vm.Config, error) {
serverName := c.String(vmServerName)
insecureSkipVerify := c.Bool(vmInsecureSkipVerify)

tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify)
tr, err := promauth.NewTLSTransport(certFile, keyFile, caFile, serverName, insecureSkipVerify, "vmctl_client")
if err != nil {
return vm.Config{}, fmt.Errorf("failed to create transport for -%s=%q: %s", vmAddr, addr, err)
}

@@ -43,60 +43,12 @@ func (pp *prometheusProcessor) run() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), len(blocks))
|
||||
if err := barpool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer barpool.Stop()
|
||||
|
||||
blockReadersCh := make(chan tsdb.BlockReader)
|
||||
errCh := make(chan error, pp.cc)
|
||||
pp.im.ResetStats()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(pp.cc)
|
||||
for i := 0; i < pp.cc; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for br := range blockReadersCh {
|
||||
if err := pp.do(br); err != nil {
|
||||
errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
|
||||
return
|
||||
}
|
||||
bar.Increment()
|
||||
}
|
||||
}()
|
||||
}
|
||||
// any error breaks the import
|
||||
for _, br := range blocks {
|
||||
select {
|
||||
case promErr := <-errCh:
|
||||
close(blockReadersCh)
|
||||
return fmt.Errorf("prometheus error: %s", promErr)
|
||||
case vmErr := <-pp.im.Errors():
|
||||
close(blockReadersCh)
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
|
||||
case blockReadersCh <- br:
|
||||
}
|
||||
}
|
||||
|
||||
close(blockReadersCh)
|
||||
wg.Wait()
|
||||
// wait for all buffers to flush
|
||||
pp.im.Close()
|
||||
close(errCh)
|
||||
// drain import errors channel
|
||||
for vmErr := range pp.im.Errors() {
|
||||
if vmErr.Err != nil {
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
|
||||
}
|
||||
}
|
||||
for err := range errCh {
|
||||
return fmt.Errorf("import process failed: %s", err)
|
||||
if err := pp.processBlocks(blocks); err != nil {
|
||||
return fmt.Errorf("migration failed: %s", err)
|
||||
}
|
||||
|
||||
log.Println("Import finished!")
|
||||
log.Print(pp.im.Stats())
|
||||
log.Println(pp.im.Stats())
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -156,3 +108,59 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
 	}
 	return ss.Err()
 }
+
+func (pp *prometheusProcessor) processBlocks(blocks []tsdb.BlockReader) error {
+	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), len(blocks))
+	if err := barpool.Start(); err != nil {
+		return err
+	}
+	defer barpool.Stop()
+
+	blockReadersCh := make(chan tsdb.BlockReader)
+	errCh := make(chan error, pp.cc)
+	pp.im.ResetStats()
+
+	var wg sync.WaitGroup
+	wg.Add(pp.cc)
+	for i := 0; i < pp.cc; i++ {
+		go func() {
+			defer wg.Done()
+			for br := range blockReadersCh {
+				if err := pp.do(br); err != nil {
+					errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
+					return
+				}
+				bar.Increment()
+			}
+		}()
+	}
+	// any error breaks the import
+	for _, br := range blocks {
+		select {
+		case promErr := <-errCh:
+			close(blockReadersCh)
+			return fmt.Errorf("prometheus error: %s", promErr)
+		case vmErr := <-pp.im.Errors():
+			close(blockReadersCh)
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
+		case blockReadersCh <- br:
+		}
+	}
+
+	close(blockReadersCh)
+	wg.Wait()
+	// wait for all buffers to flush
+	pp.im.Close()
+	close(errCh)
+	// drain import errors channel
+	for vmErr := range pp.im.Errors() {
+		if vmErr.Err != nil {
+			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, pp.isVerbose))
+		}
+	}
+	for err := range errCh {
+		return fmt.Errorf("import process failed: %s", err)
+	}
+
+	return nil
+}
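processBlocks isolates the exact code run() used to inline, behind a single error boundary. The shape is a standard Go fan-out: a fixed pool of workers drains an unbuffered channel while the producer multiplexes sends against error channels, so the first failure stops feeding work. A stripped-down, self-contained sketch of the same pattern with illustrative names (not vmctl API):

package main

import (
	"fmt"
	"sync"
)

// process fans items out to cc workers; the first worker error stops the feed.
// Same shape as processBlocks above, minus the progress bar and importer.
func process(items []int, cc int, do func(int) error) error {
	workCh := make(chan int)
	errCh := make(chan error, cc) // buffered: a failing worker never blocks on send

	var wg sync.WaitGroup
	wg.Add(cc)
	for i := 0; i < cc; i++ {
		go func() {
			defer wg.Done()
			for it := range workCh {
				if err := do(it); err != nil {
					errCh <- err
					return
				}
			}
		}()
	}

	for _, it := range items {
		select {
		case err := <-errCh: // a worker failed: stop producing
			close(workCh)
			return fmt.Errorf("processing failed: %w", err)
		case workCh <- it:
		}
	}
	close(workCh)
	wg.Wait()
	close(errCh)
	for err := range errCh { // surface an error that raced in at the end
		return err
	}
	return nil
}

func main() {
	err := process([]int{1, 2, 3}, 2, func(n int) error { return nil })
	fmt.Println(err) // <nil>
}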
@@ -2,167 +2,214 @@ package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"testing"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	remote_read_integration "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
)

// If you want to run this test:
// 1. provide test snapshot path in const testSnapshot
// 2. define httpAddr const with your victoriametrics address
// 3. run victoria metrics with defined address
// 4. remove t.Skip() from Test_prometheusProcessor_run
// 5. run tests one by one not all at one time

const (
	httpAddr = "http://127.0.0.1:8428/"
	testSnapshot = "./testdata/20220427T130947Z-70ba49b1093fd0bf"
	testSnapshot = "./testdata/snapshots/20250118T124506Z-59d1b952d7eaf547"
	blockData    = "./testdata/snapshots/20250118T124506Z-59d1b952d7eaf547/01JHWQ445Y2P1TDYB05AEKD6MC"
)

// This test simulates the close process if the user aborts it
func TestPrometheusProcessorRun(t *testing.T) {
	t.Skip()

	defer func() { isSilent = false }()
	f := func(startStr, endStr string, numOfSeries int, resultExpected []vm.TimeSeries) {
		t.Helper()

	type fields struct {
		cfg    prometheus.Config
		vmCfg  vm.Config
		cl     func(prometheus.Config) *prometheus.Client
		im     func(vm.Config) *vm.Importer
		closer func(importer *vm.Importer)
		cc     int
	}
	type args struct {
		silent  bool
		verbose bool
	}
	dst := remote_read_integration.NewRemoteWriteServer(t)

	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "simulate syscall.SIGINT",
			fields: fields{
				cfg: prometheus.Config{
					Snapshot: testSnapshot,
					Filter:   prometheus.Filter{},
				},
				cl: func(cfg prometheus.Config) *prometheus.Client {
					client, err := prometheus.NewClient(cfg)
					if err != nil {
						t.Fatalf("error init prometheus client: %s", err)
					}
					return client
				},
				im: func(vmCfg vm.Config) *vm.Importer {
					importer, err := vm.NewImporter(context.Background(), vmCfg)
					if err != nil {
						t.Fatalf("error init importer: %s", err)
					}
					return importer
				},
				closer: func(importer *vm.Importer) {
					// simulate syscall.SIGINT
					time.Sleep(time.Second * 5)
					if importer != nil {
						importer.Close()
					}
				},
				vmCfg: vm.Config{Addr: httpAddr, Concurrency: 1},
				cc:    2,
			},
			args: args{
				silent:  false,
				verbose: false,
			},
			wantErr: true,
		},
		{
			name: "simulate correct work",
			fields: fields{
				cfg: prometheus.Config{
					Snapshot: testSnapshot,
					Filter:   prometheus.Filter{},
				},
				cl: func(cfg prometheus.Config) *prometheus.Client {
					client, err := prometheus.NewClient(cfg)
					if err != nil {
						t.Fatalf("error init prometheus client: %s", err)
					}
					return client
				},
				im: func(vmCfg vm.Config) *vm.Importer {
					importer, err := vm.NewImporter(context.Background(), vmCfg)
					if err != nil {
						t.Fatalf("error init importer: %s", err)
					}
					return importer
				},
				closer: nil,
				vmCfg:  vm.Config{Addr: httpAddr, Concurrency: 5},
				cc:     2,
			},
			args: args{
				silent:  true,
				verbose: false,
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := tt.fields.cl(tt.fields.cfg)
			importer := tt.fields.im(tt.fields.vmCfg)
			isSilent = tt.args.silent
			pp := &prometheusProcessor{
				cl:        client,
				im:        importer,
				cc:        tt.fields.cc,
				isVerbose: tt.args.verbose,
			}
		defer func() {
			dst.Close()
		}()

		// we should answer on prompt
		if !tt.args.silent {
			input := []byte("Y\n")
			dst.Series(resultExpected)
			dst.ExpectedSeries(resultExpected)

			r, w, err := os.Pipe()
			if err != nil {
				t.Fatal(err)
			}
			if err := fillStorage(resultExpected); err != nil {
				t.Fatalf("cannot fill storage: %s", err)
			}

			_, err = w.Write(input)
			if err != nil {
				t.Fatalf("cannot send 'Y' to importer: %s", err)
			}
			err = w.Close()
			if err != nil {
				t.Fatalf("cannot close writer: %s", err)
			}
			isSilent = true
			defer func() { isSilent = false }()

			stdin := os.Stdin
			// Restore stdin right after the test.
			defer func() {
				os.Stdin = stdin
				_ = r.Close()
			}()
			os.Stdin = r
		}
		bf, err := backoff.New(1, 1.8, time.Second*2)
		if err != nil {
			t.Fatalf("cannot create backoff: %s", err)
		}

		// simulate close if needed
		if tt.fields.closer != nil {
			go tt.fields.closer(importer)
		}
		importerCfg := vm.Config{
			Addr:        dst.URL(),
			Transport:   nil,
			Concurrency: 1,
			Backoff:     bf,
		}

		if err := pp.run(); (err != nil) != tt.wantErr {
			t.Fatalf("run() error = %v, wantErr %v", err, tt.wantErr)
		}
		ctx := context.Background()
		importer, err := vm.NewImporter(ctx, importerCfg)
		if err != nil {
			t.Fatalf("cannot create importer: %s", err)
		}
		defer importer.Close()

		matchName := "__name__"
		matchValue := ".*"
		filter := prometheus.Filter{
			TimeMin:    startStr,
			TimeMax:    endStr,
			Label:      matchName,
			LabelValue: matchValue,
		}

		runner, err := prometheus.NewClient(prometheus.Config{
			Snapshot: testSnapshot,
			Filter:   filter,
		})
		if err != nil {
			t.Fatalf("cannot create prometheus client: %s", err)
		}
		p := &prometheusProcessor{
			cl: runner,
			im: importer,
			cc: 1,
		}

		if err := p.run(); err != nil {
			t.Fatalf("run() error: %s", err)
		}

		collectedTs := dst.GetCollectedTimeSeries()
		t.Logf("collected timeseries: %d; expected timeseries: %d", len(collectedTs), len(resultExpected))
		if len(collectedTs) != len(resultExpected) {
			t.Fatalf("unexpected number of collected time series; got %d; want %d", len(collectedTs), numOfSeries)
		}

		deleted, err := deleteSeries(matchName, matchValue)
		if err != nil {
			t.Fatalf("cannot delete series: %s", err)
		}
		if deleted != numOfSeries {
			t.Fatalf("unexpected number of deleted series; got %d; want %d", deleted, numOfSeries)
		}
	}

	processFlags()
	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
	defer func() {
		vmstorage.Stop()
		if err := os.RemoveAll(storagePath); err != nil {
			log.Fatalf("cannot remove %q: %s", storagePath, err)
		}
	}()

	barpool.Disable(true)
	defer func() {
		barpool.Disable(false)
	}()

	b, err := tsdb.OpenBlock(nil, blockData, nil, nil)
	if err != nil {
		t.Fatalf("cannot open block: %s", err)
	}
	// timestamp is equal to minTime and maxTime from meta.json
	ss, err := readBlock(b, 1737204082361, 1737204302539)
	if err != nil {
		t.Fatalf("cannot read block: %s", err)
	}

	resultExpected, err := prepareExpectedData(ss)
	if err != nil {
		t.Fatalf("cannot prepare expected data: %s", err)
	}

	f("2025-01-18T12:40:00Z", "2025-01-18T12:46:00Z", 2792, resultExpected)
}

func readBlock(b tsdb.BlockReader, timeMin int64, timeMax int64) (storage.SeriesSet, error) {
	minTime, maxTime := b.Meta().MinTime, b.Meta().MaxTime

	if timeMin != 0 {
		minTime = timeMin
	}
	if timeMax != 0 {
		maxTime = timeMax
	}

	q, err := tsdb.NewBlockQuerier(b, minTime, maxTime)
	if err != nil {
		return nil, err
	}
	matchName := "__name__"
	matchValue := ".*"
	ctx := context.Background()
	ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, matchName, matchValue))
	return ss, nil
}

func prepareExpectedData(ss storage.SeriesSet) ([]vm.TimeSeries, error) {
	var expectedSeriesSet []vm.TimeSeries
	var it chunkenc.Iterator
	for ss.Next() {
		var name string
		var labelPairs []vm.LabelPair
		series := ss.At()

		for _, label := range series.Labels() {
			if label.Name == "__name__" {
				name = label.Value
				continue
			}
			labelPairs = append(labelPairs, vm.LabelPair{
				Name:  label.Name,
				Value: label.Value,
			})
		}
		if name == "" {
			return nil, fmt.Errorf("failed to find `__name__` label in labelset for block")
		}

		var timestamps []int64
		var values []float64
		it = series.Iterator(it)
		for {
			typ := it.Next()
			if typ == chunkenc.ValNone {
				break
			}
			if typ != chunkenc.ValFloat {
				// Skip unsupported values
				continue
			}
			t, v := it.At()
			timestamps = append(timestamps, t)
			values = append(values, v)
		}
		if err := it.Err(); err != nil {
			return nil, err
		}
		ts := vm.TimeSeries{
			Name:       name,
			LabelPairs: labelPairs,
			Timestamps: timestamps,
			Values:     values,
		}
		expectedSeriesSet = append(expectedSeriesSet, ts)
	}
	return expectedSeriesSet, nil
}
@@ -73,7 +73,7 @@ func TestRemoteRead(t *testing.T) {
 			vmCfg: vm.Config{
 				Addr:        "",
 				Concurrency: 1,
-				Transport:   httputil.NewTransport(false),
+				Transport:   httputil.NewTransport(false, "vmctl_test_read"),
 			},
 			start: "2022-09-26T11:23:05+02:00",
 			end:   "2022-11-26T11:24:05+02:00",
@@ -41,6 +41,7 @@ type RemoteWriteServer struct {
 	server         *httptest.Server
 	series         []vm.TimeSeries
 	expectedSeries []vm.TimeSeries
+	tss            []vm.TimeSeries
 }
 
 // NewRemoteWriteServer prepares test remote write server

@@ -73,6 +74,10 @@ func (rws *RemoteWriteServer) ExpectedSeries(series []vm.TimeSeries) {
 	rws.expectedSeries = append(rws.expectedSeries, series...)
 }
 
+func (rws *RemoteWriteServer) GetCollectedTimeSeries() []vm.TimeSeries {
+	return rws.tss
+}
+
 // URL returns server url
 func (rws *RemoteWriteServer) URL() string {
 	return rws.server.URL

@@ -80,7 +85,6 @@ func (rws *RemoteWriteServer) URL() string {
 
 func (rws *RemoteWriteServer) getWriteHandler(t *testing.T) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		var tss []vm.TimeSeries
 		scanner := bufio.NewScanner(r.Body)
 		var rows parser.Rows
 		for scanner.Scan() {

@@ -102,17 +106,11 @@ func (rws *RemoteWriteServer) getWriteHandler(t *testing.T) http.Handler {
 				ts.Timestamps = append(ts.Timestamps, row.Timestamps...)
 				ts.Name = nameValue
 				ts.LabelPairs = labelPairs
 				tss = append(tss, ts)
+				rws.tss = append(rws.tss, ts)
 			}
 			rows.Reset()
 		}
 
 		if !reflect.DeepEqual(tss, rws.expectedSeries) {
 			w.WriteHeader(http.StatusInternalServerError)
 			t.Fatalf("datasets not equal, expected: %#v; \n got: %#v", rws.expectedSeries, tss)
 			return
 		}
 
 		w.WriteHeader(http.StatusNoContent)
 		return
 	})
Binary file not shown.
BIN	app/vmctl/testdata/snapshots/20250118T124506Z-59d1b952d7eaf547/01JHWQ445Y2P1TDYB05AEKD6MC/index	vendored	Normal file
Binary file not shown.
@@ -0,0 +1,17 @@
{
	"ulid": "01JHWQ445Y2P1TDYB05AEKD6MC",
	"minTime": 1737204082361,
	"maxTime": 1737204302539,
	"stats": {
		"numSamples": 60275,
		"numSeries": 2792,
		"numChunks": 2792
	},
	"compaction": {
		"level": 1,
		"sources": [
			"01JHWQ445Y2P1TDYB05AEKD6MC"
		]
	},
	"version": 1
}
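Note how this fixture ties the test together: minTime and maxTime above (1737204082361 / 1737204302539) are exactly the bounds passed to readBlock in the test, and numSeries (2792) is the series count asserted via f(...) and the deleteSeries check.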
BIN	app/vmctl/testdata/snapshots/20250118T124506Z-59d1b952d7eaf547/01JHWQ445Y2P1TDYB05AEKD6MC/tombstones	vendored	Normal file
Binary file not shown.
@@ -23,8 +23,9 @@ import (
 )
 
 const (
-	storagePath     = "TestStorage"
-	retentionPeriod = "100y"
+	storagePath       = "TestStorage"
+	retentionPeriod   = "100y"
+	deleteSeriesLimit = 3e3
 )
 
 func TestVMNativeProcessorRun(t *testing.T) {

@@ -66,7 +67,7 @@ func TestVMNativeProcessorRun(t *testing.T) {
 		t.Fatalf("cannot add series to storage: %s", err)
 	}
 
-	tr := httputil.NewTransport(false)
+	tr := httputil.NewTransport(false, "test_client")
 	tr.DisableKeepAlives = false
 
 	srcClient := &native.Client{

@@ -259,7 +260,7 @@ func deleteSeries(name, value string) (int, error) {
 	if err := tfs.Add([]byte(name), []byte(value), false, true); err != nil {
 		return 0, fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
 	}
-	return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs}, 1e3)
+	return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs}, deleteSeriesLimit)
 }
 
 func TestBuildMatchWithFilter_Failure(t *testing.T) {
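The new deleteSeriesLimit of 3e3 matters for the snapshot fixture above: its block holds 2792 series, more than the old hard-coded 1e3 cap. Assuming the last DeleteSeries argument caps how many series may be deleted (which the constant's name suggests), the test could not have removed and verified all 2792 series without this bump.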
@@ -1,3 +1,3 @@
-See vmgateway docs [here](https://docs.victoriametrics.com/vmgateway/).
+See vmgateway docs [here](https://docs.victoriametrics.com/victoriametrics/vmgateway/).
 
-vmgateway docs can be edited at [docs/vmgateway.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmgateway.md).
+vmgateway docs can be edited at [docs/vmgateway.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmgateway.md).
@@ -320,8 +320,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	case "/prometheus/api/v1/targets", "/api/v1/targets":
 		promscrapeAPIV1TargetsRequests.Inc()
 		w.Header().Set("Content-Type", "application/json")
 		// https://prometheus.io/docs/prometheus/latest/querying/api/#targets
 		state := r.FormValue("state")
-		promscrape.WriteAPIV1Targets(w, state)
+		scrapePool := r.FormValue("scrapePool")
+		promscrape.WriteAPIV1Targets(w, state, scrapePool)
 		return true
 	case "/prometheus/target_response", "/target_response":
 		promscrapeTargetResponseRequests.Inc()
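This brings the endpoint in line with the Prometheus targets API, which accepts an optional scrapePool query parameter alongside state; a request such as /api/v1/targets?state=active&scrapePool=node would then return only targets from that pool. The exact filtering semantics live in promscrape.WriteAPIV1Targets, which this diff does not show.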
@@ -1,3 +1,3 @@
-See vmrestore docs [here](https://docs.victoriametrics.com/vmrestore/).
+See vmrestore docs [here](https://docs.victoriametrics.com/victoriametrics/vmrestore/).
 
-vmrestore docs can be edited at [docs/vmrestore.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/vmrestore.md).
+vmrestore docs can be edited at [docs/vmrestore.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/vmrestore.md).
@@ -404,6 +404,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		return true
 	case "/api/v1/status/metric_names_stats":
 		metricNamesStatsRequests.Inc()
+		httpserver.EnableCORS(w, r)
 		if err := stats.MetricNamesStatsHandler(qt, w, r); err != nil {
 			metricNamesStatsErrors.Inc()
 			httpserver.Errorf(w, r, "%s", err)
@@ -806,7 +806,6 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
 	} else {
 		queryOffset = 0
 	}
-	qs := &promql.QueryStats{}
 	ec := &promql.EvalConfig{
 		Start: start,
 		End:   start,

@@ -822,9 +821,10 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
 		GetRequestURI: func() string {
 			return httpserver.GetRequestURI(r)
 		},
-
-		QueryStats: qs,
 	}
+	qs := promql.NewQueryStats(query, nil, ec)
+	ec.QueryStats = qs
 
 	result, err := promql.Exec(qt, ec, query, true)
 	if err != nil {
 		return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)

@@ -853,6 +853,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
 	if err := bw.Flush(); err != nil {
 		return fmt.Errorf("cannot flush query response to remote client: %w", err)
 	}
 
 	return nil
 }

@@ -914,7 +915,6 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 		start, end = promql.AdjustStartEnd(start, end, step)
 	}
 
-	qs := &promql.QueryStats{}
 	ec := &promql.EvalConfig{
 		Start: start,
 		End:   end,

@@ -930,9 +930,10 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 		GetRequestURI: func() string {
 			return httpserver.GetRequestURI(r)
 		},
-
-		QueryStats: qs,
 	}
+	qs := promql.NewQueryStats(query, nil, ec)
+	ec.QueryStats = qs
 
 	result, err := promql.Exec(qt, ec, query, false)
 	if err != nil {
 		return err

@@ -961,6 +962,7 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 	if err := bw.Flush(); err != nil {
 		return fmt.Errorf("cannot send query range response to remote client: %w", err)
 	}
 
 	return nil
 }
@@ -34,7 +34,7 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
 	// It cannot be converted to int without breaking backwards compatibility at vmalert :(
 	%}
 	"seriesFetched": "{%dl qs.SeriesFetched.Load() %}",
-	"executionTimeMsec": {%dl qs.ExecutionTimeMsec.Load() %}
+	"executionTimeMsec": {%dl qs.ExecutionDuration.Load().Milliseconds() %}
 }
 {% code
 	qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)

@@ -71,9 +71,9 @@ func StreamQueryRangeResponse(qw422016 *qt422016.Writer, rs []netstorage.Result,
 	qw422016.N().DL(qs.SeriesFetched.Load())
//line app/vmselect/prometheus/query_range_response.qtpl:36
 	qw422016.N().S(`","executionTimeMsec":`)
-//line app/vmselect/prometheus/query_range_response.qtpl:37
-	qw422016.N().DL(qs.ExecutionTimeMsec.Load())
-//line app/vmselect/prometheus/query_range_response.qtpl:37
+//line app/vmselect/prometheus/query_range_response.qtpl:38
+	qw422016.N().DL(qs.ExecutionDuration.Load().Milliseconds())
+//line app/vmselect/prometheus/query_range_response.qtpl:38
 	qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_range_response.qtpl:40
 	qt.Printf("generate /api/v1/query_range response for series=%d, points=%d", seriesCount, pointsCount)

@@ -36,7 +36,7 @@ See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
 	// It cannot be converted to int without breaking backwards compatibility at vmalert :(
 	%}
 	"seriesFetched": "{%dl qs.SeriesFetched.Load() %}",
-	"executionTimeMsec": {%dl qs.ExecutionTimeMsec.Load() %}
+	"executionTimeMsec": {%dl qs.ExecutionDuration.Load().Milliseconds() %}
 }
 {% code
 	qt.Printf("generate /api/v1/query response for series=%d", seriesCount)

@@ -81,9 +81,9 @@ func StreamQueryResponse(qw422016 *qt422016.Writer, rs []netstorage.Result, qt *
 	qw422016.N().DL(qs.SeriesFetched.Load())
//line app/vmselect/prometheus/query_response.qtpl:38
 	qw422016.N().S(`","executionTimeMsec":`)
-//line app/vmselect/prometheus/query_response.qtpl:39
-	qw422016.N().DL(qs.ExecutionTimeMsec.Load())
-//line app/vmselect/prometheus/query_response.qtpl:39
+//line app/vmselect/prometheus/query_response.qtpl:40
+	qw422016.N().DL(qs.ExecutionDuration.Load().Milliseconds())
+//line app/vmselect/prometheus/query_response.qtpl:40
 	qw422016.N().S(`}`)
//line app/vmselect/prometheus/query_response.qtpl:42
 	qt.Printf("generate /api/v1/query response for series=%d", seriesCount)
@@ -11,7 +11,7 @@ TSDBStatusResponse generates response for /api/v1/status/tsdb .
 	"data":{
 		"totalSeries": {%dul= status.TotalSeries %},
 		"totalLabelValuePairs": {%dul= status.TotalLabelValuePairs %},
-		"seriesCountByMetricName":{%= tsdbStatusEntries(status.SeriesCountByMetricName) %},
+		"seriesCountByMetricName":{%= tsdbStatusMetricNameEntries(status.SeriesCountByMetricName,status.SeriesQueryStatsByMetricName) %},
 		"seriesCountByLabelName":{%= tsdbStatusEntries(status.SeriesCountByLabelName) %},
 		"seriesCountByFocusLabelValue":{%= tsdbStatusEntries(status.SeriesCountByFocusLabelValue) %},
 		"seriesCountByLabelValuePair":{%= tsdbStatusEntries(status.SeriesCountByLabelValuePair) %},

@@ -34,4 +34,32 @@ TSDBStatusResponse generates response for /api/v1/status/tsdb .
 	]
 {% endfunc %}
 
+{% func tsdbStatusMetricNameEntries(a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) %}
+{% code
+	queryStatsByMetricName := make(map[string]storage.MetricNamesStatsRecord, len(queryStats))
+	for _, record := range queryStats {
+		queryStatsByMetricName[record.MetricName] = record
+	}
+%}
+[
+	{% for i, e := range a %}
+	{
+		{% code
+			entry, ok := queryStatsByMetricName[e.Name]
+		%}
+		"name":{%q= e.Name %},
+		{% if !ok %}
+			"value":{%d= int(e.Count) %}
+		{% else %}
+			"value":{%d= int(e.Count) %},
+			"requestsCount":{%d= int(entry.RequestsCount) %},
+			"lastRequestTimestamp":{%d= int(entry.LastRequestTs) %}
+		{% endif %}
+	}
+	{% if i+1 < len(a) %},{% endif %}
+	{% endfor %}
+]
+{% endfunc %}
 
 {% endstripspace %}
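With per-metric query stats wired in, each seriesCountByMetricName entry can now carry usage data. An illustrative entry for a tracked metric (values invented for the example) would look like:

	{"name":"process_cpu_seconds_total","value":1287,"requestsCount":3,"lastRequestTimestamp":1737204302}

Entries without a stats record keep the old {"name":...,"value":...} shape, so existing consumers of /api/v1/status/tsdb stay compatible.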
@@ -37,9 +37,9 @@ func StreamTSDBStatusResponse(qw422016 *qt422016.Writer, status *storage.TSDBSta
 	qw422016.N().DUL(status.TotalLabelValuePairs)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:13
 	qw422016.N().S(`,"seriesCountByMetricName":`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
-	streamtsdbStatusEntries(qw422016, status.SeriesCountByMetricName)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:14
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
+	streamtsdbStatusMetricNameEntries(qw422016, status.SeriesCountByMetricName, status.SeriesQueryStatsByMetricName)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
 	qw422016.N().S(`,"seriesCountByLabelName":`)
//line app/vmselect/prometheus/tsdb_status_response.qtpl:15
 	streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelName)
@@ -147,3 +147,89 @@ func tsdbStatusEntries(a []storage.TopHeapEntry) string {
 	return qs422016
//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 }
+
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:38
+func streamtsdbStatusMetricNameEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:40
+	queryStatsByMetricName := make(map[string]storage.MetricNamesStatsRecord, len(queryStats))
+	for _, record := range queryStats {
+		queryStatsByMetricName[record.MetricName] = record
+	}
+
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:44
+	qw422016.N().S(`[`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:46
+	for i, e := range a {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:46
+		qw422016.N().S(`{`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:49
+		entry, ok := queryStatsByMetricName[e.Name]
+
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:50
+		qw422016.N().S(`"name":`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:51
+		qw422016.N().Q(e.Name)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:51
+		qw422016.N().S(`,`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:52
+		if !ok {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:52
+			qw422016.N().S(`"value":`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:53
+			qw422016.N().D(int(e.Count))
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:54
+		} else {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:54
+			qw422016.N().S(`"value":`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:55
+			qw422016.N().D(int(e.Count))
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:55
+			qw422016.N().S(`,"requestsCount":`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:56
+			qw422016.N().D(int(entry.RequestsCount))
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:56
+			qw422016.N().S(`,"lastRequestTimestamp":`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:57
+			qw422016.N().D(int(entry.LastRequestTs))
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:58
+		}
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:58
+		qw422016.N().S(`}`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
+		if i+1 < len(a) {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
+			qw422016.N().S(`,`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:60
+		}
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:61
+	}
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:61
+	qw422016.N().S(`]`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+}
+
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+func writetsdbStatusMetricNameEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	qw422016 := qt422016.AcquireWriter(qq422016)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	streamtsdbStatusMetricNameEntries(qw422016, a, queryStats)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	qt422016.ReleaseWriter(qw422016)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+}
+
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+func tsdbStatusMetricNameEntries(a []storage.TopHeapEntry, queryStats []storage.MetricNamesStatsRecord) string {
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	qb422016 := qt422016.AcquireByteBuffer()
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	writetsdbStatusMetricNameEntries(qb422016, a, queryStats)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	qs422016 := string(qb422016.B)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	qt422016.ReleaseByteBuffer(qb422016)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+	return qs422016
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:63
+}
@@ -172,30 +172,6 @@ func copyEvalConfig(src *EvalConfig) *EvalConfig {
 	return &ec
 }
 
-// QueryStats contains various stats for the query.
-type QueryStats struct {
-	// SeriesFetched contains the number of series fetched from storage during the query evaluation.
-	SeriesFetched atomic.Int64
-
-	// ExecutionTimeMsec contains the number of milliseconds the query took to execute.
-	ExecutionTimeMsec atomic.Int64
-}
-
-func (qs *QueryStats) addSeriesFetched(n int) {
-	if qs == nil {
-		return
-	}
-	qs.SeriesFetched.Add(int64(n))
-}
-
-func (qs *QueryStats) addExecutionTimeMsec(startTime time.Time) {
-	if qs == nil {
-		return
-	}
-	d := time.Since(startTime).Milliseconds()
-	qs.ExecutionTimeMsec.Add(d)
-}
-
 func (ec *EvalConfig) validate() {
 	if ec.Start > ec.End {
 		logger.Panicf("BUG: start cannot exceed end; got %d vs %d", ec.Start, ec.End)

@@ -1721,12 +1697,13 @@ func evalRollupFuncNoCache(qt *querytracer.Tracer, ec *EvalConfig, funcName stri
 	if err != nil {
 		return nil, err
 	}
+	qs := ec.QueryStats
 	rssLen := rss.Len()
 	if rssLen == 0 {
 		rss.Cancel()
 		return nil, nil
 	}
-	ec.QueryStats.addSeriesFetched(rssLen)
+	qs.addSeriesFetched(rssLen)
 
 	// Verify timeseries fit available memory during rollup calculations.
 	timeseriesLen := rssLen
55	app/vmselect/promql/query_stats.go	Normal file
@@ -0,0 +1,55 @@
package promql

import (
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
)

// QueryStats contains various stats of the query evaluation.
type QueryStats struct {
	// ExecutionDuration contains the time duration the query took to execute.
	ExecutionDuration atomic.Pointer[time.Duration]
	// SeriesFetched contains the number of series fetched from storage or cache.
	SeriesFetched atomic.Int64

	at *auth.Token

	query     string
	queryType string
	start     int64
	end       int64
	step      int64
}

// NewQueryStats creates a new QueryStats object.
func NewQueryStats(query string, at *auth.Token, ec *EvalConfig) *QueryStats {
	qs := &QueryStats{
		at:        at,
		query:     query,
		step:      ec.Step,
		start:     ec.Start,
		end:       ec.End,
		queryType: "range",
	}
	if qs.start == qs.end {
		qs.queryType = "instant"
	}
	return qs
}

func (qs *QueryStats) addSeriesFetched(n int) {
	if qs == nil {
		return
	}
	qs.SeriesFetched.Add(int64(n))
}

func (qs *QueryStats) addExecutionTimeMsec(startTime time.Time) {
	if qs == nil {
		return
	}
	d := time.Since(startTime)
	qs.ExecutionDuration.Store(&d)
}
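Putting the pieces together, this is roughly how the vmselect handlers now wire query stats into EvalConfig, per the hunks above. A sketch, not the literal handler code: error handling and the remaining EvalConfig fields are elided, and the final Exec argument (true for instant, false for range in the hunks) is derived from start == end here as an assumption.

// execWithStats shows the new flow: build EvalConfig first, then attach
// a QueryStats created from it, then execute.
func execWithStats(qt *querytracer.Tracer, query string, start, end, step int64) (*promql.QueryStats, error) {
	ec := &promql.EvalConfig{
		Start: start,
		End:   end,
		Step:  step,
	}
	qs := promql.NewQueryStats(query, nil, ec) // nil *auth.Token, as in the single-tenant handlers
	ec.QueryStats = qs
	if _, err := promql.Exec(qt, ec, query, start == end); err != nil {
		return nil, err
	}
	// Response templates now render qs.ExecutionDuration.Load().Milliseconds()
	// instead of the removed ExecutionTimeMsec counter.
	return qs, nil
}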
@@ -3,6 +3,7 @@ package stats

import (
 	"fmt"
 	"net/http"
+	"regexp"
 	"strconv"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"

@@ -33,6 +34,12 @@ func MetricNamesStatsHandler(qt *querytracer.Tracer, w http.ResponseWriter, r *h
 		le = n
 	}
+	matchPattern := r.FormValue("match_pattern")
+	if len(matchPattern) > 0 {
+		_, err := regexp.Compile(matchPattern)
+		if err != nil {
+			return fmt.Errorf("match_pattern=%q must be valid regex: %w", matchPattern, err)
+		}
+	}
 	stats, err := netstorage.GetMetricNamesStats(qt, limit, le, matchPattern)
 	if err != nil {
 		return err
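The handler validates the regex up front, so an invalid pattern fails fast with a clear error instead of surfacing from storage. A request now looks like /api/v1/status/metric_names_stats?limit=10&le=1&match_pattern=^go_.* (parameter names taken from the diff; judging by the TypeScript comment later in this diff, le means "less than or equal").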
@@ -5,9 +5,13 @@ menu:
   docs:
     parent: 'victoriametrics'
     weight: 23
+tags:
+  - metrics
 aliases:
   - /ExtendedPromQL.html
   - /MetricsQL.html
+  - /metricsql/index.html
+  - /metricsql/
 ---
 [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) implements MetricsQL -
 query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/).
207	app/vmselect/vmui/assets/index-BLLXzY70.js	Normal file
File diff suppressed because one or more lines are too long
1	app/vmselect/vmui/assets/index-Brup_hCI.css	Normal file
File diff suppressed because one or more lines are too long
76	app/vmselect/vmui/assets/vendor-C-vZmbyg.js	Normal file
File diff suppressed because one or more lines are too long
@@ -36,10 +36,10 @@
     <meta property="og:title" content="UI for VictoriaMetrics">
     <meta property="og:url" content="https://victoriametrics.com/">
     <meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data">
-    <script type="module" crossorigin src="./assets/index-Bc6E56oa.js"></script>
-    <link rel="modulepreload" crossorigin href="./assets/vendor-DojlIpLz.js">
+    <script type="module" crossorigin src="./assets/index-BLLXzY70.js"></script>
+    <link rel="modulepreload" crossorigin href="./assets/vendor-C-vZmbyg.js">
     <link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
-    <link rel="stylesheet" crossorigin href="./assets/index-u4IOGr0E.css">
+    <link rel="stylesheet" crossorigin href="./assets/index-Brup_hCI.css">
   </head>
   <body>
     <noscript>You need to enable JavaScript to run this app.</noscript>
@@ -336,13 +336,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	case "/create":
 		snapshotsCreateTotal.Inc()
 		w.Header().Set("Content-Type", "application/json")
-		snapshotPath, err := Storage.CreateSnapshot()
-		if err != nil {
-			err = fmt.Errorf("cannot create snapshot: %w", err)
-			jsonResponseError(w, err)
-			snapshotsCreateErrorsTotal.Inc()
-			return true
-		}
+		snapshotPath := Storage.MustCreateSnapshot()
 		if prometheusCompatibleResponse {
 			fmt.Fprintf(w, `{"status":"success","data":{"name":%s}}`, stringsutil.JSONString(snapshotPath))
 		} else {

@@ -352,13 +346,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	case "/list":
 		snapshotsListTotal.Inc()
 		w.Header().Set("Content-Type", "application/json")
-		snapshots, err := Storage.ListSnapshots()
-		if err != nil {
-			err = fmt.Errorf("cannot list snapshots: %w", err)
-			jsonResponseError(w, err)
-			snapshotsListErrorsTotal.Inc()
-			return true
-		}
+		snapshots := Storage.MustListSnapshots()
 		fmt.Fprintf(w, `{"status":"ok","snapshots":[`)
 		if len(snapshots) > 0 {
 			for _, snapshot := range snapshots[:len(snapshots)-1] {

@@ -373,13 +361,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		w.Header().Set("Content-Type", "application/json")
 		snapshotName := r.FormValue("snapshot")
 
-		snapshots, err := Storage.ListSnapshots()
-		if err != nil {
-			err = fmt.Errorf("cannot list snapshots: %w", err)
-			jsonResponseError(w, err)
-			snapshotsDeleteErrorsTotal.Inc()
-			return true
-		}
+		snapshots := Storage.MustListSnapshots()
 		for _, snName := range snapshots {
 			if snName == snapshotName {
 				if err := Storage.DeleteSnapshot(snName); err != nil {

@@ -393,19 +375,13 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 			}
 		}
 
-		err = fmt.Errorf("cannot find snapshot %q", snapshotName)
+		err := fmt.Errorf("cannot find snapshot %q", snapshotName)
 		jsonResponseError(w, err)
 		return true
 	case "/delete_all":
 		snapshotsDeleteAllTotal.Inc()
 		w.Header().Set("Content-Type", "application/json")
-		snapshots, err := Storage.ListSnapshots()
-		if err != nil {
-			err = fmt.Errorf("cannot list snapshots: %w", err)
-			jsonResponseError(w, err)
-			snapshotsDeleteAllErrorsTotal.Inc()
-			return true
-		}
+		snapshots := Storage.MustListSnapshots()
 		for _, snapshotName := range snapshots {
 			if err := Storage.DeleteSnapshot(snapshotName); err != nil {
 				err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)

@@ -439,10 +415,7 @@ func initStaleSnapshotsRemover(strg *storage.Storage) {
 				return
 			case <-t.C:
 			}
-			if err := strg.DeleteStaleSnapshots(snapshotsMaxAgeDur); err != nil {
-				// Use logger.Errorf instead of logger.Fatalf in the hope the error is temporary.
-				logger.Errorf("cannot delete stale snapshots: %s", err)
-			}
+			strg.MustDeleteStaleSnapshots(snapshotsMaxAgeDur)
 		}
 	}()
 }

@@ -460,11 +433,9 @@ var (
 var (
 	activeForceMerges = metrics.NewCounter("vm_active_force_merges")
 
-	snapshotsCreateTotal       = metrics.NewCounter(`vm_http_requests_total{path="/snapshot/create"}`)
-	snapshotsCreateErrorsTotal = metrics.NewCounter(`vm_http_request_errors_total{path="/snapshot/create"}`)
+	snapshotsCreateTotal = metrics.NewCounter(`vm_http_requests_total{path="/snapshot/create"}`)
 
-	snapshotsListTotal       = metrics.NewCounter(`vm_http_requests_total{path="/snapshot/list"}`)
-	snapshotsListErrorsTotal = metrics.NewCounter(`vm_http_request_errors_total{path="/snapshot/list"}`)
+	snapshotsListTotal = metrics.NewCounter(`vm_http_requests_total{path="/snapshot/list"}`)
 
 	snapshotsDeleteTotal       = metrics.NewCounter(`vm_http_requests_total{path="/snapshot/delete"}`)
 	snapshotsDeleteErrorsTotal = metrics.NewCounter(`vm_http_request_errors_total{path="/snapshot/delete"}`)
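The handlers drop their per-endpoint error plumbing because snapshot creation and listing failures are now treated as fatal inside the storage layer. A minimal, self-contained sketch of the Must* wrapper idiom this relies on; the inner listSnapshots stand-in is hypothetical, not the actual lib/storage code:

package main

import "log"

type Storage struct{}

// listSnapshots stands in for the real filesystem walk.
func (s *Storage) listSnapshots() ([]string, error) {
	return []string{"20250118T124506Z-59d1b952d7eaf547"}, nil
}

// MustListSnapshots aborts the process on failure instead of returning an
// error, which is why the handlers above lost their error branches and the
// per-endpoint *ErrorsTotal counters.
func (s *Storage) MustListSnapshots() []string {
	snapshots, err := s.listSnapshots()
	if err != nil {
		log.Fatalf("FATAL: cannot list snapshots: %s", err)
	}
	return snapshots
}

func main() {
	var s Storage
	log.Println(s.MustListSnapshots())
}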
@@ -1,4 +1,4 @@
-FROM golang:1.24.1 AS build-web-stage
+FROM golang:1.24.2 AS build-web-stage
 COPY build /build
 
 WORKDIR /build
@@ -1,7 +1,7 @@
 # All these commands must run from repository root.
 
 copy-metricsql-docs:
-	cp docs/MetricsQL.md app/vmui/packages/vmui/src/assets/MetricsQL.md
+	cp docs/victoriametrics/MetricsQL.md app/vmui/packages/vmui/src/assets/MetricsQL.md
 
 vmui-package-base-image:
 	docker build -t vmui-builder-image -f app/vmui/Dockerfile-build ./app/vmui
580	app/vmui/packages/vmui/package-lock.json	generated
File diff suppressed because it is too large.
@@ -8,21 +8,21 @@
     "@types/lodash.debounce": "^4.0.9",
     "@types/lodash.get": "^4.4.9",
     "@types/qs": "^6.9.18",
-    "@types/react": "^19.0.12",
+    "@types/react": "^19.1.2",
     "@types/react-input-mask": "^3.0.6",
     "@types/react-router-dom": "^5.3.3",
     "classnames": "^2.5.1",
     "dayjs": "^1.11.13",
     "lodash.debounce": "^4.0.8",
     "lodash.get": "^4.4.2",
-    "marked": "^15.0.7",
+    "marked": "^15.0.8",
     "marked-emoji": "^2.0.0",
-    "preact": "^10.26.4",
+    "preact": "^10.26.5",
     "qs": "^6.14.0",
     "react-input-mask": "^2.0.4",
-    "react-router-dom": "^7.4.0",
+    "react-router-dom": "^7.5.0",
     "uplot": "^1.6.32",
-    "vite": "^6.2.3",
+    "vite": "^6.2.6",
     "web-vitals": "^4.2.4"
   },
   "scripts": {

@@ -55,25 +55,25 @@
     "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6",
     "@babel/plugin-proposal-private-property-in-object": "^7.21.11",
     "@eslint/eslintrc": "^3.3.1",
-    "@eslint/js": "^9.23.0",
+    "@eslint/js": "^9.24.0",
     "@preact/preset-vite": "^2.10.1",
     "@testing-library/jest-dom": "^6.6.3",
     "@testing-library/preact": "^3.2.4",
-    "@types/node": "^22.13.13",
-    "@typescript-eslint/eslint-plugin": "^8.28.0",
-    "@typescript-eslint/parser": "^8.28.0",
+    "@types/node": "^22.14.1",
+    "@typescript-eslint/eslint-plugin": "^8.30.1",
+    "@typescript-eslint/parser": "^8.30.1",
     "cross-env": "^7.0.3",
-    "eslint": "^9.23.0",
-    "eslint-plugin-react": "^7.37.4",
+    "eslint": "^9.24.0",
+    "eslint-plugin-react": "^7.37.5",
     "globals": "^16.0.0",
-    "http-proxy-middleware": "^3.0.3",
-    "jsdom": "^26.0.0",
+    "http-proxy-middleware": "^3.0.5",
+    "jsdom": "^26.1.0",
     "postcss": "^8.5.3",
     "rollup-plugin-visualizer": "^5.14.0",
-    "sass": "^1.86.0",
-    "sass-embedded": "^1.86.0",
-    "typescript": "^5.8.2",
-    "vitest": "^3.0.9",
-    "webpack": "^5.98.0"
+    "sass": "^1.86.3",
+    "sass-embedded": "^1.86.3",
+    "typescript": "^5.8.3",
+    "vitest": "^3.1.1",
+    "webpack": "^5.99.5"
   }
 }
@@ -11,3 +11,19 @@ export const getCardinalityInfo = (server: string, requestsParam: CardinalityReq
   return `${server}/api/v1/status/tsdb?topN=${requestsParam.topN}&date=${requestsParam.date}${match}${focusLabel}`;
 };
 
+interface MetricNamesStatsParams {
+  limit?: number,
+  le?: number, // less than or equal
+  match_pattern?: string, // a regex pattern to match metric names
+}
+
+export const getMetricNamesStats = (server: string, params: MetricNamesStatsParams) => {
+  const searchParams = new URLSearchParams(
+    Object.entries(params).filter(([_, value]) => value != null)
+  ).toString();
+
+  console.log("searchParams", searchParams);
+
+  return `${server}/api/v1/status/metric_names_stats?${searchParams}`;
+};
@@ -5,9 +5,13 @@ menu:
   docs:
     parent: 'victoriametrics'
     weight: 23
+tags:
+  - metrics
 aliases:
   - /ExtendedPromQL.html
   - /MetricsQL.html
+  - /metricsql/index.html
+  - /metricsql/
 ---
 [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) implements MetricsQL -
 query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/).
@@ -1,23 +1,13 @@
-import React, { FC, useCallback, useMemo, useRef, useState } from "preact/compat";
+import React, { FC, useState } from "preact/compat";
import "./style.scss";
import "uplot/dist/uPlot.min.css";
import useElementSize from "../../../hooks/useElementSize";
import uPlot, { AlignedData } from "uplot";
import { useEffect } from "react";
import useBarHitsOptions, { getLabelFromLogHit } from "./hooks/useBarHitsOptions";
import BarHitsTooltip from "./BarHitsTooltip/BarHitsTooltip";
import { AlignedData } from "uplot";
import { TimeParams } from "../../../types";
import usePlotScale from "../../../hooks/uplot/usePlotScale";
import useReadyChart from "../../../hooks/uplot/useReadyChart";
import useZoomChart from "../../../hooks/uplot/useZoomChart";
import classNames from "classnames";
import { LegendLogHits, LogHits } from "../../../api/types";
import { addSeries, delSeries, setBand } from "../../../utils/uplot";
import { LogHits } from "../../../api/types";
import { GraphOptions, GRAPH_STYLES } from "./types";
import BarHitsOptions from "./BarHitsOptions/BarHitsOptions";
import stack from "../../../utils/uplot/stack";
import BarHitsLegend from "./BarHitsLegend/BarHitsLegend";
import { calculateTotalHits, sortLogHits } from "../../../utils/logs";
import BarHitsPlot from "./BarHitsPlot/BarHitsPlot";

interface Props {
  logHits: LogHits[];

@@ -27,9 +17,6 @@ interface Props {
  onApplyFilter: (value: string) => void;
}
const BarHitsChart: FC<Props> = ({ logHits, data: _data, period, setPeriod, onApplyFilter }) => {
  const [containerRef, containerSize] = useElementSize();
  const uPlotRef = useRef<HTMLDivElement>(null);
  const [uPlotInst, setUPlotInst] = useState<uPlot>();
  const [graphOptions, setGraphOptions] = useState<GraphOptions>({
    graphStyle: GRAPH_STYLES.LINE_STEPPED,
    stacked: false,

@@ -37,82 +24,6 @@ const BarHitsChart: FC<Props> = ({ logHits, data: _data, period, setPeriod, onAp
    hideChart: false,
  });

  const { xRange, setPlotScale } = usePlotScale({ period, setPeriod });
  const { onReadyChart, isPanning } = useReadyChart(setPlotScale);
  useZoomChart({ uPlotInst, xRange, setPlotScale });

  const isEmptyData = useMemo(() => _data.every(d => d.length === 0), [_data]);

  const { data, bands } = useMemo(() => {
    return graphOptions.stacked ? stack(_data, () => false) : { data: _data, bands: [] };
  }, [graphOptions, _data]);

  const { options, series, focusDataIdx } = useBarHitsOptions({
    data,
    logHits,
    bands,
    xRange,
    containerSize,
    onReadyChart,
    setPlotScale,
    graphOptions
  });

  const prepareLegend = useCallback((hits: LogHits[], totalHits: number): LegendLogHits[] => {
    return hits.map((hit) => {
      const label = getLabelFromLogHit(hit);

      const legendItem: LegendLogHits = {
        label,
        isOther: hit._isOther,
        fields: hit.fields,
        total: hit.total || 0,
        totalHits,
        stroke: series.find((s) => s.label === label)?.stroke,
      };

      return legendItem;
    }).sort(sortLogHits("total"));
  }, [series]);


  const legendDetails: LegendLogHits[] = useMemo(() => {
    const totalHits = calculateTotalHits(logHits);
    return prepareLegend(logHits, totalHits);
  }, [logHits, prepareLegend]);

  useEffect(() => {
    if (!uPlotInst) return;
    delSeries(uPlotInst);
    addSeries(uPlotInst, series, true);
    setBand(uPlotInst, series);
    uPlotInst.redraw();
  }, [series]);

  useEffect(() => {
    if (!uPlotRef.current) return;
    const uplot = new uPlot(options, data, uPlotRef.current);
    setUPlotInst(uplot);
    return () => uplot.destroy();
  }, [uPlotRef.current, options]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.scales.x.range = () => [xRange.min, xRange.max];
    uPlotInst.redraw();
  }, [xRange]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.setSize(containerSize);
    uPlotInst.redraw();
  }, [containerSize]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.setData(data);
    uPlotInst.redraw();
  }, [data]);

  return (
    <div

@@ -122,32 +33,16 @@ const BarHitsChart: FC<Props> = ({ logHits, data: _data, period, setPeriod, onAp
      })}
    >
      {!graphOptions.hideChart && (
        <div
          className={classNames({
            "vm-bar-hits-chart": true,
            "vm-bar-hits-chart_panning": isPanning
          })}
          ref={containerRef}
        >
          <div
            className="vm-line-chart__u-plot"
            ref={uPlotRef}
          />
          <BarHitsTooltip
            uPlotInst={uPlotInst}
            data={_data}
            focusDataIdx={focusDataIdx}
          />
        </div>
      )}
      <BarHitsOptions onChange={setGraphOptions}/>
      {uPlotInst && !isEmptyData && !graphOptions.hideChart && (
        <BarHitsLegend
          uPlotInst={uPlotInst}
        <BarHitsPlot
          logHits={logHits}
          data={_data}
          period={period}
          setPeriod={setPeriod}
          onApplyFilter={onApplyFilter}
          legendDetails={legendDetails}
          graphOptions={graphOptions}
        />
      )}
      <BarHitsOptions onChange={setGraphOptions}/>
    </div>
  );
};
@@ -21,11 +21,15 @@ const BarHitsLegend: FC<Props> = ({ uPlotInst, legendDetails, onApplyFilter }) =

  const handleRedrawGraph = () => {
    uPlotInst.redraw();
-    setSeries(getSeries());
  };

  useEffect(() => {
-    setSeries(getSeries());
+    if (!uPlotInst.hooks.draw) {
+      uPlotInst.hooks.draw = [];
+    }
+    uPlotInst.hooks.draw.push(() => {
+      setSeries(getSeries());
+    });
  }, [uPlotInst]);

  return (
@@ -0,0 +1,153 @@
import React, { FC, useCallback } from "preact/compat";
import useElementSize from "../../../../hooks/useElementSize";
import { useEffect, useMemo, useRef, useState } from "react";
import uPlot, { AlignedData, Series } from "uplot";
import { GraphOptions } from "../types";
import usePlotScale from "../../../../hooks/uplot/usePlotScale";
import useReadyChart from "../../../../hooks/uplot/useReadyChart";
import useZoomChart from "../../../../hooks/uplot/useZoomChart";
import stack from "../../../../utils/uplot/stack";
import useBarHitsOptions, { getLabelFromLogHit } from "../hooks/useBarHitsOptions";
import { LegendLogHits, LogHits } from "../../../../api/types";
import { addSeries, delSeries, setBand } from "../../../../utils/uplot";
import classNames from "classnames";
import BarHitsTooltip from "../BarHitsTooltip/BarHitsTooltip";
import { TimeParams } from "../../../../types";
import BarHitsLegend from "../BarHitsLegend/BarHitsLegend";
import { calculateTotalHits, sortLogHits } from "../../../../utils/logs";

interface Props {
  logHits: LogHits[];
  data: AlignedData;
  period: TimeParams;
  setPeriod: ({ from, to }: { from: Date, to: Date }) => void;
  onApplyFilter: (value: string) => void;
  graphOptions: GraphOptions;
}

const BarHitsPlot: FC<Props> = ({ graphOptions, logHits, data: _data, period, setPeriod, onApplyFilter }: Props) => {
  const [containerRef, containerSize] = useElementSize();
  const uPlotRef = useRef<HTMLDivElement>(null);
  const [uPlotInst, setUPlotInst] = useState<uPlot>();

  const { xRange, setPlotScale } = usePlotScale({ period, setPeriod });
  const { onReadyChart, isPanning } = useReadyChart(setPlotScale);
  useZoomChart({ uPlotInst, xRange, setPlotScale });

  const { data, bands } = useMemo(() => {
    return graphOptions.stacked ? stack(_data, () => false) : { data: _data, bands: [] };
  }, [graphOptions, _data]);

  const { options, series, focusDataIdx } = useBarHitsOptions({
    data,
    logHits,
    bands,
    xRange,
    containerSize,
    onReadyChart,
    setPlotScale,
    graphOptions
  });

  const prepareLegend = useCallback((hits: LogHits[], totalHits: number): LegendLogHits[] => {
    return hits.map((hit) => {
      const label = getLabelFromLogHit(hit);

      const legendItem: LegendLogHits = {
        label,
        isOther: hit._isOther,
        fields: hit.fields,
        total: hit.total || 0,
        totalHits,
        stroke: series.find((s) => s.label === label)?.stroke,
      };

      return legendItem;
    }).sort(sortLogHits("total"));
  }, [series]);


  const legendDetails: LegendLogHits[] = useMemo(() => {
    const totalHits = calculateTotalHits(logHits);
    return prepareLegend(logHits, totalHits);
  }, [logHits, prepareLegend]);

  useEffect(() => {
    if (!uPlotInst) return;

    const oldSeriesMap = new Map(uPlotInst.series.map(s => [s.label, s]));

    const syncedSeries = series.map(s => {
      const old = oldSeriesMap.get(s.label);
      return old ? { ...s, show: old.show } : s;
    });

    delSeries(uPlotInst);
    addSeries(uPlotInst, syncedSeries, true);
    setBand(uPlotInst, syncedSeries);
    uPlotInst.redraw();
  }, [series, uPlotInst]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.delBand();
    bands.forEach(band => {
      uPlotInst.addBand(band);
    });
    uPlotInst.redraw();
  }, [bands]);

  useEffect(() => {
    if (!uPlotRef.current) return;
    const uplot = new uPlot(options, data, uPlotRef.current);
    setUPlotInst(uplot);
    return () => uplot.destroy();
  }, [uPlotRef.current]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.scales.x.range = () => [xRange.min, xRange.max];
    uPlotInst.redraw();
  }, [xRange]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.setSize(containerSize);
    uPlotInst.redraw();
  }, [containerSize]);

  useEffect(() => {
    if (!uPlotInst) return;
    uPlotInst.setData(data);
    uPlotInst.redraw();
  }, [data]);

  return (
    <>
      <div
        className={classNames({
          "vm-bar-hits-chart": true,
          "vm-bar-hits-chart_panning": isPanning
        })}
        ref={containerRef}
      >
        <div
          className="vm-line-chart__u-plot"
          ref={uPlotRef}
        />
        <BarHitsTooltip
          uPlotInst={uPlotInst}
          data={_data}
          focusDataIdx={focusDataIdx}
        />
      </div>
      {uPlotInst && <BarHitsLegend
        uPlotInst={uPlotInst}
        onApplyFilter={onApplyFilter}
        legendDetails={legendDetails}
      />}
    </>
  );
};

export default BarHitsPlot;
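Worth noting in the extracted component: when the series list is rebuilt, the oldSeriesMap/syncedSeries step copies each prior series' show flag onto its replacement, so per-series visibility toggles made in the legend survive data refreshes instead of resetting. The previous inline effect in BarHitsChart called delSeries/addSeries without this sync, so toggles were lost on every refresh.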
Some files were not shown because too many files have changed in this diff.