Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 16:59:40 +03:00)

Compare commits: v1.97.13 ... debug/erro (179 commits)
@@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
@@ -39,9 +40,20 @@ var (
"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
"Smaller intervals increase disk IO load. Minimum supported value is 1s")
maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmsingle can receive per second. Data ingestion is paused when the limit is exceeded. "+
"By default there are no limits on samples ingestion rate.")
)
|
||||
|
||||
func main() {
|
||||
// VictoriaMetrics is optimized for reduced memory allocations,
|
||||
// so it can run with the reduced GOGC in order to reduce the used memory,
|
||||
// while keeping CPU usage spent in GC at low levels.
|
||||
//
|
||||
// Some workloads may need increased GOGC values. Then such values can be set via GOGC environment variable.
|
||||
// It is recommended increasing GOGC if go_memstats_gc_cpu_fraction metric exposed at /metrics page
|
||||
// exceeds 0.05 for extended periods of time.
|
||||
cgroup.SetGOGC(30)
|
||||
|
||||
// Write flags and help message to stdout, since it is easier to grep or pipe.
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
flag.Usage = usage
|
||||
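The comment in main() above explains why vmsingle starts with GOGC=30 and when to raise it via the GOGC environment variable. A minimal sketch of a SetGOGC-style helper that applies a default GC percent while leaving an explicit GOGC value alone, assuming that behavior and not copied from lib/cgroup:

```go
// Sketch only: apply a default GC percent unless GOGC is set explicitly.
// This is an assumption about the helper's behavior, not the lib/cgroup code.
package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

// setGOGC applies defaultPercent unless GOGC is already set in the environment.
func setGOGC(defaultPercent int) {
	if os.Getenv("GOGC") != "" {
		// The Go runtime already honors an explicit GOGC value at startup,
		// so leave it untouched.
		return
	}
	debug.SetGCPercent(defaultPercent)
}

func main() {
	setGOGC(30)
	fmt.Println("GC percent configured")
}
```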
@@ -76,6 +88,7 @@ func main() {
|
||||
storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
|
||||
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
|
||||
vmselect.Init()
|
||||
vminsertcommon.StartIngestionRateLimiter(*maxIngestionRate)
|
||||
vminsert.Init()
|
||||
|
||||
startSelfScraper()
|
||||
@@ -97,6 +110,7 @@ func main() {
|
||||
}
|
||||
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
|
||||
vminsert.Stop()
|
||||
vminsertcommon.StopIngestionRateLimiter()
|
||||
|
||||
vmstorage.Stop()
|
||||
vmselect.Stop()
|
||||
|
||||
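The hunks above wire the new -maxIngestionRate flag into vminsertcommon.StartIngestionRateLimiter and StopIngestionRateLimiter, pausing ingestion once the per-second sample limit is exceeded. A minimal sketch of such a limiter, assuming a simple "pause until the next one-second window" strategy; the real vminsertcommon implementation may differ:

```go
// Minimal per-second ingestion limiter sketch. The names and strategy here
// are assumptions; only the flag semantics (0 means "no limit", pause when
// exceeded) come from the diff.
package main

import (
	"fmt"
	"sync"
	"time"
)

type ingestionLimiter struct {
	mu          sync.Mutex
	maxPerSec   int
	windowStart time.Time
	count       int
}

func newIngestionLimiter(maxPerSec int) *ingestionLimiter {
	return &ingestionLimiter{maxPerSec: maxPerSec, windowStart: time.Now()}
}

// Register blocks until n more samples fit into the current one-second window.
// It assumes n <= maxPerSec for any single call.
func (l *ingestionLimiter) Register(n int) {
	if l.maxPerSec <= 0 {
		return // zero means "no limit", matching the flag's default
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	for {
		if time.Since(l.windowStart) >= time.Second {
			l.windowStart = time.Now()
			l.count = 0
		}
		if l.count+n <= l.maxPerSec {
			l.count += n
			return
		}
		// Pause ingestion until the current window expires.
		wait := time.Second - time.Since(l.windowStart)
		l.mu.Unlock()
		time.Sleep(wait)
		l.mu.Lock()
	}
}

func main() {
	l := newIngestionLimiter(1000)
	l.Register(500)
	l.Register(600) // blocks until the next one-second window
	fmt.Println("both batches admitted")
}
```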
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -68,6 +69,10 @@ func selfScraper(scrapeInterval time.Duration) {
|
||||
t := &r.Tags[j]
|
||||
labels = addLabel(labels, t.Key, t.Value)
|
||||
}
|
||||
if timeserieslimits.IsExceeding(labels) {
|
||||
// Skip metric with exceeding labels.
|
||||
continue
|
||||
}
|
||||
if len(mrs) < cap(mrs) {
|
||||
mrs = mrs[:len(mrs)+1]
|
||||
} else {
|
||||
|
||||
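The self-scraper hunk above skips a metric when timeserieslimits.IsExceeding reports that its labels break the configured limits. A sketch of what such a check could look like, assuming limits on label count and name/value length; the concrete limits and names are illustrative, not the real lib/timeserieslimits API:

```go
// Hedged sketch of a label-limits check similar in spirit to
// timeserieslimits.IsExceeding; the limit values below are assumptions.
package main

import "fmt"

type label struct {
	Name, Value string
}

const (
	maxLabelsPerSeries = 30
	maxLabelNameLen    = 256
	maxLabelValueLen   = 1024
)

// isExceeding reports whether the labels violate any of the configured limits.
func isExceeding(labels []label) bool {
	if len(labels) > maxLabelsPerSeries {
		return true
	}
	for _, l := range labels {
		if len(l.Name) > maxLabelNameLen || len(l.Value) > maxLabelValueLen {
			return true
		}
	}
	return false
}

func main() {
	ok := []label{{"job", "victoria-metrics"}, {"instance", "127.0.0.1:8428"}}
	fmt.Println(isExceeding(ok)) // false
}
```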
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
@@ -21,6 +22,11 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
)
|
||||
|
||||
var (
|
||||
datadogStreamFields = flagutil.NewArrayString("datadog.streamFields", "Datadog tags to be used as stream fields.")
|
||||
datadogIgnoreFields = flagutil.NewArrayString("datadog.ignoreFields", "Datadog tags to ignore.")
|
||||
)
|
||||
|
||||
var parserPool fastjson.ParserPool
|
||||
|
||||
// RequestHandler processes Datadog insert requests
|
||||
@@ -79,17 +85,21 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(cp.StreamFields) == 0 {
|
||||
cp.StreamFields = *datadogStreamFields
|
||||
}
|
||||
if len(cp.IgnoreFields) == 0 {
|
||||
cp.IgnoreFields = *datadogIgnoreFields
|
||||
}
|
||||
|
||||
if err := vlstorage.CanWriteData(); err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return true
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor("datadog")
|
||||
n, err := readLogsRequest(ts, data, lmp.AddRow)
|
||||
err = readLogsRequest(ts, data, lmp)
|
||||
lmp.MustClose()
|
||||
if n > 0 {
|
||||
rowsIngestedTotal.Add(n)
|
||||
}
|
||||
if err != nil {
|
||||
logger.Warnf("cannot decode log message in /api/v2/logs request: %s, stream fields: %s", err, cp.StreamFields)
|
||||
return true
|
||||
@@ -105,47 +115,118 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
|
||||
|
||||
var (
|
||||
v2LogsRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/datadog/api/v2/logs"}`)
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="datadog"}`)
|
||||
v2LogsRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/datadog/api/v2/logs"}`)
|
||||
)
|
||||
|
||||
// datadog message field has two formats:
|
||||
// - regular log message with string text
|
||||
// - nested json format for serverless plugins
|
||||
// which has the following format:
|
||||
// {"message": {"message": "text","lamdba": {"arn": "string","requestID": "string"}, "timestamp": int64} }
|
||||
//
|
||||
// See https://github.com/DataDog/datadog-lambda-extension/blob/28b90c7e4e985b72d60b5f5a5147c69c7ac693c4/bottlecap/src/logs/lambda/mod.rs#L24
|
||||
func appendMsgFields(fields []logstorage.Field, v *fastjson.Value) ([]logstorage.Field, error) {
|
||||
switch v.Type() {
|
||||
case fastjson.TypeString:
|
||||
val := v.GetStringBytes()
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
case fastjson.TypeObject:
|
||||
var firstErr error
|
||||
v.GetObject().Visit(func(k []byte, v *fastjson.Value) {
|
||||
if firstErr != nil {
|
||||
return
|
||||
}
|
||||
switch bytesutil.ToUnsafeString(k) {
|
||||
case "message":
|
||||
val := v.GetStringBytes()
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
case "status":
|
||||
val := v.GetStringBytes()
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "status",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
case "lamdba":
|
||||
obj, err := v.Object()
|
||||
if err != nil {
|
||||
firstErr = err
|
||||
firstErr = fmt.Errorf("unexpected lambda value type for %q:%q; want object", k, v)
|
||||
return
|
||||
}
|
||||
obj.Visit(func(k []byte, v *fastjson.Value) {
|
||||
if firstErr != nil {
|
||||
return
|
||||
}
|
||||
val, err := v.StringBytes()
|
||||
if err != nil {
|
||||
firstErr = fmt.Errorf("unexpected lambda label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: bytesutil.ToUnsafeString(k),
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
})
|
||||
default:
|
||||
return fields, fmt.Errorf("unsupported message type %q", v.Type().String())
|
||||
}
|
||||
return fields, nil
|
||||
}
|
||||
|
||||
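The comment above appendMsgFields describes the two shapes of the Datadog "message" field: a plain string and the nested serverless-plugin object. A small illustration of how the nested shape flattens into log fields, using fastjson as the diff does and keeping the diff's "lamdba" key spelling; this snippet is an example only, not the code from the diff:

```go
// Illustration of flattening the nested serverless "message" object into
// per-row log fields. The payload follows the comment above; field handling
// details in the real appendMsgFields may differ.
package main

import (
	"fmt"

	"github.com/valyala/fastjson"
)

func main() {
	payload := `{"message": {"message": "text", "lamdba": {"arn": "arn:aws:lambda:...", "requestID": "abc-123"}, "timestamp": 1700000000000}}`

	var p fastjson.Parser
	v, err := p.Parse(payload)
	if err != nil {
		panic(err)
	}

	msg := v.Get("message")
	// The inner "message" becomes the _msg field.
	fmt.Printf("_msg=%s\n", msg.GetStringBytes("message"))
	// Each key of the "lamdba" object becomes its own field.
	msg.GetObject("lamdba").Visit(func(k []byte, val *fastjson.Value) {
		fmt.Printf("%s=%s\n", k, val.GetStringBytes())
	})
}
```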
// readLogsRequest parses data according to DataDog logs format
|
||||
// https://docs.datadoghq.com/api/latest/logs/#send-logs
|
||||
func readLogsRequest(ts int64, data []byte, processLogMessage func(int64, []logstorage.Field)) (int, error) {
|
||||
func readLogsRequest(ts int64, data []byte, lmp insertutils.LogMessageProcessor) error {
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
return fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
}
|
||||
records, err := v.Array()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot extract array from parsed JSON: %w", err)
|
||||
return fmt.Errorf("cannot extract array from parsed JSON: %w", err)
|
||||
}
|
||||
|
||||
var fields []logstorage.Field
|
||||
for m, r := range records {
|
||||
for _, r := range records {
|
||||
o, err := r.Object()
|
||||
if err != nil {
|
||||
return m + 1, fmt.Errorf("could not extract log record: %w", err)
|
||||
return fmt.Errorf("could not extract log record: %w", err)
|
||||
}
|
||||
o.Visit(func(k []byte, v *fastjson.Value) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
val, e := v.StringBytes()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
switch string(k) {
|
||||
switch bytesutil.ToUnsafeString(k) {
|
||||
case "message":
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: "_msg",
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
fields, err = appendMsgFields(fields, v)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
case "timestamp":
|
||||
val, e := v.Int64()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("failed to parse timestamp for %q:%q", k, v)
|
||||
}
|
||||
if val > 0 {
|
||||
ts = val * 1e6
|
||||
}
|
||||
case "ddtags":
|
||||
// https://docs.datadoghq.com/getting_started/tagging/
|
||||
val, e := v.StringBytes()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
var pair []byte
|
||||
idx := 0
|
||||
for idx >= 0 {
|
||||
@@ -172,14 +253,22 @@ func readLogsRequest(ts int64, data []byte, processLogMessage func(int64, []logs
|
||||
}
|
||||
}
|
||||
default:
|
||||
val, e := v.StringBytes()
|
||||
if e != nil {
|
||||
err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
|
||||
return
|
||||
}
|
||||
fields = append(fields, logstorage.Field{
|
||||
Name: bytesutil.ToUnsafeString(k),
|
||||
Value: bytesutil.ToUnsafeString(val),
|
||||
})
|
||||
}
|
||||
})
|
||||
processLogMessage(ts, fields)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lmp.AddRow(ts, fields, nil)
|
||||
fields = fields[:0]
|
||||
}
|
||||
return len(records), nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package datadog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
)
|
||||
|
||||
func TestReadLogsRequestFailure(t *testing.T) {
|
||||
@@ -15,16 +13,12 @@ func TestReadLogsRequestFailure(t *testing.T) {
|
||||
|
||||
ts := time.Now().UnixNano()
|
||||
|
||||
processLogMessage := func(timestamp int64, fields []logstorage.Field) {
|
||||
t.Fatalf("unexpected call to processLogMessage with timestamp=%d, fields=%s", timestamp, fields)
|
||||
}
|
||||
|
||||
rows, err := readLogsRequest(ts, []byte(data), processLogMessage)
|
||||
if err == nil {
|
||||
lmp := &insertutils.TestLogMessageProcessor{}
|
||||
if err := readLogsRequest(ts, []byte(data), lmp); err == nil {
|
||||
t.Fatalf("expecting non-empty error")
|
||||
}
|
||||
if rows != 0 {
|
||||
t.Fatalf("unexpected non-zero rows=%d", rows)
|
||||
if err := lmp.Verify(nil, ""); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
f("foobar")
|
||||
@@ -39,30 +33,16 @@ func TestReadLogsRequestSuccess(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
ts := time.Now().UnixNano()
|
||||
var result string
|
||||
processLogMessage := func(_ int64, fields []logstorage.Field) {
|
||||
a := make([]string, len(fields))
|
||||
for i, f := range fields {
|
||||
a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
|
||||
}
|
||||
if len(result) > 0 {
|
||||
result = result + "\n"
|
||||
}
|
||||
s := "{" + strings.Join(a, ",") + "}"
|
||||
result += s
|
||||
var timestampsExpected []int64
|
||||
for i := 0; i < rowsExpected; i++ {
|
||||
timestampsExpected = append(timestampsExpected, ts)
|
||||
}
|
||||
|
||||
// Read the request without compression
|
||||
rows, err := readLogsRequest(ts, []byte(data), processLogMessage)
|
||||
if err != nil {
|
||||
lmp := &insertutils.TestLogMessageProcessor{}
|
||||
if err := readLogsRequest(ts, []byte(data), lmp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
}
|
||||
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
if err := lmp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,6 +54,12 @@ func TestReadLogsRequestSuccess(t *testing.T) {
|
||||
"hostname":"127.0.0.1",
|
||||
"message":"bar",
|
||||
"service":"test"
|
||||
}, {
|
||||
"ddsource":"nginx",
|
||||
"ddtags":"tag1:value1,tag2:value2",
|
||||
"hostname":"127.0.0.1",
|
||||
"message":{"message": "nested"},
|
||||
"service":"test"
|
||||
}, {
|
||||
"ddsource":"nginx",
|
||||
"ddtags":"tag1:value1,tag2:value2",
|
||||
@@ -106,8 +92,9 @@ func TestReadLogsRequestSuccess(t *testing.T) {
|
||||
"service":"test"
|
||||
}
|
||||
]`
|
||||
rowsExpected := 6
|
||||
rowsExpected := 7
|
||||
resultExpected := `{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"bar","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"nested","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"foobar","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"baz","service":"test"}
|
||||
{"ddsource":"nginx","tag1":"value1","tag2":"value2","hostname":"127.0.0.1","_msg":"xyz","service":"test"}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -103,7 +101,8 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor("elasticsearch_bulk")
|
||||
isGzip := r.Header.Get("Content-Encoding") == "gzip"
|
||||
n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
|
||||
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
|
||||
n, err := readBulkRequest(streamName, r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
|
||||
@@ -129,11 +128,10 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
|
||||
var (
|
||||
bulkRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)
|
||||
bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
|
||||
)
|
||||
|
||||
func readBulkRequest(r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func readBulkRequest(streamName string, r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
|
||||
|
||||
if isGzip {
|
||||
@@ -148,48 +146,29 @@ func readBulkRequest(r io.Reader, isGzip bool, timeField string, msgFields []str
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
lb := lineBufferPool.Get()
|
||||
defer lineBufferPool.Put(lb)
|
||||
|
||||
lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
|
||||
sc := bufio.NewScanner(wcr)
|
||||
sc.Buffer(lb.B, len(lb.B))
|
||||
lr := insertutils.NewLineReader(streamName, wcr)
|
||||
|
||||
n := 0
|
||||
nCheckpoint := 0
|
||||
for {
|
||||
ok, err := readBulkLine(sc, timeField, msgFields, lmp)
|
||||
ok, err := readBulkLine(lr, timeField, msgFields, lmp)
|
||||
wcr.DecConcurrency()
|
||||
if err != nil || !ok {
|
||||
rowsIngestedTotal.Add(n - nCheckpoint)
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
if batchSize := n - nCheckpoint; n >= 1000 {
|
||||
rowsIngestedTotal.Add(batchSize)
|
||||
nCheckpoint = n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var lineBufferPool bytesutil.ByteBufferPool
|
||||
|
||||
func readBulkLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
func readBulkLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
var line []byte
|
||||
|
||||
// Read the command, must be "create" or "index"
|
||||
for len(line) == 0 {
|
||||
if !sc.Scan() {
|
||||
if err := sc.Err(); err != nil {
|
||||
if errors.Is(err, bufio.ErrTooLong) {
|
||||
return false, fmt.Errorf(`cannot read "create" or "index" command, since its size exceeds -insert.maxLineSizeBytes=%d`,
|
||||
insertutils.MaxLineSizeBytes.IntN())
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
if !lr.NextLine() {
|
||||
err := lr.Err()
|
||||
return false, err
|
||||
}
|
||||
line = sc.Bytes()
|
||||
line = lr.Line
|
||||
}
|
||||
lineStr := bytesutil.ToUnsafeString(line)
|
||||
if !strings.Contains(lineStr, `"create"`) && !strings.Contains(lineStr, `"index"`) {
|
||||
@@ -197,16 +176,18 @@ func readBulkLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp i
|
||||
}
|
||||
|
||||
// Decode log message
|
||||
if !sc.Scan() {
|
||||
if err := sc.Err(); err != nil {
|
||||
if errors.Is(err, bufio.ErrTooLong) {
|
||||
return false, fmt.Errorf("cannot read log message, since its size exceeds -insert.maxLineSizeBytes=%d", insertutils.MaxLineSizeBytes.IntN())
|
||||
}
|
||||
if !lr.NextLine() {
|
||||
if err := lr.Err(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
|
||||
}
|
||||
line = sc.Bytes()
|
||||
line = lr.Line
|
||||
if len(line) == 0 {
|
||||
// Special case - the line could be too long, so it was skipped.
|
||||
// Continue parsing next lines.
|
||||
return true, nil
|
||||
}
|
||||
p := logstorage.GetJSONParser()
|
||||
if err := p.ParseLogMessage(line); err != nil {
|
||||
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
|
||||
@@ -220,7 +201,7 @@ func readBulkLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp i
|
||||
ts = time.Now().UnixNano()
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgFields, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
lmp.AddRow(ts, p.Fields, nil)
|
||||
logstorage.PutJSONParser(p)
|
||||
|
||||
return true, nil
|
||||
|
||||
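readBulkRequest and readBulkLine above consume the Elasticsearch _bulk stream as alternating command lines ("create" or "index") and JSON document lines, now read through the new LineReader. A simplified stand-in that shows the line pairing, with a payload loosely based on the test data later in this diff; it is not the real readBulkLine:

```go
// Sketch of the command/document line pairing in the _bulk format.
package main

import (
	"fmt"
	"strings"
)

func main() {
	body := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","message":"foobar"}
{"index":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
`
	lines := strings.Split(strings.TrimRight(body, "\n"), "\n")
	for i := 0; i+1 < len(lines); i += 2 {
		cmd, doc := lines[i], lines[i+1]
		if !strings.Contains(cmd, `"create"`) && !strings.Contains(cmd, `"index"`) {
			fmt.Println("unexpected command line:", cmd)
			return
		}
		fmt.Println("ingest document:", doc)
	}
}
```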
@@ -15,7 +15,7 @@ func TestReadBulkRequest_Failure(t *testing.T) {
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
rows, err := readBulkRequest(r, false, "_time", []string{"_msg"}, tlp)
|
||||
rows, err := readBulkRequest("test", r, false, "_time", []string{"_msg"}, tlp)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-empty error")
|
||||
}
|
||||
@@ -33,7 +33,7 @@ foobar`)
|
||||
}
|
||||
|
||||
func TestReadBulkRequest_Success(t *testing.T) {
|
||||
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
msgFields := []string{"non_existing_foo", msgField, "non_exiting_bar"}
|
||||
@@ -41,14 +41,14 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
|
||||
// Read the request without compression
|
||||
r := bytes.NewBufferString(data)
|
||||
rows, err := readBulkRequest(r, false, timeField, msgFields, tlp)
|
||||
rows, err := readBulkRequest("test", r, false, timeField, msgFields, tlp)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
if rows != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -56,22 +56,22 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
tlp = &insertutils.TestLogMessageProcessor{}
|
||||
compressedData := compressData(data)
|
||||
r = bytes.NewBufferString(compressedData)
|
||||
rows, err = readBulkRequest(r, true, timeField, msgFields, tlp)
|
||||
rows, err = readBulkRequest("test", r, true, timeField, msgFields, tlp)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if rows != rowsExpected {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
|
||||
if rows != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected rows read; got %d; want %d", rows, len(timestampsExpected))
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatalf("verification failure after compression: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify an empty data
|
||||
f("", "_time", "_msg", 0, nil, "")
|
||||
f("\n", "_time", "_msg", 0, nil, "")
|
||||
f("\n\n", "_time", "_msg", 0, nil, "")
|
||||
f("", "_time", "_msg", nil, "")
|
||||
f("\n", "_time", "_msg", nil, "")
|
||||
f("\n\n", "_time", "_msg", nil, "")
|
||||
|
||||
// Verify non-empty data
|
||||
data := `{"create":{"_index":"filebeat-8.8.0"}}
|
||||
@@ -85,13 +85,12 @@ func TestReadBulkRequest_Success(t *testing.T) {
|
||||
`
|
||||
timeField := "@timestamp"
|
||||
msgField := "message"
|
||||
rowsExpected := 4
|
||||
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000}
|
||||
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
|
||||
{"_msg":"baz"}
|
||||
{"_msg":"xyz","x":"y"}
|
||||
{"_msg":"qwe rty"}`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, timeField, msgField, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func compressData(s string) string {
|
||||
|
||||
@@ -41,7 +41,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
|
||||
r := &bytes.Reader{}
|
||||
for pb.Next() {
|
||||
r.Reset(dataBytes)
|
||||
_, err := readBulkRequest(r, isGzip, timeField, msgFields, blp)
|
||||
_, err := readBulkRequest("test", r, isGzip, timeField, msgFields, blp)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
|
||||
@@ -137,10 +137,12 @@ func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignore
|
||||
|
||||
// LogMessageProcessor is an interface for log message processors.
|
||||
type LogMessageProcessor interface {
|
||||
// AddRow must add row to the LogMessageProcessor with the given timestamp and the given fields.
|
||||
// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.
|
||||
//
|
||||
// If streamFields is non-nil, then the given streamFields must be used as log stream fields instead of pre-configured fields.
|
||||
//
|
||||
// The LogMessageProcessor implementation cannot hold references to fields, since the caller can re-use them.
|
||||
AddRow(timestamp int64, fields []logstorage.Field)
|
||||
AddRow(timestamp int64, fields, streamFields []logstorage.Field)
|
||||
|
||||
// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
|
||||
MustClose()
|
||||
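The LogMessageProcessor interface above gains a streamFields argument on AddRow: when non-nil it overrides the pre-configured stream fields, and implementations may not keep references to the passed slices. A minimal sketch of an implementation that follows that contract, using simplified stand-in types rather than logstorage.Field:

```go
// Minimal LogMessageProcessor-style implementation honoring the documented
// contract: copy the fields (the caller may reuse them) and prefer the
// per-row streamFields over the configured defaults when non-nil.
package main

import "fmt"

type Field struct {
	Name, Value string
}

type printProcessor struct {
	defaultStreamFields []Field
}

func (p *printProcessor) AddRow(timestamp int64, fields, streamFields []Field) {
	sf := p.defaultStreamFields
	if streamFields != nil {
		sf = streamFields // per-row stream fields override the configured ones
	}
	// Copy before retaining, since the caller can reuse the slices.
	row := append([]Field(nil), fields...)
	fmt.Printf("ts=%d stream=%v fields=%v\n", timestamp, sf, row)
}

func (p *printProcessor) MustClose() {}

func main() {
	p := &printProcessor{defaultStreamFields: []Field{{"host", "node-1"}}}
	p.AddRow(1700000000000000000, []Field{{"_msg", "hello"}}, nil)
	p.AddRow(1700000000000000001, []Field{{"_msg", "world"}}, []Field{{"app", "web"}})
	p.MustClose()
}
```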
@@ -155,7 +157,8 @@ type logMessageProcessor struct {
|
||||
cp *CommonParams
|
||||
lr *logstorage.LogRows
|
||||
|
||||
processedBytesTotal *metrics.Counter
|
||||
rowsIngestedTotal *metrics.Counter
|
||||
bytesIngestedTotal *metrics.Counter
|
||||
}
|
||||
|
||||
func (lmp *logMessageProcessor) initPeriodicFlush() {
|
||||
@@ -185,12 +188,15 @@ func (lmp *logMessageProcessor) initPeriodicFlush() {
|
||||
}
|
||||
|
||||
// AddRow adds new log message to lmp with the given timestamp and fields.
|
||||
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
//
|
||||
// If streamFields is non-nil, then it is used as log stream fields instead of the pre-configured stream fields.
|
||||
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
|
||||
lmp.mu.Lock()
|
||||
defer lmp.mu.Unlock()
|
||||
|
||||
n := getApproxJSONRowLen(fields)
|
||||
lmp.processedBytesTotal.Add(n)
|
||||
lmp.rowsIngestedTotal.Inc()
|
||||
n := logstorage.EstimatedJSONRowLen(fields)
|
||||
lmp.bytesIngestedTotal.Add(n)
|
||||
|
||||
if len(fields) > *MaxFieldsPerLine {
|
||||
rf := logstorage.RowFormatter(fields)
|
||||
@@ -199,7 +205,7 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Fiel
|
||||
return
|
||||
}
|
||||
|
||||
lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields)
|
||||
lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields, streamFields)
|
||||
if lmp.cp.Debug {
|
||||
s := lmp.lr.GetRowString(0)
|
||||
lmp.lr.ResetKeepSettings()
|
||||
@@ -212,16 +218,6 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields []logstorage.Fiel
|
||||
}
|
||||
}
|
||||
|
||||
// getApproxJSONRowLen returns an approximate length of the log entry with the given fields if represented as JSON.
|
||||
func getApproxJSONRowLen(fields []logstorage.Field) int {
|
||||
n := len("{}\n")
|
||||
n += len(`"_time":""`) + len(time.RFC3339Nano)
|
||||
for _, f := range fields {
|
||||
n += len(`,"":""`) + len(f.Name) + len(f.Value)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// flushLocked must be called under locked lmp.mu.
|
||||
func (lmp *logMessageProcessor) flushLocked() {
|
||||
lmp.lastFlushTime = time.Now()
|
||||
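The removed getApproxJSONRowLen helper above estimates the serialized row size as a fixed overhead plus per-field name and value lengths, and the diff switches the bytes-ingested counter to logstorage.EstimatedJSONRowLen. A short worked example of that arithmetic, assuming (but not asserting) that EstimatedJSONRowLen uses the same formula:

```go
// Worked example of the row-size estimate, reproducing the formula from the
// removed getApproxJSONRowLen helper.
package main

import (
	"fmt"
	"time"
)

type field struct {
	Name, Value string
}

func approxJSONRowLen(fields []field) int {
	n := len("{}\n")
	n += len(`"_time":""`) + len(time.RFC3339Nano)
	for _, f := range fields {
		n += len(`,"":""`) + len(f.Name) + len(f.Value)
	}
	return n
}

func main() {
	fields := []field{{"_msg", "hello"}, {"level", "info"}}
	// 3 + (10 + 35) + (6+4+5) + (6+5+4) = 78 bytes for this row.
	fmt.Println(approxJSONRowLen(fields))
}
```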
@@ -244,12 +240,14 @@ func (lmp *logMessageProcessor) MustClose() {
|
||||
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
|
||||
func (cp *CommonParams) NewLogMessageProcessor(protocolName string) LogMessageProcessor {
|
||||
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.ExtraFields, *defaultMsgValue)
|
||||
processedBytesTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
|
||||
rowsIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_rows_ingested_total{type=%q}", protocolName))
|
||||
bytesIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
|
||||
lmp := &logMessageProcessor{
|
||||
cp: cp,
|
||||
lr: lr,
|
||||
|
||||
processedBytesTotal: processedBytesTotal,
|
||||
rowsIngestedTotal: rowsIngestedTotal,
|
||||
bytesIngestedTotal: bytesIngestedTotal,
|
||||
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
app/vlinsert/insertutils/line_reader.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
package insertutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
|
||||
)
|
||||
|
||||
// LineReader reads newline-delimited lines from the underlying reader
|
||||
type LineReader struct {
|
||||
// Line contains the next line read after the call to NextLine
|
||||
Line []byte
|
||||
|
||||
// name is the LineReader name
|
||||
name string
|
||||
|
||||
// r is the underlying reader to read data from
|
||||
r io.Reader
|
||||
|
||||
// buf is a buffer for reading the next line
|
||||
buf []byte
|
||||
|
||||
// err is the last error when reading data from r
|
||||
err error
|
||||
|
||||
// eofReached is set to true when all the data is read from r
|
||||
eofReached bool
|
||||
}
|
||||
|
||||
// NewLineReader returns LineReader for r.
|
||||
func NewLineReader(name string, r io.Reader) *LineReader {
|
||||
return &LineReader{
|
||||
name: name,
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
// NextLine reads the next line from the underlying reader.
|
||||
//
|
||||
// It returns true if the next line is successfully read into Line.
|
||||
// If the line length exceeds MaxLineSizeBytes, then this line is skipped
|
||||
// and an empty line is returned instead.
|
||||
//
|
||||
// If false is returned, then no more lines left to read from r.
|
||||
// Check for Err in this case.
|
||||
func (lr *LineReader) NextLine() bool {
|
||||
for {
|
||||
if len(lr.buf) == 0 {
|
||||
if lr.err != nil || lr.eofReached {
|
||||
return false
|
||||
}
|
||||
if !lr.readMoreData() {
|
||||
return false
|
||||
}
|
||||
if len(lr.buf) == 0 && lr.eofReached {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
|
||||
lr.Line = append(lr.Line[:0], lr.buf[:n]...)
|
||||
lr.buf = append(lr.buf[:0], lr.buf[n+1:]...)
|
||||
return true
|
||||
}
|
||||
if lr.eofReached {
|
||||
lr.Line = append(lr.Line[:0], lr.buf...)
|
||||
lr.buf = lr.buf[:0]
|
||||
return true
|
||||
}
|
||||
if !lr.readMoreData() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the last error after NextLine call.
|
||||
func (lr *LineReader) Err() error {
|
||||
if lr.err == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%s: %s", lr.name, lr.err)
|
||||
}
|
||||
|
||||
func (lr *LineReader) readMoreData() bool {
|
||||
bufLen := len(lr.buf)
|
||||
if bufLen >= MaxLineSizeBytes.IntN() {
|
||||
logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; line contents=%q", lr.name, MaxLineSizeBytes.IntN(), lr.buf)
|
||||
tooLongLinesSkipped.Inc()
|
||||
return lr.skipUntilNextLine()
|
||||
}
|
||||
|
||||
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
|
||||
n, err := lr.r.Read(lr.buf[bufLen:])
|
||||
lr.buf = lr.buf[:bufLen+n]
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
lr.eofReached = true
|
||||
return true
|
||||
}
|
||||
lr.err = fmt.Errorf("cannot read the next line: %s", err)
|
||||
}
|
||||
return n > 0
|
||||
}
|
||||
|
||||
var tooLongLinesSkipped = metrics.NewCounter("vl_too_long_lines_skipped_total")
|
||||
|
||||
func (lr *LineReader) skipUntilNextLine() bool {
|
||||
for {
|
||||
lr.buf = slicesutil.SetLength(lr.buf, MaxLineSizeBytes.IntN())
|
||||
n, err := lr.r.Read(lr.buf)
|
||||
lr.buf = lr.buf[:n]
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
lr.eofReached = true
|
||||
lr.buf = lr.buf[:0]
|
||||
return true
|
||||
}
|
||||
lr.err = fmt.Errorf("cannot skip the current line: %s", err)
|
||||
return false
|
||||
}
|
||||
if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
|
||||
// Include \n in the buf, so too long line is replaced with an empty line.
|
||||
// This is needed for maintaining synchronization between lines
|
||||
// in protocols such as Elasticsearch bulk import.
|
||||
lr.buf = append(lr.buf[:0], lr.buf[n:]...)
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
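The new LineReader above replaces bufio.Scanner for newline-delimited ingestion payloads and skips lines longer than -insert.maxLineSizeBytes instead of failing the whole request. A usage sketch based only on the API shown above (NewLineReader, NextLine, Line, Err), assuming it is compiled inside the VictoriaMetrics module:

```go
// Usage sketch for the LineReader added in this diff.
package main

import (
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func main() {
	r := strings.NewReader("first line\nsecond line\n")
	lr := insertutils.NewLineReader("example", r)
	for lr.NextLine() {
		// lr.Line is overwritten by the next NextLine call, so copy if retained.
		fmt.Printf("line: %q\n", string(lr.Line))
	}
	if err := lr.Err(); err != nil {
		fmt.Println("read error:", err)
	}
}
```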
app/vlinsert/insertutils/line_reader_test.go (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
package insertutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLineReader_Success(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
lr := NewLineReader("foo", r)
|
||||
var lines []string
|
||||
for lr.NextLine() {
|
||||
lines = append(lines, string(lr.Line))
|
||||
}
|
||||
if err := lr.Err(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lr.NextLine() {
|
||||
t.Fatalf("expecting error on the second call to NextLine()")
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("\n", []string{""})
|
||||
f("\n\n", []string{"", ""})
|
||||
f("foo", []string{"foo"})
|
||||
f("foo\n", []string{"foo"})
|
||||
f("\nfoo", []string{"", "foo"})
|
||||
f("foo\n\n", []string{"foo", ""})
|
||||
f("foo\nbar", []string{"foo", "bar"})
|
||||
f("foo\nbar\n", []string{"foo", "bar"})
|
||||
f("\nfoo\n\nbar\n\n", []string{"", "foo", "", "bar", ""})
|
||||
}
|
||||
|
||||
func TestLineReader_SkipUntilNextLine(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
r := bytes.NewBufferString(data)
|
||||
lr := NewLineReader("foo", r)
|
||||
var lines []string
|
||||
for lr.NextLine() {
|
||||
lines = append(lines, string(lr.Line))
|
||||
}
|
||||
if err := lr.Err(); err != nil {
|
||||
t.Fatalf("unexpected error for data=%q: %s", data, err)
|
||||
}
|
||||
if lr.NextLine() {
|
||||
t.Fatalf("expecting error on the second call to NextLine()")
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines for data=%q\ngot\n%q\nwant\n%q", data, lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
|
||||
longLineLen := MaxLineSizeBytes.IntN() + overflow
|
||||
longLine := string(make([]byte, longLineLen))
|
||||
|
||||
// Single long line
|
||||
data := longLine
|
||||
f(data, nil)
|
||||
|
||||
// Multiple long lines
|
||||
data = longLine + "\n" + longLine
|
||||
f(data, []string{""})
|
||||
|
||||
data = longLine + "\n" + longLine + "\n"
|
||||
f(data, []string{"", ""})
|
||||
|
||||
// Long line in the middle
|
||||
data = "foo\n" + longLine + "\nbar"
|
||||
f(data, []string{"foo", "", "bar"})
|
||||
|
||||
// Multiple long lines in the middle
|
||||
data = "foo\n" + longLine + "\n" + longLine + "\nbar"
|
||||
f(data, []string{"foo", "", "", "bar"})
|
||||
|
||||
// Long line in the end
|
||||
data = "foo\n" + longLine
|
||||
f(data, []string{"foo"})
|
||||
|
||||
// Long line in the end
|
||||
data = "foo\n" + longLine + "\n"
|
||||
f(data, []string{"foo", ""})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineReader_Failure(t *testing.T) {
|
||||
f := func(data string, linesExpected []string) {
|
||||
t.Helper()
|
||||
|
||||
fr := &failureReader{
|
||||
r: bytes.NewBufferString(data),
|
||||
}
|
||||
lr := NewLineReader("foo", fr)
|
||||
var lines []string
|
||||
for lr.NextLine() {
|
||||
lines = append(lines, string(lr.Line))
|
||||
}
|
||||
if err := lr.Err(); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if lr.NextLine() {
|
||||
t.Fatalf("expecting error on the second call to NextLine()")
|
||||
}
|
||||
if err := lr.Err(); err == nil {
|
||||
t.Fatalf("expecting non-nil error on the second call")
|
||||
}
|
||||
if !reflect.DeepEqual(lines, linesExpected) {
|
||||
t.Fatalf("unexpected lines\ngot\n%q\nwant\n%q", lines, linesExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", nil)
|
||||
f("foo", nil)
|
||||
f("foo\n", []string{"foo"})
|
||||
f("\n", []string{""})
|
||||
f("foo\nbar", []string{"foo"})
|
||||
f("foo\nbar\n", []string{"foo", "bar"})
|
||||
f("\nfoo\nbar\n\n", []string{"", "foo", "bar", ""})
|
||||
|
||||
// long line
|
||||
longLineLen := MaxLineSizeBytes.IntN()
|
||||
for _, overflow := range []int{0, 100, MaxLineSizeBytes.IntN(), MaxLineSizeBytes.IntN() + 1, 2 * MaxLineSizeBytes.IntN()} {
|
||||
longLine := string(make([]byte, longLineLen+overflow))
|
||||
|
||||
data := longLine
|
||||
f(data, nil)
|
||||
|
||||
data = "foo\n" + longLine
|
||||
f(data, []string{"foo"})
|
||||
|
||||
data = longLine + "\nfoo"
|
||||
f(data, []string{""})
|
||||
|
||||
data = longLine + "\nfoo\n"
|
||||
f(data, []string{"", "foo"})
|
||||
}
|
||||
}
|
||||
|
||||
type failureReader struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r *failureReader) Read(p []byte) (int, error) {
|
||||
n, _ := r.r.Read(p)
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
return 0, fmt.Errorf("some error")
|
||||
}
|
||||
@@ -15,7 +15,10 @@ type TestLogMessageProcessor struct {
|
||||
}
|
||||
|
||||
// AddRow adds row with the given timestamp and fields to tlp
|
||||
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
func (tlp *TestLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
|
||||
if streamFields != nil {
|
||||
panic(fmt.Errorf("BUG: streamFields must be nil; got %v", streamFields))
|
||||
}
|
||||
tlp.timestamps = append(tlp.timestamps, timestamp)
|
||||
tlp.rows = append(tlp.rows, string(logstorage.MarshalFieldsToJSON(nil, fields)))
|
||||
}
|
||||
@@ -25,10 +28,10 @@ func (tlp *TestLogMessageProcessor) MustClose() {
|
||||
}
|
||||
|
||||
// Verify verifies the number of rows, timestamps and results after AddRow calls.
|
||||
func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected []int64, resultExpected string) error {
|
||||
func (tlp *TestLogMessageProcessor) Verify(timestampsExpected []int64, resultExpected string) error {
|
||||
result := strings.Join(tlp.rows, "\n")
|
||||
if len(tlp.rows) != rowsExpected {
|
||||
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), rowsExpected, result, resultExpected)
|
||||
if len(tlp.rows) != len(timestampsExpected) {
|
||||
return fmt.Errorf("unexpected rows read; got %d; want %d;\nrows read:\n%s\nrows wanted\n%s", len(tlp.rows), len(timestampsExpected), result, resultExpected)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tlp.timestamps, timestampsExpected) {
|
||||
@@ -45,7 +48,7 @@ func (tlp *TestLogMessageProcessor) Verify(rowsExpected int, timestampsExpected
|
||||
type BenchmarkLogMessageProcessor struct{}
|
||||
|
||||
// AddRow implements LogMessageProcessor interface.
|
||||
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _ []logstorage.Field) {
|
||||
func (blp *BenchmarkLogMessageProcessor) AddRow(_ int64, _, _ []logstorage.Field) {
|
||||
}
|
||||
|
||||
// MustClose implements LogMessageProcessor interface.
|
||||
|
||||
@@ -121,7 +121,7 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor("journald")
|
||||
n, err := parseJournaldRequest(data, lmp, cp)
|
||||
err = parseJournaldRequest(data, lmp, cp)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
@@ -129,8 +129,6 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedJournaldTotal.Add(n)
|
||||
|
||||
// update requestJournaldDuration only for successfully parsed requests
|
||||
// There is no need in updating requestJournaldDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -138,8 +136,6 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
rowsIngestedJournaldTotal = metrics.NewCounter(`vl_rows_ingested_total{type="journald"}`)
|
||||
|
||||
requestsJournaldTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/journald/upload"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/journald/upload"}`)
|
||||
|
||||
@@ -147,7 +143,7 @@ var (
|
||||
)
|
||||
|
||||
// See https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
|
||||
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) (rowsIngested int, err error) {
|
||||
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) error {
|
||||
var fields []logstorage.Field
|
||||
var ts int64
|
||||
var size uint64
|
||||
@@ -170,15 +166,14 @@ func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
rowsIngested++
|
||||
lmp.AddRow(ts, fields, nil)
|
||||
fields = fields[:0]
|
||||
}
|
||||
// skip newline separator
|
||||
data = data[1:]
|
||||
continue
|
||||
case idx < 0:
|
||||
return rowsIngested, fmt.Errorf("missing new line separator, unread data left=%d", len(data))
|
||||
return fmt.Errorf("missing new line separator, unread data left=%d", len(data))
|
||||
}
|
||||
|
||||
idx = bytes.IndexByte(line, '=')
|
||||
@@ -191,46 +186,46 @@ func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *
|
||||
} else {
|
||||
name = bytesutil.ToUnsafeString(line)
|
||||
if len(data) == 0 {
|
||||
return rowsIngested, fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
|
||||
return fmt.Errorf("unexpected zero data for binary field value of key=%s", name)
|
||||
}
|
||||
// size of binary data encoded as le i64 at the beginning
|
||||
idx, err := binary.Decode(data, binary.LittleEndian, &size)
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
|
||||
return fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
|
||||
}
|
||||
// skip binary data size
|
||||
data = data[idx:]
|
||||
if size == 0 {
|
||||
return rowsIngested, fmt.Errorf("unexpected zero binary data size decoded %d", size)
|
||||
return fmt.Errorf("unexpected zero binary data size decoded %d", size)
|
||||
}
|
||||
if int(size) > len(data) {
|
||||
return rowsIngested, fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
|
||||
return fmt.Errorf("binary data size=%d cannot exceed size of the data at buffer=%d", size, len(data))
|
||||
}
|
||||
value = bytesutil.ToUnsafeString(data[:size])
|
||||
data = data[int(size):]
|
||||
// binary data must have a new line separator for the new line or next field
|
||||
if len(data) == 0 {
|
||||
return rowsIngested, fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
|
||||
return fmt.Errorf("unexpected empty buffer after binary field=%s read", name)
|
||||
}
|
||||
lastB := data[0]
|
||||
if lastB != '\n' {
|
||||
return rowsIngested, fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
|
||||
return fmt.Errorf("expected new line separator after binary field=%s, got=%s", name, string(lastB))
|
||||
}
|
||||
data = data[1:]
|
||||
}
|
||||
// https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
|
||||
if len(name) > journaldEntryMaxNameLen {
|
||||
return rowsIngested, fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
|
||||
return fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
|
||||
}
|
||||
if !allowedJournaldEntryNameChars.MatchString(name) {
|
||||
return rowsIngested, fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
|
||||
return fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
|
||||
}
|
||||
if name == cp.TimeField {
|
||||
ts, err = strconv.ParseInt(value, 10, 64)
|
||||
n, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse Journald timestamp, %w", err)
|
||||
return fmt.Errorf("failed to parse Journald timestamp, %w", err)
|
||||
}
|
||||
ts *= 1e3
|
||||
ts = n * 1e3
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -249,8 +244,7 @@ func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
rowsIngested++
|
||||
lmp.AddRow(ts, fields, nil)
|
||||
}
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
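parseJournaldRequest above consumes the systemd Journal Export Format: entries are separated by empty lines, text fields are NAME=VALUE lines, and binary fields are NAME on its own line followed by a little-endian uint64 size and the raw value. A small illustration that builds one such entry and decodes the binary length; the values are made up and this is not the parser from the diff:

```go
// Journal Export Format illustration: one entry with two text fields and a
// binary MESSAGE field, plus decoding of the little-endian size prefix.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var entry []byte
	entry = append(entry, []byte("__REALTIME_TIMESTAMP=1729187919000000\n")...)
	entry = append(entry, []byte("PRIORITY=6\n")...)

	// Binary field: "MESSAGE" + '\n' + little-endian uint64 size + payload + '\n'.
	payload := []byte("hello\nworld")
	entry = append(entry, []byte("MESSAGE\n")...)
	entry = binary.LittleEndian.AppendUint64(entry, uint64(len(payload)))
	entry = append(entry, payload...)
	entry = append(entry, '\n')
	entry = append(entry, '\n') // empty line terminates the entry

	// Read the 8-byte size that follows "MESSAGE\n", as the parser does conceptually.
	off := len("__REALTIME_TIMESTAMP=1729187919000000\n") + len("PRIORITY=6\n") + len("MESSAGE\n")
	size := binary.LittleEndian.Uint64(entry[off : off+8])
	fmt.Printf("binary MESSAGE value has %d bytes: %q\n", size, entry[off+8:off+8+int(size)])
}
```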
@@ -14,12 +14,11 @@ func TestPushJournaldOk(t *testing.T) {
|
||||
TimeField: "__REALTIME_TIMESTAMP",
|
||||
MsgFields: []string{"MESSAGE"},
|
||||
}
|
||||
n, err := parseJournaldRequest([]byte(src), tlp, cp)
|
||||
if err != nil {
|
||||
if err := parseJournaldRequest([]byte(src), tlp, cp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -50,8 +49,7 @@ func TestPushJournald_Failure(t *testing.T) {
|
||||
TimeField: "__REALTIME_TIMESTAMP",
|
||||
MsgFields: []string{"MESSAGE"},
|
||||
}
|
||||
_, err := parseJournaldRequest([]byte(data), tlp, cp)
|
||||
if err == nil {
|
||||
if err := parseJournaldRequest([]byte(data), tlp, cp); err == nil {
|
||||
t.Fatalf("expected non nil error")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package jsonline
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -10,7 +8,6 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
@@ -53,7 +50,8 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor("jsonline")
|
||||
err = processStreamInternal(reader, cp.TimeField, cp.MsgFields, lmp)
|
||||
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
|
||||
err = processStreamInternal(streamName, reader, cp.TimeField, cp.MsgFields, lmp)
|
||||
lmp.MustClose()
|
||||
|
||||
if err != nil {
|
||||
@@ -66,20 +64,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func processStreamInternal(r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
|
||||
func processStreamInternal(streamName string, r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
|
||||
wcr := writeconcurrencylimiter.GetReader(r)
|
||||
defer writeconcurrencylimiter.PutReader(wcr)
|
||||
|
||||
lb := lineBufferPool.Get()
|
||||
defer lineBufferPool.Put(lb)
|
||||
|
||||
lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
|
||||
sc := bufio.NewScanner(wcr)
|
||||
sc.Buffer(lb.B, len(lb.B))
|
||||
lr := insertutils.NewLineReader(streamName, wcr)
|
||||
|
||||
n := 0
|
||||
for {
|
||||
ok, err := readLine(sc, timeField, msgFields, lmp)
|
||||
ok, err := readLine(lr, timeField, msgFields, lmp)
|
||||
wcr.DecConcurrency()
|
||||
if err != nil {
|
||||
errorsTotal.Inc()
|
||||
@@ -89,23 +82,17 @@ func processStreamInternal(r io.Reader, timeField string, msgFields []string, lm
|
||||
return nil
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func readLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
func readLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
|
||||
var line []byte
|
||||
for len(line) == 0 {
|
||||
if !sc.Scan() {
|
||||
if err := sc.Err(); err != nil {
|
||||
if errors.Is(err, bufio.ErrTooLong) {
|
||||
return false, fmt.Errorf(`cannot read json line, since its size exceeds -insert.maxLineSizeBytes=%d`, insertutils.MaxLineSizeBytes.IntN())
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
if !lr.NextLine() {
|
||||
err := lr.Err()
|
||||
return false, err
|
||||
}
|
||||
line = sc.Bytes()
|
||||
line = lr.Line
|
||||
}
|
||||
|
||||
p := logstorage.GetJSONParser()
|
||||
@@ -117,17 +104,13 @@ func readLine(sc *bufio.Scanner, timeField string, msgFields []string, lmp inser
|
||||
return false, fmt.Errorf("cannot get timestamp: %w", err)
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgFields, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
lmp.AddRow(ts, p.Fields, nil)
|
||||
logstorage.PutJSONParser(p)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var lineBufferPool bytesutil.ByteBufferPool
|
||||
|
||||
var (
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
|
||||
|
||||
requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/jsonline"}`)
|
||||
|
||||
|
||||
@@ -8,17 +8,17 @@ import (
|
||||
)
|
||||
|
||||
func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
msgFields := []string{msgField}
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, timeField, msgFields, tlp); err != nil {
|
||||
if err := processStreamInternal("test", r, timeField, msgFields, tlp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -29,12 +29,11 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
`
|
||||
timeField := "@timestamp"
|
||||
msgField := "message"
|
||||
rowsExpected := 3
|
||||
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000}
|
||||
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
|
||||
{"_msg":"baz"}
|
||||
{"_msg":"xyz","x":"y"}`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, timeField, msgField, timestampsExpected, resultExpected)
|
||||
|
||||
// Non-existing msgField
|
||||
data = `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
|
||||
@@ -42,11 +41,10 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
`
|
||||
timeField = "@timestamp"
|
||||
msgField = "foobar"
|
||||
rowsExpected = 2
|
||||
timestampsExpected = []int64{1686026891735000000, 1686023292735000000}
|
||||
resultExpected = `{"log.offset":"71770","log.file.path":"/var/log/auth.log","message":"foobar"}
|
||||
{"message":"baz"}`
|
||||
f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, timeField, msgField, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
@@ -55,7 +53,7 @@ func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
r := bytes.NewBufferString(data)
|
||||
if err := processStreamInternal(r, "time", nil, tlp); err == nil {
|
||||
if err := processStreamInternal("test", r, "time", nil, tlp); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,15 +54,14 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
|
||||
return
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor("loki_json")
|
||||
n, err := parseJSONRequest(data, lmp)
|
||||
useDefaultStreamFields := len(cp.StreamFields) == 0
|
||||
err = parseJSONRequest(data, lmp, useDefaultStreamFields)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse Loki json request: %s; data=%s", err, data)
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedJSONTotal.Add(n)
|
||||
|
||||
// update requestJSONDuration only for successfully parsed requests
|
||||
// There is no need in updating requestJSONDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -70,31 +69,29 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
|
||||
}
var (
|
||||
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
rowsIngestedJSONTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki_json"}`)
|
||||
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
requestsJSONTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
|
||||
)
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
|
||||
p := parserPool.Get()
|
||||
defer parserPool.Put(p)
|
||||
v, err := p.ParseBytes(data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
return fmt.Errorf("cannot parse JSON request body: %w", err)
|
||||
}
streamsV := v.Get("streams")
|
||||
if streamsV == nil {
|
||||
return 0, fmt.Errorf("missing `streams` item in the parsed JSON")
|
||||
return fmt.Errorf("missing `streams` item in the parsed JSON")
|
||||
}
|
||||
streams, err := streamsV.Array()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
|
||||
return fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
|
||||
}
|
|
||||
var commonFields []logstorage.Field
|
||||
rowsIngested := 0
|
||||
for _, stream := range streams {
|
||||
// populate common labels from `stream` dict
|
||||
commonFields = commonFields[:0]
|
||||
@@ -103,7 +100,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
if labelsV != nil {
|
||||
o, err := labelsV.Object()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
|
||||
return fmt.Errorf("`stream` item in the parsed JSON must contain an object; got %q", labelsV)
|
||||
}
|
||||
labels = o
|
||||
}
|
||||
@@ -119,37 +116,37 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("error when parsing `stream` object: %w", err)
|
||||
return fmt.Errorf("error when parsing `stream` object: %w", err)
|
||||
}
|
||||
|
||||
// populate messages from `values` array
|
||||
linesV := stream.Get("values")
|
||||
if linesV == nil {
|
||||
return rowsIngested, fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
|
||||
return fmt.Errorf("missing `values` item in the parsed `stream` object %q", stream)
|
||||
}
|
||||
lines, err := linesV.Array()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
|
||||
return fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
|
||||
}
|
||||
|
||||
fields := commonFields
|
||||
for _, line := range lines {
|
||||
lineA, err := line.Array()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
|
||||
return fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
|
||||
}
|
||||
if len(lineA) < 2 || len(lineA) > 3 {
|
||||
return rowsIngested, fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
|
||||
return fmt.Errorf("unexpected number of values in `values` item array %q; got %d want 2 or 3", line, len(lineA))
|
||||
}
|
||||
|
||||
// parse timestamp
|
||||
timestamp, err := lineA[0].StringBytes()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
|
||||
return fmt.Errorf("unexpected log timestamp type for %q; want string", lineA[0])
|
||||
}
|
||||
ts, err := parseLokiTimestamp(bytesutil.ToUnsafeString(timestamp))
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
|
||||
return fmt.Errorf("cannot parse log timestamp %q: %w", timestamp, err)
|
||||
}
|
||||
if ts == 0 {
|
||||
ts = currentTimestamp
|
||||
@@ -158,7 +155,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
// parse log message
|
||||
msg, err := lineA[1].StringBytes()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
|
||||
return fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
|
||||
}
|
||||
|
||||
fields = append(fields[:len(commonFields)], logstorage.Field{
|
||||
@@ -170,7 +167,7 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
if len(lineA) > 2 {
|
||||
structuredMetadata, err := lineA[2].Object()
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
|
||||
return fmt.Errorf("unexpected structured metadata type for %q; want JSON object", lineA[2])
|
||||
}
|
||||
|
||||
structuredMetadata.Visit(func(k []byte, v *fastjson.Value) {
|
||||
@@ -186,15 +183,18 @@ func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, er
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
|
||||
return fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
|
||||
}
|
||||
}
|
||||
lmp.AddRow(ts, fields)
|
||||
var streamFields []logstorage.Field
|
||||
if useDefaultStreamFields {
|
||||
streamFields = commonFields
|
||||
}
|
||||
lmp.AddRow(ts, fields, streamFields)
|
||||
}
|
||||
rowsIngested += len(lines)
|
||||
}
|
||||
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseLokiTimestamp(s string) (int64, error) {
|
||||
|
||||
@@ -11,12 +11,11 @@ func TestParseJSONRequest_Failure(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
if err == nil {
|
||||
if err := parseJSONRequest([]byte(s), tlp, false); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if n != 0 {
|
||||
t.Fatalf("unexpected number of parsed lines: %d; want 0", n)
|
||||
if err := tlp.Verify(nil, ""); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
f(``)
|
||||
@@ -66,11 +65,10 @@ func TestParseJSONRequest_Success(t *testing.T) {
|
||||
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
if err != nil {
|
||||
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,8 +28,7 @@ func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
data := getJSONBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseJSONRequest(data, blp)
|
||||
if err != nil {
|
||||
if err := parseJSONRequest(data, blp, false); err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,15 +45,14 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
return
|
||||
}
|
||||
lmp := cp.NewLogMessageProcessor("loki_protobuf")
|
||||
n, err := parseProtobufRequest(data, lmp)
|
||||
useDefaultStreamFields := len(cp.StreamFields) == 0
|
||||
err = parseProtobufRequest(data, lmp, useDefaultStreamFields)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedProtobufTotal.Add(n)
|
||||
|
||||
// update requestProtobufDuration only for successfully parsed requests
|
||||
// There is no need in updating requestProtobufDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -61,18 +60,17 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki_protobuf"}`)
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
|
||||
)
|
||||
|
||||
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
|
||||
bb := bytesBufPool.Get()
|
||||
defer bytesBufPool.Put(bb)
|
||||
|
||||
buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
|
||||
return fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
|
||||
}
|
||||
bb.B = buf
|
||||
|
||||
@@ -81,13 +79,12 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
|
||||
err = req.UnmarshalProtobuf(bb.B)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse request body: %w", err)
|
||||
return fmt.Errorf("cannot parse request body: %w", err)
|
||||
}
|
||||
|
||||
fields := getFields()
|
||||
defer putFields(fields)
|
||||
|
||||
rowsIngested := 0
|
||||
streams := req.Streams
|
||||
currentTimestamp := time.Now().UnixNano()
|
||||
for i := range streams {
|
||||
@@ -96,7 +93,7 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
// Labels are same for all entries in the stream.
|
||||
fields.fields, err = parsePromLabels(fields.fields[:0], stream.Labels)
|
||||
if err != nil {
|
||||
return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
|
||||
return fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
|
||||
}
|
||||
commonFieldsLen := len(fields.fields)
|
||||
|
||||
@@ -122,11 +119,14 @@ func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int
|
||||
ts = currentTimestamp
|
||||
}
|
||||
|
||||
lmp.AddRow(ts, fields.fields)
|
||||
var streamFields []logstorage.Field
|
||||
if useDefaultStreamFields {
|
||||
streamFields = fields.fields[:commonFieldsLen]
|
||||
}
|
||||
lmp.AddRow(ts, fields.fields, streamFields)
|
||||
}
|
||||
rowsIngested += len(stream.Entries)
|
||||
}
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFields() *fields {
|
||||
|
||||
@@ -15,7 +15,10 @@ type testLogMessageProcessor struct {
|
||||
pr PushRequest
|
||||
}
|
||||
|
||||
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields []logstorage.Field) {
|
||||
func (tlp *testLogMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
|
||||
if streamFields != nil {
|
||||
panic(fmt.Errorf("unexpected non-nil streamFields: %v", streamFields))
|
||||
}
|
||||
msg := ""
|
||||
for _, f := range fields {
|
||||
if f.Name == "_msg" {
|
||||
@@ -50,23 +53,21 @@ func TestParseProtobufRequest_Success(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
tlp := &testLogMessageProcessor{}
|
||||
n, err := parseJSONRequest([]byte(s), tlp)
|
||||
if err != nil {
|
||||
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if n != len(tlp.pr.Streams) {
|
||||
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), n)
|
||||
if len(tlp.pr.Streams) != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), len(timestampsExpected))
|
||||
}
|
||||
|
||||
data := tlp.pr.MarshalProtobuf(nil)
|
||||
encodedData := snappy.Encode(nil, data)
|
||||
|
||||
tlp2 := &insertutils.TestLogMessageProcessor{}
|
||||
n, err = parseProtobufRequest(encodedData, tlp2)
|
||||
if err != nil {
|
||||
if err := parseProtobufRequest(encodedData, tlp2, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp2.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp2.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,8 +31,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
body := getProtobufBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := parseProtobufRequest(body, blp)
|
||||
if err != nil {
|
||||
if err := parseProtobufRequest(body, blp, false); err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package vlinsert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
@@ -34,9 +35,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path = strings.TrimPrefix(path, "/insert")
|
||||
path = strings.ReplaceAll(path, "//", "/")
|
||||
|
||||
if path == "/jsonline" {
|
||||
switch path {
|
||||
case "/jsonline":
|
||||
jsonline.RequestHandler(w, r)
|
||||
return true
|
||||
case "/ready":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(200)
|
||||
fmt.Fprintf(w, `{"status":"ok"}`)
|
||||
return true
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(path, "/elasticsearch/"):
|
||||
|
||||
@@ -67,15 +67,14 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
lmp := cp.NewLogMessageProcessor("opentelelemtry_protobuf")
|
||||
n, err := pushProtobufRequest(data, lmp)
|
||||
useDefaultStreamFields := len(cp.StreamFields) == 0
|
||||
err = pushProtobufRequest(data, lmp, useDefaultStreamFields)
|
||||
lmp.MustClose()
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "cannot parse OpenTelemetry protobuf request: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
rowsIngestedProtobufTotal.Add(n)
|
||||
|
||||
// update requestProtobufDuration only for successfully parsed requests
|
||||
// There is no need in updating requestProtobufDuration for request errors,
|
||||
// since their timings are usually much smaller than the timing for successful request parsing.
|
||||
@@ -83,22 +82,19 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
|
||||
}
|
||||
|
||||
var (
|
||||
rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="opentelemetry_protobuf"}`)
|
||||
|
||||
requestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
|
||||
errorsTotal = metrics.NewCounter(`vl_http_errors_total{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
|
||||
|
||||
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
|
||||
)
|
||||
|
||||
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int, error) {
|
||||
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
|
||||
var req pb.ExportLogsServiceRequest
|
||||
if err := req.UnmarshalProtobuf(data); err != nil {
|
||||
errorsTotal.Inc()
|
||||
return 0, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
|
||||
return fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(data), err)
|
||||
}
|
||||
|
||||
var rowsIngested int
|
||||
var commonFields []logstorage.Field
|
||||
for _, rl := range req.ResourceLogs {
|
||||
attributes := rl.Resource.Attributes
|
||||
@@ -109,16 +105,14 @@ func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor) (int,
|
||||
}
|
||||
commonFieldsLen := len(commonFields)
|
||||
for _, sc := range rl.ScopeLogs {
|
||||
var scopeIngested int
|
||||
commonFields, scopeIngested = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp)
|
||||
rowsIngested += scopeIngested
|
||||
commonFields = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp, useDefaultStreamFields)
|
||||
}
|
||||
}
|
||||
|
||||
return rowsIngested, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor) ([]logstorage.Field, int) {
|
||||
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) []logstorage.Field {
|
||||
fields := commonFields
|
||||
for _, lr := range sc.LogRecords {
|
||||
fields = fields[:len(commonFields)]
|
||||
@@ -137,7 +131,11 @@ func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field,
|
||||
Value: lr.FormatSeverity(),
|
||||
})
|
||||
|
||||
lmp.AddRow(lr.ExtractTimestampNano(), fields)
|
||||
var streamFields []logstorage.Field
|
||||
if useDefaultStreamFields {
|
||||
streamFields = commonFields
|
||||
}
|
||||
lmp.AddRow(lr.ExtractTimestampNano(), fields, streamFields)
|
||||
}
|
||||
return fields, len(sc.LogRecords)
|
||||
return fields
|
||||
}
|
||||
|
||||
@@ -16,12 +16,11 @@ func TestPushProtoOk(t *testing.T) {
|
||||
|
||||
pData := lr.MarshalProtobuf(nil)
|
||||
tlp := &insertutils.TestLogMessageProcessor{}
|
||||
n, err := pushProtobufRequest(pData, tlp)
|
||||
if err != nil {
|
||||
if err := pushProtobufRequest(pData, tlp, false); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
|
||||
if err := tlp.Verify(n, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,8 +27,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
body := getProtobufBody(streams, rows, labels)
|
||||
for pb.Next() {
|
||||
_, err := pushProtobufRequest(body, blp)
|
||||
if err != nil {
|
||||
if err := pushProtobufRequest(body, blp, false); err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
}
|
||||
@@ -436,7 +436,6 @@ func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertut
|
||||
return fmt.Errorf("cannot read line #%d: %s", n, err)
|
||||
}
|
||||
n++
|
||||
rowsIngestedTotal.Inc()
|
||||
}
|
||||
return slr.Error()
|
||||
}
|
||||
@@ -568,7 +567,7 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
|
||||
ts = nsecs
|
||||
}
|
||||
logstorage.RenameField(p.Fields, msgFields, "_msg")
|
||||
lmp.AddRow(ts, p.Fields)
|
||||
lmp.AddRow(ts, p.Fields, nil)
|
||||
logstorage.PutSyslogParser(p)
|
||||
|
||||
return nil
|
||||
@@ -577,8 +576,6 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
|
||||
var msgFields = []string{"message"}
|
||||
|
||||
var (
|
||||
rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="syslog"}`)
|
||||
|
||||
errorsTotal = metrics.NewCounter(`vl_errors_total{type="syslog"}`)
|
||||
|
||||
udpRequestsTotal = metrics.NewCounter(`vl_udp_reqests_total{type="syslog"}`)
|
||||
|
||||
@@ -75,7 +75,7 @@ func TestSyslogLineReader_Failure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
f := func(data string, currentYear, rowsExpected int, timestampsExpected []int64, resultExpected string) {
|
||||
f := func(data string, currentYear int, timestampsExpected []int64, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
MustInit()
|
||||
@@ -89,7 +89,7 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
if err := processStreamInternal(r, "", false, tlp); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if err := tlp.Verify(rowsExpected, timestampsExpected, resultExpected); err != nil {
|
||||
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -99,12 +99,11 @@ func TestProcessStreamInternal_Success(t *testing.T) {
|
||||
48 <165>Jun 4 12:08:33 abcd systemd[345]: abc defg<123>1 2023-06-03T17:42:12.345Z mymachine.example.com appname 12345 ID47 [exampleSDID@32473 iut="3" eventSource="Application 123 = ] 56" eventID="11211"] This is a test message with structured data.
|
||||
`
|
||||
currentYear := 2023
|
||||
rowsExpected := 3
|
||||
timestampsExpected := []int64{1685794113000000000, 1685880513000000000, 1685814132345000000}
|
||||
resultExpected := `{"format":"rfc3164","hostname":"abcd","app_name":"systemd","_msg":"Starting Update the local ESM caches..."}
|
||||
{"priority":"165","facility":"20","severity":"5","format":"rfc3164","hostname":"abcd","app_name":"systemd","proc_id":"345","_msg":"abc defg"}
|
||||
{"priority":"123","facility":"15","severity":"3","format":"rfc5424","hostname":"mymachine.example.com","app_name":"appname","proc_id":"12345","msg_id":"ID47","exampleSDID@32473.iut":"3","exampleSDID@32473.eventSource":"Application 123 = ] 56","exampleSDID@32473.eventID":"11211","_msg":"This is a test message with structured data."}`
|
||||
f(data, currentYear, rowsExpected, timestampsExpected, resultExpected)
|
||||
f(data, currentYear, timestampsExpected, resultExpected)
|
||||
}
|
||||
|
||||
func TestProcessStreamInternal_Failure(t *testing.T) {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
@@ -234,5 +235,11 @@ func getJSONString(s string) string {
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error when marshaling string to JSON: %w", err))
|
||||
}
|
||||
return string(data)
|
||||
return jsonHTMLReplacer.Replace(string(data))
|
||||
}
|
||||
|
||||
var jsonHTMLReplacer = strings.NewReplacer(
|
||||
`\u003c`, "\u003c",
|
||||
`\u003e`, "\u003e",
|
||||
`\u0026`, "\u0026",
|
||||
)
|
||||
|
||||
49
app/vlselect/logsql/facets_response.qtpl
Normal file
@@ -0,0 +1,49 @@
|
||||
{% import (
|
||||
"slices"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
{% func FacetsResponse(m map[string][]facetEntry) %}
|
||||
{
|
||||
{% code
|
||||
sortedKeys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
slices.Sort(sortedKeys)
|
||||
%}
|
||||
"facets":[
|
||||
{% if len(sortedKeys) > 0 %}
|
||||
{%= facetsLine(m, sortedKeys[0]) %}
|
||||
{% for _, k := range sortedKeys[1:] %}
|
||||
,{%= facetsLine(m, k) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func facetsLine(m map[string][]facetEntry, k string) %}
|
||||
{
|
||||
"field_name":{%q= k %},
|
||||
"values":[
|
||||
{% code fes := m[k] %}
|
||||
{% if len(fes) > 0 %}
|
||||
{%= facetLine(fes[0]) %}
|
||||
{% for _, fe := range fes[1:] %}
|
||||
,{%= facetLine(fe) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func facetLine(fe facetEntry) %}
|
||||
{
|
||||
"field_value":{%q= fe.value %},
|
||||
"hits":{%s= fe.hits %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
||||
178
app/vlselect/logsql/facets_response.qtpl.go
Normal file
@@ -0,0 +1,178 @@
|
||||
// Code generated by qtc from "facets_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:1
|
||||
package logsql
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:1
|
||||
import (
|
||||
"slices"
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
func StreamFacetsResponse(qw422016 *qt422016.Writer, m map[string][]facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:7
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:10
|
||||
sortedKeys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
slices.Sort(sortedKeys)
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:15
|
||||
qw422016.N().S(`"facets":[`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:17
|
||||
if len(sortedKeys) > 0 {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:18
|
||||
streamfacetsLine(qw422016, m, sortedKeys[0])
|
||||
//line app/vlselect/logsql/facets_response.qtpl:19
|
||||
for _, k := range sortedKeys[1:] {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:19
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:20
|
||||
streamfacetsLine(qw422016, m, k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:21
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:22
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:22
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
func WriteFacetsResponse(qq422016 qtio422016.Writer, m map[string][]facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
StreamFacetsResponse(qw422016, m)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
func FacetsResponse(m map[string][]facetEntry) string {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
WriteFacetsResponse(qb422016, m)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/facets_response.qtpl:25
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:27
|
||||
func streamfacetsLine(qw422016 *qt422016.Writer, m map[string][]facetEntry, k string) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:27
|
||||
qw422016.N().S(`{"field_name":`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:29
|
||||
qw422016.N().Q(k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:29
|
||||
qw422016.N().S(`,"values":[`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:31
|
||||
fes := m[k]
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:32
|
||||
if len(fes) > 0 {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:33
|
||||
streamfacetLine(qw422016, fes[0])
|
||||
//line app/vlselect/logsql/facets_response.qtpl:34
|
||||
for _, fe := range fes[1:] {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:34
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:35
|
||||
streamfacetLine(qw422016, fe)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:36
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:37
|
||||
}
|
||||
//line app/vlselect/logsql/facets_response.qtpl:37
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
func writefacetsLine(qq422016 qtio422016.Writer, m map[string][]facetEntry, k string) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
streamfacetsLine(qw422016, m, k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
func facetsLine(m map[string][]facetEntry, k string) string {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
writefacetsLine(qb422016, m, k)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/facets_response.qtpl:40
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:42
|
||||
func streamfacetLine(qw422016 *qt422016.Writer, fe facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:42
|
||||
qw422016.N().S(`{"field_value":`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:44
|
||||
qw422016.N().Q(fe.value)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:44
|
||||
qw422016.N().S(`,"hits":`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:45
|
||||
qw422016.N().S(fe.hits)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:45
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
func writefacetLine(qq422016 qtio422016.Writer, fe facetEntry) {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
streamfacetLine(qw422016, fe)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
}
|
||||
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
func facetLine(fe facetEntry) string {
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
writefacetLine(qb422016, fe)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
return qs422016
|
||||
//line app/vlselect/logsql/facets_response.qtpl:47
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/fastjson"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
@@ -23,6 +25,82 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
)
|
||||
|
||||
// ProcessFacetsRequest handles /select/logsql/facets request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-facets
|
||||
func ProcessFacetsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
|
||||
q, tenantIDs, err := parseCommonArgs(r)
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
|
||||
limit, err := httputils.GetInt(r, "limit")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
maxValuesPerField, err := httputils.GetInt(r, "max_values_per_field")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
maxValueLen, err := httputils.GetInt(r, "max_value_len")
|
||||
if err != nil {
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
return
|
||||
}
|
||||
keepConstFields := httputils.GetBool(r, "keep_const_fields")
|
||||
|
||||
q.DropAllPipes()
|
||||
q.AddFacetsPipe(limit, maxValuesPerField, maxValueLen, keepConstFields)
|
||||
|
||||
var mLock sync.Mutex
|
||||
m := make(map[string][]facetEntry)
|
||||
writeBlock := func(_ uint, _ []int64, columns []logstorage.BlockColumn) {
|
||||
if len(columns) == 0 || len(columns[0].Values) == 0 {
|
||||
return
|
||||
}
|
||||
if len(columns) != 3 {
|
||||
logger.Panicf("BUG: expecting 3 columns; got %d columns", len(columns))
|
||||
}
|
||||
|
||||
fieldNames := columns[0].Values
|
||||
fieldValues := columns[1].Values
|
||||
hits := columns[2].Values
|
||||
|
||||
bb := blockResultPool.Get()
|
||||
for i := range fieldNames {
|
||||
fieldName := strings.Clone(fieldNames[i])
|
||||
fieldValue := strings.Clone(fieldValues[i])
|
||||
hitsStr := strings.Clone(hits[i])
|
||||
|
||||
mLock.Lock()
|
||||
m[fieldName] = append(m[fieldName], facetEntry{
|
||||
value: fieldValue,
|
||||
hits: hitsStr,
|
||||
})
|
||||
mLock.Unlock()
|
||||
}
|
||||
blockResultPool.Put(bb)
|
||||
}
|
||||
|
||||
// Execute the query
|
||||
if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
|
||||
httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write response
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteFacetsResponse(w, m)
|
||||
}
|
||||
|
||||
type facetEntry struct {
|
||||
value string
|
||||
hits string
|
||||
}
|
||||
|
||||
// ProcessHitsRequest handles /select/logsql/hits request.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
|
||||
@@ -1017,18 +1095,20 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
|
||||
}
|
||||
|
||||
// Parse optional extra_filters
|
||||
extraFilters, err := getExtraFilters(r, "extra_filters")
|
||||
extraFiltersStr := r.FormValue("extra_filters")
|
||||
extraFilters, err := parseExtraFilters(extraFiltersStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
q.AddExtraFilters(extraFilters)
|
||||
|
||||
// Parse optional extra_stream_filters
|
||||
extraStreamFilters, err := getExtraFilters(r, "extra_stream_filters")
|
||||
extraStreamFiltersStr := r.FormValue("extra_stream_filters")
|
||||
extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
q.AddExtraStreamFilters(extraStreamFilters)
|
||||
q.AddExtraFilters(extraStreamFilters)
|
||||
|
||||
return q, tenantIDs, nil
|
||||
}
|
||||
@@ -1046,15 +1126,114 @@ func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
|
||||
return nsecs, true, nil
|
||||
}
|
||||
|
||||
func getExtraFilters(r *http.Request, argName string) ([]logstorage.Field, error) {
|
||||
s := r.FormValue(argName)
|
||||
func parseExtraFilters(s string) (*logstorage.Filter, error) {
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var p logstorage.JSONParser
|
||||
if err := p.ParseLogMessage([]byte(s)); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse %s: %w", argName, err)
|
||||
if !strings.HasPrefix(s, `{"`) {
|
||||
return logstorage.ParseFilter(s)
|
||||
}
|
||||
return p.Fields, nil
|
||||
|
||||
// Extra filters in the form {"field":"value",...}.
|
||||
kvs, err := parseExtraFiltersJSON(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filters := make([]string, len(kvs))
|
||||
for i, kv := range kvs {
|
||||
if len(kv.values) == 1 {
|
||||
filters[i] = fmt.Sprintf("%q:=%q", kv.key, kv.values[0])
|
||||
} else {
|
||||
orValues := make([]string, len(kv.values))
|
||||
for j, v := range kv.values {
|
||||
orValues[j] = fmt.Sprintf("%q", v)
|
||||
}
|
||||
filters[i] = fmt.Sprintf("%q:in(%s)", kv.key, strings.Join(orValues, ","))
|
||||
}
|
||||
}
|
||||
s = strings.Join(filters, " ")
|
||||
return logstorage.ParseFilter(s)
|
||||
}
|
||||
|
||||
func parseExtraStreamFilters(s string) (*logstorage.Filter, error) {
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
if !strings.HasPrefix(s, `{"`) {
|
||||
return logstorage.ParseFilter(s)
|
||||
}
|
||||
|
||||
// Extra stream filters in the form {"field":"value",...}.
|
||||
kvs, err := parseExtraFiltersJSON(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filters := make([]string, len(kvs))
|
||||
for i, kv := range kvs {
|
||||
if len(kv.values) == 1 {
|
||||
filters[i] = fmt.Sprintf("%q=%q", kv.key, kv.values[0])
|
||||
} else {
|
||||
orValues := make([]string, len(kv.values))
|
||||
for j, v := range kv.values {
|
||||
orValues[j] = regexp.QuoteMeta(v)
|
||||
}
|
||||
filters[i] = fmt.Sprintf("%q=~%q", kv.key, strings.Join(orValues, "|"))
|
||||
}
|
||||
}
|
||||
s = "{" + strings.Join(filters, ",") + "}"
|
||||
return logstorage.ParseFilter(s)
|
||||
}
|
||||
|
||||
type extraFilter struct {
|
||||
key string
|
||||
values []string
|
||||
}
|
||||
|
||||
func parseExtraFiltersJSON(s string) ([]extraFilter, error) {
|
||||
v, err := fastjson.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o := v.GetObject()
|
||||
|
||||
var errOuter error
|
||||
var filters []extraFilter
|
||||
o.Visit(func(k []byte, v *fastjson.Value) {
|
||||
if errOuter != nil {
|
||||
return
|
||||
}
|
||||
switch v.Type() {
|
||||
case fastjson.TypeString:
|
||||
filters = append(filters, extraFilter{
|
||||
key: string(k),
|
||||
values: []string{string(v.GetStringBytes())},
|
||||
})
|
||||
case fastjson.TypeArray:
|
||||
a := v.GetArray()
|
||||
if len(a) == 0 {
|
||||
return
|
||||
}
|
||||
orValues := make([]string, len(a))
|
||||
for i, av := range a {
|
||||
ov, err := av.StringBytes()
|
||||
if err != nil {
|
||||
errOuter = fmt.Errorf("cannot obtain string item at the array for key %q; item: %s", k, av)
|
||||
return
|
||||
}
|
||||
orValues[i] = string(ov)
|
||||
}
|
||||
filters = append(filters, extraFilter{
|
||||
key: string(k),
|
||||
values: orValues,
|
||||
})
|
||||
default:
|
||||
errOuter = fmt.Errorf("unexpected type of value for key %q: %s; value: %s", k, v.Type(), v)
|
||||
}
|
||||
})
|
||||
if errOuter != nil {
|
||||
return nil, errOuter
|
||||
}
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
103
app/vlselect/logsql/logsql_test.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package logsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseExtraFilters_Success(t *testing.T) {
|
||||
f := func(s, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
f, err := parseExtraFilters(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in parseExtraFilters: %s", err)
|
||||
}
|
||||
result := f.String()
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", "")
|
||||
|
||||
// JSON string
|
||||
f(`{"foo":"bar"}`, `foo:=bar`)
|
||||
f(`{"foo":["bar","baz"]}`, `foo:in(bar,baz)`)
|
||||
f(`{"z":"=b ","c":["d","e,"],"a":[],"_msg":"x"}`, `z:="=b " c:in(d,"e,") =x`)
|
||||
|
||||
// LogsQL filter
|
||||
f(`foobar`, `foobar`)
|
||||
f(`foo:bar`, `foo:bar`)
|
||||
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `(foo:bar or foo:baz) error _time:5m {foo="bar",baz="z"}`)
|
||||
}
|
||||
|
||||
func TestParseExtraFilters_Failure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
_, err := parseExtraFilters(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// Invalid JSON
|
||||
f(`{"foo"}`)
|
||||
f(`[1,2]`)
|
||||
f(`{"foo":[1]}`)
|
||||
|
||||
// Invalid LogsQL filter
|
||||
f(`foo:(bar`)
|
||||
|
||||
// excess pipe
|
||||
f(`foo | count()`)
|
||||
}
|
||||
|
||||
func TestParseExtraStreamFilters_Success(t *testing.T) {
|
||||
f := func(s, resultExpected string) {
|
||||
t.Helper()
|
||||
|
||||
f, err := parseExtraStreamFilters(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in parseExtraStreamFilters: %s", err)
|
||||
}
|
||||
result := f.String()
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", "")
|
||||
|
||||
// JSON string
|
||||
f(`{"foo":"bar"}`, `{foo="bar"}`)
|
||||
f(`{"foo":["bar","baz"]}`, `{foo=~"bar|baz"}`)
|
||||
f(`{"z":"b","c":["d","e|\""],"a":[],"_msg":"x"}`, `{z="b",c=~"d|e\\|\"",_msg="x"}`)
|
||||
|
||||
// LogsQL filter
|
||||
f(`foobar`, `foobar`)
|
||||
f(`foo:bar`, `foo:bar`)
|
||||
f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `(foo:bar or foo:baz) error _time:5m {foo="bar",baz="z"}`)
|
||||
}
|
||||
|
||||
func TestParseExtraStreamFilters_Failure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
_, err := parseExtraStreamFilters(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
// Invalid JSON
|
||||
f(`{"foo"}`)
|
||||
f(`[1,2]`)
|
||||
f(`{"foo":[1]}`)
|
||||
|
||||
// Invalid LogsQL filter
|
||||
f(`foo:(bar`)
|
||||
|
||||
// excess pipe
|
||||
f(`foo | count()`)
|
||||
}
|
||||
@@ -177,6 +177,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
func processSelectRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, path string) bool {
|
||||
httpserver.EnableCORS(w, r)
|
||||
switch path {
|
||||
case "/select/logsql/facets":
|
||||
logsqlFacetsRequests.Inc()
|
||||
logsql.ProcessFacetsRequest(ctx, w, r)
|
||||
return true
|
||||
case "/select/logsql/field_names":
|
||||
logsqlFieldNamesRequests.Inc()
|
||||
logsql.ProcessFieldNamesRequest(ctx, w, r)
|
||||
@@ -236,6 +240,7 @@ func getMaxQueryDuration(r *http.Request) time.Duration {
|
||||
}
|
||||
|
||||
var (
|
||||
logsqlFacetsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/facets"}`)
|
||||
logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
|
||||
logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
|
||||
logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.faf86aa5.css",
|
||||
"main.js": "./static/js/main.b204330a.js",
|
||||
"main.css": "./static/css/main.fa83344e.css",
|
||||
"main.js": "./static/js/main.8ad2bc1f.js",
|
||||
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.faf86aa5.css",
|
||||
"static/js/main.b204330a.js"
|
||||
"static/css/main.fa83344e.css",
|
||||
"static/js/main.8ad2bc1f.js"
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.b204330a.js"></script><link href="./static/css/main.faf86aa5.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.8ad2bc1f.js"></script><link href="./static/css/main.fa83344e.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
1
app/vlselect/vmui/static/css/main.fa83344e.css
Normal file
File diff suppressed because one or more lines are too long
2
app/vlselect/vmui/static/js/main.8ad2bc1f.js
Normal file
File diff suppressed because one or more lines are too long
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
@@ -45,6 +46,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -77,6 +79,9 @@ var (
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check config files without running vmagent. The following files are checked: "+
|
||||
"-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig, -remoteWrite.streamAggr.config . "+
|
||||
"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed by passing -promscrape.config.strictParse=false command-line flag")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 0, "The maximum number of labels per time series to be accepted. Series with superfluous labels are ignored. In this case the vm_rows_ignored_total{reason=\"too_many_labels\"} metric at /metrics page is incremented")
|
||||
maxLabelNameLen = flag.Int("maxLabelNameLen", 0, "The maximum length of label names in the accepted time series. Series with longer label name are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_name\"} metric at /metrics page is incremented")
|
||||
maxLabelValueLen = flag.Int("maxLabelValueLen", 0, "The maximum length of label values in the accepted time series. Series with longer label value are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_value\"} metric at /metrics page is incremented")
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -93,6 +98,15 @@ var (
|
||||
)
|
||||
|
||||
func main() {
|
||||
// vmagent is optimized for reduced memory allocations,
|
||||
// so it can run with the reduced GOGC in order to reduce the used memory,
|
||||
// while keeping CPU usage spent in GC at low levels.
|
||||
//
|
||||
// Some workloads may need increased GOGC values. Then such values can be set via GOGC environment variable.
|
||||
// It is recommended increasing GOGC if go_memstats_gc_cpu_fraction metric exposed at /metrics page
|
||||
// exceeds 0.05 for extended periods of time.
|
||||
cgroup.SetGOGC(30)
|
||||
|
||||
// Write flags and help message to stdout, since it is easier to grep or pipe.
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
flag.Usage = usage
|
||||
@@ -100,6 +114,7 @@ func main() {
|
||||
remotewrite.InitSecretFlags()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
timeserieslimits.Init(*maxLabelsPerTimeseries, *maxLabelNameLen, *maxLabelValueLen)
|
||||
|
||||
if promscrape.IsDryRun() {
|
||||
if err := promscrape.CheckConfig(); err != nil {
|
||||
|
||||
@@ -7,13 +7,10 @@ import (
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
@@ -21,6 +18,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
|
||||
@@ -30,6 +28,7 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
@@ -472,6 +471,15 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnF
|
||||
rowsCountAfterRelabel := getRowsCount(tssBlock)
|
||||
rowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
|
||||
}
|
||||
if timeserieslimits.Enabled() {
|
||||
tmpBlock := tssBlock[:0]
|
||||
for _, ts := range tssBlock {
|
||||
if !timeserieslimits.IsExceeding(ts.Labels) {
|
||||
tmpBlock = append(tmpBlock, ts)
|
||||
}
|
||||
}
|
||||
tssBlock = tmpBlock
|
||||
}
|
||||
sortLabelsIfNeeded(tssBlock)
|
||||
tssBlock = limitSeriesCardinality(tssBlock)
|
||||
if sas.IsEnabled() {
|
||||
@@ -716,29 +724,14 @@ func logSkippedSeries(labels []prompbmarshal.Label, flagName string, flagValue i
|
||||
select {
|
||||
case <-logSkippedSeriesTicker.C:
|
||||
// Do not use logger.WithThrottler() here, since this will increase CPU usage
|
||||
// because every call to logSkippedSeries will result to a call to labelsToString.
|
||||
logger.Warnf("skip series %s because %s=%d reached", labelsToString(labels), flagName, flagValue)
|
||||
// because every call to logSkippedSeries will result in a call to prompbmarshal.LabelsToString.
|
||||
logger.Warnf("skip series %s because %s=%d reached", prompbmarshal.LabelsToString(labels), flagName, flagValue)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
var logSkippedSeriesTicker = time.NewTicker(5 * time.Second)
|
||||
|
||||
func labelsToString(labels []prompbmarshal.Label) string {
|
||||
var b []byte
|
||||
b = append(b, '{')
|
||||
for i, label := range labels {
|
||||
b = append(b, label.Name...)
|
||||
b = append(b, '=')
|
||||
b = strconv.AppendQuote(b, label.Value)
|
||||
if i+1 < len(labels) {
|
||||
b = append(b, ',')
|
||||
}
|
||||
}
|
||||
b = append(b, '}')
|
||||
return string(b)
|
||||
}
|
||||
|
||||
var (
|
||||
globalRowsPushedBeforeRelabel = metrics.NewCounter("vmagent_remotewrite_global_rows_pushed_before_relabel_total")
|
||||
rowsDroppedByGlobalRelabel = metrics.NewCounter("vmagent_remotewrite_global_relabel_metrics_dropped_total")
|
||||
|
||||
@@ -51,9 +51,14 @@ Examples:
|
||||
Usage: `Optional external URL to template in rule's labels or annotations.`,
|
||||
Required: false,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "loggerLevel",
|
||||
Usage: `Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC (default "ERROR").`,
|
||||
Required: false,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
if failed := unittest.UnitTest(c.StringSlice("files"), c.Bool("disableAlertgroupLabel"), c.StringSlice("external.label"), c.String("external.url")); failed {
|
||||
if failed := unittest.UnitTest(c.StringSlice("files"), c.Bool("disableAlertgroupLabel"), c.StringSlice("external.label"), c.String("external.url"), c.String("loggerLevel")); failed {
|
||||
return fmt.Errorf("unittest failed")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -46,17 +47,24 @@ var (
|
||||
testRemoteWritePath = "http://127.0.0.1" + httpListenAddr
|
||||
testHealthHTTPPath = "http://127.0.0.1" + httpListenAddr + "/health"
|
||||
|
||||
testLogLevel = "ERROR"
|
||||
disableAlertgroupLabel bool
|
||||
)
|
||||
|
||||
const (
|
||||
testStoragePath = "vmalert-unittest"
|
||||
testLogLevel = "ERROR"
|
||||
)
|
||||
|
||||
// UnitTest runs unittest for files
|
||||
func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, externalURL string) bool {
|
||||
if err := templates.Load([]string{}, true); err != nil {
|
||||
func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, externalURL, logLevel string) bool {
|
||||
if logLevel != "" {
|
||||
testLogLevel = logLevel
|
||||
}
|
||||
eu, err := url.Parse(externalURL)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to parse external URL: %w", err)
|
||||
}
|
||||
if err := templates.Load([]string{}, *eu); err != nil {
|
||||
logger.Fatalf("failed to load template: %v", err)
|
||||
}
|
||||
storagePath = filepath.Join(os.TempDir(), testStoragePath)
|
||||
|
||||
@@ -1,24 +1,14 @@
|
||||
package unittest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := templates.Load([]string{}, true); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestUnitTest_Failure(t *testing.T) {
|
||||
f := func(files []string) {
|
||||
t.Helper()
|
||||
|
||||
failed := UnitTest(files, false, nil, "")
|
||||
failed := UnitTest(files, false, nil, "", "")
|
||||
if !failed {
|
||||
t.Fatalf("expecting failed test")
|
||||
}
|
||||
@@ -33,7 +23,7 @@ func TestUnitTest_Success(t *testing.T) {
	f := func(disableGroupLabel bool, files []string, externalLabels []string, externalURL string) {
		t.Helper()

		failed := UnitTest(files, disableGroupLabel, externalLabels, externalURL)
		failed := UnitTest(files, disableGroupLabel, externalLabels, externalURL, "")
		if failed {
			t.Fatalf("unexpected failed test")
		}

@@ -16,7 +16,7 @@ import (
)

func TestMain(m *testing.M) {
	if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
	if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
		os.Exit(1)
	}
	os.Exit(m.Run())

@@ -7,7 +7,7 @@ groups:
      labels:
        label: bar
      annotations:
        summary: "{{ $value }"
        summary: "{{ }}"
        description: "{{$labels}}"
    - alert: UnkownAnnotationsFunction
      for: 5m

@@ -81,7 +81,10 @@ absolute path to all .tpl files in root.
	dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.")
)

var alertURLGeneratorFn notifier.AlertURLGenerator
var (
	alertURLGeneratorFn notifier.AlertURLGenerator
	extURL              *url.URL
)

func main() {
	// Write flags and help message to stdout, since it is easier to grep or pipe.
@@ -95,9 +98,15 @@ func main() {
	buildinfo.Init()
	logger.Init()

	err := templates.Load(*ruleTemplatesPath, true)
	var err error
	extURL, err = getExternalURL(*externalURL)
	if err != nil {
		logger.Fatalf("failed to parse %q: %s", *ruleTemplatesPath, err)
		logger.Fatalf("failed to init external.url %q: %s", *externalURL, err)
	}

	err = templates.Load(*ruleTemplatesPath, *extURL)
	if err != nil {
		logger.Fatalf("failed to load template %q: %s", *ruleTemplatesPath, err)
	}

	if *dryRun {
@@ -111,12 +120,7 @@ func main() {
		return
	}

	eu, err := getExternalURL(*externalURL)
	if err != nil {
		logger.Fatalf("failed to init `-external.url`: %s", err)
	}

	alertURLGeneratorFn, err = getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
	alertURLGeneratorFn, err = getAlertURLGenerator(extURL, *externalAlertSource, *validateTemplates)
	if err != nil {
		logger.Fatalf("failed to init `external.alert.source`: %s", err)
	}

@@ -304,7 +308,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
		}
		templated, err := alert.ExecTemplate(qFn, alert.Labels, m)
		if err != nil {
			logger.Errorf("can not exec source template %s", err)
			logger.Errorf("cannot template alert source: %s", err)
		}
		return fmt.Sprintf("%s/%s", externalURL, templated["tpl"])
	}, nil

@@ -359,7 +363,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
				logger.Errorf("failed to reload notifier config: %s", err)
				continue
			}
			err := templates.Load(*ruleTemplatesPath, false)
			err := templates.Load(*ruleTemplatesPath, *extURL)
			if err != nil {
				setConfigError(err)
				logger.Errorf("failed to load new templates: %s", err)

@@ -74,7 +74,10 @@ func TestGetAlertURLGenerator(t *testing.T) {

func TestConfigReload(t *testing.T) {
	originalRulePath := *rulePath
	originalExternalURL := extURL
	extURL = &url.URL{}
	defer func() {
		extURL = originalExternalURL
		*rulePath = originalRulePath
	}()

@@ -3,6 +3,7 @@ package main
import (
	"context"
	"math/rand"
	"net/url"
	"os"
	"strings"
	"sync"
@@ -18,7 +19,7 @@ import (
)

func TestMain(m *testing.M) {
	if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
	if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
		os.Exit(1)
	}
	os.Exit(m.Run())

@@ -127,7 +127,7 @@ func ExecTemplate(q templates.QueryFn, annotations map[string]string, tplData Al

// ValidateTemplates validate annotations for possible template error, uses empty data for template population
func ValidateTemplates(annotations map[string]string) error {
	tmpl, err := templates.Get()
	tmpl, err := templates.GetWithFuncs(nil)
	if err != nil {
		return err
	}
@@ -146,12 +146,21 @@ func templateAnnotations(annotations map[string]string, data AlertTplData, tmpl
	tData := tplData{data, externalLabels, externalURL}
	header := strings.Join(tplHeaders, "")
	for key, text := range annotations {
		// simple check to skip text without template
		if !strings.Contains(text, "{{") || !strings.Contains(text, "}}") {
			r[key] = text
			continue
		}

		buf.Reset()
		builder.Reset()
		builder.Grow(len(header) + len(text))
		builder.WriteString(header)
		builder.WriteString(text)
		if err := templateAnnotation(&buf, builder.String(), tData, tmpl, execute); err != nil {
		// clone a new template for each parse to avoid collision
		ctmpl, _ := tmpl.Clone()
		ctmpl = ctmpl.Option("missingkey=zero")
		if err := templateAnnotation(&buf, builder.String(), tData, ctmpl, execute); err != nil {
			r[key] = text
			eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
			continue

@@ -75,7 +75,13 @@ func TestAlertExecTemplate(t *testing.T) {
		Labels: map[string]string{
			"instance": "localhost",
		},
	}, map[string]string{}, map[string]string{})
	}, map[string]string{
		"summary":     "it's a test summary",
		"description": "it's a test description",
	}, map[string]string{
		"summary":     "it's a test summary",
		"description": "it's a test description",
	})

	// label-template
	f(&Alert{
@@ -93,6 +99,19 @@ func TestAlertExecTemplate(t *testing.T) {
		"description": "It is 10000 connections for localhost for more than 5m0s",
	})

	// label template override
	f(&Alert{
		Value: 1e4,
	}, map[string]string{
		"summary":     `{{- define "default.template" -}} {{ printf "summary" }} {{- end -}} {{ template "default.template" . }}`,
		"description": `{{ template "default.template" . }}`,
		"value":       `{{$value }}`,
	}, map[string]string{
		"summary":     "summary",
		"description": "",
		"value":       "10000",
	})

	// expression-template
	f(&Alert{
		Expr: `vm_rows{"label"="bar"}<0`,

@@ -7,7 +7,6 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
@@ -93,13 +92,11 @@ var (
func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (func() []Notifier, error) {
	externalURL = extURL
	externalLabels = extLabels
	eu, err := url.Parse(externalURL)
	_, err := url.Parse(externalURL)
	if err != nil {
		return nil, fmt.Errorf("failed to parse external URL: %w", err)
	}

	templates.UpdateWithFuncs(templates.FuncsWithExternalURL(eu))

	if *blackHole {
		if len(*addrs) > 0 || *configPath != "" {
			return nil, fmt.Errorf("only one of -notifier.blackhole, -notifier.url and -notifier.config flags must be specified")

@@ -1,13 +1,15 @@
package notifier

import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
	"net/url"
	"os"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
)

func TestMain(m *testing.M) {
	if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, true); err != nil {
	if err := templates.Load([]string{"testdata/templates/*good.tmpl"}, url.URL{}); err != nil {
		os.Exit(1)
	}
	os.Exit(m.Run())

@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"math"
	"net/url"
	"os"
	"sort"
	"testing"
@@ -26,7 +27,7 @@ func init() {
}

func TestMain(m *testing.M) {
	if err := templates.Load([]string{}, true); err != nil {
	if err := templates.Load([]string{}, url.URL{}); err != nil {
		fmt.Println("failed to load template for test")
		os.Exit(1)
	}

@@ -54,10 +54,9 @@ func newTemplate() *textTpl.Template {
|
||||
}
|
||||
|
||||
// Load func loads templates from multiple globs specified in pathPatterns and either
|
||||
// sets them directly to current template if it's undefined or with overwrite=true
|
||||
// or sets replacement templates and adds templates with new names to a current
|
||||
func Load(pathPatterns []string, overwrite bool) error {
|
||||
var err error
|
||||
// sets them directly to current template if it's the first init;
|
||||
// or sets replacement templates and wait for Reload() to replace current template with replacement.
|
||||
func Load(pathPatterns []string, externalURL url.URL) error {
|
||||
tmpl := newTemplate()
|
||||
for _, tp := range pathPatterns {
|
||||
p, err := doublestar.FilepathGlob(tp)
|
||||
@@ -79,36 +78,12 @@ func Load(pathPatterns []string, overwrite bool) error {
|
||||
}
|
||||
tplMu.Lock()
|
||||
defer tplMu.Unlock()
|
||||
if masterTmpl.current == nil || overwrite {
|
||||
masterTmpl.replacement = nil
|
||||
masterTmpl.current = newTemplate()
|
||||
} else {
|
||||
masterTmpl.replacement = newTemplate()
|
||||
if err = copyTemplates(tmpl, masterTmpl.replacement, overwrite); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return copyTemplates(tmpl, masterTmpl.current, overwrite)
|
||||
}
|
||||
tmpl = tmpl.Funcs(funcsWithExternalURL(externalURL))
|
||||
|
||||
func copyTemplates(from *textTpl.Template, to *textTpl.Template, overwrite bool) error {
|
||||
if from == nil {
|
||||
return nil
|
||||
}
|
||||
if to == nil {
|
||||
to = newTemplate()
|
||||
}
|
||||
tmpl, err := from.Clone()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range tmpl.Templates() {
|
||||
if to.Lookup(t.Name()) == nil || overwrite {
|
||||
to, err = to.AddParseTree(t.Name(), t.Tree)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add template %q: %w", t.Name(), err)
|
||||
}
|
||||
}
|
||||
if masterTmpl.current == nil {
|
||||
masterTmpl.current = tmpl
|
||||
} else {
|
||||
masterTmpl.replacement = tmpl
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -153,13 +128,6 @@ func datasourceMetricsToTemplateMetrics(ms []datasource.Metric) []metric {
|
||||
// for templating functions.
|
||||
type QueryFn func(query string) ([]datasource.Metric, error)
|
||||
|
||||
// UpdateWithFuncs updates existing or sets a new function map for a template
|
||||
func UpdateWithFuncs(funcs textTpl.FuncMap) {
|
||||
tplMu.Lock()
|
||||
defer tplMu.Unlock()
|
||||
masterTmpl.current = masterTmpl.current.Funcs(funcs)
|
||||
}
|
||||
|
||||
// GetWithFuncs returns a copy of current template with additional FuncMap
|
||||
// provided with funcs argument
|
||||
func GetWithFuncs(funcs textTpl.FuncMap) (*textTpl.Template, error) {
|
||||
@@ -174,13 +142,6 @@ func GetWithFuncs(funcs textTpl.FuncMap) (*textTpl.Template, error) {
|
||||
return tmpl.Funcs(funcs), nil
|
||||
}
|
||||
|
||||
// Get returns a copy of a template
|
||||
func Get() (*textTpl.Template, error) {
|
||||
tplMu.RLock()
|
||||
defer tplMu.RUnlock()
|
||||
return masterTmpl.current.Clone()
|
||||
}
|
||||
|
||||
// FuncsWithQuery returns a function map that depends on metric data
|
||||
func FuncsWithQuery(query QueryFn) textTpl.FuncMap {
|
||||
return textTpl.FuncMap{
|
||||
@@ -198,8 +159,8 @@ func FuncsWithQuery(query QueryFn) textTpl.FuncMap {
|
||||
}
|
||||
}
|
||||
|
||||
// FuncsWithExternalURL returns a function map that depends on externalURL value
|
||||
func FuncsWithExternalURL(externalURL *url.URL) textTpl.FuncMap {
|
||||
// funcsWithExternalURL returns a function map that depends on externalURL value
|
||||
func funcsWithExternalURL(externalURL url.URL) textTpl.FuncMap {
|
||||
return textTpl.FuncMap{
|
||||
"externalURL": func() string {
|
||||
return externalURL.String()
|
||||
|
||||
@@ -2,6 +2,7 @@ package templates
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
textTpl "text/template"
|
||||
@@ -152,7 +153,7 @@ func TestTemplatesLoad_Failure(t *testing.T) {
|
||||
f := func(pathPatterns []string, expectedErrStr string) {
|
||||
t.Helper()
|
||||
|
||||
err := Load(pathPatterns, false)
|
||||
err := Load(pathPatterns, url.URL{})
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
@@ -171,128 +172,17 @@ func TestTemplatesLoad_Failure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTemplatesLoad_Success(t *testing.T) {
|
||||
f := func(initialTmpl textTemplate, pathPatterns []string, overwrite bool, expectedTmpl textTemplate) {
|
||||
f := func(pathPatterns []string, expectedTmpl textTemplate) {
|
||||
t.Helper()
|
||||
|
||||
masterTmplOrig := masterTmpl
|
||||
masterTmpl = initialTmpl
|
||||
defer func() {
|
||||
masterTmpl = masterTmplOrig
|
||||
}()
|
||||
|
||||
if err := Load(pathPatterns, overwrite); err != nil {
|
||||
if err := Load(pathPatterns, url.URL{}); err != nil {
|
||||
t.Fatalf("cannot load templates: %s", err)
|
||||
}
|
||||
|
||||
if !equalTemplates(masterTmpl.replacement, expectedTmpl.replacement) {
|
||||
t.Fatalf("unexpected replacement template\ngot\n%+v\nwant\n%+v", masterTmpl.replacement, expectedTmpl.replacement)
|
||||
}
|
||||
if !equalTemplates(masterTmpl.current, expectedTmpl.current) {
|
||||
t.Fatalf("unexpected current template\ngot\n%+v\nwant\n%+v", masterTmpl.current, expectedTmpl.current)
|
||||
}
|
||||
}
|
||||
|
||||
// non existing path undefined template override
|
||||
initialTmpl := mkTemplate(nil, nil)
|
||||
pathPatterns := []string{
|
||||
"templates/non-existing/good-*.tpl",
|
||||
"templates/absent/good-*.tpl",
|
||||
}
|
||||
overwrite := true
|
||||
expectedTmpl := mkTemplate(``, nil)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
|
||||
// non existing path defined template override
|
||||
initialTmpl = mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
pathPatterns = []string{
|
||||
"templates/non-existing/good-*.tpl",
|
||||
"templates/absent/good-*.tpl",
|
||||
}
|
||||
overwrite = true
|
||||
expectedTmpl = mkTemplate(``, nil)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
|
||||
// existing path undefined template override
|
||||
initialTmpl = mkTemplate(nil, nil)
|
||||
pathPatterns = []string{
|
||||
"templates/other/nested/good0-*.tpl",
|
||||
"templates/test/good0-*.tpl",
|
||||
}
|
||||
overwrite = false
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
|
||||
// existing path defined template override
|
||||
initialTmpl = mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" "world" }}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
pathPatterns = []string{
|
||||
"templates/other/nested/good0-*.tpl",
|
||||
"templates/test/good0-*.tpl",
|
||||
}
|
||||
overwrite = false
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" "world" }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, `
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`)
|
||||
f(initialTmpl, pathPatterns, overwrite, expectedTmpl)
|
||||
}
|
||||
|
||||
func TestTemplatesReload(t *testing.T) {
|
||||
f := func(initialTmpl, expectedTmpl textTemplate) {
|
||||
t.Helper()
|
||||
|
||||
masterTmplOrig := masterTmpl
|
||||
masterTmpl = initialTmpl
|
||||
defer func() {
|
||||
masterTmpl = masterTmplOrig
|
||||
}()
|
||||
|
||||
Reload()
|
||||
|
||||
if !equalTemplates(masterTmpl.replacement, expectedTmpl.replacement) {
|
||||
@@ -303,46 +193,47 @@ func TestTemplatesReload(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// empty current and replacement templates
|
||||
f(mkTemplate(nil, nil), mkTemplate(nil, nil))
|
||||
// non existing path
|
||||
pathPatterns := []string{
|
||||
"templates/non-existing/good-*.tpl",
|
||||
"templates/absent/good-*.tpl",
|
||||
}
|
||||
expectedTmpl := mkTemplate(``, nil)
|
||||
f(pathPatterns, expectedTmpl)
|
||||
|
||||
// empty current template only
|
||||
f(mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil), mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil))
|
||||
|
||||
// empty replacement template only
|
||||
f(mkTemplate(nil, `
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`), mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "value" -}}
|
||||
{{- end -}}
|
||||
`, nil))
|
||||
|
||||
// defined both templates
|
||||
f(mkTemplate(`
|
||||
// existing path
|
||||
pathPatterns = []string{
|
||||
"templates/test/good0-*.tpl",
|
||||
}
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{- printf "value" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.2" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, nil)
|
||||
f(pathPatterns, expectedTmpl)
|
||||
|
||||
// existing path defined template override
|
||||
pathPatterns = []string{
|
||||
"templates/other/nested/good0-*.tpl",
|
||||
}
|
||||
expectedTmpl = mkTemplate(`
|
||||
{{- define "good0-test.tpl" -}}{{- end -}}
|
||||
{{- define "test.0" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "before" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`, `
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "after" -}}
|
||||
{{- define "test.3" -}}
|
||||
{{ printf "Hello %s!" externalURL }}
|
||||
{{- end -}}
|
||||
`), mkTemplate(`
|
||||
{{- define "test.1" -}}
|
||||
{{- printf "after" -}}
|
||||
{{- end -}}
|
||||
`, nil))
|
||||
`, nil)
|
||||
f(pathPatterns, expectedTmpl)
|
||||
}
|
||||
|
||||
@@ -2,11 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/rule"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRecordingToApi(t *testing.T) {
|
||||
|
||||
@@ -783,10 +783,11 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
|
||||
|
||||
func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
|
||||
uis := ac.Users
|
||||
if len(uis) == 0 && ac.UnauthorizedUser == nil {
|
||||
return nil, fmt.Errorf("Missing `users` or `unauthorized_user` sections")
|
||||
}
|
||||
byAuthToken := make(map[string]*UserInfo, len(uis))
|
||||
if len(uis) == 0 && ac.UnauthorizedUser == nil {
|
||||
// fast path for empty configuration
|
||||
return byAuthToken, nil
|
||||
}
|
||||
for i := range uis {
|
||||
ui := &uis[i]
|
||||
ats, err := getAuthTokens(ui.AuthToken, ui.BearerToken, ui.Username, ui.Password)
|
||||
|
||||
@@ -24,16 +24,10 @@ func TestParseAuthConfigFailure(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Empty config
|
||||
f(``)
|
||||
|
||||
// Invalid entry
|
||||
f(`foobar`)
|
||||
f(`foobar: baz`)
|
||||
|
||||
// Empty users
|
||||
f(`users: []`)
|
||||
|
||||
// Missing url_prefix
|
||||
f(`
|
||||
users:
|
||||
@@ -302,6 +296,12 @@ func TestParseAuthConfigSuccess(t *testing.T) {
|
||||
|
||||
insecureSkipVerifyTrue := true
|
||||
|
||||
// Empty config
|
||||
f(``, map[string]*UserInfo{})
|
||||
|
||||
// Empty users
|
||||
f(`users: []`, map[string]*UserInfo{})
|
||||
|
||||
// Single user
|
||||
f(`
|
||||
users:
|
||||
|
||||
@@ -199,7 +199,7 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
|
||||
|
||||
func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
|
||||
u := normalizeURL(r.URL)
|
||||
up, hc := ui.getURLPrefixAndHeaders(u, r.Header)
|
||||
up, hc := ui.getURLPrefixAndHeaders(u, r.Host, r.Header)
|
||||
isDefault := false
|
||||
if up == nil {
|
||||
if ui.DefaultURL == nil {
|
||||
@@ -213,7 +213,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
|
||||
missingRouteRequests.Inc()
|
||||
var di string
|
||||
if ui.DumpRequestOnErrors {
|
||||
di = debugInfo(u, r.Header)
|
||||
di = debugInfo(u, r)
|
||||
}
|
||||
httpserver.Errorf(w, r, "missing route for %q%s", u.String(), di)
|
||||
return
|
||||
@@ -668,13 +668,13 @@ func (rtb *readTrackingBody) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func debugInfo(u *url.URL, h http.Header) string {
|
||||
func debugInfo(u *url.URL, r *http.Request) string {
|
||||
s := &strings.Builder{}
|
||||
fmt.Fprintf(s, " (host: %q; ", u.Host)
|
||||
fmt.Fprintf(s, " (host: %q; ", r.Host)
|
||||
fmt.Fprintf(s, "path: %q; ", u.Path)
|
||||
fmt.Fprintf(s, "args: %q; ", u.Query().Encode())
|
||||
fmt.Fprint(s, "headers:")
|
||||
_ = h.WriteSubset(s, nil)
|
||||
_ = r.Header.WriteSubset(s, nil)
|
||||
fmt.Fprint(s, ")")
|
||||
return s.String()
|
||||
}
|
||||
|
||||
@@ -51,9 +51,9 @@ func dropPrefixParts(path string, parts int) string {
|
||||
return path
|
||||
}
|
||||
|
||||
func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL, h http.Header) (*URLPrefix, HeadersConf) {
|
||||
func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL, host string, h http.Header) (*URLPrefix, HeadersConf) {
|
||||
for _, e := range ui.URLMaps {
|
||||
if !matchAnyRegex(e.SrcHosts, u.Host) {
|
||||
if !matchAnyRegex(e.SrcHosts, host) {
|
||||
continue
|
||||
}
|
||||
if !matchAnyRegex(e.SrcPaths, u.Path) {
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestCreateTargetURLSuccess(t *testing.T) {
|
||||
t.Fatalf("cannot parse %q: %s", requestURI, err)
|
||||
}
|
||||
u = normalizeURL(u)
|
||||
up, hc := ui.getURLPrefixAndHeaders(u, nil)
|
||||
up, hc := ui.getURLPrefixAndHeaders(u, u.Host, nil)
|
||||
if up == nil {
|
||||
t.Fatalf("cannot match available backend: %s", err)
|
||||
}
|
||||
@@ -306,7 +306,7 @@ func TestUserInfoGetBackendURL_SRV(t *testing.T) {
|
||||
t.Fatalf("cannot parse %q: %s", requestURI, err)
|
||||
}
|
||||
u = normalizeURL(u)
|
||||
up, _ := ui.getURLPrefixAndHeaders(u, nil)
|
||||
up, _ := ui.getURLPrefixAndHeaders(u, u.Host, nil)
|
||||
if up == nil {
|
||||
t.Fatalf("cannot match available backend: %s", err)
|
||||
}
|
||||
@@ -384,7 +384,7 @@ func TestUserInfoGetBackendURL_SRVZeroBackends(t *testing.T) {
|
||||
t.Fatalf("cannot parse %q: %s", requestURI, err)
|
||||
}
|
||||
u = normalizeURL(u)
|
||||
up, _ := ui.getURLPrefixAndHeaders(u, nil)
|
||||
up, _ := ui.getURLPrefixAndHeaders(u, u.Host, nil)
|
||||
if up == nil {
|
||||
t.Fatalf("cannot match available backend: %s", err)
|
||||
}
|
||||
@@ -432,7 +432,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
|
||||
t.Fatalf("cannot parse %q: %s", requestURI, err)
|
||||
}
|
||||
u = normalizeURL(u)
|
||||
up, hc := ui.getURLPrefixAndHeaders(u, nil)
|
||||
up, hc := ui.getURLPrefixAndHeaders(u, u.Host, nil)
|
||||
if up != nil {
|
||||
t.Fatalf("unexpected non-empty up=%#v", up)
|
||||
}
|
||||
|
||||
@@ -4,13 +4,46 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
)
|
||||
|
||||
// StartIngestionRateLimiter starts ingestion rate limiter.
|
||||
//
|
||||
// Ingestion rate limiter must be started before Init() call.
|
||||
//
|
||||
// StopIngestionRateLimiter must be called before Stop() call in order to unblock all the callers
|
||||
// to ingestion rate limiter. Otherwise deadlock may occur at Stop() call.
|
||||
func StartIngestionRateLimiter(maxIngestionRate int) {
|
||||
if maxIngestionRate <= 0 {
|
||||
return
|
||||
}
|
||||
ingestionRateLimitReached := metrics.NewCounter(`vm_max_ingestion_rate_limit_reached_total`)
|
||||
ingestionRateLimiterStopCh = make(chan struct{})
|
||||
ingestionRateLimiter = ratelimiter.New(int64(maxIngestionRate), ingestionRateLimitReached, ingestionRateLimiterStopCh)
|
||||
}
|
||||
|
||||
// StopIngestionRateLimiter stops ingestion rate limiter.
|
||||
func StopIngestionRateLimiter() {
|
||||
if ingestionRateLimiterStopCh == nil {
|
||||
return
|
||||
}
|
||||
close(ingestionRateLimiterStopCh)
|
||||
ingestionRateLimiterStopCh = nil
|
||||
}
|
||||
|
||||
var (
|
||||
ingestionRateLimiter *ratelimiter.RateLimiter
|
||||
ingestionRateLimiterStopCh chan struct{}
|
||||
)
|
||||
|
||||
// InsertCtx contains common bits for data points insertion.
|
||||
@@ -59,7 +92,27 @@ func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompbmarshal
|
||||
return metricNameRaw[:len(metricNameRaw):len(metricNameRaw)]
|
||||
}
|
||||
|
||||
// TryPrepareLabels prepares context labels to the ingestion
|
||||
//
|
||||
// It returns false if timeseries should be skipped
|
||||
func (ctx *InsertCtx) TryPrepareLabels(hasRelabeling bool) bool {
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
return false
|
||||
}
|
||||
if timeserieslimits.Enabled() && timeserieslimits.IsExceeding(ctx.Labels) {
|
||||
return false
|
||||
}
|
||||
ctx.sortLabelsIfNeeded()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteDataPoint writes (timestamp, value) with the given prefix and labels into ctx buffer.
|
||||
//
|
||||
// caller should invoke TryPrepareLabels before using this function if needed
|
||||
func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompbmarshal.Label, timestamp int64, value float64) error {
|
||||
metricNameRaw := ctx.marshalMetricNameRaw(prefix, labels)
|
||||
return ctx.addRow(metricNameRaw, timestamp, value)
|
||||
@@ -67,6 +120,8 @@ func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompbmarshal.Label
|
||||
|
||||
// WriteDataPointExt writes (timestamp, value) with the given metricNameRaw and labels into ctx buffer.
|
||||
//
|
||||
// caller must invoke TryPrepareLabels before using this function
|
||||
//
|
||||
// It returns metricNameRaw for the given labels if len(metricNameRaw) == 0.
|
||||
func (ctx *InsertCtx) WriteDataPointExt(metricNameRaw []byte, labels []prompbmarshal.Label, timestamp int64, value float64) ([]byte, error) {
|
||||
if len(metricNameRaw) == 0 {
|
||||
@@ -149,9 +204,12 @@ func (ctx *InsertCtx) FlushBufs() error {
|
||||
}
|
||||
matchIdxsPool.Put(matchIdxs)
|
||||
}
|
||||
ingestionRateLimiter.Register(len(ctx.mrs))
|
||||
|
||||
// There is no need in limiting the number of concurrent calls to vmstorage.AddRows() here,
|
||||
// since the number of concurrent FlushBufs() calls should be already limited via writeconcurrencylimiter
|
||||
// used at every stream.Parse() call under lib/protoparser/*
|
||||
|
||||
err := vmstorage.AddRows(ctx.mrs)
|
||||
ctx.Reset(0)
|
||||
if err == nil {
|
||||
|
||||
@@ -12,8 +12,8 @@ var sortLabels = flag.Bool("sortLabels", false, `Whether to sort labels for inco
|
||||
`For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. `+
|
||||
`Enabled sorting for labels can slow down ingestion performance a bit`)
|
||||
|
||||
// SortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set
|
||||
func (ctx *InsertCtx) SortLabelsIfNeeded() {
|
||||
// sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set
|
||||
func (ctx *InsertCtx) sortLabelsIfNeeded() {
|
||||
if *sortLabels {
|
||||
sort.Sort(&ctx.Labels)
|
||||
}
|
||||
|
||||
@@ -46,14 +46,9 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -60,14 +60,9 @@ func insertRows(sketches []*datadogsketches.Sketch, extraLabels []prompbmarshal.
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
for _, p := range m.Points {
|
||||
|
||||
@@ -63,14 +63,9 @@ func insertRows(series []datadogv1.Series, extraLabels []prompbmarshal.Label) er
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
for _, pt := range ss.Points {
|
||||
|
||||
@@ -66,14 +66,9 @@ func insertRows(series []datadogv2.Series, extraLabels []prompbmarshal.Label) er
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
for _, pt := range ss.Points {
|
||||
|
||||
@@ -36,14 +36,9 @@ func insertRows(rows []parser.Row) error {
|
||||
tag := &r.Tags[j]
|
||||
ctx.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx/stream"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
@@ -69,6 +70,7 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
|
||||
ic.Reset(rowsLen)
|
||||
rowsTotal := 0
|
||||
hasRelabeling := relabel.HasRelabeling()
|
||||
hasLimitsEnabled := timeserieslimits.Enabled()
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
rowsTotal += len(r.Fields)
|
||||
@@ -108,18 +110,17 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
|
||||
metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
|
||||
ic.Labels = append(ic.Labels[:0], ctx.originLabels...)
|
||||
ic.AddLabel("", metricGroup)
|
||||
ic.ApplyRelabeling()
|
||||
if len(ic.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ic.TryPrepareLabels(true) {
|
||||
continue
|
||||
}
|
||||
ic.SortLabelsIfNeeded()
|
||||
if err := ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, f.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ic.SortLabelsIfNeeded()
|
||||
if !ic.TryPrepareLabels(false) {
|
||||
continue
|
||||
}
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
labelsLen := len(ic.Labels)
|
||||
for j := range r.Fields {
|
||||
@@ -130,9 +131,12 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
|
||||
metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
|
||||
ic.Labels = ic.Labels[:labelsLen]
|
||||
ic.AddLabel("", metricGroup)
|
||||
if len(ic.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
continue
|
||||
if hasLimitsEnabled {
|
||||
// special case for optimisation above
|
||||
// check only __name__ label value limits
|
||||
if timeserieslimits.IsExceeding(ic.Labels[len(ic.Labels)-1:]) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err := ic.WriteDataPoint(ctx.metricNameBuf, ic.Labels[len(ic.Labels)-1:], r.Timestamp, f.Value); err != nil {
|
||||
return err
|
||||
|
||||
@@ -41,8 +41,8 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -67,8 +67,9 @@ var (
|
||||
"at -opentsdbHTTPListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
|
||||
configAuthKey = flagutil.NewPassword("configAuthKey", "Authorization key for accessing /config page. It must be passed via authKey query arg. It overrides -httpAuth.*")
|
||||
reloadAuthKey = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings.")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superfluous labels are dropped. In this case the vm_metrics_with_dropped_labels_total metric at /metrics page is incremented")
|
||||
maxLabelValueLen = flag.Int("maxLabelValueLen", 4*1024, "The maximum length of label values in the accepted time series. Longer label values are truncated. In this case the vm_too_long_label_values_total metric at /metrics page is incremented")
|
||||
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 40, "The maximum number of labels per time series to be accepted. Series with superfluous labels are ignored. In this case the vm_rows_ignored_total{reason=\"too_many_labels\"} metric at /metrics page is incremented")
|
||||
maxLabelNameLen = flag.Int("maxLabelNameLen", 256, "The maximum length of label name in the accepted time series. Series with longer label name are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_name\"} metric at /metrics page is incremented")
|
||||
maxLabelValueLen = flag.Int("maxLabelValueLen", 4*1024, "The maximum length of label values in the accepted time series. Series with longer label value are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_value\"} metric at /metrics page is incremented")
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -87,8 +88,6 @@ var staticServer = http.FileServer(http.FS(staticFiles))
|
||||
func Init() {
|
||||
relabel.Init()
|
||||
vminsertCommon.InitStreamAggr()
|
||||
storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)
|
||||
storage.SetMaxLabelValueLen(*maxLabelValueLen)
|
||||
common.StartUnmarshalWorkers()
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, *graphiteUseProxyProtocol, graphite.InsertHandler)
|
||||
@@ -105,6 +104,7 @@ func Init() {
|
||||
promscrape.Init(func(_ *auth.Token, wr *prompbmarshal.WriteRequest) {
|
||||
prompush.Push(wr)
|
||||
})
|
||||
timeserieslimits.Init(*maxLabelsPerTimeseries, *maxLabelNameLen, *maxLabelValueLen)
|
||||
}
|
||||
|
||||
// Stop stops vminsert.
|
||||
@@ -439,14 +439,4 @@ var (
|
||||
promscrapeStatusConfigRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/status/config"}`)
|
||||
|
||||
promscrapeConfigReloadRequests = metrics.NewCounter(`vm_http_requests_total{path="/-/reload"}`)
|
||||
|
||||
_ = metrics.NewGauge(`vm_metrics_with_dropped_labels_total`, func() float64 {
|
||||
return float64(storage.MetricsWithDroppedLabels.Load())
|
||||
})
|
||||
_ = metrics.NewGauge(`vm_too_long_label_names_total`, func() float64 {
|
||||
return float64(storage.TooLongLabelNames.Load())
|
||||
})
|
||||
_ = metrics.NewGauge(`vm_too_long_label_values_total`, func() float64 {
|
||||
return float64(storage.TooLongLabelValues.Load())
|
||||
})
|
||||
)
|
||||
|
||||
@@ -55,14 +55,9 @@ func insertRows(block *stream.Block, extraLabels []prompbmarshal.Label) error {
|
||||
label := &extraLabels[j]
|
||||
ic.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ic.ApplyRelabeling()
|
||||
}
|
||||
if len(ic.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ic.TryPrepareLabels(hasRelabeling) {
|
||||
return nil
|
||||
}
|
||||
ic.SortLabelsIfNeeded()
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
values := block.Values
|
||||
timestamps := block.Timestamps
|
||||
@@ -71,7 +66,9 @@ func insertRows(block *stream.Block, extraLabels []prompbmarshal.Label) error {
|
||||
}
|
||||
for j, value := range values {
|
||||
timestamp := timestamps[j]
|
||||
if err := ic.WriteDataPoint(ctx.metricNameBuf, nil, timestamp, value); err != nil {
|
||||
// TODO: @f41gh7 looks like it's better to use WriteDataPointExt
|
||||
// since metricName never changes inside insertRows call
|
||||
if err := ic.WriteDataPoint(ctx.metricNameBuf, ic.Labels, timestamp, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,14 +58,9 @@ func insertRows(rows []newrelic.Row, extraLabels []prompbmarshal.Label) error {
|
||||
label := &extraLabels[k]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, s.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -59,14 +59,9 @@ func insertRows(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Labe
|
||||
for _, label := range extraLabels {
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
samples := ts.Samples
|
||||
|
||||
@@ -36,14 +36,9 @@ func insertRows(rows []parser.Row) error {
|
||||
tag := &r.Tags[j]
|
||||
ctx.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -54,14 +54,9 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -54,14 +54,9 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -57,12 +57,9 @@ func push(ctx *common.InsertCtx, tss []prompbmarshal.TimeSeries) {
|
||||
label := &ts.Labels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
ctx.ApplyRelabeling()
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ctx.TryPrepareLabels(false) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
for i := range ts.Samples {
|
||||
|
||||
@@ -52,14 +52,10 @@ func insertRows(timeseries []prompb.TimeSeries, extraLabels []prompbmarshal.Labe
|
||||
label := &extraLabels[j]
|
||||
ctx.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ctx.ApplyRelabeling()
|
||||
}
|
||||
if len(ctx.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
|
||||
if !ctx.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ctx.SortLabelsIfNeeded()
|
||||
var metricNameRaw []byte
|
||||
var err error
|
||||
samples := ts.Samples
|
||||
|
||||
@@ -58,14 +58,9 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
|
||||
label := &extraLabels[j]
|
||||
ic.AddLabel(label.Name, label.Value)
|
||||
}
|
||||
if hasRelabeling {
|
||||
ic.ApplyRelabeling()
|
||||
}
|
||||
if len(ic.Labels) == 0 {
|
||||
// Skip metric without labels.
|
||||
if !ic.TryPrepareLabels(hasRelabeling) {
|
||||
continue
|
||||
}
|
||||
ic.SortLabelsIfNeeded()
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
values := r.Values
|
||||
timestamps := r.Timestamps
|
||||
|
||||
@@ -699,8 +699,13 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
	// Extend dstValues in order to remove mallocs below.
	dstValues = decimal.ExtendFloat64sCapacity(dstValues, len(rc.Timestamps))

	scrapeInterval := getScrapeInterval(timestamps, rc.Step)
	maxPrevInterval := getMaxPrevInterval(scrapeInterval)
	// Use step as the scrape interval for instant queries (when start == end).
	maxPrevInterval := rc.Step
	if rc.Start < rc.End {
		scrapeInterval := getScrapeInterval(timestamps, rc.Step)
		maxPrevInterval = getMaxPrevInterval(scrapeInterval)
	}

	if rc.LookbackDelta > 0 && maxPrevInterval > rc.LookbackDelta {
		maxPrevInterval = rc.LookbackDelta
	}

@@ -1,13 +1,13 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.b1929c64.css",
|
||||
"main.js": "./static/js/main.a7d57628.js",
|
||||
"main.css": "./static/css/main.876c56b7.css",
|
||||
"main.js": "./static/js/main.caf36c39.js",
|
||||
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
|
||||
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.b1929c64.css",
|
||||
"static/js/main.a7d57628.js"
|
||||
"static/css/main.876c56b7.css",
|
||||
"static/js/main.caf36c39.js"
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.a7d57628.js"></script><link href="./static/css/main.b1929c64.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.caf36c39.js"></script><link href="./static/css/main.876c56b7.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
app/vmselect/vmui/static/css/main.876c56b7.css (new file; diff suppressed because one or more lines are too long)
app/vmselect/vmui/static/js/main.caf36c39.js (new file; diff suppressed because one or more lines are too long)
@@ -1,4 +1,4 @@
FROM golang:1.23.3 AS build-web-stage
FROM golang:1.23.4 AS build-web-stage
COPY build /build

WORKDIR /build
@@ -6,7 +6,7 @@ COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
    GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/

FROM alpine:3.20.3
FROM alpine:3.21.0
USER root

COPY --from=build-web-stage /build/web-amd64 /app/web

@@ -48,3 +48,11 @@ export interface LogHits {
|
||||
[key: string]: string;
|
||||
};
|
||||
}
|
||||
|
||||
export interface ReportMetaData {
|
||||
id: number;
|
||||
title: string;
|
||||
endpoint: string;
|
||||
comment: string;
|
||||
params: Record<string, string>;
|
||||
}
|
||||
|
||||
@@ -43,7 +43,8 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
const { isMobile } = useDeviceDetect();
|
||||
|
||||
const [openAutocomplete, setOpenAutocomplete] = useState(false);
|
||||
const [caretPosition, setCaretPosition] = useState<[number, number]>([0, 0]);
|
||||
const [caretPositionAutocomplete, setCaretPositionAutocomplete] = useState<[number, number]>([0, 0]);
|
||||
const [caretPositionInput, setCaretPositionInput] = useState<[number, number]>([0, 0]);
|
||||
const autocompleteAnchorEl = useRef<HTMLInputElement>(null);
|
||||
|
||||
const [showAutocomplete, setShowAutocomplete] = useState(autocomplete);
|
||||
@@ -66,7 +67,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
|
||||
const handleSelect = (val: string, caretPosition: number) => {
|
||||
onChange(val);
|
||||
setCaretPosition([caretPosition, caretPosition]);
|
||||
setCaretPositionInput([caretPosition, caretPosition]);
|
||||
};
|
||||
|
||||
const handleKeyDown = (e: KeyboardEvent) => {
|
||||
@@ -108,7 +109,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
};
|
||||
|
||||
const handleChangeCaret = (val: [number, number]) => {
|
||||
setCaretPosition(prev => prev[0] === val[0] && prev[1] === val[1] ? prev : val);
|
||||
setCaretPositionAutocomplete(prev => prev[0] === val[0] && prev[1] === val[1] ? prev : val);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
@@ -118,7 +119,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
useEffect(() => {
|
||||
setShowAutocomplete(false);
|
||||
debouncedSetShowAutocomplete(true);
|
||||
}, [caretPosition]);
|
||||
}, [caretPositionAutocomplete]);
|
||||
|
||||
return (
|
||||
<div
|
||||
@@ -137,13 +138,13 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
onChangeCaret={handleChangeCaret}
|
||||
disabled={disabled}
|
||||
inputmode={"search"}
|
||||
caretPosition={caretPosition}
|
||||
caretPosition={caretPositionInput}
|
||||
/>
|
||||
{showAutocomplete && autocomplete && (
|
||||
<QueryEditorAutocomplete
|
||||
value={value}
|
||||
anchorEl={autocompleteAnchorEl}
|
||||
caretPosition={caretPosition}
|
||||
caretPosition={caretPositionAutocomplete}
|
||||
hasHelperText={Boolean(warning || error)}
|
||||
includeFunctions={includeFunctions}
|
||||
onSelect={handleSelect}
|
||||
|
||||
@@ -38,6 +38,10 @@
|
||||
align-items: flex-start;
|
||||
gap: $padding-small;
|
||||
|
||||
ul {
|
||||
list-style-position: inside;
|
||||
}
|
||||
|
||||
button {
|
||||
color: inherit;
|
||||
min-height: 29px;
|
||||
|
||||
@@ -19,6 +19,10 @@ const Accordion: FC<AccordionProps> = ({
|
||||
const [isOpen, setIsOpen] = useState(defaultExpanded);
|
||||
|
||||
const toggleOpen = () => {
|
||||
const selection = window.getSelection();
|
||||
if (selection && selection.toString()) {
|
||||
return; // If the text is selected, cancel the execution of toggle.
|
||||
}
|
||||
setIsOpen(prev => !prev);
|
||||
};
|
||||
|
||||
|
||||
@@ -46,6 +46,8 @@
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
align-self: flex-start;
|
||||
min-height: 24px;
|
||||
}
|
||||
|
||||
&__content {
|
||||
|
||||
@@ -570,3 +570,14 @@ export const SpinnerIcon = () => (
|
||||
</path>
|
||||
</svg>
|
||||
);
|
||||
|
||||
export const CommentIcon = () => (
|
||||
<svg
|
||||
viewBox="0 0 24 24"
|
||||
fill="currentColor"
|
||||
>
|
||||
<path
|
||||
d="M21.99 4c0-1.1-.89-2-1.99-2H4c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h14l4 4zM18 14H6v-2h12zm0-3H6V9h12zm0-3H6V6h12z"
|
||||
></path>
|
||||
</svg>
|
||||
);
|
||||
|
||||
@@ -0,0 +1,62 @@
|
||||
import React, { FC } from "preact/compat";
|
||||
import useBoolean from "../../../hooks/useBoolean";
|
||||
import classNames from "classnames";
|
||||
import TextField from "../TextField/TextField";
|
||||
import "./style.scss";
|
||||
import { marked } from "marked";
|
||||
|
||||
interface Props {
|
||||
value: string;
|
||||
onChange: (value: string) => void;
|
||||
}
|
||||
|
||||
const tabs = [
|
||||
{ title: "Write", value: false },
|
||||
{ title: "Preview", value: true },
|
||||
];
|
||||
|
||||
const MarkdownEditor: FC<Props> = ({ value, onChange }) => {
|
||||
const {
|
||||
value: markdownPreview,
|
||||
setTrue: setMarkdownPreviewTrue,
|
||||
setFalse: setMarkdownPreviewFalse,
|
||||
} = useBoolean(false);
|
||||
|
||||
return (
|
||||
<div className="vm-markdown-editor">
|
||||
<div className="vm-markdown-editor-header">
|
||||
<div className="vm-markdown-editor-header-tabs">
|
||||
{tabs.map(({ title, value }) => (
|
||||
<div
|
||||
key={title}
|
||||
className={classNames({
|
||||
"vm-markdown-editor-header-tabs__tab": true,
|
||||
"vm-markdown-editor-header-tabs__tab_active": markdownPreview === value,
|
||||
})}
|
||||
onClick={value ? setMarkdownPreviewTrue : setMarkdownPreviewFalse}
|
||||
>
|
||||
{title}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
<span className="vm-markdown-editor-header__info">
|
||||
Markdown is supported
|
||||
</span>
|
||||
</div>
|
||||
{markdownPreview ? (
|
||||
<div
|
||||
className="vm-markdown-editor-preview vm-markdown"
|
||||
dangerouslySetInnerHTML={{ __html: marked(value) as string }}
|
||||
/>
|
||||
) : (
|
||||
<TextField
|
||||
type="textarea"
|
||||
value={value}
|
||||
onChange={onChange}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default MarkdownEditor;
|
||||
@@ -0,0 +1,75 @@
|
||||
@use "src/styles/variables" as *;
|
||||
|
||||
.vm-markdown-editor {
|
||||
margin-top: 6px;
|
||||
padding: 0 6px;
|
||||
border-radius: $border-radius-small;
|
||||
border: $border-divider;
|
||||
overflow: hidden;
|
||||
|
||||
&-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
background-color: $color-hover-black;
|
||||
padding-right: $padding-global;
|
||||
border-bottom: $border-divider;
|
||||
margin: -1px -7px 6px;
|
||||
|
||||
&-tabs {
|
||||
display: flex;
|
||||
|
||||
&__tab {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
margin-bottom: -1px;
|
||||
padding: $padding-small $padding-large;
|
||||
min-height: 40px;
|
||||
color: $color-text-secondary;
|
||||
transition: color 0.3s;
|
||||
cursor: pointer;
|
||||
|
||||
&:hover {
|
||||
color: $color-text;
|
||||
}
|
||||
|
||||
&_active {
|
||||
position: relative;
|
||||
color: $color-text;
|
||||
background-color: $color-background-body;
|
||||
border-top-right-radius: $border-radius-small;
|
||||
border-top-left-radius: $border-radius-small;
|
||||
z-index: 1;
|
||||
|
||||
&:first-child {
|
||||
border-right: $border-divider;
|
||||
}
|
||||
|
||||
&:last-child {
|
||||
border-right: $border-divider;
|
||||
border-left: $border-divider;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
&__info {
|
||||
margin-left: auto;
|
||||
margin-right: 0;
|
||||
color: $color-text-secondary;
|
||||
font-size: $font-size-small;
|
||||
font-weight: 500;
|
||||
}
|
||||
}
|
||||
|
||||
&-preview {
|
||||
padding: $padding-small;
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
|
||||
&-preview,
|
||||
textarea {
|
||||
min-height: 200px;
|
||||
resize: vertical;
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
import React, {
|
||||
FC,
|
||||
useEffect,
|
||||
useState,
|
||||
useRef,
|
||||
useMemo,
|
||||
FormEvent,
|
||||
@@ -65,7 +64,6 @@ const TextField: FC<TextFieldProps> = ({
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const textareaRef = useRef<HTMLTextAreaElement>(null);
|
||||
const fieldRef = useMemo(() => type === "textarea" ? textareaRef : inputRef, [type]);
|
||||
const [selectionPos, setSelectionPos] = useState<[start: number, end: number]>([0, 0]);
|
||||
|
||||
const inputClasses = classNames({
|
||||
"vm-text-field__input": true,
|
||||
@@ -77,8 +75,9 @@ const TextField: FC<TextFieldProps> = ({
|
||||
});
|
||||
|
||||
const updateCaretPosition = (target: HTMLInputElement | HTMLTextAreaElement) => {
|
||||
if (!onChangeCaret) return;
|
||||
const { selectionStart, selectionEnd } = target;
|
||||
setSelectionPos([selectionStart || 0, selectionEnd || 0]);
|
||||
onChangeCaret && onChangeCaret([selectionStart || 0, selectionEnd || 0]);
|
||||
};
|
||||
|
||||
const handleMouseUp = (e: MouseEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||
@@ -127,14 +126,6 @@ const TextField: FC<TextFieldProps> = ({
|
||||
fieldRef?.current?.focus && fieldRef.current.focus();
|
||||
}, [fieldRef, autofocus]);
|
||||
|
||||
useEffect(() => {
|
||||
onChangeCaret && onChangeCaret(selectionPos);
|
||||
}, [selectionPos]);
|
||||
|
||||
useEffect(() => {
|
||||
setSelectionRange(selectionPos);
|
||||
}, [value]);
|
||||
|
||||
useEffect(() => {
|
||||
caretPosition && setSelectionRange(caretPosition);
|
||||
}, [caretPosition]);
|
||||
|
||||
@@ -16,17 +16,20 @@ const UploadJsonButtons: FC<Props> = ({ onOpenModal, onChange }) => (
|
||||
>
|
||||
Paste JSON
|
||||
</Button>
|
||||
<Button>
|
||||
Upload Files
|
||||
<div className="vm-upload-json-buttons__upload">
|
||||
<Button>
|
||||
Upload Files
|
||||
</Button>
|
||||
<input
|
||||
id="json"
|
||||
name="json"
|
||||
type="file"
|
||||
accept="application/json"
|
||||
multiple
|
||||
title=" "
|
||||
onChange={onChange}
|
||||
/>
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.