Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 08:36:55 +03:00)

Compare commits: 58 commits, weakpointe...v1.79.4
| SHA1 |
|---|
| b4f4eaf710 |
| 7d4101931c |
| 70a579b725 |
| f6211309c5 |
| f088162c8a |
| 7115b8610a |
| 4fc3495bed |
| 00e55947e4 |
| 9b4ebf95b8 |
| f68333a8ce |
| 7543bdfd54 |
| 2db5ec5509 |
| 5839112cda |
| dca89c7d2f |
| 64c0133b88 |
| dbf0ef5b38 |
| 4ef2d46b8b |
| dd46521676 |
| a037180167 |
| 32cbc0f497 |
| e7119de7f7 |
| d54cf15478 |
| 56d7f3e37b |
| cd422a5435 |
| c35b63cd0c |
| 3e2b434bad |
| 240acdf3b7 |
| a9c5766ebc |
| 0aa41430dd |
| 992f36702f |
| 86d85591a4 |
| e8b1131f97 |
| 2838ee93c6 |
| 72342939d6 |
| e3fd90e35e |
| e690bdda09 |
| 0a8fdc5b6a |
| fc0edfab11 |
| d3b38ddb2e |
| 056960102a |
| aef7b33867 |
| 095933eef8 |
| d335436b9a |
| d77455a485 |
| e3f8796e90 |
| 33268b261e |
| 2426695571 |
| 7e9794cf9f |
| 05356d2a49 |
| b161fc46dc |
| 8f74f1bc91 |
| 0bbe842c31 |
| 0d5a025934 |
| 5b0b4e1078 |
| c28aba604d |
| 8afcc01582 |
| 208a63e045 |
| 5df6790daf |
Makefile
@@ -16,6 +16,7 @@ GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TA
include app/*/Makefile
include deployment/*/Makefile
include snap/local/Makefile
include package/release/Makefile

all: \
victoria-metrics-prod \

@@ -56,10 +56,12 @@ var (

awsUseSigv4 = flagutil.NewArrayBool("remoteWrite.aws.useSigv4", "Enables SigV4 request signing for the corresponding -remoteWrite.url. "+
"It is expected that other -remoteWrite.aws.* command-line flags are set if sigv4 request signing is enabled")
awsRegion = flagutil.NewArray("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsRoleARN = flagutil.NewArray("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsAccessKey = flagutil.NewArray("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsService = flagutil.NewArray("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
awsEC2Endpoint = flagutil.NewArray("remoteWrite.aws.ec2Endpoint", "Optional AWS EC2 API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsSTSEndpoint = flagutil.NewArray("remoteWrite.aws.stsEndpoint", "Optional AWS STS API endpoint to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsRegion = flagutil.NewArray("remoteWrite.aws.region", "Optional AWS region to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsRoleARN = flagutil.NewArray("remoteWrite.aws.roleARN", "Optional AWS roleARN to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsAccessKey = flagutil.NewArray("remoteWrite.aws.accessKey", "Optional AWS AccessKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
awsService = flagutil.NewArray("remoteWrite.aws.service", "Optional AWS Service to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set. "+
"Defaults to \"aps\"")
awsSecretKey = flagutil.NewArray("remoteWrite.aws.secretKey", "Optional AWS SecretKey to use for the corresponding -remoteWrite.url if -remoteWrite.aws.useSigv4 is set")
)
@@ -154,6 +156,9 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
c.packetsDropped = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_packets_dropped_total{url=%q}`, c.sanitizedURL))
c.retriesCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.sanitizedURL))
c.sendDuration = metrics.GetOrCreateFloatCounter(fmt.Sprintf(`vmagent_remotewrite_send_duration_seconds_total{url=%q}`, c.sanitizedURL))
metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_queues{url=%q}`, c.sanitizedURL), func() float64 {
return float64(*queues)
})
for i := 0; i < concurrency; i++ {
c.wg.Add(1)
go func() {
@@ -231,12 +236,14 @@ func getAWSAPIConfig(argIdx int) (*awsapi.Config, error) {
if !awsUseSigv4.GetOptionalArg(argIdx) {
return nil, nil
}
ec2Endpoint := awsEC2Endpoint.GetOptionalArg(argIdx)
stsEndpoint := awsSTSEndpoint.GetOptionalArg(argIdx)
region := awsRegion.GetOptionalArg(argIdx)
roleARN := awsRoleARN.GetOptionalArg(argIdx)
accessKey := awsAccessKey.GetOptionalArg(argIdx)
secretKey := awsSecretKey.GetOptionalArg(argIdx)
service := awsService.GetOptionalArg(argIdx)
cfg, err := awsapi.NewConfig(region, roleARN, accessKey, secretKey, service)
cfg, err := awsapi.NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey, service)
if err != nil {
return nil, err
}

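Each -remoteWrite.aws.* flag in the hunk above is an array flag, so the value at position argIdx pairs with the matching -remoteWrite.url. A rough sketch of that per-index lookup; the fallback behavior of getOptionalArg here is an assumption for illustration, not flagutil's exact semantics:

```go
package main

import "fmt"

// getOptionalArg is a hypothetical stand-in for the per-URL lookup: it returns
// the value at idx if present, the single shared value if only one was supplied,
// or an empty string otherwise.
func getOptionalArg(values []string, idx int) string {
	if idx < len(values) {
		return values[idx]
	}
	if len(values) == 1 {
		return values[0]
	}
	return ""
}

func main() {
	// Two -remoteWrite.url targets, each with its own region, but a single shared roleARN.
	regions := []string{"us-east-1", "eu-west-1"}
	roleARNs := []string{"arn:aws:iam::123456789012:role/push-metrics"}
	for argIdx := 0; argIdx < 2; argIdx++ {
		fmt.Printf("url #%d: region=%q roleARN=%q\n",
			argIdx, getOptionalArg(regions, argIdx), getOptionalArg(roleARNs, argIdx))
	}
}
```
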
@@ -637,11 +637,13 @@ The shortlist of configuration flags is the following:
-datasource.oauth2.tokenUrl string
Optional OAuth2 tokenURL to use for -datasource.url.
-datasource.queryStep duration
queryStep defines how far a value can fallback to when evaluating queries. For example, if datasource.queryStep=15s then param "step" with value "15s" will be added to every query.If queryStep isn't specified, rule's evaluationInterval will be used instead.
How far a value can fallback to when evaluating queries. For example, if -datasource.queryStep=15s then param "step" with value "15s" will be added to every query. If set to 0, rule's evaluation interval will be used instead. (default 5m0s)
-datasource.queryTimeAlignment
Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
-datasource.roundDigits int
Adds "round_digits" GET param to datasource requests. In VM "round_digits" limits the number of digits after the decimal point in response values.
-datasource.showURL
Whether to show -datasource.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
-datasource.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -datasource.url. By default, system CA is used
-datasource.tlsCertFile string
@@ -653,7 +655,7 @@ The shortlist of configuration flags is the following:
-datasource.tlsServerName string
Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
-datasource.url string
VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend
Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
-defaultTenant.graphite string
Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
-defaultTenant.prometheus string
@@ -813,6 +815,8 @@ The shortlist of configuration flags is the following:
Optional OAuth2 scopes to use for -remoteRead.url. Scopes must be delimited by ';'.
-remoteRead.oauth2.tokenUrl string
Optional OAuth2 tokenURL to use for -remoteRead.url.
-remoteRead.showURL
Whether to show -remoteRead.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
-remoteRead.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -remoteRead.url. By default system CA is used
-remoteRead.tlsCertFile string
@@ -824,7 +828,7 @@ The shortlist of configuration flags is the following:
-remoteRead.tlsServerName string
Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
-remoteRead.url vmalert
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend
Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect.Remote read is used to restore alerts state.This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.
-remoteWrite.basicAuth.password string
Optional basic auth password for -remoteWrite.url
-remoteWrite.basicAuth.passwordFile string
@@ -855,6 +859,8 @@ The shortlist of configuration flags is the following:
Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.
-remoteWrite.oauth2.tokenUrl string
Optional OAuth2 tokenURL to use for -notifier.url.
-remoteWrite.showURL
Whether to show -remoteWrite.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
-remoteWrite.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. By default system CA is used
-remoteWrite.tlsCertFile string
@@ -866,7 +872,7 @@ The shortlist of configuration flags is the following:
-remoteWrite.tlsServerName string
Optional TLS server name to use for connections to -remoteWrite.url. By default the server name from -remoteWrite.url is used
-remoteWrite.url string
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend
Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.
-replay.disableProgressBar
Whether to disable rendering progress bars during the replay. Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.
-replay.maxDatapointsPerQuery int

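The -datasource.queryStep and -datasource.lookback descriptions above amount to extra GET params on every datasource query. A minimal sketch of how such params could be attached to an instant-query request; the endpoint path and helper are assumptions for illustration, not vmalert's actual client code:

```go
package main

import (
	"fmt"
	"net/url"
	"time"
)

// buildQueryParams mimics how a "step" param (from -datasource.queryStep) and a
// shifted "time" param (from -datasource.lookback) could be added to a query.
func buildQueryParams(expr string, now time.Time, queryStep, lookback time.Duration) string {
	v := url.Values{}
	v.Set("query", expr)
	ts := now
	if lookback > 0 {
		ts = ts.Add(-lookback)
	}
	v.Set("time", fmt.Sprintf("%d", ts.Unix()))
	if queryStep > 0 {
		v.Set("step", queryStep.String())
	}
	return "/api/v1/query?" + v.Encode()
}

func main() {
	now := time.Unix(1_660_000_000, 0)
	fmt.Println(buildQueryParams(`up == 0`, now, 15*time.Second, 5*time.Minute))
	// /api/v1/query?query=up+%3D%3D+0&step=15s&time=1659999700
}
```
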
@@ -285,15 +285,11 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
a.State = notifier.StatePending
a.ActiveAt = ts
}
if a.Value != m.Values[0] {
// update Value field with latest value
a.Value = m.Values[0]
// and re-exec template since Value can be used
// in annotations
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
if err != nil {
return nil, err
}
a.Value = m.Values[0]
// re-exec template since Value or query can be used in annotations
a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
if err != nil {
return nil, err
}
continue
}

@@ -6,14 +6,18 @@ import (
"net/http"
"net/url"
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
addr = flag.String("datasource.url", "", "VictoriaMetrics or vmselect url. Required parameter. "+
"E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend")
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
addr = flag.String("datasource.url", "", "Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. "+
"E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.")
appendTypePrefix = flag.Bool("datasource.appendTypePrefix", false, "Whether to add type prefix to -datasource.url based on the query type. Set to true if sending different query types to the vmselect URL.")
showDatasourceURL = flag.Bool("datasource.showURL", false, "Whether to show -datasource.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")

basicAuthUsername = flag.String("datasource.basicAuth.username", "", "Optional basic auth username for -datasource.url")
basicAuthPassword = flag.String("datasource.basicAuth.password", "", "Optional basic auth password for -datasource.url")
@@ -35,9 +39,9 @@ var (
oauth2Scopes = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")

lookBack = flag.Duration("datasource.lookback", 0, `Lookback defines how far into the past to look when evaluating queries. For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
queryStep = flag.Duration("datasource.queryStep", 0, "queryStep defines how far a value can fallback to when evaluating queries. "+
"For example, if datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query."+
"If queryStep isn't specified, rule's evaluationInterval will be used instead.")
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries. "+
"For example, if -datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query. "+
"If set to 0, rule's evaluation interval will be used instead.")
queryTimeAlignment = flag.Bool("datasource.queryTimeAlignment", true, `Whether to align "time" parameter with evaluation interval.`+
"Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
@@ -47,6 +51,13 @@ var (
`In VM "round_digits" limits the number of digits after the decimal point in response values.`)
)

// InitSecretFlags must be called after flag.Parse and before any logging
func InitSecretFlags() {
if !*showDatasourceURL {
flagutil.RegisterSecretFlag("datasource.url")
}
}

// Param represents an HTTP GET param
type Param struct {
Key, Value string

@@ -79,6 +79,9 @@ func main() {
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
envflag.Parse()
remoteread.InitSecretFlags()
remotewrite.InitSecretFlags()
datasource.InitSecretFlags()
buildinfo.Init()
logger.Init()
err := templates.Load(*ruleTemplatesPath, true)

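The ordering in main() matters: InitSecretFlags must run after envflag.Parse() (so the flag values are known) and before logger.Init() (so nothing logs the raw URLs). The sketch below models the idea behind flagutil.RegisterSecretFlag with a local registry; it is an illustration of the pattern, not the actual flagutil implementation:

```go
package main

import (
	"flag"
	"fmt"
)

// secretFlags holds names of flags whose values must never be printed.
var secretFlags = map[string]bool{}

// registerSecretFlag mimics the idea behind flagutil.RegisterSecretFlag.
func registerSecretFlag(name string) { secretFlags[name] = true }

// dumpFlags prints all flags, masking the registered secret ones.
func dumpFlags() {
	flag.VisitAll(func(f *flag.Flag) {
		value := f.Value.String()
		if secretFlags[f.Name] {
			value = "secret"
		}
		fmt.Printf("-%s=%q\n", f.Name, value)
	})
}

func main() {
	datasourceURL := flag.String("datasource.url", "", "datasource URL (may embed credentials)")
	showURL := flag.Bool("datasource.showURL", false, "expose -datasource.url in logs/metrics")
	flag.Parse()

	// Same ordering as in vmalert's main(): parse flags, register secrets, then log.
	if !*showURL {
		registerSecretFlag("datasource.url")
	}
	_ = datasourceURL
	dumpFlags()
}
```
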
@@ -7,12 +7,17 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
addr = flag.String("remoteRead.url", "", "Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts "+
"state. This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
"E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend")
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect."+
"Remote read is used to restore alerts state."+
"This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
"E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.")

showRemoteReadURL = flag.Bool("remoteRead.showURL", false, "Whether to show -remoteRead.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")

basicAuthUsername = flag.String("remoteRead.basicAuth.username", "", "Optional basic auth username for -remoteRead.url")
basicAuthPassword = flag.String("remoteRead.basicAuth.password", "", "Optional basic auth password for -remoteRead.url")
@@ -36,6 +41,13 @@ var (
oauth2Scopes = flag.String("remoteRead.oauth2.scopes", "", "Optional OAuth2 scopes to use for -remoteRead.url. Scopes must be delimited by ';'.")
)

// InitSecretFlags must be called after flag.Parse and before any logging
func InitSecretFlags() {
if !*showRemoteReadURL {
flagutil.RegisterSecretFlag("remoteRead.url")
}
}

// Init creates a Querier from provided flag values.
// Returns nil if addr flag wasn't set.
func Init() (datasource.QuerierBuilder, error) {

@@ -7,12 +7,15 @@ import (
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
addr = flag.String("remoteWrite.url", "", "Optional URL to VictoriaMetrics or vminsert where to persist alerts state "+
"and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, "+
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend")
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.")
showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+
"It is hidden by default, since it can contain sensitive info such as auth key")

basicAuthUsername = flag.String("remoteWrite.basicAuth.username", "", "Optional basic auth username for -remoteWrite.url")
basicAuthPassword = flag.String("remoteWrite.basicAuth.password", "", "Optional basic auth password for -remoteWrite.url")
@@ -41,6 +44,13 @@ var (
oauth2Scopes = flag.String("remoteWrite.oauth2.scopes", "", "Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.")
)

// InitSecretFlags must be called after flag.Parse and before any logging
func InitSecretFlags() {
if !*showRemoteWriteURL {
flagutil.RegisterSecretFlag("remoteWrite.url")
}
}

// Init creates Client object from given flags.
// Returns nil if addr flag wasn't set.
func Init(ctx context.Context) (*Client, error) {

@@ -160,7 +160,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
if strings.HasPrefix(r.URL.Path, "/api/v1/") {
redirectURL = alert.APILink()
}
http.Redirect(w, r, "/"+redirectURL, http.StatusPermanentRedirect)
httpserver.RedirectPermanent(w, "/"+redirectURL)
return true
}
}

@@ -39,10 +39,19 @@ func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, []Header, error) {
u := *uOrig
// Prevent from attacks with using `..` in r.URL.Path
u.Path = path.Clean(u.Path)
if !strings.HasSuffix(u.Path, "/") && strings.HasSuffix(uOrig.Path, "/") {
// The path.Clean() removes trailing slash.
// Return it back if needed.
// This should fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752
u.Path += "/"
}
if !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
u.Path = strings.TrimSuffix(u.Path, "/")
if u.Path == "/" {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1554
u.Path = ""
}
for _, e := range ui.URLMap {
for _, sp := range e.SrcPaths {
if sp.match(u.Path) {

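The hunk above hinges on one stdlib detail: path.Clean drops a trailing slash, which broke proxied URLs that rely on it (issue 1752). A minimal standalone demonstration of the problem and the fix:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// cleanKeepTrailingSlash sanitizes a URL path against ".." traversal
// while preserving a trailing slash the caller supplied.
func cleanKeepTrailingSlash(p string) string {
	cleaned := path.Clean(p)
	if strings.HasSuffix(p, "/") && !strings.HasSuffix(cleaned, "/") {
		cleaned += "/" // path.Clean("/foo/bar/") == "/foo/bar"
	}
	return cleaned
}

func main() {
	fmt.Println(path.Clean("/graph/"))                // /graph  (slash lost)
	fmt.Println(cleanKeepTrailingSlash("/graph/"))    // /graph/ (slash preserved)
	fmt.Println(cleanKeepTrailingSlash("/a/../etc/")) // /etc/   (traversal removed)
}
```
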
@@ -54,6 +54,9 @@ func (bw *Writer) reset() {

// Write writes p to bw.
func (bw *Writer) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
bw.lock.Lock()
defer bw.lock.Unlock()
if bw.err != nil {

@@ -168,7 +168,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
_ = r.ParseForm()
path = strings.TrimPrefix(path, "/")
newURL := path + "/?" + r.Form.Encode()
http.Redirect(w, r, newURL, http.StatusMovedPermanently)
httpserver.RedirectPermanent(w, newURL)
return true
}
if strings.HasPrefix(path, "/vmui/") {
@@ -217,7 +217,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
// vmalert access via incomplete url without `/` in the end. Redirect to the complete url.
// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
// is hidden behind vmauth or similar proxy.
http.Redirect(w, r, "vmalert/", http.StatusMovedPermanently)
httpserver.RedirectPermanent(w, "vmalert/")
return true
}
if strings.HasPrefix(path, "/vmalert/") {

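Both redirect hunks replace http.Redirect with an httpserver helper so the Location header can stay relative; the absolute hostname and path prefix are unknown when VictoriaMetrics sits behind vmauth or another proxy. Below is a standalone illustration of issuing such a redirect, assumed behavior only and not the actual httpserver.RedirectPermanent code:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// redirectPermanent writes a 301 response with the Location header exactly
// as given, so a relative target like "vmalert/" survives reverse proxies
// that rewrite hostnames and path prefixes.
func redirectPermanent(w http.ResponseWriter, url string) {
	w.Header().Set("Location", url)
	w.WriteHeader(http.StatusMovedPermanently)
}

func main() {
	rec := httptest.NewRecorder()
	redirectPermanent(rec, "vmalert/")
	fmt.Println(rec.Code, rec.Header().Get("Location")) // 301 vmalert/
}
```
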
@@ -1,4 +1,6 @@
{% import (
"math"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
) %}

@@ -7,10 +9,25 @@
// Federate writes rs in /federate format.
// See https://prometheus.io/docs/prometheus/latest/federation/
{% func Federate(rs *netstorage.Result) %}
{% if len(rs.Timestamps) == 0 || len(rs.Values) == 0 %}{% return %}{% endif %}
{% code
values := rs.Values
timestamps := rs.Timestamps
%}
{% if len(timestamps) == 0 || len(values) == 0 %}{% return %}{% endif %}
{% code
lastValue := values[len(values)-1]
%}
{% if math.IsNaN(lastValue) %}
{% comment %}
This is most likely a staleness marker.
Return nothing after the staleness marker.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3185
{% endcomment %}
{% return %}
{% endif %}
{%= prometheusMetricName(&rs.MetricName) %}{% space %}
{%f= rs.Values[len(rs.Values)-1] %}{% space %}
{%dl= rs.Timestamps[len(rs.Timestamps)-1] %}{% newline %}
{%f= lastValue %}{% space %}
{%dl= timestamps[len(timestamps)-1] %}{% newline %}
{% endfunc %}

{% endstripspace %}

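The template change above skips the /federate output line when the newest sample is NaN, treating it as a staleness marker (issue 3185). A small sketch of the same guard outside the quicktemplate machinery; the output format here is simplified and omits labels:

```go
package main

import (
	"fmt"
	"math"
)

// federateLine renders the last sample of a series in /federate style,
// returning "" when the newest sample is NaN (a likely staleness marker).
func federateLine(name string, values []float64, timestamps []int64) string {
	if len(values) == 0 || len(timestamps) == 0 {
		return ""
	}
	lastValue := values[len(values)-1]
	if math.IsNaN(lastValue) {
		return "" // series went stale; emit nothing
	}
	return fmt.Sprintf("%s %g %d\n", name, lastValue, timestamps[len(timestamps)-1])
}

func main() {
	fmt.Print(federateLine("up", []float64{1, 1, 1}, []int64{10_000, 20_000, 30_000}))          // up 1 30000
	fmt.Print(federateLine("up", []float64{1, 1, math.NaN()}, []int64{10_000, 20_000, 30_000})) // prints nothing
}
```
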
@@ -6,70 +6,85 @@ package prometheus

//line app/vmselect/prometheus/federate.qtpl:1
import (
"math"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
)

// Federate writes rs in /federate format.// See https://prometheus.io/docs/prometheus/latest/federation/

//line app/vmselect/prometheus/federate.qtpl:9
//line app/vmselect/prometheus/federate.qtpl:11
import (
qtio422016 "io"

qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/federate.qtpl:9
//line app/vmselect/prometheus/federate.qtpl:11
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/federate.qtpl:9
//line app/vmselect/prometheus/federate.qtpl:11
func StreamFederate(qw422016 *qt422016.Writer, rs *netstorage.Result) {
//line app/vmselect/prometheus/federate.qtpl:10
if len(rs.Timestamps) == 0 || len(rs.Values) == 0 {
//line app/vmselect/prometheus/federate.qtpl:10
//line app/vmselect/prometheus/federate.qtpl:13
values := rs.Values
timestamps := rs.Timestamps

//line app/vmselect/prometheus/federate.qtpl:16
if len(timestamps) == 0 || len(values) == 0 {
//line app/vmselect/prometheus/federate.qtpl:16
return
//line app/vmselect/prometheus/federate.qtpl:10
//line app/vmselect/prometheus/federate.qtpl:16
}
//line app/vmselect/prometheus/federate.qtpl:11
//line app/vmselect/prometheus/federate.qtpl:18
lastValue := values[len(values)-1]

//line app/vmselect/prometheus/federate.qtpl:20
if math.IsNaN(lastValue) {
//line app/vmselect/prometheus/federate.qtpl:26
return
//line app/vmselect/prometheus/federate.qtpl:27
}
//line app/vmselect/prometheus/federate.qtpl:28
streamprometheusMetricName(qw422016, &rs.MetricName)
//line app/vmselect/prometheus/federate.qtpl:11
//line app/vmselect/prometheus/federate.qtpl:28
qw422016.N().S(` `)
//line app/vmselect/prometheus/federate.qtpl:12
qw422016.N().F(rs.Values[len(rs.Values)-1])
//line app/vmselect/prometheus/federate.qtpl:12
//line app/vmselect/prometheus/federate.qtpl:29
qw422016.N().F(lastValue)
//line app/vmselect/prometheus/federate.qtpl:29
qw422016.N().S(` `)
//line app/vmselect/prometheus/federate.qtpl:13
qw422016.N().DL(rs.Timestamps[len(rs.Timestamps)-1])
//line app/vmselect/prometheus/federate.qtpl:13
//line app/vmselect/prometheus/federate.qtpl:30
qw422016.N().DL(timestamps[len(timestamps)-1])
//line app/vmselect/prometheus/federate.qtpl:30
qw422016.N().S(`
`)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
}

//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
func WriteFederate(qq422016 qtio422016.Writer, rs *netstorage.Result) {
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
StreamFederate(qw422016, rs)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
}

//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
func Federate(rs *netstorage.Result) string {
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
WriteFederate(qb422016, rs)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
return qs422016
//line app/vmselect/prometheus/federate.qtpl:14
//line app/vmselect/prometheus/federate.qtpl:31
}

@@ -104,6 +104,9 @@ func removeGroupTags(metricName *storage.MetricName, modifier *metricsql.Modifie

func aggrFuncExt(afe func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries, argOrig []*timeseries,
modifier *metricsql.ModifierExpr, maxSeries int, keepOriginal bool) ([]*timeseries, error) {
// Remove empty time series, e.g. series with all NaN samples,
// since such series are ignored by aggregate functions.
argOrig = removeEmptySeries(argOrig)
arg := copyTimeseriesMetricNames(argOrig, keepOriginal)

// Perform grouping.

@@ -36,9 +36,9 @@ var binaryOpFuncs = map[string]binaryOpFunc{
"unless": binaryOpUnless,

// New ops
"if": newBinaryOpArithFunc(binaryop.If),
"ifnot": newBinaryOpArithFunc(binaryop.Ifnot),
"default": newBinaryOpArithFunc(binaryop.Default),
"if": binaryOpIf,
"ifnot": binaryOpIfnot,
"default": binaryOpDefault,
}

func getBinaryOpFunc(op string) binaryOpFunc {
@@ -86,17 +86,6 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp
right := bfa.right
op := bfa.be.Op
switch true {
case op == "ifnot":
left = removeEmptySeries(left)
// Do not remove empty series on the right side,
// so the left-side series could be matched against them.
case op == "default":
// Do not remove empty series on the left and the right side,
// since this may lead to missing result:
// - if empty time series are removed on the left side,
// then they won't be substituted by time series from the right side.
// - if empty time series are removed on the right side,
// then this may result in missing time series from the left side.
case metricsql.IsBinaryOpCmp(op):
// Do not remove empty series for comparison operations,
// since this may lead to missing result.
@@ -136,7 +125,7 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp

func adjustBinaryOpTags(be *metricsql.BinaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 {
if isScalar(left) && be.Op != "default" && be.Op != "if" && be.Op != "ifnot" {
if isScalar(left) {
// Fast path: `scalar op vector`
rvsLeft := make([]*timeseries, len(right))
tsLeft := left[0]
@@ -324,14 +313,23 @@ func resetMetricGroupIfRequired(be *metricsql.BinaryOpExpr, ts *timeseries) {
// Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does.
return
}
switch be.Op {
case "default", "if", "ifnot":
// Do not reset MetricGroup for these ops.
return
}
ts.MetricName.ResetMetricGroup()
}

func binaryOpIf(bfa *binaryOpFuncArg) ([]*timeseries, error) {
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
var rvs []*timeseries
for k, tssLeft := range mLeft {
tssRight := seriesByKey(mRight, k)
if tssRight == nil {
continue
}
tssLeft = addRightNaNsToLeft(tssLeft, tssRight)
rvs = append(rvs, tssLeft...)
}
return rvs, nil
}

func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
var rvs []*timeseries
@@ -340,24 +338,47 @@ func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
if tssLeft == nil {
continue
}
// Add gaps to tssLeft if there are gaps at tssRight.
for _, tsLeft := range tssLeft {
valuesLeft := tsLeft.Values
for i := range valuesLeft {
hasValue := false
for _, tsRight := range tssRight {
if !math.IsNaN(tsRight.Values[i]) {
hasValue = true
break
}
}
if !hasValue {
valuesLeft[i] = nan
tssLeft = addRightNaNsToLeft(tssLeft, tssRight)
rvs = append(rvs, tssLeft...)
}
return rvs, nil
}

func addRightNaNsToLeft(tssLeft, tssRight []*timeseries) []*timeseries {
for _, tsLeft := range tssLeft {
valuesLeft := tsLeft.Values
for i := range valuesLeft {
hasValue := false
for _, tsRight := range tssRight {
if !math.IsNaN(tsRight.Values[i]) {
hasValue = true
break
}
}
if !hasValue {
valuesLeft[i] = nan
}
}
tssLeft = removeEmptySeries(tssLeft)
}
return removeEmptySeries(tssLeft)
}

func binaryOpDefault(bfa *binaryOpFuncArg) ([]*timeseries, error) {
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
var rvs []*timeseries
if len(mLeft) == 0 {
for _, tss := range mRight {
rvs = append(rvs, tss...)
}
return rvs, nil
}
for k, tssLeft := range mLeft {
rvs = append(rvs, tssLeft...)
tssRight := seriesByKey(mRight, k)
if tssRight == nil {
continue
}
fillLeftNaNsWithRightValues(tssLeft, tssRight)
}
return rvs, nil
}
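
The binaryOpAnd/binaryOpIf hunks extract the shared "copy NaN gaps from the right-hand series onto the left-hand series" loop into addRightNaNsToLeft. The sketch below reproduces that idea on plain float64 slices, simplified to a single series per side:

```go
package main

import (
	"fmt"
	"math"
)

var nan = math.NaN()

// addRightNaNsToLeft mirrors the helper's idea for a single pair of series:
// wherever the right series has no sample (NaN), the left series gets a gap too.
func addRightNaNsToLeft(left, right []float64) []float64 {
	out := append([]float64(nil), left...)
	for i := range out {
		if math.IsNaN(right[i]) {
			out[i] = nan
		}
	}
	return out
}

func main() {
	left := []float64{1, 2, 3, 4}
	right := []float64{10, nan, 30, nan}
	fmt.Println(addRightNaNsToLeft(left, right)) // [1 NaN 3 NaN]
}
```
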
@@ -374,24 +395,43 @@ func binaryOpOr(bfa *binaryOpFuncArg) ([]*timeseries, error) {
rvs = append(rvs, tssRight...)
continue
}
// Fill gaps in tssLeft with values from tssRight as Prometheus does.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552
for _, tsLeft := range tssLeft {
valuesLeft := tsLeft.Values
for i, v := range valuesLeft {
if !math.IsNaN(v) {
continue
}
for _, tsRight := range tssRight {
vRight := tsRight.Values[i]
if !math.IsNaN(vRight) {
valuesLeft[i] = vRight
break
}
fillLeftNaNsWithRightValues(tssLeft, tssRight)
}
return rvs, nil
}

func fillLeftNaNsWithRightValues(tssLeft, tssRight []*timeseries) {
// Fill gaps in tssLeft with values from tssRight as Prometheus does.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552
for _, tsLeft := range tssLeft {
valuesLeft := tsLeft.Values
for i, v := range valuesLeft {
if !math.IsNaN(v) {
continue
}
for _, tsRight := range tssRight {
vRight := tsRight.Values[i]
if !math.IsNaN(vRight) {
valuesLeft[i] = vRight
break
}
}
}
}
}

func binaryOpIfnot(bfa *binaryOpFuncArg) ([]*timeseries, error) {
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
var rvs []*timeseries
for k, tssLeft := range mLeft {
tssRight := seriesByKey(mRight, k)
if tssRight == nil {
rvs = append(rvs, tssLeft...)
continue
}
tssLeft = addLeftNaNsIfNoRightNaNs(tssLeft, tssRight)
rvs = append(rvs, tssLeft...)
}
return rvs, nil
}

@@ -404,24 +444,44 @@ func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) {
rvs = append(rvs, tssLeft...)
continue
}
// Add gaps to tssLeft if there are no gaps at tssRight.
for _, tsLeft := range tssLeft {
valuesLeft := tsLeft.Values
for i := range valuesLeft {
for _, tsRight := range tssRight {
if !math.IsNaN(tsRight.Values[i]) {
valuesLeft[i] = nan
break
}
}
}
}
tssLeft = removeEmptySeries(tssLeft)
tssLeft = addLeftNaNsIfNoRightNaNs(tssLeft, tssRight)
rvs = append(rvs, tssLeft...)
}
return rvs, nil
}

func addLeftNaNsIfNoRightNaNs(tssLeft, tssRight []*timeseries) []*timeseries {
for _, tsLeft := range tssLeft {
valuesLeft := tsLeft.Values
for i := range valuesLeft {
for _, tsRight := range tssRight {
if !math.IsNaN(tsRight.Values[i]) {
valuesLeft[i] = nan
break
}
}
}
}
return removeEmptySeries(tssLeft)
}

func seriesByKey(m map[string][]*timeseries, key string) []*timeseries {
tss := m[key]
if tss != nil {
return tss
}
if len(m) != 1 {
return nil
}
for _, tss := range m {
if isScalar(tss) {
return tss
}
return nil
}
return nil
}

func createTimeseriesMapByTagSet(be *metricsql.BinaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
groupTags := be.GroupModifier.Args
groupOp := strings.ToLower(be.GroupModifier.Op)

@@ -2280,6 +2280,27 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`limit_offset NaN`, func(t *testing.T) {
t.Parallel()
// q returns 3 time series, where foo=3 contains only NaN values
// limit_offset is supposed to apply the offset to non-NaN series only
q := `limit_offset(1, 1, sort_by_label_desc((
label_set(time()*1, "foo", "1"),
label_set(time()*2, "foo", "2"),
label_set(time()*3, "foo", "3"),
) < 3000, "foo"))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
Timestamps: timestampsExpected,
}
r.MetricName.Tags = []storage.Tag{{
Key: []byte("foo"),
Value: []byte("1"),
}}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`sum(label_graphite_group)`, func(t *testing.T) {
t.Parallel()
q := `sort(sum by (__name__) (
@@ -2803,7 +2824,12 @@ func TestExecSuccess(t *testing.T) {
t.Run(`scalar default vector1`, func(t *testing.T) {
t.Parallel()
q := `time() > 1400 default label_set(123, "foo", "bar")`
resultExpected := []netstorage.Result{}
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{nan, nan, nan, 1600, 1800, 2000},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`scalar default vector2`, func(t *testing.T) {
@@ -5084,7 +5110,40 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`quantiles_over_time`, func(t *testing.T) {
t.Run(`quantiles_over_time(single_sample)`, func(t *testing.T) {
t.Parallel()
q := `sort_by_label(
quantiles_over_time("phi", 0.5, 0.9,
time()[100s:100s]
),
"phi",
)`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
Timestamps: timestampsExpected,
}
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("phi"),
Value: []byte("0.5"),
},
}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
Timestamps: timestampsExpected,
}
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("phi"),
Value: []byte("0.9"),
},
}
resultExpected := []netstorage.Result{r1, r2}
f(q, resultExpected)
})
t.Run(`quantiles_over_time(multiple_samples)`, func(t *testing.T) {
t.Parallel()
q := `sort_by_label(
quantiles_over_time("phi", 0.5, 0.9,
@@ -5485,6 +5544,12 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`any(empty-series)`, func(t *testing.T) {
t.Parallel()
q := `any(label_set(time()<0, "foo", "bar"))`
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`group() by (test)`, func(t *testing.T) {
t.Parallel()
q := `group((
@@ -6092,7 +6157,18 @@ func TestExecSuccess(t *testing.T) {
t.Run(`ifnot-no-matching-timeseries`, func(t *testing.T) {
t.Parallel()
q := `label_set(time(), "foo", "bar") ifnot label_set(time() > 1400, "x", "y")`
resultExpected := []netstorage.Result{}
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1000, 1200, 1400, 1600, 1800, 2000},
Timestamps: timestampsExpected,
}
r.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`quantile(-2)`, func(t *testing.T) {
@@ -7925,6 +8001,7 @@ func TestExecError(t *testing.T) {
f(`limit_offet(1, (alias(1,"foo"),alias(2,"bar")), 10)`)
f(`round(1, 1 or label_set(2, "xx", "foo"))`)
f(`histogram_quantile(1 or label_set(2, "xx", "foo"), 1)`)
f(`histogram_quantiles("foo", 1 or label_set(2, "xxx", "foo"), 2)`)
f(`label_set(1, 2, 3)`)
f(`label_set(1, "foo", (label_set(1, "foo", bar") or label_set(2, "xxx", "yy")))`)
f(`label_set(1, "foo", 3)`)

@@ -167,6 +167,8 @@ var rollupFuncsCanAdjustWindow = map[string]bool{
"timestamp": true,
}

// rollupFuncsRemoveCounterResets contains functions, which need to call removeCounterResets
// over input samples before calling the corresponding rollup functions.
var rollupFuncsRemoveCounterResets = map[string]bool{
"increase": true,
"increase_prometheus": true,
@@ -177,6 +179,36 @@ var rollupFuncsRemoveCounterResets = map[string]bool{
"rollup_rate": true,
}

// rollupFuncsSamplesScannedPerCall contains functions, which scan lower number of samples
// than is passed to the rollup func.
//
// It is expected that the remaining rollupFuncs scan all the samples passed to them.
var rollupFuncsSamplesScannedPerCall = map[string]int{
"absent_over_time": 1,
"count_over_time": 1,
"default_rollup": 1,
"delta": 2,
"delta_prometheus": 2,
"deriv_fast": 2,
"first_over_time": 1,
"idelta": 2,
"ideriv": 2,
"increase": 2,
"increase_prometheus": 2,
"increase_pure": 2,
"irate": 2,
"lag": 1,
"last_over_time": 1,
"lifetime": 2,
"present_over_time": 1,
"rate": 2,
"scrape_interval": 2,
"tfirst_over_time": 1,
"timestamp": 1,
"timestamp_with_name": 1,
"tlast_over_time": 1,
}

// These functions don't change physical meaning of input time series,
// so they don't drop metric name
var rollupFuncsKeepMetricName = map[string]bool{
@@ -248,26 +280,29 @@ func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {
return aggrFuncNames, nil
}

func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, end, step, window int64, lookbackDelta int64, sharedTimestamps []int64) (
func getRollupConfigs(funcName string, rf rollupFunc, expr metricsql.Expr, start, end, step, window int64, lookbackDelta int64, sharedTimestamps []int64) (
func(values []float64, timestamps []int64), []*rollupConfig, error) {
preFunc := func(values []float64, timestamps []int64) {}
if rollupFuncsRemoveCounterResets[name] {
funcName = strings.ToLower(funcName)
if rollupFuncsRemoveCounterResets[funcName] {
preFunc = func(values []float64, timestamps []int64) {
removeCounterResets(values)
}
}
samplesScannedPerCall := rollupFuncsSamplesScannedPerCall[funcName]
newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
return &rollupConfig{
TagValue: tagValue,
Func: rf,
Start: start,
End: end,
Step: step,
Window: window,
MayAdjustWindow: rollupFuncsCanAdjustWindow[name],
LookbackDelta: lookbackDelta,
Timestamps: sharedTimestamps,
isDefaultRollup: name == "default_rollup",
TagValue: tagValue,
Func: rf,
Start: start,
End: end,
Step: step,
Window: window,
MayAdjustWindow: rollupFuncsCanAdjustWindow[funcName],
LookbackDelta: lookbackDelta,
Timestamps: sharedTimestamps,
isDefaultRollup: funcName == "default_rollup",
samplesScannedPerCall: samplesScannedPerCall,
}
}
appendRollupConfigs := func(dst []*rollupConfig) []*rollupConfig {
@@ -277,7 +312,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
return dst
}
var rcs []*rollupConfig
switch name {
switch funcName {
case "rollup":
rcs = appendRollupConfigs(rcs)
case "rollup_rate", "rollup_deriv":
@@ -414,6 +449,11 @@ type rollupConfig struct {

// Whether default_rollup is used.
isDefaultRollup bool

// The estimated number of samples scanned per Func call.
//
// If zero, then it is considered that Func scans all the samples passed to it.
samplesScannedPerCall int
}

func (rc *rollupConfig) String() string {
@@ -552,7 +592,8 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
ni := 0
nj := 0
f := rc.Func
var samplesScanned uint64
samplesScanned := uint64(len(values))
samplesScannedPerCall := uint64(rc.samplesScannedPerCall)
for _, tEnd := range rc.Timestamps {
tStart := tEnd - window
ni = seekFirstTimestampIdxAfter(timestamps[i:], tStart, ni)
@@ -584,7 +625,11 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
rfa.currTimestamp = tEnd
value := f(rfa)
rfa.idx++
samplesScanned += uint64(len(rfa.values))
if samplesScannedPerCall > 0 {
samplesScanned += samplesScannedPerCall
} else {
samplesScanned += uint64(len(rfa.values))
}
dstValues = append(dstValues, value)
}
putRollupFuncArg(rfa)
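
Taken together, rollupFuncsSamplesScannedPerCall and the doInternal change say: start from the total number of raw samples, then, per evaluation window, add a known constant for functions such as rate or last_over_time, and the full window size for everything else. A compact model of that accounting, simplified from the real code:

```go
package main

import "fmt"

// estimateSamplesScanned models the accounting used in rollupConfig.doInternal:
// begin with the total number of raw samples, then per window add either a
// known constant (samplesScannedPerCall) or the size of the window slice.
func estimateSamplesScanned(totalSamples int, windowSizes []int, samplesScannedPerCall int) uint64 {
	scanned := uint64(totalSamples)
	for _, windowLen := range windowSizes {
		if samplesScannedPerCall > 0 {
			scanned += uint64(samplesScannedPerCall)
		} else {
			scanned += uint64(windowLen)
		}
	}
	return scanned
}

func main() {
	windows := []int{3, 3, 3, 3} // 4 evaluation points, 3 samples visible in each
	fmt.Println(estimateSamplesScanned(12, windows, 2)) // rate-like func: 12 + 4*2 = 20
	fmt.Println(estimateSamplesScanned(12, windows, 0)) // generic func:   12 + 4*3 = 24
}
```
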
@@ -1112,11 +1157,7 @@ func newRollupQuantiles(args []interface{}) (rollupFunc, error) {
// before calling rollup funcs.
values := rfa.values
if len(values) == 0 {
return rfa.prevValue
}
if len(values) == 1 {
// Fast path - only a single value.
return values[0]
return nan
}
qs := getFloat64s()
qs.A = quantiles(qs.A[:0], phis, values)
@@ -1339,15 +1380,11 @@ func rollupRateOverSum(rfa *rollupFuncArg) float64 {
// Assume that the value didn't change since rfa.prevValue.
return 0
}
dt := rfa.window
if !math.IsNaN(rfa.prevValue) {
dt = timestamps[len(timestamps)-1] - rfa.prevTimestamp
}
sum := float64(0)
for _, v := range rfa.values {
sum += v
}
return sum / (float64(dt) / 1e3)
return sum / (float64(rfa.window) / 1e3)
}

func rollupRange(rfa *rollupFuncArg) float64 {
@@ -1489,11 +1526,8 @@ func rollupDelta(rfa *rollupFuncArg) float64 {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/894
return values[len(values)-1] - rfa.realPrevValue
}
// Assume that the previous non-existing value was 0 only in the following cases:
//
// - If the delta with the next value equals to 0.
// This is the case for slow-changing counter - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962
// - If the first value doesn't exceed too much the delta with the next value.
// Assume that the previous non-existing value was 0
// only if the first value doesn't exceed too much the delta with the next value.
//
// This should prevent from improper increase() results for os-level counters
// such as cpu time or bytes sent over the network interface.
@@ -1507,9 +1541,6 @@ func rollupDelta(rfa *rollupFuncArg) float64 {
} else if !math.IsNaN(rfa.realNextValue) {
d = rfa.realNextValue - values[0]
}
if d == 0 {
d = 10
}
if math.Abs(values[0]) < 10*(math.Abs(d)+1) {
prevValue = 0
} else {

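The rollupRateOverSum hunk drops the prevValue-dependent dt and always divides by the lookbehind window. A tiny worked model of the resulting formula (window in milliseconds, result per second):

```go
package main

import "fmt"

// rateOverSum models the simplified formula from the hunk above:
// the sum of the samples in the window divided by the window length in seconds.
func rateOverSum(values []float64, windowMillis int64) float64 {
	sum := 0.0
	for _, v := range values {
		sum += v
	}
	return sum / (float64(windowMillis) / 1e3)
}

func main() {
	// 30+50+40 = 120 units over a 60s window => 2 units/second.
	fmt.Println(rateOverSum([]float64{30, 50, 40}, 60_000)) // 2
}
```
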
@@ -586,8 +586,8 @@ func TestRollupNoWindowNoPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned != 0 {
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
if samplesScanned != 12 {
t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, nan, nan, nan, nan}
timestampsExpected := []int64{0, 1, 2, 3, 4}
@@ -623,8 +623,8 @@ func TestRollupWindowNoPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned != 0 {
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
if samplesScanned != 12 {
t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, nan, nan, nan, nan}
timestampsExpected := []int64{0, 1, 2, 3, 4}
@@ -640,8 +640,8 @@ func TestRollupWindowNoPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned != 0 {
t.Fatalf("expecting zero samplesScanned from rollupConfig.Do; got %d", samplesScanned)
if samplesScanned != 12 {
t.Fatalf("expecting 12 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, nan, nan, nan}
timestampsExpected := []int64{161, 171, 181, 191}
@@ -660,8 +660,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 15 {
t.Fatalf("expecting 15 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 123, nan, 34, nan, 44}
timestampsExpected := []int64{0, 5, 10, 15, 20, 25}
@@ -677,8 +677,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 16 {
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{44, 32, 34, nan}
timestampsExpected := []int64{100, 120, 140, 160}
@@ -694,8 +694,8 @@ func TestRollupNoWindowPartialPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, nan, 123, 34, 32}
timestampsExpected := []int64{-50, 0, 50, 100, 150}
@@ -714,8 +714,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 16 {
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 123, 123, 34, 34}
timestampsExpected := []int64{0, 5, 10, 15, 20}
@@ -731,8 +731,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 16 {
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{44, 34, 34, nan}
timestampsExpected := []int64{100, 120, 140, 160}
@@ -748,8 +748,8 @@ func TestRollupWindowPartialPoints(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 15 {
t.Fatalf("expecting 15 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 54, 44, nan}
timestampsExpected := []int64{0, 50, 100, 150}
@@ -768,8 +768,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 18 {
t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
@@ -785,8 +785,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 18 {
t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
@@ -802,8 +802,8 @@ func TestRollupFuncsLookbackDelta(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 18 {
t.Fatalf("expecting 18 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{99, nan, 44, nan, 32, 34, nan}
timestampsExpected := []int64{80, 90, 100, 110, 120, 130, 140}
@@ -822,8 +822,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 123, 54, 44, 34}
timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -839,8 +839,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 4, 4, 3, 1}
timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -856,8 +856,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 21, 12, 32, 34}
timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -873,8 +873,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 123, 99, 44, 34}
timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -890,8 +890,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 222, 199, 110, 34}
timestampsExpected := []int64{0, 40, 80, 120, 160}
@@ -907,8 +907,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
}
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
if samplesScanned == 0 {
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
if samplesScanned != 24 {
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
}
valuesExpected := []float64{nan, 21, -9, 22, 0}
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -924,8 +924,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -102, -42, -10, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -941,8 +941,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{123, 33, -87, 0}
|
||||
timestampsExpected := []int64{10, 50, 90, 130}
|
||||
@@ -958,8 +958,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.004, 0, 0, 0.03}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -975,8 +975,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.031, 0.044, 0.04, 0.01}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -992,8 +992,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 47 {
|
||||
t.Fatalf("expecting 47 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.031, 0.075, 0.115, 0.125}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1009,8 +1009,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.010333333333333333, 0.011, 0.013333333333333334, 0.01}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1026,8 +1026,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 0.010333333333333333, 0.010714285714285714, 0.012, 0.0125}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1043,8 +1043,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 4, 3, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1060,8 +1060,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 3, 3, 2, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1077,8 +1077,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 16 {
|
||||
t.Fatalf("expecting 16 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 1, 1, 1, 1, 0}
|
||||
timestampsExpected := []int64{0, 9, 18, 27, 36, 45}
|
||||
@@ -1094,8 +1094,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 2, 2, 1, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1111,8 +1111,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 55.5, 49.75, 36.666666666666664, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1128,8 +1128,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -2879.310344827588, 127.87627310448904, -496.5831435079728, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1145,8 +1145,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 14 {
|
||||
t.Fatalf("expecting 14 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, nan, nan, 0, -8900, 0}
|
||||
timestampsExpected := []int64{0, 4, 8, 12, 16, 20}
|
||||
@@ -1162,8 +1162,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -1916.6666666666665, -43500, 400, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1179,8 +1179,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 39.81519810323691, 32.080952292598795, 5.2493385826745405, 0}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1196,8 +1196,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 2.148, 1.593, 1.156, 1.36}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1213,8 +1213,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 24 {
|
||||
t.Fatalf("expecting 24 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 4, 3, 1}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1230,8 +1230,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 4, 7, 6, 3}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1247,8 +1247,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 21, 34, 34, 34}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1264,10 +1264,10 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, 2775, 5262.5, 3678.5714285714284, 2880}
|
||||
valuesExpected := []float64{nan, 2775, 5262.5, 3862.5, 1800}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
@@ -1281,8 +1281,8 @@ func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values, samplesScanned := rc.Do(nil, testValues, testTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 35 {
|
||||
t.Fatalf("expecting 35 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{nan, -0.86650328627136, -1.1200838283548589, -0.40035755084856683, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
@@ -1306,8 +1306,8 @@ func TestRollupBigNumberOfValues(t *testing.T) {
|
||||
srcTimestamps[i] = int64(i / 2)
|
||||
}
|
||||
values, samplesScanned := rc.Do(nil, srcValues, srcTimestamps)
|
||||
if samplesScanned == 0 {
|
||||
t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
|
||||
if samplesScanned != 22002 {
|
||||
t.Fatalf("expecting 22002 samplesScanned from rollupConfig.Do; got %d", samplesScanned)
|
||||
}
|
||||
valuesExpected := []float64{1, 4001, 8001, 9999, nan, nan}
|
||||
timestampsExpected := []int64{0, 2000, 4000, 6000, 8000, 10000}
|
||||
@@ -1383,19 +1383,16 @@ func TestRollupDelta(t *testing.T) {
|
||||
|
||||
// Small initial value
|
||||
f(nan, nan, nan, []float64{1}, 1)
|
||||
f(nan, nan, nan, []float64{10}, 10)
|
||||
f(nan, nan, nan, []float64{100}, 100)
|
||||
f(nan, nan, nan, []float64{10}, 0)
|
||||
f(nan, nan, nan, []float64{100}, 0)
|
||||
f(nan, nan, nan, []float64{1, 2, 3}, 3)
|
||||
f(1, nan, nan, []float64{1, 2, 3}, 2)
|
||||
f(nan, nan, nan, []float64{5, 6, 8}, 8)
|
||||
f(2, nan, nan, []float64{5, 6, 8}, 6)
|
||||
|
||||
// Moderate initial value with zero delta after that.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962
|
||||
f(nan, nan, nan, []float64{100}, 100)
|
||||
f(nan, nan, nan, []float64{100, 100}, 100)
|
||||
f(nan, nan, nan, []float64{100, 100}, 0)
|
||||
|
||||
// Big initial value with with zero delta after that.
|
||||
// Big initial value with zero delta after that.
|
||||
f(nan, nan, nan, []float64{1000}, 0)
|
||||
f(nan, nan, nan, []float64{1000, 1000}, 0)
|
||||
|
||||
|
||||
@@ -822,8 +822,12 @@ func transformHistogramQuantiles(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
tssOrig := args[len(args)-1]
|
||||
// Calculate quantile individually per each phi.
|
||||
var rvs []*timeseries
|
||||
for _, phiArg := range phiArgs {
|
||||
phiStr := fmt.Sprintf("%g", phiArg[0].Values[0])
|
||||
for i, phiArg := range phiArgs {
|
||||
phis, err := getScalar(phiArg, i)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse phi: %w", err)
|
||||
}
|
||||
phiStr := fmt.Sprintf("%g", phis[0])
|
||||
tss := copyTimeseries(tssOrig)
|
||||
tfaTmp := &transformFuncArg{
|
||||
ec: tfa.ec,
|
||||
@@ -1844,7 +1848,9 @@ func transformLimitOffset(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain offset arg: %w", err)
|
||||
}
|
||||
rvs := args[2]
|
||||
// removeEmptySeries so offset will be calculated after empty series
|
||||
// were filtered out.
|
||||
rvs := removeEmptySeries(args[2])
|
||||
if len(rvs) >= offset {
|
||||
rvs = rvs[offset:]
|
||||
}
|
||||
@@ -2183,17 +2189,13 @@ func transformTimezoneOffset(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
return nil, fmt.Errorf("cannot load timezone %q: %w", tzString, err)
|
||||
}
|
||||
|
||||
var ts timeseries
|
||||
ts.denyReuse = true
|
||||
timestamps := tfa.ec.getSharedTimestamps()
|
||||
values := make([]float64, len(timestamps))
|
||||
for i, v := range timestamps {
|
||||
_, offset := time.Unix(v/1000, 0).In(loc).Zone()
|
||||
values[i] = float64(offset)
|
||||
tss := evalNumber(tfa.ec, nan)
|
||||
ts := tss[0]
|
||||
for i, timestamp := range ts.Timestamps {
|
||||
_, offset := time.Unix(timestamp/1000, 0).In(loc).Zone()
|
||||
ts.Values[i] = float64(offset)
|
||||
}
|
||||
ts.Values = values
|
||||
ts.Timestamps = timestamps
|
||||
return []*timeseries{&ts}, nil
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func transformTime(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "./static/css/main.7e6d0c89.css",
|
||||
"main.js": "./static/js/main.a6398eac.js",
|
||||
"main.js": "./static/js/main.9f52c638.js",
|
||||
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
|
||||
"index.html": "./index.html"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/css/main.7e6d0c89.css",
|
||||
"static/js/main.a6398eac.js"
|
||||
"static/js/main.9f52c638.js"
|
||||
]
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.a6398eac.js"></script><link href="./static/css/main.7e6d0c89.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.9f52c638.js"></script><link href="./static/css/main.7e6d0c89.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
|
||||
@@ -6,7 +6,7 @@ COPY web/ /build/
|
||||
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
|
||||
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/
|
||||
|
||||
FROM alpine:3.16.0
|
||||
FROM alpine:3.16.2
|
||||
USER root
|
||||
|
||||
COPY --from=build-web-stage /build/web-amd64 /app/web
|
||||
|
||||
@@ -110,7 +110,9 @@ export const TimeSelector: FC = () => {
|
||||
open={open}
|
||||
anchorEl={anchorEl}
|
||||
placement="bottom-end"
|
||||
modifiers={[{name: "offset", options: {offset: [0, 6]}}]}>
|
||||
modifiers={[{name: "offset", options: {offset: [0, 6]}}]}
|
||||
sx={{zIndex: 3, position: "relative"}}
|
||||
>
|
||||
<ClickAwayListener onClickAway={() => setAnchorEl(null)}>
|
||||
<Paper elevation={3}>
|
||||
<Box sx={classes.container}>
|
||||
|
||||
@@ -50,9 +50,10 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||
const fetchData = async (fetchUrl: string[], fetchQueue: AbortController[], displayType: DisplayType, query: string[]) => {
|
||||
const controller = new AbortController();
|
||||
setFetchQueue([...fetchQueue, controller]);
|
||||
const isDisplayChart = displayType === "chart";
|
||||
try {
|
||||
const responses = await Promise.all(fetchUrl.map(url => fetch(url, {signal: controller.signal})));
|
||||
const tempData = [];
|
||||
const tempData: MetricBase[] = [];
|
||||
const tempTraces: Trace[] = [];
|
||||
let counter = 1;
|
||||
for await (const response of responses) {
|
||||
@@ -63,16 +64,16 @@ export const useFetchQuery = ({predefinedQuery, visible, display, customStep}: F
|
||||
const trace = new Trace(resp.trace, query[counter-1]);
|
||||
tempTraces.push(trace);
|
||||
}
|
||||
tempData.push(...resp.data.result.map((d: MetricBase) => {
|
||||
resp.data.result.forEach((d: MetricBase) => {
|
||||
d.group = counter;
|
||||
return d;
|
||||
}));
|
||||
tempData.push(d);
|
||||
});
|
||||
counter++;
|
||||
} else {
|
||||
setError(`${resp.errorType}\r\n${resp?.error}`);
|
||||
}
|
||||
}
|
||||
displayType === "chart" ? setGraphData(tempData) : setLiveData(tempData);
|
||||
isDisplayChart ? setGraphData(tempData as MetricResult[]) : setLiveData(tempData as InstantMetricResult[]);
|
||||
setTraces(tempTraces);
|
||||
} catch (e) {
|
||||
if (e instanceof Error && e.name !== "AbortError") {
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "8.5.3"
|
||||
"version": "8.4.4"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
@@ -61,12 +61,12 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "Overview for VictoriaMetrics vmagent v1.73.0 or higher",
|
||||
"description": "Overview for VictoriaMetrics vmagent v1.79.0 or higher",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"iteration": 1656943336787,
|
||||
"iteration": 1657810604530,
|
||||
"links": [
|
||||
{
|
||||
"icon": "doc",
|
||||
@@ -154,7 +154,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"up\"})",
|
||||
@@ -218,7 +218,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"down\"})",
|
||||
@@ -285,7 +285,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(increase(vm_log_messages_total{job=~\"$job\", instance=~\"$instance\", level!=\"info\"}[30m]))",
|
||||
@@ -344,7 +344,7 @@
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(vm_persistentqueue_bytes_pending{job=~\"$job\", instance=~\"$instance\"})",
|
||||
@@ -490,7 +490,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -589,7 +589,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -702,7 +702,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -805,7 +805,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -946,7 +946,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1039,7 +1039,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1138,7 +1138,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1237,7 +1237,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -1344,7 +1344,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2457,7 +2457,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 4
|
||||
"y": 43
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 60,
|
||||
@@ -2480,7 +2480,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2555,7 +2555,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 4
|
||||
"y": 43
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 66,
|
||||
@@ -2578,7 +2578,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2652,7 +2652,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 12
|
||||
"y": 51
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 61,
|
||||
@@ -2675,7 +2675,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2748,7 +2748,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 12
|
||||
"y": 51
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 65,
|
||||
@@ -2771,7 +2771,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2837,7 +2837,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 20
|
||||
"y": 59
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": false,
|
||||
@@ -2881,9 +2881,10 @@
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"description": "Shows saturation of every connection to remote storage. If the threshold of 0.9sec is reached, then the connection is saturated by more than 90% and vmagent won't be able to keep up. This usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.\n",
|
||||
"description": "Shows saturation of every connection to remote storage. If the threshold of 90% is reached, then the connection is saturated (busy or slow) by more than 90%, so vmagent won't be able to keep up and can start buffering data. \n\nThis usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.\n",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"links": []
|
||||
@@ -2896,7 +2897,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 20
|
||||
"y": 59
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 84,
|
||||
@@ -2919,7 +2920,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -2930,7 +2931,7 @@
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": true,
|
||||
"expr": "sum(rate(vmagent_remotewrite_send_duration_seconds_total{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) by (instance, url)",
|
||||
"expr": "sum(rate(vmagent_remotewrite_send_duration_seconds_total{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}[$__rate_interval])) by (instance, url)\n/\nmax(vmagent_remotewrite_queues{job=~\"$job\", instance=~\"$instance\", url=~\"$url\"}) by(instance, url)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
@@ -2943,7 +2944,7 @@
|
||||
"fill": true,
|
||||
"line": true,
|
||||
"op": "gt",
|
||||
"value": 0.9,
|
||||
"value": 90,
|
||||
"yaxis": "left"
|
||||
}
|
||||
],
|
||||
@@ -2963,7 +2964,7 @@
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:662",
|
||||
"format": "s",
|
||||
"format": "percentunit",
|
||||
"logBase": 1,
|
||||
"min": "0",
|
||||
"show": true
|
||||
@@ -2997,7 +2998,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 28
|
||||
"y": 67
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": false,
|
||||
@@ -3053,7 +3054,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 28
|
||||
"y": 67
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": false,
|
||||
@@ -3104,7 +3105,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 36
|
||||
"y": 75
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 88,
|
||||
@@ -3124,7 +3125,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -3207,7 +3208,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 36
|
||||
"y": 75
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 90,
|
||||
@@ -3227,7 +3228,7 @@
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "8.5.3",
|
||||
"pluginVersion": "8.4.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
@@ -4567,7 +4568,7 @@
|
||||
}
|
||||
],
|
||||
"refresh": "",
|
||||
"schemaVersion": 36,
|
||||
"schemaVersion": 35,
|
||||
"style": "dark",
|
||||
"tags": [
|
||||
"vmagent",
|
||||
@@ -4577,7 +4578,9 @@
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"selected": false
|
||||
"selected": true,
|
||||
"text": "VM",
|
||||
"value": "VM"
|
||||
},
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
DOCKER_NAMESPACE := victoriametrics
|
||||
|
||||
ROOT_IMAGE ?= alpine:3.16.0
|
||||
CERTS_IMAGE := alpine:3.16.0
|
||||
GO_BUILDER_IMAGE := golang:1.18.4-alpine
|
||||
ROOT_IMAGE ?= alpine:3.16.2
|
||||
CERTS_IMAGE := alpine:3.16.2
|
||||
GO_BUILDER_IMAGE := golang:1.19.2-alpine
|
||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
|
||||
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
|
||||
|
||||
|
||||
@@ -270,7 +270,9 @@ groups:
|
||||
Ensure that destination is up and reachable."
|
||||
|
||||
- alert: RemoteWriteConnectionIsSaturated
|
||||
expr: rate(vmagent_remotewrite_send_duration_seconds_total[5m]) > 0.9
|
||||
expr: |
|
||||
sum(rate(vmagent_remotewrite_send_duration_seconds_total[5m])) by(job, instance, url)
|
||||
> 0.9 * max(vmagent_remotewrite_queues) by(job, instance, url)
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
@@ -13,12 +13,85 @@ The following tip changes can be tested by building VictoriaMetrics components f
|
||||
* [How to build vmauth](https://docs.victoriametrics.com/vmauth.html#how-to-build-from-sources)
|
||||
* [How to build vmctl](https://docs.victoriametrics.com/vmctl.html#how-to-build)
|
||||
|
||||
## tip
|
||||
## v1.79.x long-time support release (LTS)
|
||||
|
||||
|
||||
## [v1.79.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.4)
|
||||
|
||||
Released at 07-10-2022
|
||||
|
||||
**Update note 1:** [vmalert](https://docs.victoriametrics.com/vmalert.html) changes the default value of the command-line flag `-datasource.queryStep` from `0s` to `5m`. The change is supposed to improve the reliability of rule evaluation when the evaluation interval is lower than the scraping interval.
|
||||
|
||||
* FEATURE: expose `vmagent_remotewrite_queues` metric and use it in alerting rules in order to improve the detection of remote storage connection saturation. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2871).
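  For reference, the updated `RemoteWriteConnectionIsSaturated` alerting rule shipped with this change compares `sum(rate(vmagent_remotewrite_send_duration_seconds_total[5m])) by (job, instance, url)` against `0.9 * max(vmagent_remotewrite_queues) by (job, instance, url)`, i.e. the total time spent sending data is normalized by the number of configured remote write queues before being checked against the 90% saturation threshold.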
|
||||
|
||||
* BUGFIX: do not export stale metrics via [/federate api](https://docs.victoriametrics.com/#federation) after the staleness markers. Previously such metrics were exported with `NaN` values. This could break some setups. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3185).
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): properly handle request paths ending with `/` such as `/vmui/`. Previously `vmui` was dropping the trailing `/`, which could prevent using `vmui` via `vmauth`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly encode query params for AWS signed requests: use `%20` instead of `+` as the API requires. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3171).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Previously the `sum_over_time(m[d])` could be improperly divided by a time range smaller than `d`. A short sketch of the corrected semantics is given after this list. See [rate_over_sum() docs](https://docs.victoriametrics.com/MetricsQL.html#rate_over_sum) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3045).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `increase(m[d])` over slow-changing counters with values smaller than 100. Previously [increase](https://docs.victoriametrics.com/MetricsQL.html#increase) could return unexpectedly big results in this case. See [the related issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/962) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3163).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): ignore empty series when applying [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset). It should improve queries with additional filters by value in expressions like `limit_offset(1, 1, foo > 1)`.
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate [quantiles_over_time](https://docs.victoriametrics.com/MetricsQL.html#quantiles_over_time) when the lookbehind window contains only a single sample. Previously an empty result was incorrectly returned in this case.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix `RangeError: Maximum call stack size exceeded` error when the query returns too many data points at `Table` view. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3092/files).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): re-evaluate annotations per each alert evaluation. Previously, annotations were evaluated only on alert's value change. This could result in stale annotations in some cases described in [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3119).
* BUGFIX: prevent excessive CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode). The previous fix in [v1.79.3](https://docs.victoriametrics.com/CHANGELOG.html#v1793) wasn't complete.
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): change default value for command-line flag `-datasource.queryStep` from `0s` to `5m`. The `step` param is added by vmalert to every rule evaluation request sent to the datasource. Before this change, `step` was equal to the group's evaluation interval by default. The `step` param for instant queries defines how far VM can look back for the last written data point. The change is supposed to improve the reliability of rule evaluation when the evaluation interval is lower than the scraping interval.
* BUGFIX: properly calculate the `vm_rows_scanned_per_query` histogram exported at the `/metrics` page of `vmselect` and single-node VictoriaMetrics. Previously it could return misleadingly high numbers for [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions), which scan only a few samples over the provided lookbehind window in square brackets. For example, `increase(m[1d])` always scans only 2 rows (aka `raw samples`) per returned time series.
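The `rate_over_sum()` fix above boils down to always dividing by the full lookbehind window `d`. A minimal Go sketch of the corrected semantics (an illustration only, not the actual VictoriaMetrics implementation):

```go
package main

import "fmt"

// rateOverSum illustrates rate_over_sum(m[d]) == sum_over_time(m[d]) / d:
// the sum of raw samples inside the lookbehind window is divided by the
// full window duration d (in seconds), not by the smaller time range
// actually covered by the samples.
func rateOverSum(samples []float64, windowSeconds float64) float64 {
	sum := 0.0
	for _, v := range samples {
		sum += v
	}
	return sum / windowSeconds
}

func main() {
	// Three samples summing to 300 inside a 5m (300s) window give a rate of 1,
	// even if the samples span only a fraction of the window.
	fmt.Println(rateOverSum([]float64{100, 100, 100}, 300))
}
```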
|
||||
|
||||
|
||||
## [v1.79.3](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.3)
|
||||
|
||||
Released at 30-08-2022
|
||||
|
||||
**v1.79.x is a line of LTS (long-time support) releases. It contains important up-to-date bugfixes.
The v1.79.x line will be supported for at least 12 months after the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release.**
|
||||
|
||||
* SECURITY: [vmalert](https://docs.victoriametrics.com/vmalert.html): do not expose `-remoteWrite.url`, `-remoteRead.url` and `-datasource.url` command-line flag values in logs and at `http://vmalert:8880/flags` page by default, since they may contain sensitive data such as auth keys. This aligns `vmalert` behaviour with [vmagent](https://docs.victoriametrics.com/vmagent.html), which doesn't expose the `-remoteWrite.url` command-line flag value in logs and at `http://vmagent:8429/flags` page by default. Specify `-remoteWrite.showURL`, `-remoteRead.showURL` and `-datasource.showURL` command-line flags for showing values for the corresponding `-*.url` flags in logs. Thanks to @mble for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2965).
|
||||
* SECURITY: upgrade base docker image (alpine) from 3.16.1 to 3.16.2. See [alpine 3.16.2 release notes](https://alpinelinux.org/posts/Alpine-3.13.12-3.14.8-3.15.6-3.16.2-released.html).
|
||||
|
||||
* BUGFIX: prevent excessive CPU usage when the storage enters [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
|
||||
* BUGFIX: improve performance for requests to [/api/v1/labels](https://docs.victoriametrics.com/url-examples.html#apiv1labels) and [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples.html#apiv1labelvalues) when the filter in the `match[]` query arg matches a small number of time series (an illustrative request is shown after this list). The performance for this case was reduced in [v1.78.0](https://docs.victoriametrics.com/CHANGELOG.html#v1780). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1533) issues.
|
||||
* BUGFIX: increase the default limit on the number of concurrent merges for small parts from 8 to 16. This should help resolve potential issues with heavy data ingestion. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2673#issuecomment-1218185978) from @lukepalmer.
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): fix panic when incorrect arg is passed as `phi` into [histogram_quantiles](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantiles) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3026).
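As an illustration of the `/api/v1/labels` performance fix above (the metric name and label filter below are arbitrary examples, not taken from the changelog), a request such as `GET /api/v1/labels?match[]=http_requests_total{job="api"}` should again be fast when the `match[]` filter selects only a small number of time series.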
|
||||
|
||||
## [v1.79.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.2)
|
||||
|
||||
Released at 08-08-2022
|
||||
|
||||
**v1.79.x is a line of LTS (long-time support) releases. It contains important up-to-date bugfixes.
The v1.79.x line will be supported for at least 12 months after the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release.**
|
||||
|
||||
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic in [multi-level cluster setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) when top-level `vmselect` is configured with `-replicationFactor` bigger than 1. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2961).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly handle custom `endpoint` value in [ec2_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config). It was ignored since [v1.77.0](https://docs.victoriametrics.com/CHANGELOG.html#v1770) because of a bug in the implementation of [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1287).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): add missing `__meta_kubernetes_ingress_class_name` meta-label for `role: ingress` service discovery in Kubernetes. See [this commit from Prometheus](https://github.com/prometheus/prometheus/commit/7e65ad3e432bd2837c17e3e63e85dcbcc30f4a8a).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow stale responses from Consul service discovery (aka [consul_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config)) by default in the same way as Prometheus does. This should reduce load on Consul when discovering a big number of targets. Stale responses can be disabled by specifying the `allow_stale: false` option in `consul_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2940).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): [dockerswarm_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config): properly set `__meta_dockerswarm_container_label_*` labels instead of `__meta_dockerswarm_task_label_*` labels as Prometheus does. See [this issue](https://github.com/prometheus/prometheus/issues/9187).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): set the `up` metric to `0` for partial scrapes in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously the `up` metric was set to `1` when at least a single metric had been scraped before the error. This aligns the behaviour of `vmagent` with Prometheus.
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): restart all the scrape jobs during [config reload](https://docs.victoriametrics.com/vmagent.html#configuration-update) after `global` section is changed inside `-promscrape.config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with AWS ECS credentials. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2875). Thanks to @transacid for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2876).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not split regex in [relabeling rules](https://docs.victoriametrics.com/vmagent.html#relabeling) into multiple lines if it contains groups. This fixes [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928).
|
||||
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): return series from `q1` if `q2` doesn't return matching time series in the query `q1 ifnot q2`. Previously series from `q1` weren't returned in this case.
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
|
||||
* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
|
||||
|
||||
|
||||
## [v1.79.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.1)
|
||||
|
||||
Released at 02-08-2022
|
||||
|
||||
**v1.79.x is a line of LTS (long-time support) releases. It contains important up-to-date bugfixes.
The v1.79.x line will be supported for at least 12 months after the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release.**
|
||||
|
||||
* SECURITY: upgrade base docker image (alpine) from 3.16.0 to 3.16.1. See [alpine 3.16.1 release notes](https://alpinelinux.org/posts/Alpine-3.16.1-released.html).
|
||||
|
||||
|
||||
## [v1.79.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.0)
|
||||
|
||||
Released at 14-07-2022
|
||||
|
||||
**v1.79.x is a line of LTS (long-time support) releases. It contains important up-to-date bugfixes.
The v1.79.x line will be supported for at least 12 months after the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release.**
|
||||
|
||||
**Update note 1:** this release introduces backwards-incompatible changes to `vm_partial_results_total` metric by changing its labels to be consistent with `vm_requests_total` metric. If you use alerting rules or Grafana dashboards, which rely on this metric, then they must be updated. The official dashboards for VictoriaMetrics don't use this metric.
|
||||
|
||||
**Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) adds `/vmalert/` prefix to [web urls](https://docs.victoriametrics.com/vmalert.html#web) according to [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825). This may affect `vmalert` instances with non-empty `-http.pathPrefix` command-line flag. After the update, configuring this flag is no longer needed. Here's [why](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2799#issuecomment-1171392005).
|
||||
|
||||
@@ -487,7 +487,7 @@ See also [implicit query conversions](#implicit-query-conversions).
|
||||
|
||||
#### histogram_quantiles
|
||||
|
||||
`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). `phi*` must be in the range `[0...1]`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).
|
||||
`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). Argument `phi*` must be in the range `[0...1]`. For example, `histogram_quantiles('le', 0.3, 0.5, sum(rate(http_request_duration_seconds_bucket[5m]) by (le))`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).
|
||||
|
||||
#### histogram_share
|
||||
|
||||
|
||||
@@ -4,21 +4,59 @@ sort: 18
|
||||
|
||||
# Release process guidance
|
||||
|
||||
## Prereqs
|
||||
1. Make sure you have the enterprise remote configured
|
||||
```
|
||||
git remote add enterprise <url>
|
||||
```
|
||||
2. Make sure you have a signing key configured
3. Make sure you have a GitHub token with at least `read:org, repo, write:packages` permissions exported under the `GITHUB_TOKEN` env variable.
You can create a token [here](https://github.com/settings/tokens)
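For example (assuming a POSIX shell), the token can be exported before running the release targets below: `export GITHUB_TOKEN=<your-token>`.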
|
||||
|
||||
## Release version and Docker images
|
||||
|
||||
0. Make sure that the release commits have no security issues.
|
||||
1. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
|
||||
1a. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
|
||||
1b. Add `(available starting from v1.xx.y)` line to feature docs introduced in the upcoming release.
|
||||
2. Create the following release tags:
|
||||
* `git tag -s v1.xx.y` in `master` branch
|
||||
* `git tag -s v1.xx.y-cluster` in `cluster` branch
|
||||
* `git tag -s v1.xx.y-enterprise` in `enterprise` branch
|
||||
* `git tag -s v1.xx.y-enterprise-cluster` in `enterprise-cluster` branch
|
||||
3. Run `TAG=v1.xx.y make publish-release`. It will create `*.tar.gz` release archives with the corresponding `_checksums.txt` files inside `bin` directory and publish Docker images for the given `TAG`, `TAG-cluster`, `TAG-enterprise` and `TAG-enterprise-cluster`.
|
||||
4. Push release tags to <https://github.com/VictoriaMetrics/VictoriaMetrics> : `git push origin v1.xx.y` and `git push origin v1.xx.y-cluster`. Do not push `-enterprise` tags to public repository.
|
||||
5. Go to <https://github.com/VictoriaMetrics/VictoriaMetrics/releases> , create new release from the pushed tag on step 4 and upload `*.tar.gz` archive with the corresponding `_checksums.txt` from step 3.
|
||||
6. Copy the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release to [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) page.
|
||||
7. Bump version of the VictoriaMetrics cluster setup in for [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
||||
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
||||
3. Run `TAG=v1.xx.y make publish-release`. This command performs the following tasks:
|
||||
a) Build and package binaries in `*.tar.gz` release archives with the corresponding `_checksums.txt` files inside `bin` directory.
|
||||
This step can be run manually with the command `make release` from the needed git tag.
|
||||
b) Build and publish [multi-platform Docker images](https://docs.docker.com/build/buildx/multiplatform-images/)
|
||||
for the given `TAG`, `TAG-cluster`, `TAG-enterprise` and `TAG-enterprise-cluster`.
|
||||
The multi-platform Docker image is built for the following platforms:
|
||||
* linux/amd64
|
||||
* linux/arm64
|
||||
* linux/arm
|
||||
* linux/ppc64le
|
||||
* linux/386
|
||||
This step can be run manually with the command `make publish` from the needed git tag.
|
||||
4. Push the tags `v1.xx.y` and `v1.xx.y-cluster` created at step 2 to the public GitHub repository at https://github.com/VictoriaMetrics/VictoriaMetrics .
**Important note:** do not push enterprise tags to the public GitHub repository - they must be pushed only to the private repository.
5. Run `TAG=v1.xx.y make github-create-release github-upload-assets` (a concrete example with a real tag is given after this list). This command performs the following tasks:
|
||||
a) Create draft GitHub release with the name `TAG`. This step can be run manually
|
||||
with the command `TAG=v1.xx.y make github-create-release`.
|
||||
The release id is stored in the `/tmp/vm-github-release` file.
|
||||
b) Upload all the binaries and checksums created at step `3a` to that release.
|
||||
This step can be run manually with the command `make github-upload-assets`.
|
||||
It is expected that the needed release id is stored in the `/tmp/vm-github-release` file,
which is created at step `a`.
If the upload process is interrupted for any reason, then the following recovery steps must be performed:
- Delete the created draft release by running `make github-delete-release`.
This command expects that the id of the release to delete is located in the `/tmp/vm-github-release`
file created at step `a`.
- Run `TAG=v1.xx.y make github-create-release github-upload-assets` again, so that a new release is created
and all the needed assets are re-uploaded to it.
|
||||
6. Go to <https://github.com/VictoriaMetrics/VictoriaMetrics/releases> and verify that a draft release with the name `TAG` has been created
and that it contains all the needed binaries and checksums.
|
||||
7. Update the release description with the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release.
|
||||
8. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
||||
9. Bump version of the VictoriaMetrics cluster in the [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
||||
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
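As a concrete example of steps 3 and 5 (using the version from this release as the tag), the commands would be `TAG=v1.79.4 make publish-release` and `TAG=v1.79.4 make github-create-release github-upload-assets` respectively.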
|
||||
|
||||
## Building snap package

@@ -74,7 +112,3 @@ Repository [https://github.com/VictoriaMetrics/ansible-playbooks](https://github

5. Commit changes
6. Create a new tag
7. Create a new release. This automatically publishes the new versions to galaxy.ansible.com

## Github pages

All changes to `README.md`, to files in the `docs` folder and to files with the `.md` extension are automatically pushed to the Wiki.
@@ -641,11 +641,13 @@ The shortlist of configuration flags is the following:
  -datasource.oauth2.tokenUrl string
     Optional OAuth2 tokenURL to use for -datasource.url.
  -datasource.queryStep duration
     queryStep defines how far a value can fallback to when evaluating queries. For example, if datasource.queryStep=15s then param "step" with value "15s" will be added to every query.If queryStep isn't specified, rule's evaluationInterval will be used instead.
     How far a value can fallback to when evaluating queries. For example, if -datasource.queryStep=15s then param "step" with value "15s" will be added to every query. If set to 0, rule's evaluation interval will be used instead. (default 5m0s)
  -datasource.queryTimeAlignment
     Whether to align "time" parameter with evaluation interval.Alignment supposed to produce deterministic results despite of number of vmalert replicas or time they were started. See more details here https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257 (default true)
  -datasource.roundDigits int
     Adds "round_digits" GET param to datasource requests. In VM "round_digits" limits the number of digits after the decimal point in response values.
  -datasource.showURL
     Whether to show -datasource.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
  -datasource.tlsCAFile string
     Optional path to TLS CA file to use for verifying connections to -datasource.url. By default, system CA is used
  -datasource.tlsCertFile string
@@ -657,7 +659,7 @@ The shortlist of configuration flags is the following:
  -datasource.tlsServerName string
     Optional TLS server name to use for connections to -datasource.url. By default, the server name from -datasource.url is used
  -datasource.url string
     VictoriaMetrics or vmselect url. Required parameter. E.g. http://127.0.0.1:8428 . See also -remoteRead.disablePathAppend
     Datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect URL. Required parameter. E.g. http://127.0.0.1:8428 . See also '-datasource.disablePathAppend', '-datasource.showURL'.
  -defaultTenant.graphite string
     Default tenant for Graphite alerting groups. See https://docs.victoriametrics.com/vmalert.html#multitenancy
  -defaultTenant.prometheus string
@@ -817,6 +819,8 @@ The shortlist of configuration flags is the following:
     Optional OAuth2 scopes to use for -remoteRead.url. Scopes must be delimited by ';'.
  -remoteRead.oauth2.tokenUrl string
     Optional OAuth2 tokenURL to use for -remoteRead.url.
  -remoteRead.showURL
     Whether to show -remoteRead.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
  -remoteRead.tlsCAFile string
     Optional path to TLS CA file to use for verifying connections to -remoteRead.url. By default system CA is used
  -remoteRead.tlsCertFile string
@@ -828,7 +832,7 @@ The shortlist of configuration flags is the following:
  -remoteRead.tlsServerName string
     Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
  -remoteRead.url vmalert
     Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend
     Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect.Remote read is used to restore alerts state.This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428. See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.
  -remoteWrite.basicAuth.password string
     Optional basic auth password for -remoteWrite.url
  -remoteWrite.basicAuth.passwordFile string
@@ -859,6 +863,8 @@ The shortlist of configuration flags is the following:
     Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.
  -remoteWrite.oauth2.tokenUrl string
     Optional OAuth2 tokenURL to use for -notifier.url.
  -remoteWrite.showURL
     Whether to show -remoteWrite.url in the exported metrics. It is hidden by default, since it can contain sensitive info such as auth key
  -remoteWrite.tlsCAFile string
     Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. By default system CA is used
  -remoteWrite.tlsCertFile string
@@ -870,7 +876,7 @@ The shortlist of configuration flags is the following:
  -remoteWrite.tlsServerName string
     Optional TLS server name to use for connections to -remoteWrite.url. By default the server name from -remoteWrite.url is used
  -remoteWrite.url string
     Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend
     Optional URL to VictoriaMetrics or vminsert where to persist alerts state and recording rules results in form of timeseries. For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.
  -replay.disableProgressBar
     Whether to disable rendering progress bars during the replay. Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.
  -replay.maxDatapointsPerQuery int
@@ -43,8 +43,8 @@ type credentials struct {
    Expiration time.Time
}

// NewConfig returns new AWS Config.
func NewConfig(region, roleARN, accessKey, secretKey, service string) (*Config, error) {
// NewConfig returns new AWS Config from the given args.
func NewConfig(ec2Endpoint, stsEndpoint, region, roleARN, accessKey, secretKey, service string) (*Config, error) {
    cfg := &Config{
        client: http.DefaultClient,
        region: region,
@@ -65,8 +65,8 @@ func NewConfig(region, roleARN, accessKey, secretKey, service string) (*Config,
        }
        cfg.region = r
    }
    cfg.ec2Endpoint = buildAPIEndpoint(cfg.ec2Endpoint, cfg.region, "ec2")
    cfg.stsEndpoint = buildAPIEndpoint(cfg.stsEndpoint, cfg.region, "sts")
    cfg.ec2Endpoint = buildAPIEndpoint(ec2Endpoint, cfg.region, "ec2")
    cfg.stsEndpoint = buildAPIEndpoint(stsEndpoint, cfg.region, "sts")
    if cfg.roleARN == "" {
        cfg.roleARN = os.Getenv("AWS_ROLE_ARN")
    }
@@ -204,7 +204,11 @@ func (cfg *Config) getAPICredentials() (*credentials, error) {
    }
    if ecsMetaURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); len(ecsMetaURI) > 0 {
        path := "http://169.254.170.2" + ecsMetaURI
        return getECSRoleCredentialsByPath(cfg.client, path)
        ac, err := getECSRoleCredentialsByPath(cfg.client, path)
        if err != nil {
            return nil, fmt.Errorf("cannot obtain ECS role credentials: %w", err)
        }
        acNew = ac
    }

    // we need instance credentials if dont have access keys
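For orientation only, here is a minimal sketch (not part of the diff) of how a caller could use the extended `NewConfig` signature shown above. The endpoint URLs are hypothetical placeholders; empty strings are assumed to fall back to the region-based defaults built by `buildAPIEndpoint`.

```go
package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
)

func main() {
	// Hypothetical custom endpoints; empty strings would fall back to the
	// region-based defaults built inside NewConfig via buildAPIEndpoint.
	cfg, err := awsapi.NewConfig(
		"https://ec2.example.internal", // ec2Endpoint (placeholder)
		"https://sts.example.internal", // stsEndpoint (placeholder)
		"us-east-1",                    // region
		"", "", "",                     // roleARN, accessKey, secretKey
		"ec2")                          // service
	if err != nil {
		log.Fatalf("cannot create AWS config: %s", err)
	}
	_ = cfg
}
```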
@@ -41,6 +41,9 @@ func signRequestWithTime(req *http.Request, service, region, payloadHash string,
    datestamp := t.Format("20060102")
    canonicalURL := uri.Path
    canonicalQS := uri.Query().Encode()
    // Replace "+" with "%20" according to AWS requirements.
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3171
    canonicalQS = strings.ReplaceAll(canonicalQS, "+", "%20")

    canonicalHeaders := fmt.Sprintf("host:%s\nx-amz-date:%s\n", uri.Host, amzdate)
    signedHeaders := "host;x-amz-date"
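As context for the change above, a small standalone sketch (not from the diff) of why the replacement is needed: Go's `url.Values.Encode` emits spaces as `+`, while the SigV4 canonical query string must use `%20`.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	q := url.Values{}
	q.Set("Filter", "tag value")
	canonicalQS := q.Encode() // "Filter=tag+value"
	// AWS SigV4 requires spaces to be encoded as %20 in the canonical query string.
	canonicalQS = strings.ReplaceAll(canonicalQS, "+", "%20")
	fmt.Println(canonicalQS) // "Filter=tag%20value"
}
```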
@@ -235,13 +235,27 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
        connTimeoutClosedConns.Inc()
        w.Header().Set("Connection", "close")
    }
    path, err := getCanonicalPath(r.URL.Path)
    if err != nil {
        Errorf(w, r, "cannot get canonical path: %s", err)
        unsupportedRequestErrors.Inc()
        return
    path := r.URL.Path
    prefix := GetPathPrefix()
    if prefix != "" {
        // Trim -http.pathPrefix from path
        prefixNoTrailingSlash := strings.TrimSuffix(prefix, "/")
        if path == prefixNoTrailingSlash {
            // Redirect to url with / at the end.
            // This is needed for proper handling of relative urls in web browsers.
            // Intentionally ignore query args, since it is expected that the requested url
            // is composed by a human, so it doesn't contain query args.
            RedirectPermanent(w, prefix)
            return
        }
        if !strings.HasPrefix(path, prefix) {
            Errorf(w, r, "missing -http.pathPrefix=%q in the requested path %q", *pathPrefix, path)
            unsupportedRequestErrors.Inc()
            return
        }
        path = path[len(prefix)-1:]
        r.URL.Path = path
    }
    r.URL.Path = path
    switch r.URL.Path {
    case "/health":
        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -327,24 +341,6 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
    }
}

func getCanonicalPath(path string) (string, error) {
    if len(*pathPrefix) == 0 || path == "/" {
        return path, nil
    }
    if *pathPrefix == path {
        return "/", nil
    }
    prefix := *pathPrefix
    if !strings.HasSuffix(prefix, "/") {
        prefix = prefix + "/"
    }
    if !strings.HasPrefix(path, prefix) {
        return "", fmt.Errorf("missing `-pathPrefix=%q` in the requested path: %q", *pathPrefix, path)
    }
    path = path[len(prefix)-1:]
    return path, nil
}

func checkBasicAuth(w http.ResponseWriter, r *http.Request) bool {
    if len(*httpAuthUsername) == 0 {
        // HTTP Basic Auth is disabled.
@@ -643,7 +639,17 @@ func IsTLS() bool {

// GetPathPrefix - returns http server path prefix.
func GetPathPrefix() string {
    return *pathPrefix
    prefix := *pathPrefix
    if prefix == "" {
        return ""
    }
    if !strings.HasPrefix(prefix, "/") {
        prefix = "/" + prefix
    }
    if !strings.HasSuffix(prefix, "/") {
        prefix += "/"
    }
    return prefix
}

// WriteAPIHelp writes pathList to w in HTML format.
@@ -671,3 +677,12 @@ func GetRequestURI(r *http.Request) string {
    }
    return requestURI + delimiter + queryArgs
}

// RedirectPermanent redirects to the given url using 301 status code.
func RedirectPermanent(w http.ResponseWriter, url string) {
    // Do not use http.Redirect, since it breaks relative redirects
    // if the http.Request.URL contains unexpected url.
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918
    w.Header().Set("Location", url)
    w.WriteHeader(http.StatusMovedPermanently)
}
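As an aside, a minimal sketch of the prefix normalization performed by the new `GetPathPrefix` above; this is a standalone reimplementation for illustration, not the package code itself.

```go
package main

import (
	"fmt"
	"strings"
)

// normalizePrefix mirrors the normalization shown in GetPathPrefix above:
// ensure the prefix both starts and ends with "/".
func normalizePrefix(prefix string) string {
	if prefix == "" {
		return ""
	}
	if !strings.HasPrefix(prefix, "/") {
		prefix = "/" + prefix
	}
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	return prefix
}

func main() {
	// All spellings of -http.pathPrefix normalize to "/foo/", so a request to
	// "/foo" can be redirected to "/foo/" and then trimmed consistently.
	fmt.Println(normalizePrefix("foo"))   // "/foo/"
	fmt.Println(normalizePrefix("/foo"))  // "/foo/"
	fmt.Println(normalizePrefix("/foo/")) // "/foo/"
}
```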
@@ -710,7 +710,7 @@ func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) {
            atomic.AddUint64(&tb.assistedMerges, 1)
            continue
        }
        if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
        if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
            return
        }
        logger.Panicf("FATAL: cannot merge small parts: %s", err)
@@ -805,12 +805,14 @@ func (tb *Table) canBackgroundMerge() bool {
    return atomic.LoadUint32(tb.isReadOnly) == 0
}

var errReadOnlyMode = fmt.Errorf("storage is in readonly mode")

func (tb *Table) mergeExistingParts(isFinal bool) error {
    if !tb.canBackgroundMerge() {
        // Do not perform background merge in read-only mode
        // in order to prevent from disk space shortage.
        // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
        return nil
        return errReadOnlyMode
    }
    n := fs.MustGetFreeSpace(tb.path)
    // Divide free space by the max number of concurrent merges.
@@ -849,7 +851,7 @@ func (tb *Table) partMerger() error {
            // The merger has been stopped.
            return nil
        }
        if !errors.Is(err, errNothingToMerge) {
        if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
            return err
        }
        if fasttime.UnixTimestamp()-lastMergeTime > 30 {
@@ -104,6 +104,11 @@ func stringValue(v interface{}) (string, error) {

// MarshalYAML marshals mlr to YAML.
func (mlr *MultiLineRegex) MarshalYAML() (interface{}, error) {
    if strings.ContainsAny(mlr.S, "([") {
        // The mlr.S contains groups. Fall back to returning the regexp as is without splitting it into parts.
        // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928 .
        return mlr.S, nil
    }
    a := strings.Split(mlr.S, "|")
    if len(a) == 1 {
        return a[0], nil
@@ -7,6 +7,30 @@ import (
    "gopkg.in/yaml.v2"
)

func TestMultiLineRegexUnmarshalMarshal(t *testing.T) {
    f := func(data, resultExpected string) {
        t.Helper()
        var mlr MultiLineRegex
        if err := yaml.UnmarshalStrict([]byte(data), &mlr); err != nil {
            t.Fatalf("cannot unmarshal %q: %s", data, err)
        }
        result, err := yaml.Marshal(&mlr)
        if err != nil {
            t.Fatalf("cannot marshal %q: %s", data, err)
        }
        if string(result) != resultExpected {
            t.Fatalf("unexpected marshaled data; got\n%q\nwant\n%q", result, resultExpected)
        }
    }
    f(``, `""`+"\n")
    f(`foo`, "foo\n")
    f(`a|b||c`, "- a\n- b\n- \"\"\n- c\n")
    f(`(a|b)`, "(a|b)\n")
    f(`a|b[c|d]`, "a|b[c|d]\n")
    f("- a\n- b", "- a\n- b\n")
    f("- a\n- (b)", "a|(b)\n")
}

func TestRelabelConfigMarshalUnmarshal(t *testing.T) {
    f := func(data, resultExpected string) {
        t.Helper()
@@ -31,7 +55,7 @@ func TestRelabelConfigMarshalUnmarshal(t *testing.T) {
- regex:
  - 'fo.+'
  - '.*ba[r-z]a'
`, "- regex:\n - fo.+\n - .*ba[r-z]a\n")
`, "- regex: fo.+|.*ba[r-z]a\n")
    f(`- regex: foo|bar`, "- regex:\n - foo\n - bar\n")
    f(`- regex: True`, `- regex: "true"`+"\n")
    f(`- regex: true`, `- regex: "true"`+"\n")
@@ -130,6 +130,10 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
        prevScrapeCfgByName[scPrev.JobName] = scPrev
    }

    // Restart all the scrape jobs on Global config change.
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884
    needGlobalRestart := !areEqualGlobalConfigs(&cfg.Global, &prevCfg.Global)

    // Loop over the new jobs, start new ones and restart updated ones.
    var started, stopped, restarted int
    currentJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs))
@@ -142,7 +146,7 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
            started++
            continue
        }
        if areEqualScrapeConfigs(scPrev, sc) {
        if !needGlobalRestart && areEqualScrapeConfigs(scPrev, sc) {
            // The scrape config didn't change, so no need to restart it.
            // Use the reference to the previous job, so it could be stopped properly later.
            cfg.ScrapeConfigs[i] = scPrev
@@ -165,6 +169,12 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
    logger.Infof("restarted service discovery routines in %.3f seconds, stopped=%d, started=%d, restarted=%d", time.Since(startTime).Seconds(), stopped, started, restarted)
}

func areEqualGlobalConfigs(a, b *GlobalConfig) bool {
    sa := a.marshalJSON()
    sb := b.marshalJSON()
    return string(sa) == string(sb)
}

func areEqualScrapeConfigs(a, b *ScrapeConfig) bool {
    sa := a.marshalJSON()
    sb := b.marshalJSON()
@@ -183,6 +193,14 @@ func (sc *ScrapeConfig) marshalJSON() []byte {
    return data
}

func (gc *GlobalConfig) marshalJSON() []byte {
    data, err := json.Marshal(gc)
    if err != nil {
        logger.Panicf("BUG: cannot marshal GlobalConfig: %s", err)
    }
    return data
}

func (cfg *Config) mustStop() {
    startTime := time.Now()
    logger.Infof("stopping service discovery routines...")
@@ -27,7 +27,7 @@ type SDConfig struct {
    Tags []string `yaml:"tags,omitempty"`
    NodeMeta map[string]string `yaml:"node_meta,omitempty"`
    TagSeparator *string `yaml:"tag_separator,omitempty"`
    AllowStale bool `yaml:"allow_stale,omitempty"`
    AllowStale *bool `yaml:"allow_stale,omitempty"`
    // RefreshInterval time.Duration `yaml:"refresh_interval"`
    // refresh_interval is obtained from `-promscrape.consulSDCheckInterval` command-line option.
}

@@ -45,7 +45,7 @@ type serviceWatcher struct {
// newConsulWatcher creates new watcher and starts background service discovery for Consul.
func newConsulWatcher(client *discoveryutils.Client, sdc *SDConfig, datacenter, namespace string) *consulWatcher {
    baseQueryArgs := "?dc=" + url.QueryEscape(datacenter)
    if sdc.AllowStale {
    if sdc.AllowStale == nil || *sdc.AllowStale {
        baseQueryArgs += "&stale"
    }
    if namespace != "" {
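A short illustrative sketch (hypothetical struct and helper names, mirroring the `AllowStale *bool` change above) of why a pointer is used: `nil` means the option was not set and stale reads stay enabled by default, while an explicit `allow_stale: false` can now turn them off.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type sdConfig struct {
	AllowStale *bool `yaml:"allow_stale,omitempty"`
}

// allowStale mirrors the nil-or-true check used in newConsulWatcher above.
func allowStale(cfg *sdConfig) bool {
	return cfg.AllowStale == nil || *cfg.AllowStale
}

func main() {
	var unset, disabled sdConfig
	_ = yaml.Unmarshal([]byte(""), &unset)
	_ = yaml.Unmarshal([]byte("allow_stale: false"), &disabled)
	fmt.Println(allowStale(&unset))    // true: default stale reads preserved
	fmt.Println(allowStale(&disabled)) // false: explicitly disabled
}
```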
@@ -15,7 +15,6 @@ type task struct {
    ID string
    ServiceID string
    NodeID string
    Labels map[string]string
    DesiredState string
    NetworksAttachments []struct {
        Addresses []string
@@ -32,6 +31,11 @@ type task struct {
            Ports []portConfig
        }
    }
    Spec struct {
        ContainerSpec struct {
            Labels map[string]string
        }
    }
    Slot int
}

@@ -82,8 +86,8 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []map[string]strin
        "__meta_dockerswarm_task_slot": strconv.Itoa(task.Slot),
        "__meta_dockerswarm_task_state": task.Status.State,
    }
    for k, v := range task.Labels {
        commonLabels["__meta_dockerswarm_task_label_"+discoveryutils.SanitizeLabelName(k)] = v
    for k, v := range task.Spec.ContainerSpec.Labels {
        commonLabels["__meta_dockerswarm_container_label_"+discoveryutils.SanitizeLabelName(k)] = v
    }
    var svcPorts []portConfig
    for i, v := range services {

@@ -27,13 +27,13 @@ func Test_parseTasks(t *testing.T) {
    "Version": {
        "Index": 23
    },
    "Labels": {
        "label1": "value1"
    },
    "Spec": {
        "ContainerSpec": {
            "Image": "redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842",
            "Init": false
            "Init": false,
            "Labels": {
                "label1": "value1"
            }
        },
        "Resources": {
            "Limits": {},
@@ -70,8 +70,18 @@ func Test_parseTasks(t *testing.T) {
                ID: "t4rdm7j2y9yctbrksiwvsgpu5",
                ServiceID: "t91nf284wzle1ya09lqvyjgnq",
                NodeID: "qauwmifceyvqs0sipvzu8oslu",
                Labels: map[string]string{
                    "label1": "value1",
                Spec: struct {
                    ContainerSpec struct {
                        Labels map[string]string
                    }
                }{
                    ContainerSpec: struct {
                        Labels map[string]string
                    }{
                        Labels: map[string]string{
                            "label1": "value1",
                        },
                    },
                },
                DesiredState: "running",
                Slot: 1,
@@ -97,7 +107,7 @@ func Test_parseTasks(t *testing.T) {
                return
            }
            if !reflect.DeepEqual(got, tt.want) {
                t.Errorf("parseTasks() got = %v, want %v", got, tt.want)
                t.Errorf("parseTasks() got\n%v\nwant\n%v", got, tt.want)
            }
        })
    }
@@ -126,7 +136,6 @@ func Test_addTasksLabels(t *testing.T) {
                ID: "t4rdm7j2y9yctbrksiwvsgpu5",
                ServiceID: "t91nf284wzle1ya09lqvyjgnq",
                NodeID: "qauwmifceyvqs0sipvzu8oslu",
                Labels: map[string]string{},
                DesiredState: "running",
                Slot: 1,
                Status: struct {
@@ -194,7 +203,6 @@ func Test_addTasksLabels(t *testing.T) {
                ID: "t4rdm7j2y9yctbrksiwvsgpu5",
                ServiceID: "tgsci5gd31aai3jyudv98pqxf",
                NodeID: "qauwmifceyvqs0sipvzu8oslu",
                Labels: map[string]string{},
                DesiredState: "running",
                Slot: 1,
                NetworksAttachments: []struct {
@@ -33,7 +33,11 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
    if sdc.Port != nil {
        port = *sdc.Port
    }
    awsCfg, err := awsapi.NewConfig(sdc.Region, sdc.RoleARN, sdc.AccessKey, sdc.SecretKey.String(), "ec2")
    stsEndpoint := sdc.STSEndpoint
    if stsEndpoint == "" {
        stsEndpoint = sdc.Endpoint
    }
    awsCfg, err := awsapi.NewConfig(sdc.Endpoint, stsEndpoint, sdc.Region, sdc.RoleARN, sdc.AccessKey, sdc.SecretKey.String(), "ec2")
    if err != nil {
        return nil, err
    }

@@ -18,12 +18,13 @@ var SDCheckInterval = flag.Duration("promscrape.ec2SDCheckInterval", time.Minute
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config
type SDConfig struct {
    Region string `yaml:"region,omitempty"`
    Endpoint string `yaml:"endpoint,omitempty"`
    AccessKey string `yaml:"access_key,omitempty"`
    SecretKey *promauth.Secret `yaml:"secret_key,omitempty"`
    Region string `yaml:"region,omitempty"`
    Endpoint string `yaml:"endpoint,omitempty"`
    STSEndpoint string `yaml:"sts_endpoint,omitempty"`
    AccessKey string `yaml:"access_key,omitempty"`
    SecretKey *promauth.Secret `yaml:"secret_key,omitempty"`
    // TODO add support for Profile, not working atm
    Profile string `yaml:"profile,omitempty"`
    // Profile string `yaml:"profile,omitempty"`
    RoleARN string `yaml:"role_arn,omitempty"`
    // RefreshInterval time.Duration `yaml:"refresh_interval"`
    // refresh_interval is obtained from `-promscrape.ec2SDCheckInterval` command-line option.
@@ -52,8 +52,9 @@ type Ingress struct {
//
// See https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#ingressspec-v1-networking-k8s-io
type IngressSpec struct {
    TLS []IngressTLS `json:"tls"`
    Rules []IngressRule
    TLS []IngressTLS `json:"tls"`
    Rules []IngressRule
    IngressClassName string
}

// IngressTLS represents ingress TLS spec in k8s.
@@ -130,12 +131,13 @@ func matchesHostPattern(pattern, host string) bool {

func getLabelsForIngressPath(ig *Ingress, scheme, host, path string) map[string]string {
    m := map[string]string{
        "__address__": host,
        "__meta_kubernetes_namespace": ig.Metadata.Namespace,
        "__meta_kubernetes_ingress_name": ig.Metadata.Name,
        "__meta_kubernetes_ingress_scheme": scheme,
        "__meta_kubernetes_ingress_host": host,
        "__meta_kubernetes_ingress_path": path,
        "__address__": host,
        "__meta_kubernetes_namespace": ig.Metadata.Namespace,
        "__meta_kubernetes_ingress_name": ig.Metadata.Name,
        "__meta_kubernetes_ingress_scheme": scheme,
        "__meta_kubernetes_ingress_host": host,
        "__meta_kubernetes_ingress_path": path,
        "__meta_kubernetes_ingress_class_name": ig.Spec.IngressClassName,
    }
    ig.Metadata.registerLabelsAndAnnotations("__meta_kubernetes_ingress", m)
    return m

@@ -78,7 +78,8 @@ func TestParseIngressListSuccess(t *testing.T) {
    {
        "host": "foobar"
    }
    ]
    ],
    "ingressClassName": "foo-class"
},
"status": {
    "loadBalancer": {
@@ -107,11 +108,12 @@ func TestParseIngressListSuccess(t *testing.T) {
    "__address__": "foobar",
    "__meta_kubernetes_ingress_annotation_kubectl_kubernetes_io_last_applied_configuration": `{"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{},"name":"test-ingress","namespace":"default"},"spec":{"backend":{"serviceName":"testsvc","servicePort":80}}}` + "\n",
    "__meta_kubernetes_ingress_annotationpresent_kubectl_kubernetes_io_last_applied_configuration": "true",
    "__meta_kubernetes_ingress_host": "foobar",
    "__meta_kubernetes_ingress_name": "test-ingress",
    "__meta_kubernetes_ingress_path": "/",
    "__meta_kubernetes_ingress_scheme": "http",
    "__meta_kubernetes_namespace": "default",
    "__meta_kubernetes_ingress_host": "foobar",
    "__meta_kubernetes_ingress_name": "test-ingress",
    "__meta_kubernetes_ingress_path": "/",
    "__meta_kubernetes_ingress_scheme": "http",
    "__meta_kubernetes_ingress_class_name": "foo-class",
    "__meta_kubernetes_namespace": "default",
}),
}
if !areEqualLabelss(sortedLabelss, expectedLabelss) {
@@ -585,9 +585,9 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
    scrapeResponseSize.Update(float64(sbr.bodyLen))
    up := 1
    if err != nil {
        if samplesScraped == 0 {
            up = 0
        }
        // Mark the scrape as failed even if it already read and pushed some samples
        // to remote storage. This makes the logic compatible with Prometheus.
        up = 0
        scrapesFailed.Inc()
    }
    seriesAdded := 0
@@ -815,9 +815,13 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnDate(qt *querytracer.Tracer,
    if err != nil {
        return err
    }
    if filter != nil && filter.Len() == 0 {
        qt.Printf("found zero label names for filter=%s", tfss)
        return nil
    if filter != nil && filter.Len() <= 100e3 {
        // It is faster to obtain label names by metricIDs from the filter
        // instead of scanning the inverted index for the matching filters.
        // This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978
        metricIDs := filter.AppendTo(nil)
        qt.Printf("sort %d metricIDs", len(metricIDs))
        return is.getLabelNamesForMetricIDs(qt, metricIDs, lns, maxLabelNames)
    }
    var prevLabelName []byte
    ts := &is.ts
@@ -877,6 +881,41 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnDate(qt *querytracer.Tracer,
    return nil
}

func (is *indexSearch) getLabelNamesForMetricIDs(qt *querytracer.Tracer, metricIDs []uint64, lns map[string]struct{}, maxLabelNames int) error {
    lns["__name__"] = struct{}{}
    var mn MetricName
    foundLabelNames := 0
    var buf []byte
    for _, metricID := range metricIDs {
        var err error
        buf, err = is.searchMetricNameWithCache(buf[:0], metricID)
        if err != nil {
            if err == io.EOF {
                // It is likely the metricID->metricName entry didn't propagate to inverted index yet.
                // Skip this metricID for now.
                continue
            }
            return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
        }
        if err := mn.Unmarshal(buf); err != nil {
            return fmt.Errorf("cannot unmarshal metricName %q: %w", buf, err)
        }
        for _, tag := range mn.Tags {
            _, ok := lns[string(tag.Key)]
            if !ok {
                foundLabelNames++
                lns[string(tag.Key)] = struct{}{}
                if len(lns) >= maxLabelNames {
                    qt.Printf("hit the limit on the number of unique label names: %d", maxLabelNames)
                    return nil
                }
            }
        }
    }
    qt.Printf("get %d distinct label names from %d metricIDs", foundLabelNames, len(metricIDs))
    return nil
}

// SearchLabelValuesWithFiltersOnTimeRange returns label values for the given labelName, tfss and tr.
func (db *indexDB) SearchLabelValuesWithFiltersOnTimeRange(qt *querytracer.Tracer, labelName string, tfss []*TagFilters, tr TimeRange,
    maxLabelValues, maxMetrics int, deadline uint64) ([]string, error) {
@@ -972,9 +1011,13 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
    if err != nil {
        return err
    }
    if filter != nil && filter.Len() == 0 {
        qt.Printf("found zero label values for filter=%s", tfss)
        return nil
    if filter != nil && filter.Len() < 100e3 {
        // It is faster to obtain label values by metricIDs from the filter
        // instead of scanning the inverted index for the matching filters.
        // This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978
        metricIDs := filter.AppendTo(nil)
        qt.Printf("sort %d metricIDs", len(metricIDs))
        return is.getLabelValuesForMetricIDs(qt, lvs, labelName, metricIDs, maxLabelValues)
    }
    if labelName == "__name__" {
        // __name__ label is encoded as empty string in indexdb.
@@ -1033,6 +1076,42 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
    return nil
}

func (is *indexSearch) getLabelValuesForMetricIDs(qt *querytracer.Tracer, lvs map[string]struct{}, labelName string, metricIDs []uint64, maxLabelValues int) error {
    if labelName == "" {
        labelName = "__name__"
    }
    var mn MetricName
    foundLabelValues := 0
    var buf []byte
    for _, metricID := range metricIDs {
        var err error
        buf, err = is.searchMetricNameWithCache(buf[:0], metricID)
        if err != nil {
            if err == io.EOF {
                // It is likely the metricID->metricName entry didn't propagate to inverted index yet.
                // Skip this metricID for now.
                continue
            }
            return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
        }
        if err := mn.Unmarshal(buf); err != nil {
            return fmt.Errorf("cannot unmarshal metricName %q: %w", buf, err)
        }
        tagValue := mn.GetTagValue(labelName)
        _, ok := lvs[string(tagValue)]
        if !ok {
            foundLabelValues++
            lvs[string(tagValue)] = struct{}{}
            if len(lvs) >= maxLabelValues {
                qt.Printf("hit the limit on the number of unique label values for label %q: %d", labelName, maxLabelValues)
                return nil
            }
        }
    }
    qt.Printf("get %d distinct values for label %q from %d metricIDs", foundLabelValues, labelName, len(metricIDs))
    return nil
}

// SearchTagValueSuffixes returns all the tag value suffixes for the given tagKey and tagValuePrefix on the given tr.
//
// This allows implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find or similar APIs.
@@ -569,7 +569,7 @@ func (pt *partition) addRowsPart(rows []rawRow) {
            atomic.AddUint64(&pt.smallAssistedMerges, 1)
            return
        }
        if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
        if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
            return
        }
        logger.Panicf("FATAL: cannot merge small parts: %s", err)
@@ -871,7 +871,7 @@ func hasActiveMerges(pws []*partWrapper) bool {

var (
    bigMergeWorkersCount = getDefaultMergeConcurrency(4)
    smallMergeWorkersCount = getDefaultMergeConcurrency(8)
    smallMergeWorkersCount = getDefaultMergeConcurrency(16)
)

func getDefaultMergeConcurrency(max int) int {
@@ -956,7 +956,7 @@ func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error {
            // The merger has been stopped.
            return nil
        }
        if !errors.Is(err, errNothingToMerge) {
        if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
            return err
        }
        if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds {
@@ -1012,11 +1012,13 @@ func (pt *partition) canBackgroundMerge() bool {
    return atomic.LoadUint32(pt.isReadOnly) == 0
}

var errReadOnlyMode = fmt.Errorf("storage is in readonly mode")

func (pt *partition) mergeBigParts(isFinal bool) error {
    if !pt.canBackgroundMerge() {
        // Do not perform merge in read-only mode, since this may result in disk space shortage.
        // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
        return nil
        return errReadOnlyMode
    }
    maxOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount)

@@ -1032,7 +1034,7 @@ func (pt *partition) mergeSmallParts(isFinal bool) error {
    if !pt.canBackgroundMerge() {
        // Do not perform merge in read-only mode, since this may result in disk space shortage.
        // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
        return nil
        return errReadOnlyMode
    }
    // Try merging small parts to a big part at first.
    maxBigPartOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount)
package/release/Makefile (new file, 71 lines)
@@ -0,0 +1,71 @@
GITHUB_RELEASE_SPEC_FILE="/tmp/vm-github-release"
GITHUB_DEBUG_FILE="/tmp/vm-github-debug"

github-token-check:
ifndef GITHUB_TOKEN
	$(error missing GITHUB_TOKEN env var. It must contain github token for VictoriaMetrics project obtained from https://github.com/settings/tokens)
endif

github-tag-check:
ifndef TAG
	$(error missing TAG env var. It must contain github release tag to create)
endif

github-create-release: github-token-check github-tag-check
	@result=$$(curl -o $(GITHUB_RELEASE_SPEC_FILE) -s -w "%{http_code}" \
		-X POST \
		-H "Accept: application/vnd.github+json" \
		-H "Authorization: token $(GITHUB_TOKEN)" \
		https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases \
		-d '{"tag_name":"$(TAG)","name":"$(TAG)","body":"TODO: put here the changelog for $(TAG) release from docs/CHANGELOG.md","draft":true,"prerelease":false,"generate_release_notes":false}'); \
	if [ $${result} = 201 ]; then \
		release_id=$$(cat $(GITHUB_RELEASE_SPEC_FILE) | grep '"id"' -m 1 | sed -E 's/.* ([[:digit:]]+)\,/\1/'); \
		printf "Created release $(TAG) with id=$${release_id}\n"; \
	else \
		printf "Failed to create release $(TAG)\n"; \
		cat $(GITHUB_RELEASE_SPEC_FILE); \
		exit 1; \
	fi

github-upload-assets:
	@release_id=$$(cat $(GITHUB_RELEASE_SPEC_FILE) | grep '"id"' -m 1 | sed -E 's/.* ([[:digit:]]+)\,/\1/'); \
	$(foreach file, $(wildcard bin/*.zip), FILE=$(file) RELEASE_ID=$${release_id} CONTENT_TYPE="application/zip" $(MAKE) github-upload-asset || exit 1;) \
	$(foreach file, $(wildcard bin/*.tar.gz), FILE=$(file) RELEASE_ID=$${release_id} CONTENT_TYPE="application/x-gzip" $(MAKE) github-upload-asset || exit 1;) \
	$(foreach file, $(wildcard bin/*_checksums.txt), FILE=$(file) RELEASE_ID=$${release_id} CONTENT_TYPE="text/plain" $(MAKE) github-upload-asset || exit 1;)

github-upload-asset: github-token-check
ifndef FILE
	$(error missing FILE env var. It must contain path to file to upload to github release)
endif
	@printf "Uploading $(FILE)\n"
	@result=$$(curl -o $(GITHUB_DEBUG_FILE) -w "%{http_code}" \
		-X POST \
		-H "Accept: application/vnd.github+json" \
		-H "Authorization: token $(GITHUB_TOKEN)" \
		-H "Content-Type: $(CONTENT_TYPE)" \
		--data-binary "@$(FILE)" \
		https://uploads.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/$(RELEASE_ID)/assets?name=$(notdir $(FILE))); \
	if [ $${result} = 201 ]; then \
		printf "Upload OK: $${result}\n"; \
	elif [ $${result} = 422 ]; then \
		printf "Asset already uploaded, you need to delete it from UI if you want to re-upload it\n"; \
	else \
		printf "Upload failed: $${result}\n"; \
		cat $(GITHUB_DEBUG_FILE); \
		exit 1; \
	fi

github-delete-release: github-token-check
	@release_id=$$(cat $(GITHUB_RELEASE_SPEC_FILE) | grep '"id"' -m 1 | sed -E 's/.* ([[:digit:]]+)\,/\1/'); \
	result=$$(curl -o $(GITHUB_DEBUG_FILE) -s -w "%{http_code}" \
		-X DELETE \
		-H "Accept: application/vnd.github+json" \
		-H "Authorization: token $(GITHUB_TOKEN)" \
		https://api.github.com/repos/VictoriaMetrics/VictoriaMetrics/releases/$${release_id}); \
	if [ $${result} = 204 ]; then \
		printf "Deleted release with id=$${release_id}\n"; \
	else \
		printf "Failed to delete release with id=$${release_id}\n"; \
		cat $(GITHUB_DEBUG_FILE); \
		exit 1; \
	fi