Compare commits

..

1 Commits

Author SHA1 Message Date
func25
4e57f4dc48 fix 2026-04-14 15:10:04 +07:00
466 changed files with 6950 additions and 15856 deletions

0
.codex
View File

View File

@@ -1,3 +1,10 @@
**PLEASE REMOVE LINE BELOW BEFORE SUBMITTING**
### Describe Your Changes
Before creating the PR, make sure you have read and followed the [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
Please provide a brief description of the changes you made. Be as specific as possible to help others understand the purpose and impact of your modifications.
### Checklist
The following checks are **mandatory**:
- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [ ] My change adheres to [VictoriaMetrics development goals](https://docs.victoriametrics.com/victoriametrics/goals/).

View File

@@ -57,8 +57,6 @@ jobs:
arch: amd64
- os: openbsd
arch: amd64
- os: netbsd
arch: amd64
- os: windows
arch: amd64
steps:

View File

@@ -27,7 +27,7 @@ jobs:
- run: go version
- name: Cache Go artifacts
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/go-build

View File

@@ -40,7 +40,7 @@ jobs:
- run: go version
- name: Cache Go artifacts
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
@@ -50,14 +50,14 @@ jobs:
restore-keys: go-artifacts-${{ runner.os }}-codeql-analyze-${{ steps.go.outputs.go-version }}-
- name: Initialize CodeQL
uses: github/codeql-action/init@v4.35.2
uses: github/codeql-action/init@v4
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v4.35.2
uses: github/codeql-action/autobuild@v4
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4.35.2
uses: github/codeql-action/analyze@v4
with:
category: 'language:go'

View File

@@ -47,7 +47,7 @@ jobs:
- run: go version
- name: Cache golangci-lint
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/golangci-lint

View File

@@ -1,4 +1,42 @@
# Security Policy
You can find out about our security policy and VictoriaMetrics version support on the [security page](https://docs.victoriametrics.com/victoriametrics/#security) in the documentation.
## Supported Versions
The following versions of VictoriaMetrics receive regular security fixes:
| Version | Supported |
|--------------------------------------------------------------------------------|--------------------|
| [Latest release](https://docs.victoriametrics.com/victoriametrics/changelog/) | :white_check_mark: |
| [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-releases/) | :white_check_mark: |
| other releases | :x: |
See [this page](https://victoriametrics.com/security/) for more details.
## Software Bill of Materials (SBOM)
Every VictoriaMetrics container{{% available_from "#" %}} image published to
[Docker Hub](https://hub.docker.com/u/victoriametrics)
and [Quay.io](https://quay.io/organization/victoriametrics)
includes an [SPDX](https://spdx.dev/) SBOM attestation
generated automatically by BuildKit during
`docker buildx build`.
To inspect the SBOM for an image:
```sh
docker buildx imagetools inspect \
docker.io/victoriametrics/victoria-metrics:latest \
--format "{{ json .SBOM }}"
```
To scan an image using its SBOM attestation with
[Trivy](https://github.com/aquasecurity/trivy):
```sh
trivy image --sbom-sources oci \
docker.io/victoriametrics/victoria-metrics:latest
```
## Reporting a Vulnerability
Please report any security issues to <security@victoriametrics.com>

View File

@@ -118,8 +118,8 @@ func main() {
logger.Fatalf("cannot stop the webservice: %s", err)
}
logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())
vminsertcommon.StopIngestionRateLimiter()
vminsert.Stop()
vminsertcommon.StopIngestionRateLimiter()
vmstorage.Stop()
vmselect.Stop()

View File

@@ -83,9 +83,6 @@ var (
maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 0, "The maximum number of labels per time series to be accepted. Series with superfluous labels are ignored. In this case the vm_rows_ignored_total{reason=\"too_many_labels\"} metric at /metrics page is incremented")
maxLabelNameLen = flag.Int("maxLabelNameLen", 0, "The maximum length of label names in the accepted time series. Series with longer label name are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_name\"} metric at /metrics page is incremented")
maxLabelValueLen = flag.Int("maxLabelValueLen", 0, "The maximum length of label values in the accepted time series. Series with longer label value are ignored. In this case the vm_rows_ignored_total{reason=\"too_long_label_value\"} metric at /metrics page is incremented")
enableMultitenancyViaHeaders = flag.Bool("enableMultitenancyViaHeaders", false, "Enables multitenancy via HTTP headers. "+
"See https://docs.victoriametrics.com/victoriametrics/vmagent/#multitenancy")
)
var (
@@ -219,7 +216,7 @@ func getOpenTSDBHTTPInsertHandler() func(req *http.Request) error {
}
return func(req *http.Request) error {
path := strings.ReplaceAll(req.URL.Path, "//", "/")
at, err := getAuthTokenFromPath(path, req.Header)
at, err := getAuthTokenFromPath(path)
if err != nil {
return fmt.Errorf("cannot obtain auth token from path %q: %w", path, err)
}
@@ -227,15 +224,8 @@ func getOpenTSDBHTTPInsertHandler() func(req *http.Request) error {
}
}
func parsePath(path string, header http.Header) (*httpserver.Path, error) {
if *enableMultitenancyViaHeaders {
return httpserver.ParsePathAndHeaders(path, header)
}
return httpserver.ParsePath(path)
}
func getAuthTokenFromPath(path string, header http.Header) (*auth.Token, error) {
p, err := parsePath(path, header)
func getAuthTokenFromPath(path string) (*auth.Token, error) {
p, err := httpserver.ParsePath(path)
if err != nil {
return nil, fmt.Errorf("cannot parse multitenant path: %w", err)
}
@@ -569,15 +559,14 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
}
func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path string) bool {
p, err := parsePath(path, r.Header)
p, err := httpserver.ParsePath(path)
if err != nil {
// Cannot parse multitenant path. Skip it - probably it will be parsed later.
return false
}
if p.Prefix != "insert" {
// processMultitenantRequest is called for all unmatched path variants,
// but we should try parsing only /insert prefixed to avoid catching all possible paths.
return false
httpserver.Errorf(w, r, `unsupported multitenant prefix: %q; expected "insert"`, p.Prefix)
return true
}
at, err := auth.NewTokenPossibleMultitenant(p.AuthToken)
if err != nil {

View File

@@ -77,6 +77,16 @@ func insertRows(at *auth.Token, tss []prompb.TimeSeries, mms []prompb.MetricMeta
var metadataTotal int
if prommetadata.IsEnabled() {
var accountID, projectID uint32
if at != nil {
accountID = at.AccountID
projectID = at.ProjectID
for i := range mms {
mm := &mms[i]
mm.AccountID = accountID
mm.ProjectID = projectID
}
}
ctx.WriteRequest.Metadata = mms
metadataTotal = len(mms)
}

View File

@@ -75,6 +75,11 @@ func insertRows(at *auth.Token, rows []prometheus.Row, mms []prometheus.Metadata
Samples: samples[len(samples)-1:],
})
}
var accountID, projectID uint32
if at != nil {
accountID = at.AccountID
projectID = at.ProjectID
}
for i := range mms {
mm := &mms[i]
mmsDst = append(mmsDst, prompb.MetricMetadata{
@@ -83,6 +88,8 @@ func insertRows(at *auth.Token, rows []prometheus.Row, mms []prometheus.Metadata
Type: mm.Type,
// there is no unit in Prometheus exposition formats
AccountID: accountID,
ProjectID: projectID,
})
}
ctx.WriteRequest.Timeseries = tssDst

View File

@@ -72,6 +72,11 @@ func insertRows(at *auth.Token, timeseries []prompb.TimeSeries, mms []prompb.Met
var metadataTotal int
if prommetadata.IsEnabled() {
var accountID, projectID uint32
if at != nil {
accountID = at.AccountID
projectID = at.ProjectID
}
for i := range mms {
mm := &mms[i]
mmsDst = append(mmsDst, prompb.MetricMetadata{
@@ -80,8 +85,8 @@ func insertRows(at *auth.Token, timeseries []prompb.TimeSeries, mms []prompb.Met
Type: mm.Type,
Unit: mm.Unit,
AccountID: mm.AccountID,
ProjectID: mm.ProjectID,
AccountID: accountID,
ProjectID: projectID,
})
}
ctx.WriteRequest.Metadata = mmsDst

View File

@@ -59,8 +59,6 @@ var (
"Multiple headers must be delimited by '^^': -remoteWrite.headers='header1:value1^^header2:value2'")
basicAuthUsername = flagutil.NewArrayString("remoteWrite.basicAuth.username", "Optional basic auth username to use for the corresponding -remoteWrite.url")
basicAuthUsernameFile = flagutil.NewArrayString("remoteWrite.basicAuth.usernameFile", "Optional path to basic auth username to use for the corresponding -remoteWrite.url. "+
"The file is re-read every second")
basicAuthPassword = flagutil.NewArrayString("remoteWrite.basicAuth.password", "Optional basic auth password to use for the corresponding -remoteWrite.url")
basicAuthPasswordFile = flagutil.NewArrayString("remoteWrite.basicAuth.passwordFile", "Optional path to basic auth password to use for the corresponding -remoteWrite.url. "+
"The file is re-read every second")
@@ -225,14 +223,12 @@ func getAuthConfig(argIdx int) (*promauth.Config, error) {
hdrs = strings.Split(headersValue, "^^")
}
username := basicAuthUsername.GetOptionalArg(argIdx)
usernameFile := basicAuthUsernameFile.GetOptionalArg(argIdx)
password := basicAuthPassword.GetOptionalArg(argIdx)
passwordFile := basicAuthPasswordFile.GetOptionalArg(argIdx)
var basicAuthCfg *promauth.BasicAuthConfig
if username != "" || usernameFile != "" || password != "" || passwordFile != "" {
if username != "" || password != "" || passwordFile != "" {
basicAuthCfg = &promauth.BasicAuthConfig{
Username: username,
UsernameFile: usernameFile,
Password: promauth.NewSecret(password),
PasswordFile: passwordFile,
}
@@ -470,7 +466,7 @@ again:
goto again
}
logger.Warnf("failed to repack zstd block (%d bytes) to snappy: %s; The block will be rejected. "+
logger.Warnf("failed to repack zstd block (%s bytes) to snappy: %s; The block will be rejected. "+
"Possible cause: ungraceful shutdown leading to persisted queue corruption.",
zstdBlockLen, err)
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)
func TestParseRetryAfterHeader(t *testing.T) {
@@ -36,6 +37,34 @@ func TestParseRetryAfterHeader(t *testing.T) {
f(time.Now().Add(10*time.Second).Format("Mon, 02 Jan 2006 15:04:05 FAKETZ"), 0)
}
func TestInitSecretFlags(t *testing.T) {
showRemoteWriteURLOrig := *showRemoteWriteURL
defer func() {
*showRemoteWriteURL = showRemoteWriteURLOrig
flagutil.UnregisterAllSecretFlags()
}()
flagutil.UnregisterAllSecretFlags()
*showRemoteWriteURL = false
InitSecretFlags()
if !flagutil.IsSecretFlag("remotewrite.url") {
t.Fatalf("expecting remoteWrite.url to be secret")
}
if !flagutil.IsSecretFlag("remotewrite.headers") {
t.Fatalf("expecting remoteWrite.headers to be secret")
}
flagutil.UnregisterAllSecretFlags()
*showRemoteWriteURL = true
InitSecretFlags()
if flagutil.IsSecretFlag("remotewrite.url") {
t.Fatalf("remoteWrite.url must remain visible when -remoteWrite.showURL is set")
}
if !flagutil.IsSecretFlag("remotewrite.headers") {
t.Fatalf("expecting remoteWrite.headers to remain secret")
}
}
func TestRepackBlockFromZstdToSnappy(t *testing.T) {
expectedPlainBlock := []byte(`foobar`)

View File

@@ -211,9 +211,6 @@ func (wr *writeRequest) copyMetadata(dst, src *prompb.MetricMetadata) {
dst.Type = src.Type
dst.Unit = src.Unit
dst.AccountID = src.AccountID
dst.ProjectID = src.ProjectID
// Pre-allocate memory for all string fields.
neededBufLen := len(src.MetricFamilyName) + len(src.Help)
bufLen := len(wr.metadatabuf)

View File

@@ -151,6 +151,8 @@ func InitSecretFlags() {
// remoteWrite.url can contain authentication codes, so hide it at `/metrics` output.
flagutil.RegisterSecretFlag("remoteWrite.url")
}
// remoteWrite.headers can contain auth headers such as Authorization and API keys.
flagutil.RegisterSecretFlag("remoteWrite.headers")
}
var (
@@ -285,7 +287,6 @@ func initRemoteWriteCtxs(urls []string) {
rwctxs[i] = newRemoteWriteCtx(i, remoteWriteURL, sanitizedURL)
rwctxIdx[i] = i
}
fs.RegisterPathFsMetrics(*tmpDataPath)
if *shardByURL {
consistentHashNodes := make([]string, 0, len(urls))
@@ -399,7 +400,7 @@ func tryPush(at *auth.Token, wr *prompb.WriteRequest, forceDropSamplesOnFailure
// Push metadata separately from time series, since it doesn't need sharding,
// relabeling, stream aggregation, deduplication, etc.
if !tryPushMetadataToRemoteStorages(at, rwctxs, mms, forceDropSamplesOnFailure) {
if !tryPushMetadataToRemoteStorages(rwctxs, mms, forceDropSamplesOnFailure) {
return false
}
@@ -537,18 +538,11 @@ func pushTimeSeriesToRemoteStoragesTrackDropped(tss []prompb.TimeSeries) {
}
}
func tryPushMetadataToRemoteStorages(at *auth.Token, rwctxs []*remoteWriteCtx, mms []prompb.MetricMetadata, forceDropSamplesOnFailure bool) bool {
func tryPushMetadataToRemoteStorages(rwctxs []*remoteWriteCtx, mms []prompb.MetricMetadata, forceDropSamplesOnFailure bool) bool {
if len(mms) == 0 {
// Nothing to push
return true
}
if at != nil {
for idx := range mms {
mm := &mms[idx]
mm.AccountID = at.AccountID
mm.ProjectID = at.ProjectID
}
}
// Do not shard metadata even if -remoteWrite.shardByURL is set, just replicate it among rwctxs.
// Since metadata is usually small and there is no guarantee that metadata can be sent to
// the same remote storage with the corresponding metrics.
@@ -699,7 +693,7 @@ func shardAmountRemoteWriteCtx(tssBlock []prompb.TimeSeries, shards [][]prompb.T
}
tmpLabels.Labels = hashLabels
}
h := getLabelsHashForShard(hashLabels)
h := getLabelsHash(hashLabels)
// Get the rwctxIdx through consistent hashing and then map it to the index in shards.
// The rwctxIdx is not always equal to the shardIdx, for example, when some rwctx are not available.
@@ -790,28 +784,11 @@ var (
dailySeriesLimitRowsDropped = metrics.NewCounter(`vmagent_daily_series_limit_rows_dropped_total`)
)
// getLabelsHashForShard is a separate function from getLabelsHash because
// it omits the '=' separator between label name and value for backward compatibility.
// Changing it would re-shard all series across remoteWrite targets.
func getLabelsHashForShard(labels []prompb.Label) uint64 {
bb := labelsHashBufPool.Get()
b := bb.B[:0]
for _, label := range labels {
b = append(b, label.Name...)
b = append(b, label.Value...)
}
h := xxhash.Sum64(b)
bb.B = b
labelsHashBufPool.Put(bb)
return h
}
func getLabelsHash(labels []prompb.Label) uint64 {
bb := labelsHashBufPool.Get()
b := bb.B[:0]
for _, label := range labels {
b = append(b, label.Name...)
b = append(b, '=')
b = append(b, label.Value...)
}
h := xxhash.Sum64(b)

View File

@@ -25,7 +25,7 @@ func TestGetLabelsHash_Distribution(t *testing.T) {
t.Helper()
// Distribute itemsCount hashes returned by getLabelsHash() across bucketsCount buckets.
itemsCount := 10_000 * bucketsCount
itemsCount := 1_000 * bucketsCount
m := make([]int, bucketsCount)
var labels []prompb.Label
for i := range itemsCount {
@@ -44,12 +44,10 @@ func TestGetLabelsHash_Distribution(t *testing.T) {
}
// Verify that the distribution is even
expectedItemsPerBucket := float64(itemsCount / bucketsCount)
allowedDeviation := math.Round(float64(expectedItemsPerBucket) * 0.04)
expectedItemsPerBucket := itemsCount / bucketsCount
for _, n := range m {
if math.Abs(expectedItemsPerBucket-float64(n)) > allowedDeviation {
t.Fatalf("unexpected items in the bucket for %d buckets; got %d; want in range [%.0f, %.0f]",
bucketsCount, n, expectedItemsPerBucket-allowedDeviation, expectedItemsPerBucket+allowedDeviation)
if math.Abs(1-float64(n)/float64(expectedItemsPerBucket)) > 0.04 {
t.Fatalf("unexpected items in the bucket for %d buckets; got %d; want around %d", bucketsCount, n, expectedItemsPerBucket)
}
}
}

View File

@@ -222,9 +222,6 @@ func (r *Rule) Validate() error {
if r.Expr == "" {
return fmt.Errorf("expression can't be empty")
}
if _, ok := r.Labels["__name__"]; ok {
return fmt.Errorf("invalid rule label __name__")
}
return checkOverflow(r.XXX, "rule")
}

View File

@@ -136,9 +136,6 @@ func TestRuleValidate(t *testing.T) {
if err := (&Rule{Alert: "alert"}).Validate(); err == nil {
t.Fatalf("expected empty expr error")
}
if err := (&Rule{Record: "record", Expr: "sum(test)", Labels: map[string]string{"__name__": "test"}}).Validate(); err == nil {
t.Fatalf("invalid rule label; got %s", err)
}
if err := (&Rule{Alert: "alert", Expr: "test>0"}).Validate(); err != nil {
t.Fatalf("expected valid rule; got %s", err)
}

View File

@@ -772,7 +772,7 @@ func TestHeaders(t *testing.T) {
// basic auth
f(func() *Client {
cfg, err := vmalertutil.AuthConfig(vmalertutil.WithBasicAuth("foo", "", "bar", ""))
cfg, err := vmalertutil.AuthConfig(vmalertutil.WithBasicAuth("foo", "bar", ""))
if err != nil {
t.Fatalf("Error get auth config: %s", err)
}
@@ -817,7 +817,7 @@ func TestHeaders(t *testing.T) {
// custom header overrides basic auth
f(func() *Client {
cfg, err := vmalertutil.AuthConfig(vmalertutil.WithBasicAuth("foo", "", "bar", ""))
cfg, err := vmalertutil.AuthConfig(vmalertutil.WithBasicAuth("foo", "bar", ""))
if err != nil {
t.Fatalf("Error get auth config: %s", err)
}

View File

@@ -87,7 +87,6 @@ func (m *Metric) DelLabel(key string) {
for i, l := range m.Labels {
if l.Name == key {
m.Labels = append(m.Labels[:i], m.Labels[i+1:]...)
break
}
}
}

View File

@@ -27,7 +27,6 @@ var (
"Multiple headers must be delimited by '^^': -datasource.headers='header1:value1^^header2:value2'")
basicAuthUsername = flag.String("datasource.basicAuth.username", "", "Optional basic auth username for -datasource.url")
basicAuthUsernameFile = flag.String("datasource.basicAuth.usernameFile", "", "Optional path to basic auth username to use for -datasource.url")
basicAuthPassword = flag.String("datasource.basicAuth.password", "", "Optional basic auth password for -datasource.url")
basicAuthPasswordFile = flag.String("datasource.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -datasource.url")
@@ -106,7 +105,7 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
return nil, fmt.Errorf("cannot parse JSON for -datasource.oauth2.endpointParams=%s: %w", *oauth2EndpointParams, err)
}
authCfg, err := vmalertutil.AuthConfig(
vmalertutil.WithBasicAuth(*basicAuthUsername, *basicAuthUsernameFile, *basicAuthPassword, *basicAuthPasswordFile),
vmalertutil.WithBasicAuth(*basicAuthUsername, *basicAuthPassword, *basicAuthPasswordFile),
vmalertutil.WithBearer(*bearerToken, *bearerTokenFile),
vmalertutil.WithOAuth(*oauth2ClientID, *oauth2ClientSecret, *oauth2ClientSecretFile, *oauth2TokenURL, *oauth2Scopes, endpointParams),
vmalertutil.WithHeaders(*headers))

View File

@@ -191,7 +191,7 @@ func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg proma
}
aCfg, err := vmalertutil.AuthConfig(
vmalertutil.WithBasicAuth(ba.Username, ba.UsernameFile, ba.Password.String(), ba.PasswordFile),
vmalertutil.WithBasicAuth(ba.Username, ba.Password.String(), ba.PasswordFile),
vmalertutil.WithBearer(authCfg.BearerToken.String(), authCfg.BearerTokenFile),
vmalertutil.WithOAuth(oauth.ClientID, oauth.ClientSecret.String(), oauth.ClientSecretFile, oauth.TokenURL, strings.Join(oauth.Scopes, ";"), oauth.EndpointParams),
vmalertutil.WithHeaders(strings.Join(authCfg.Headers, "^^")),

View File

@@ -36,7 +36,6 @@ var (
"For example, -remoteWrite.headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding -notifier.url. "+
"Multiple headers must be delimited by '^^': -notifier.headers='header1:value1^^header2:value2,header3:value3'")
basicAuthUsername = flagutil.NewArrayString("notifier.basicAuth.username", "Optional basic auth username for -notifier.url")
basicAuthUsernameFile = flagutil.NewArrayString("notifier.basicAuth.usernameFile", "Optional path to basic auth username file for -notifier.url")
basicAuthPassword = flagutil.NewArrayString("notifier.basicAuth.password", "Optional basic auth password for -notifier.url")
basicAuthPasswordFile = flagutil.NewArrayString("notifier.basicAuth.passwordFile", "Optional path to basic auth password file for -notifier.url")
@@ -214,7 +213,6 @@ func notifiersFromFlags(gen AlertURLGenerator) ([]Notifier, error) {
},
BasicAuth: &promauth.BasicAuthConfig{
Username: basicAuthUsername.GetOptionalArg(i),
UsernameFile: basicAuthUsernameFile.GetOptionalArg(i),
Password: promauth.NewSecret(basicAuthPassword.GetOptionalArg(i)),
PasswordFile: basicAuthPasswordFile.GetOptionalArg(i),
},

View File

@@ -14,7 +14,7 @@ type Notifier interface {
Send(ctx context.Context, alerts []Alert, alertLabels [][]prompb.Label, notifierHeaders map[string]string) error
// Addr returns address where alerts are sent.
Addr() string
// LastError returns error, that occurred during last attempt to send data
// LastError returns error, that occured during last attempt to send data
LastError() string
// Close is a destructor for the Notifier
Close()

View File

@@ -28,7 +28,6 @@ var (
"Multiple headers must be delimited by '^^': -remoteRead.headers='header1:value1^^header2:value2'")
basicAuthUsername = flag.String("remoteRead.basicAuth.username", "", "Optional basic auth username for -remoteRead.url")
basicAuthUsernameFile = flag.String("remoteRead.basicAuth.usernameFile", "", "Optional path to basic auth username to use for -remoteRead.url")
basicAuthPassword = flag.String("remoteRead.basicAuth.password", "", "Optional basic auth password for -remoteRead.url")
basicAuthPasswordFile = flag.String("remoteRead.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -remoteRead.url")
@@ -81,7 +80,7 @@ func Init() (datasource.QuerierBuilder, error) {
return nil, fmt.Errorf("cannot parse JSON for -remoteRead.oauth2.endpointParams=%s: %w", *oauth2EndpointParams, err)
}
authCfg, err := vmalertutil.AuthConfig(
vmalertutil.WithBasicAuth(*basicAuthUsername, *basicAuthUsernameFile, *basicAuthPassword, *basicAuthPasswordFile),
vmalertutil.WithBasicAuth(*basicAuthUsername, *basicAuthPassword, *basicAuthPasswordFile),
vmalertutil.WithBearer(*bearerToken, *bearerTokenFile),
vmalertutil.WithOAuth(*oauth2ClientID, *oauth2ClientSecret, *oauth2ClientSecretFile, *oauth2TokenURL, *oauth2Scopes, endpointParams),
vmalertutil.WithHeaders(*headers))

View File

@@ -11,7 +11,6 @@ import (
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cespare/xxhash/v2"
@@ -19,8 +18,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
@@ -60,11 +57,6 @@ type Client struct {
wg sync.WaitGroup
doneCh chan struct{}
// Whether to encode the write request with VictoriaMetrics remote write protocol.
// It is set to true by default, and will be switched to false if the client
// receives specific errors indicating that the remote storage doesn't support VictoriaMetrics remote write protocol.
isVMRemoteWrite atomic.Bool
}
// Config is config for remote write client.
@@ -124,7 +116,6 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
doneCh: make(chan struct{}),
input: make(chan prompb.TimeSeries, cfg.MaxQueueSize),
}
c.isVMRemoteWrite.Store(true)
for i := 0; i < cc; i++ {
c.wg.Go(func() {
@@ -274,16 +265,8 @@ func (c *Client) flush(ctx context.Context, wr *prompb.WriteRequest) {
defer wr.Reset()
defer bufferFlushDuration.UpdateDuration(time.Now())
bb := writeRequestBufPool.Get()
bb.B = wr.MarshalProtobuf(bb.B[:0])
zb := compressBufPool.Get()
defer compressBufPool.Put(zb)
if c.isVMRemoteWrite.Load() {
zb.B = zstd.CompressLevel(zb.B[:0], bb.B, 0)
} else {
zb.B = snappy.Encode(zb.B[:cap(zb.B)], bb.B)
}
writeRequestBufPool.Put(bb)
data := wr.MarshalProtobuf(nil)
b := snappy.Encode(nil, data)
maxRetryInterval := *retryMaxTime
bt := timeutil.NewBackoffTimer(*retryMinInterval, maxRetryInterval)
@@ -295,17 +278,17 @@ func (c *Client) flush(ctx context.Context, wr *prompb.WriteRequest) {
attempts := 0
L:
for {
err := c.send(ctx, zb.B)
err := c.send(ctx, b)
if err != nil && (errors.Is(err, io.EOF) || netutil.IsTrivialNetworkError(err)) {
// Something in the middle between client and destination might be closing
// the connection. So we do a one more attempt in hope request will succeed.
err = c.send(ctx, zb.B)
err = c.send(ctx, b)
}
if err == nil {
sentRows.Add(len(wr.Timeseries))
sentBytes.Add(len(zb.B))
sentBytes.Add(len(b))
flushedRows.Update(float64(len(wr.Timeseries)))
flushedBytes.Update(float64(len(zb.B)))
flushedBytes.Update(float64(len(b)))
return
}
@@ -357,16 +340,12 @@ func (c *Client) send(ctx context.Context, data []byte) error {
return fmt.Errorf("failed to create new HTTP request: %w", err)
}
req.Header.Set("User-Agent", "vmalert")
// RFC standard compliant headers
req.Header.Set("Content-Encoding", "snappy")
req.Header.Set("Content-Type", "application/x-protobuf")
if encoding.IsZstd(data) {
req.Header.Set("Content-Encoding", "zstd")
req.Header.Set("X-VictoriaMetrics-Remote-Write-Version", "1")
} else {
req.Header.Set("Content-Encoding", "snappy")
req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
}
// Prometheus compliant headers
req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
if c.authCfg != nil {
err = c.authCfg.SetHeaders(req, true)
@@ -395,29 +374,6 @@ func (c *Client) send(ctx context.Context, data []byte) error {
// respond with HTTP 2xx status code when write is successful.
return nil
case 4:
// - Remote Write v1 specification implicitly expects a `400 Bad Request` when the encoding is not supported.
// - Remote Write v2 specification explicitly specifies a `415 Unsupported Media Type` for unsupported encodings.
// - Real-world implementations of v1 use both 400 and 415 status codes.
// See more in research: https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8462#issuecomment-2786918054
if resp.StatusCode == http.StatusUnsupportedMediaType || resp.StatusCode == http.StatusBadRequest {
if encoding.IsZstd(data) {
logger.Infof("received unsupported media type or bad request from remote storage at %q. Re-packing the block to Prometheus remote write and retrying."+
"See https://docs.victoriametrics.com/victoriametrics/vmagent/#victoriametrics-remote-write-protocol", req.URL.Redacted())
zstdBlockLen := len(data)
data, err = repackBlockFromZstdToSnappy(data)
if err == nil {
logger.Infof("received unsupported media type or bad request from remote storage at %q. Downgrading protocol from VictoriaMetrics to Prometheus remote write for all future requests. "+
"See https://docs.victoriametrics.com/victoriametrics/vmagent/#victoriametrics-remote-write-protocol", req.URL.Redacted())
c.isVMRemoteWrite.Store(false)
return c.send(ctx, data)
}
logger.Warnf("failed to repack zstd block (%d bytes) to snappy: %s; The block will be rejected. "+
"Possible cause: ungraceful shutdown leading to persisted queue corruption.",
zstdBlockLen, err)
}
}
if resp.StatusCode != http.StatusTooManyRequests {
// MUST NOT retry write requests on HTTP 4xx responses other than 429
return &nonRetriableError{
@@ -438,19 +394,3 @@ type nonRetriableError struct {
func (e *nonRetriableError) Error() string {
return e.err.Error()
}
var (
writeRequestBufPool bytesutil.ByteBufferPool
compressBufPool bytesutil.ByteBufferPool
)
// repackBlockFromZstdToSnappy repacks the given zstd-compressed block to snappy-compressed block.
func repackBlockFromZstdToSnappy(zstdBlock []byte) ([]byte, error) {
plainBlock := make([]byte, 0, len(zstdBlock)*2)
plainBlock, err := encoding.DecompressZSTD(plainBlock, zstdBlock)
if err != nil {
return nil, err
}
return snappy.Encode(nil, plainBlock), nil
}

View File

@@ -12,7 +12,8 @@ import (
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
@@ -102,10 +103,7 @@ func TestClient_run_maxBatchSizeDuringShutdown(t *testing.T) {
// push time series to the client.
for range pushCnt {
if err = rwClient.Push(prompb.TimeSeries{
Labels: []prompb.Label{{Name: "__name__", Value: "m"}},
Samples: []prompb.Sample{{Value: 1, Timestamp: 1000}},
}); err != nil {
if err = rwClient.Push(prompb.TimeSeries{}); err != nil {
t.Fatalf("cannot time series to the client: %s", err)
}
}
@@ -158,8 +156,8 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {
}
h := r.Header.Get("Content-Encoding")
if h != "zstd" {
rw.err(w, fmt.Errorf("header read error: Content-Encoding is not zstd (%q)", h))
if h != "snappy" {
rw.err(w, fmt.Errorf("header read error: Content-Encoding is not snappy (%q)", h))
}
h = r.Header.Get("Content-Type")
@@ -167,9 +165,9 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {
rw.err(w, fmt.Errorf("header read error: Content-Type is not x-protobuf (%q)", h))
}
h = r.Header.Get("X-VictoriaMetrics-Remote-Write-Version")
if h != "1" {
rw.err(w, fmt.Errorf("header read error: X-VictoriaMetrics-Remote-Write-Version is not 1 (%q)", h))
h = r.Header.Get("X-Prometheus-Remote-Write-Version")
if h != "0.1.0" {
rw.err(w, fmt.Errorf("header read error: X-Prometheus-Remote-Write-Version is not 0.1.0 (%q)", h))
}
data, err := io.ReadAll(r.Body)
@@ -179,7 +177,7 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {
}
defer func() { _ = r.Body.Close() }()
b, err := zstd.Decompress(nil, data)
b, err := snappy.Decode(nil, data)
if err != nil {
rw.err(w, fmt.Errorf("decode err: %w", err))
return

View File

@@ -9,7 +9,8 @@ import (
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
@@ -63,17 +64,19 @@ func (c *DebugClient) Close() error {
}
func (c *DebugClient) send(data []byte) error {
b := zstd.CompressLevel(nil, data, 0)
b := snappy.Encode(nil, data)
r := bytes.NewReader(b)
req, err := http.NewRequest(http.MethodPost, c.addr, r)
if err != nil {
return fmt.Errorf("failed to create new HTTP request: %w", err)
}
req.Header.Set("Content-Encoding", "zstd")
// RFC standard compliant headers
req.Header.Set("Content-Encoding", "snappy")
req.Header.Set("Content-Type", "application/x-protobuf")
req.Header.Set("X-VictoriaMetrics-Remote-Write-Version", "1")
// Prometheus compliant headers
req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
if !*disablePathAppend {
req.URL.Path = path.Join(req.URL.Path, "/api/v1/write")

View File

@@ -13,8 +13,8 @@ import (
)
var (
addr = flag.String("remoteWrite.url", "", "Optional URL to persist alerts state and recording rules results in form of timeseries. "+
"It must support either VictoriaMetrics remote write protocol or Prometheus remote_write protocol. "+
addr = flag.String("remoteWrite.url", "", "Optional URL to VictoriaMetrics or vminsert where to persist alerts state "+
"and recording rules results in form of timeseries. "+
"Supports address in the form of IP address with a port (e.g., http://127.0.0.1:8428) or DNS SRV record. "+
"For example, if -remoteWrite.url=http://127.0.0.1:8428 is specified, "+
"then the alerts state will be written to http://127.0.0.1:8428/api/v1/write . See also -remoteWrite.disablePathAppend, '-remoteWrite.showURL'.")
@@ -26,7 +26,6 @@ var (
"Multiple headers must be delimited by '^^': -remoteWrite.headers='header1:value1^^header2:value2'")
basicAuthUsername = flag.String("remoteWrite.basicAuth.username", "", "Optional basic auth username for -remoteWrite.url")
basicAuthUsernameFile = flag.String("remoteWrite.basicAuth.usernameFile", "", "Optional path to basic auth username to use for -remoteWrite.url")
basicAuthPassword = flag.String("remoteWrite.basicAuth.password", "", "Optional basic auth password for -remoteWrite.url")
basicAuthPasswordFile = flag.String("remoteWrite.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -remoteWrite.url")
@@ -84,7 +83,7 @@ func Init(ctx context.Context) (*Client, error) {
return nil, fmt.Errorf("cannot parse JSON for -remoteWrite.oauth2.endpointParams=%s: %w", *oauth2EndpointParams, err)
}
authCfg, err := vmalertutil.AuthConfig(
vmalertutil.WithBasicAuth(*basicAuthUsername, *basicAuthUsernameFile, *basicAuthPassword, *basicAuthPasswordFile),
vmalertutil.WithBasicAuth(*basicAuthUsername, *basicAuthPassword, *basicAuthPasswordFile),
vmalertutil.WithBearer(*bearerToken, *bearerTokenFile),
vmalertutil.WithOAuth(*oauth2ClientID, *oauth2ClientSecret, *oauth2ClientSecretFile, *oauth2TokenURL, *oauth2Scopes, endpointParams),
vmalertutil.WithHeaders(*headers))

View File

@@ -312,11 +312,9 @@ type labelSet struct {
// On k conflicts in origin set, the original value is preferred and copied
// to processed with `exported_%k` key. The copy happens only if passed v isn't equal to origin[k] value.
func (ls *labelSet) add(k, v string) {
// do not add label with empty value to the result, as it has no meaning:
// if the label already exists in the original query result, remove it to preserve compatibility with relabeling, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766.
// otherwise, ignore the label, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984.
// do not add label with empty value, since it has no meaning.
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
if v == "" {
delete(ls.processed, k)
return
}
ls.processed[k] = v

View File

@@ -1363,7 +1363,6 @@ func TestAlertingRule_ToLabels(t *testing.T) {
{Name: "instance", Value: "0.0.0.0:8800"},
{Name: "group", Value: "vmalert"},
{Name: "alertname", Value: "ConfigurationReloadFailure"},
{Name: "pod", Value: "vmalert-0"},
},
Values: []float64{1},
Timestamps: []int64{time.Now().UnixNano()},
@@ -1375,7 +1374,6 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert", // this shouldn't have effect since value in metric is equal
"invalid_label": "{{ .Values.mustRuntimeFail }}",
"empty_label": "", // this should be dropped
"pod": "", // this should remove the pod label from query result
},
Expr: "sum(vmalert_alerting_rules_error) by(instance, group, alertname) > 0",
Name: "AlertingRulesError",
@@ -1387,7 +1385,6 @@ func TestAlertingRule_ToLabels(t *testing.T) {
"group": "vmalert",
"alertname": "ConfigurationReloadFailure",
"alertgroup": "vmalert",
"pod": "vmalert-0",
"invalid_label": `error evaluating template: template: :1:298: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
}

View File

@@ -8,7 +8,6 @@ import (
"hash/fnv"
"maps"
"net/url"
"path"
"sync"
"time"
@@ -43,9 +42,6 @@ var (
"For example, if lookback=1h then range from now() to now()-1h will be scanned.")
maxStartDelay = flag.Duration("group.maxStartDelay", 5*time.Minute, "Defines the max delay before starting the group evaluation. Group's start is artificially delayed for random duration on interval"+
" [0..min(--group.maxStartDelay, group.interval)]. This helps smoothing out the load on the configured datasource, so evaluations aren't executed too close to each other.")
ruleStripFilePath = flag.Bool("rule.stripFilePath", false, "Whether to strip rule file paths in logs and all API responses, including /metrics. "+
"For example, file path '/path/to/tenant_id/rules.yml' will be stripped to 'groupHashID/rules.yml'. "+
"This flag may be useful for hiding sensitive information in file paths, such as S3 bucket details.")
)
// Group is an entity for grouping rules
@@ -151,12 +147,6 @@ func NewGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval ti
g.EvalDelay = &cfg.EvalDelay.D
}
g.id = g.CreateID()
// strip file path from group.File after generated group ID when ruleStripFilePath is set,
// so it won't be exposed in logs and api responses
if *ruleStripFilePath {
_, filename := path.Split(g.File)
g.File = fmt.Sprintf("%d/%s", g.id, filename)
}
for _, h := range cfg.Headers {
g.Headers[h.Key] = h.Value
}
@@ -419,9 +409,6 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc
g.mu.Unlock()
defer g.evalCancel()
// start the interval ticker before the first evaluation,
// so that the evaluation timestamps of groups with the `eval_offset` option are also aligned,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10773
t := time.NewTicker(g.Interval)
defer t.Stop()

View File

@@ -742,64 +742,3 @@ func parseTime(t *testing.T, s string) time.Time {
}
return tt
}
func TestRuleStripFilePath(t *testing.T) {
configG := config.Group{
Name: "group",
File: "/var/local/test/rules.yaml",
Type: config.NewRawType("prometheus"),
Concurrency: 1,
Rules: []config.Rule{
{
ID: 0,
Alert: "alert",
},
{
ID: 1,
Record: "record",
},
}}
qb := &datasource.FakeQuerier{}
g := NewGroup(configG, qb, 1*time.Minute, nil)
gID := g.id
if g.File != "/var/local/test/rules.yaml" {
t.Fatalf("expected file path to be unchanged; got %q instead", g.File)
}
for _, r := range g.Rules {
if ar, ok := r.(*AlertingRule); ok {
if ar.File != "/var/local/test/rules.yaml" {
t.Fatalf("expected rule file path to be unchanged; got %q instead", ar.File)
}
}
if rr, ok := r.(*RecordingRule); ok {
if rr.File != "/var/local/test/rules.yaml" {
t.Fatalf("expected rule file path to be unchanged; got %q instead", rr.File)
}
}
}
oldRuleStripFilePath := *ruleStripFilePath
*ruleStripFilePath = true
defer func() {
*ruleStripFilePath = oldRuleStripFilePath
}()
g = NewGroup(configG, qb, 1*time.Minute, nil)
if g.File != fmt.Sprintf("%d/rules.yaml", gID) {
t.Fatalf("expected file path to be stripped to %q; got %q instead", fmt.Sprintf("%d/rules.yaml", gID), g.File)
}
for _, r := range g.Rules {
if ar, ok := r.(*AlertingRule); ok {
if ar.File != fmt.Sprintf("%d/rules.yaml", gID) {
t.Fatalf("expected rule file path to be unchanged; got %q instead", ar.File)
}
}
if rr, ok := r.(*RecordingRule); ok {
if rr.File != fmt.Sprintf("%d/rules.yaml", gID) {
t.Fatalf("expected rule file path to be unchanged; got %q instead", rr.File)
}
}
}
}

View File

@@ -293,11 +293,9 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompb.TimeSeries {
}
// add extra labels configured by user
for k := range rr.Labels {
// do not add label with empty value to the result, as it has no meaning:
// if the label already exists in the original query result, remove it to preserve compatibility with relabeling, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10766.
// otherwise, ignore the label, see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984.
// do not add label with empty value, since it has no meaning.
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
if rr.Labels[k] == "" {
m.DelLabel(k)
continue
}
existingLabel := promrelabel.GetLabelByName(m.Labels, k)

View File

@@ -163,13 +163,11 @@ func TestRecordingRule_Exec(t *testing.T) {
f(&RecordingRule{
Name: "job:foo",
Labels: map[string]string{
"source": "test",
"empty_label": "", // this should be dropped
"pod": "", // this should remove the pod label from query result
"source": "test",
},
}, [][]datasource.Metric{{
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo", "pod", "vmalert-0"),
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin", "pod", "vmalert-1"),
metricWithValueAndLabels(t, 2, "__name__", "foo", "job", "foo"),
metricWithValueAndLabels(t, 1, "__name__", "bar", "job", "bar", "source", "origin"),
metricWithValueAndLabels(t, 1, "__name__", "baz", "job", "baz", "source", "test"),
}}, [][]prompb.TimeSeries{{
newTimeSeries([]float64{2}, []int64{ts.UnixNano()}, []prompb.Label{

View File

@@ -252,9 +252,6 @@ func (r *ApiRule) ExtendState() {
// ToAPI returns ApiGroup representation of g
func (g *Group) ToAPI() *ApiGroup {
if g == nil {
return &ApiGroup{}
}
g.mu.RLock()
defer g.mu.RUnlock()
ag := ApiGroup{

View File

@@ -402,20 +402,6 @@ func templateFuncs() textTpl.FuncMap {
return t, nil
},
// formatTime formats the given Unix timestamp with the provided layout.
// For example: {{ now | formatTime "2006-01-02T15:04:05Z07:00" }}
"formatTime": func(layout string, i any) (string, error) {
v, err := toFloat64(i)
if err != nil {
return "", fmt.Errorf("formatTime: %w", err)
}
if math.IsNaN(v) || math.IsInf(v, 0) {
return "", fmt.Errorf("formatTime: cannot convert %v to time", v)
}
t := timeFromUnixTimestamp(v).Time().UTC()
return t.Format(layout), nil
},
/* URLs */
// externalURL returns value of `external.url` flag

View File

@@ -6,7 +6,6 @@ import (
"strings"
"testing"
textTpl "text/template"
"time"
)
func TestTemplateFuncs_StringConversion(t *testing.T) {
@@ -104,26 +103,6 @@ func TestTemplateFuncs_Formatting(t *testing.T) {
f("humanizeTimestamp", 1679055557, "2023-03-17 12:19:17 +0000 UTC")
}
func TestTemplateFuncs_FormatTime(t *testing.T) {
funcs := templateFuncs()
formatTime := funcs["formatTime"].(func(layout string, i any) (string, error))
f := func(layout string, input any, expected string) {
t.Helper()
result, err := formatTime(layout, input)
if err != nil {
t.Fatalf("unexpected error for formatTime(%q, %v): %s", layout, input, err)
}
if result != expected {
t.Fatalf("unexpected result for formatTime(%q, %v); got\n%s\nwant\n%s", layout, input, result, expected)
}
}
f(time.RFC3339, float64(1679055557), "2023-03-17T12:19:17Z")
f("2006-01-02T15:04:05", int64(1679055557), "2023-03-17T12:19:17")
f(time.RFC822, int(1679055557), "17 Mar 23 12:19 UTC")
}
func mkTemplate(current, replacement any) textTemplate {
tmpl := textTemplate{}
if current != nil {

View File

@@ -20,12 +20,11 @@ func AuthConfig(filterOptions ...AuthConfigOptions) (*promauth.Config, error) {
}
// WithBasicAuth returns AuthConfigOptions and initialized promauth.BasicAuthConfig based on given params
func WithBasicAuth(username, usernameFile, password, passwordFile string) AuthConfigOptions {
func WithBasicAuth(username, password, passwordFile string) AuthConfigOptions {
return func(config *promauth.HTTPClientConfig) {
if username != "" || usernameFile != "" || password != "" || passwordFile != "" {
if username != "" || password != "" || passwordFile != "" {
config.BasicAuth = &promauth.BasicAuthConfig{
Username: username,
UsernameFile: usernameFile,
Password: promauth.NewSecret(password),
PasswordFile: passwordFile,
}

View File

@@ -362,62 +362,40 @@ func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
}
type backendURLs struct {
bhc backendHealthCheck
healthChecksContext context.Context
healthChecksCancel func()
healthChecksWG sync.WaitGroup
bus []*backendURL
}
type backendHealthCheck struct {
ctx context.Context
// mu protects fields below
cancel func()
mu sync.Mutex
isStopped bool
wg sync.WaitGroup
}
func (bhc *backendHealthCheck) run(hc func()) {
bhc.mu.Lock()
defer bhc.mu.Unlock()
if bhc.isStopped {
return
}
bhc.wg.Go(hc)
}
func (bhc *backendHealthCheck) stop() {
bhc.mu.Lock()
bhc.cancel()
bhc.isStopped = true
bhc.mu.Unlock()
bhc.wg.Wait()
}
func newBackendURLs() *backendURLs {
ctx, cancel := context.WithCancel(context.Background())
return &backendURLs{
bhc: backendHealthCheck{
ctx: ctx,
cancel: cancel,
},
healthChecksContext: ctx,
healthChecksCancel: cancel,
}
}
func (bus *backendURLs) add(u *url.URL) {
bus.bus = append(bus.bus, &backendURL{
url: u,
bhc: &bus.bhc,
hasPlaceHolders: hasAnyPlaceholders(u),
url: u,
healthCheckContext: bus.healthChecksContext,
healthCheckWG: &bus.healthChecksWG,
hasPlaceHolders: hasAnyPlaceholders(u),
})
}
func (bus *backendURLs) stopHealthChecks() {
bus.bhc.stop()
bus.healthChecksCancel()
bus.healthChecksWG.Wait()
}
type backendURL struct {
broken atomic.Bool
bhc *backendHealthCheck
healthCheckContext context.Context
healthCheckWG *sync.WaitGroup
concurrentRequests atomic.Int32
@@ -432,7 +410,7 @@ func (bu *backendURL) isBroken() bool {
func (bu *backendURL) setBroken() {
if bu.broken.CompareAndSwap(false, true) {
bu.bhc.run(func() {
bu.healthCheckWG.Go(func() {
bu.runHealthCheck()
bu.broken.Store(false)
})
@@ -454,11 +432,11 @@ func (bu *backendURL) runHealthCheck() {
case <-t.C:
// Verify network connectivity via TCP dial before marking backend healthy.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9997
ctx, cancel := context.WithTimeout(bu.bhc.ctx, time.Second)
ctx, cancel := context.WithTimeout(bu.healthCheckContext, time.Second)
c, err := netutil.Dialer.DialContext(ctx, "tcp", addr)
cancel()
if err != nil {
if errors.Is(bu.bhc.ctx.Err(), context.Canceled) {
if errors.Is(bu.healthCheckContext.Err(), context.Canceled) {
return
}
logger.Warnf("ignoring the backend at %s for %s because of dial error: %s", addr, *failTimeout, err)
@@ -467,7 +445,7 @@ func (bu *backendURL) runHealthCheck() {
_ = c.Close()
return
case <-bu.bhc.ctx.Done():
case <-bu.healthCheckContext.Done():
return
}
}
@@ -610,7 +588,6 @@ func areEqualBackendURLs(a, b []*backendURL) bool {
}
// getFirstAvailableBackendURL returns the first available backendURL, which isn't broken.
// If all backendURLs are broken, then returns the first backendURL.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func getFirstAvailableBackendURL(bus []*backendURL) *backendURL {
@@ -629,22 +606,21 @@ func getFirstAvailableBackendURL(bus []*backendURL) *backendURL {
return bu
}
}
// All backend urls are unavailable, then returning a first one, it could help increase the success rate of the requests。
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10837#issuecomment-4307050980.
bu.get()
return bu
return nil
}
// getLeastLoadedBackendURL returns a non-broken backendURL with the lowest number of concurrent requests.
// If all backendURLs are broken, then returns the first backendURL.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *backendURL {
firstBu := bus[0]
if len(bus) == 1 {
firstBu.get()
return firstBu
// Fast path - return the only backend url.
bu := bus[0]
if bu.isBroken() {
return nil
}
bu.get()
return bu
}
// Slow path - select other backend urls.
@@ -682,10 +658,7 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
}
buMin := bus[buMinIdx]
if buMin.isBroken() {
// If all backendURLs are broken, then returns the first backendURL.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10837#issuecomment-4307050980.
firstBu.get()
return firstBu
return nil
}
buMin.get()
atomicCounter.CompareAndSwap(n+1, buMinIdx+1)

View File

@@ -1031,33 +1031,6 @@ func TestLogRequest(t *testing.T) {
f("foo", 404, 10*time.Millisecond, `access_log request_host="localhost:8080" request_uri="" status_code=404 remote_addr="" user_agent="" referer="" duration_ms=10 username="foo"`)
}
func TestGetFirstAvailableBackend(t *testing.T) {
f := func(broken []bool, expectedIdx int) {
t.Helper()
bus := make([]*backendURL, len(broken))
for i := range broken {
bus[i] = &backendURL{
url: &url.URL{Host: fmt.Sprintf("server-%d", i)},
}
bus[i].broken.Store(broken[i])
}
bu := getFirstAvailableBackendURL(bus)
if bu == nil {
t.Fatalf("unexpected nil backend")
}
if bu.url.Host != fmt.Sprintf("server-%d", expectedIdx) {
t.Fatalf("unexpected backend, expected server-%d, got %s", expectedIdx, bu.url.Host)
}
}
f([]bool{false, false, false}, 0)
f([]bool{true, true, false}, 2)
// all backend are broken, then return the first one.
f([]bool{true, true, true}, 0)
f([]bool{true}, 0)
}
func getRegexs(paths []string) []*Regex {
var sps []*Regex
for _, path := range paths {

View File

@@ -51,7 +51,7 @@ var (
"This allows reducing the consumption of backend resources when processing requests from clients connected via slow networks. "+
"Set to 0 to disable request buffering. See https://docs.victoriametrics.com/victoriametrics/vmauth/#request-body-buffering")
maxRequestBodySizeToRetry = flagutil.NewBytes("maxRequestBodySizeToRetry", 16*1024, "The maximum request body size to buffer in memory for potential retries at other backends. "+
"Request bodies larger than this size cannot be retried if the backend fails. Zero or negative value disables retries. "+
"Request bodies larger than this size cannot be retried if the backend fails. Zero or negative value disables request body buffering and retries. "+
"See also -requestBufferSize")
maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process simultaneously. "+
@@ -481,9 +481,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
canRetry := !bbOK || bb.canRetry()
res, err := ui.rt.RoundTrip(req)
if err == nil {
defer func() { _ = res.Body.Close() }()
}
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not retry canceled requests.
@@ -553,6 +550,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
w.WriteHeader(res.StatusCode)
err = copyStreamToClient(w, res.Body)
_ = res.Body.Close()
if errors.Is(r.Context().Err(), context.Canceled) {
// Do not retry canceled requests.
@@ -850,18 +848,14 @@ func (bb *bufferedBody) Read(p []byte) (int, error) {
}
func (bb *bufferedBody) canRetry() bool {
if bb.r != nil {
return false
}
maxRetrySize := maxRequestBodySizeToRetry.IntN()
return len(bb.buf) == 0 || (maxRetrySize > 0 && len(bb.buf) <= maxRetrySize)
return bb.r == nil
}
// Close implements io.Closer interface.
func (bb *bufferedBody) Close() error {
bb.resetReader()
bb.cannotRetry = !bb.canRetry()
if bb.r != nil {
bb.cannotRetry = true
return bb.r.Close()
}
return nil

View File

@@ -19,7 +19,6 @@ import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync/atomic"
"testing"
@@ -1832,7 +1831,7 @@ func (r *mockBody) Read(p []byte) (n int, err error) {
}
func TestBufferedBody_RetrySuccess(t *testing.T) {
f := func(s string, maxSizeToRetry, bufferSize int) {
f := func(s string, maxBodySize int) {
t.Helper()
defaultRequestBufferSize := requestBufferSize.String()
@@ -1841,7 +1840,7 @@ func TestBufferedBody_RetrySuccess(t *testing.T) {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(strconv.Itoa(bufferSize)); err != nil {
if err := requestBufferSize.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
@@ -1851,7 +1850,7 @@ func TestBufferedBody_RetrySuccess(t *testing.T) {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set(strconv.Itoa(maxSizeToRetry)); err != nil {
if err := maxRequestBodySizeToRetry.Set("0"); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
@@ -1880,20 +1879,16 @@ func TestBufferedBody_RetrySuccess(t *testing.T) {
}
}
f("", 0, 2000)
f("", 0, 0)
f("", -1, 2000)
f("", 100, 2000)
f("foo", 100, 2000)
f("foobar", 100, 2000)
f("foobar", 100, 0)
f("foobar", 100, -1)
f(newTestString(1000), 1001, 2000)
f(newTestString(1000), 1001, 500)
f("", 0)
f("", -1)
f("", 100)
f("foo", 100)
f("foobar", 100)
f(newTestString(1000), 1001)
}
func TestBufferedBody_RetrySuccessPartialRead(t *testing.T) {
f := func(s string, maxSizeToRetry, bufferSize int) {
f := func(s string, maxBodySize int) {
t.Helper()
// Check the case with partial read
@@ -1903,7 +1898,7 @@ func TestBufferedBody_RetrySuccessPartialRead(t *testing.T) {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(strconv.Itoa(bufferSize)); err != nil {
if err := requestBufferSize.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
@@ -1913,7 +1908,7 @@ func TestBufferedBody_RetrySuccessPartialRead(t *testing.T) {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set(strconv.Itoa(maxSizeToRetry)); err != nil {
if err := maxRequestBodySizeToRetry.Set("0"); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
@@ -1957,20 +1952,16 @@ func TestBufferedBody_RetrySuccessPartialRead(t *testing.T) {
}
}
f("", 0, 2000)
f("", 0, 0)
f("", -1, 2000)
f("", 100, 2000)
f("foo", 100, 2000)
f("foobar", 100, 2000)
f("foobar", 100, 0)
f("foobar", 100, -1)
f(newTestString(1000), 1001, 2000)
f(newTestString(1000), 1001, 500)
f("", 0)
f("", -1)
f("", 100)
f("foo", 100)
f("foobar", 100)
f(newTestString(1000), 1001)
}
func TestBufferedBody_RetryFailureTooBigBody(t *testing.T) {
f := func(s string, maxSizeToRetry, bufferSize int) {
f := func(s string, maxBodySize int) {
t.Helper()
defaultRequestBufferSize := requestBufferSize.String()
@@ -1979,7 +1970,7 @@ func TestBufferedBody_RetryFailureTooBigBody(t *testing.T) {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(strconv.Itoa(bufferSize)); err != nil {
if err := requestBufferSize.Set("0"); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
@@ -1989,7 +1980,7 @@ func TestBufferedBody_RetryFailureTooBigBody(t *testing.T) {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set(strconv.Itoa(maxSizeToRetry)); err != nil {
if err := maxRequestBodySizeToRetry.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
@@ -2034,17 +2025,12 @@ func TestBufferedBody_RetryFailureTooBigBody(t *testing.T) {
}
const maxBodySize = 1000
f(newTestString(maxBodySize+1), 0, 2*maxBodySize)
f(newTestString(maxBodySize+1), -1, 2*maxBodySize)
f(newTestString(maxBodySize+1), maxBodySize, 0)
f(newTestString(maxBodySize+1), maxBodySize, -1)
f(newTestString(maxBodySize+1), maxBodySize, maxBodySize)
f(newTestString(maxBodySize+1), maxBodySize, 2*maxBodySize)
f(newTestString(2*maxBodySize), maxBodySize, 0)
f(newTestString(maxBodySize+1), maxBodySize)
f(newTestString(2*maxBodySize), maxBodySize)
}
func TestBufferedBody_RetryDisabledByMaxRequestBodySizeToRetry(t *testing.T) {
f := func(s string, maxSizeToRetry, bufferSize int) {
func TestBufferedBody_RetryFailureZeroOrNegativeMaxBodySize(t *testing.T) {
f := func(s string, maxBodySize int) {
t.Helper()
defaultRequestBufferSize := requestBufferSize.String()
@@ -2053,20 +2039,10 @@ func TestBufferedBody_RetryDisabledByMaxRequestBodySizeToRetry(t *testing.T) {
t.Fatalf("cannot reset requestBufferSize: %s", err)
}
}()
if err := requestBufferSize.Set(strconv.Itoa(bufferSize)); err != nil {
if err := requestBufferSize.Set(fmt.Sprintf("%d", maxBodySize)); err != nil {
t.Fatalf("cannot set requestBufferSize: %s", err)
}
defaultMaxRequestBodySizeToRetry := maxRequestBodySizeToRetry.String()
defer func() {
if err := maxRequestBodySizeToRetry.Set(defaultMaxRequestBodySizeToRetry); err != nil {
t.Fatalf("cannot reset maxRequestBodySizeToRetry: %s", err)
}
}()
if err := maxRequestBodySizeToRetry.Set(strconv.Itoa(maxSizeToRetry)); err != nil {
t.Fatalf("cannot set maxRequestBodySizeToRetry: %s", err)
}
ctx := context.Background()
rb, err := bufferRequestBody(ctx, io.NopCloser(bytes.NewBufferString(s)), "foo")
if err != nil {
@@ -2075,8 +2051,8 @@ func TestBufferedBody_RetryDisabledByMaxRequestBodySizeToRetry(t *testing.T) {
bb, ok := rb.(*bufferedBody)
canRetry := !ok || bb.canRetry()
if canRetry {
t.Fatalf("canRetry() must return false before reading anything")
if !canRetry {
t.Fatalf("canRetry() must return true before reading anything")
}
data, err := io.ReadAll(rb)
if err != nil {
@@ -2090,19 +2066,19 @@ func TestBufferedBody_RetryDisabledByMaxRequestBodySizeToRetry(t *testing.T) {
}
data, err = io.ReadAll(rb)
if err == nil {
t.Fatalf("expecting non-nil error")
if err != nil {
t.Fatalf("unexpected error in io.ReadAll: %s", err)
}
if len(data) != 0 {
t.Fatalf("unexpected non-empty data read: %q", data)
if string(data) != s {
t.Fatalf("unexpected data read\ngot\n%s\nwant\n%s", data, s)
}
}
f("foobar", 0, 2048)
f(newTestString(1000), 0, 2048)
f("foobar", 0)
f(newTestString(1000), 0)
f("foobar", -1, 2048)
f(newTestString(1000), -1, 2048)
f("foobar", -1)
f(newTestString(1000), -1)
}
func newTestString(sLen int) string {

View File

@@ -8,10 +8,10 @@ import (
"time"
vmetrics "github.com/VictoriaMetrics/metrics"
"github.com/cheggaaa/pb/v3"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/cheggaaa/pb/v3"
)
type otsdbProcessor struct {
@@ -89,6 +89,9 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
// we're going to make serieslist * queryRanges queries, so we should represent that in the progress bar
otsdbSeriesTotal.Add(len(serieslist) * queryRanges)
bar := pb.StartNew(len(serieslist) * queryRanges)
defer func(bar *pb.ProgressBar) {
bar.Finish()
}(bar)
var wg sync.WaitGroup
for range op.otsdbcc {
wg.Go(func() {
@@ -103,22 +106,41 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
}
})
}
runErr := op.sendQueries(ctx, serieslist, seriesCh, errCh, startTime)
/*
Loop through all series for this metric, processing all retentions and time ranges
requested. This loop is our primary "collect data from OpenTSDB loop" and should
be async, sending data to VictoriaMetrics over time.
// Always drain channels and wait for workers to prevent goroutine leaks
The idea with having the select at the inner-most loop is to ensure quick
short-circuiting on error.
*/
for _, series := range serieslist {
for _, rt := range op.oc.Retentions {
for _, tr := range rt.QueryRanges {
select {
case otsdbErr := <-errCh:
return fmt.Errorf("opentsdb error: %s", otsdbErr)
case vmErr := <-op.im.Errors():
otsdbErrorsTotal.Inc()
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
case seriesCh <- queryObj{
Tr: tr, StartTime: startTime,
Series: series, Rt: opentsdb.RetentionMeta{
FirstOrder: rt.FirstOrder, SecondOrder: rt.SecondOrder, AggTime: rt.AggTime}}:
}
}
}
}
// Drain channels per metric
close(seriesCh)
wg.Wait()
close(errCh)
// check for any lingering errors on the query side
for otsdbErr := range errCh {
if runErr == nil {
runErr = fmt.Errorf("import process failed: \n%s", otsdbErr)
}
return fmt.Errorf("import process failed: \n%s", otsdbErr)
}
bar.Finish()
if runErr != nil {
return runErr
}
log.Print(op.im.Stats())
}
op.im.Close()
@@ -133,34 +155,6 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
return nil
}
// sendQueries iterates over all series and retention ranges, sending queries to workers.
// It returns early if ctx is canceled or an error is received.
func (op *otsdbProcessor) sendQueries(ctx context.Context, serieslist []opentsdb.Meta, seriesCh chan<- queryObj, errCh <-chan error, startTime int64) error {
for _, series := range serieslist {
for _, rt := range op.oc.Retentions {
for _, tr := range rt.QueryRanges {
select {
case <-ctx.Done():
return fmt.Errorf("context canceled: %s", ctx.Err())
case otsdbErr := <-errCh:
otsdbErrorsTotal.Inc()
return fmt.Errorf("opentsdb error: %s", otsdbErr)
case vmErr := <-op.im.Errors():
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
case seriesCh <- queryObj{
Tr: tr, StartTime: startTime,
Series: series, Rt: opentsdb.RetentionMeta{
FirstOrder: rt.FirstOrder,
SecondOrder: rt.SecondOrder,
AggTime: rt.AggTime,
}}:
}
}
}
}
return nil
}
func (op *otsdbProcessor) do(s queryObj) error {
start := s.StartTime - s.Tr.Start
end := s.StartTime - s.Tr.End
@@ -169,7 +163,6 @@ func (op *otsdbProcessor) do(s queryObj) error {
return fmt.Errorf("failed to collect data for %v in %v:%v :: %v", s.Series, s.Rt, s.Tr, err)
}
if len(data.Timestamps) < 1 || len(data.Values) < 1 {
log.Printf("no data found for %v in %v:%v...skipping", s.Series, s.Rt, s.Tr)
return nil
}
labels := make([]vm.LabelPair, 0, len(data.Tags))

View File

@@ -108,10 +108,10 @@ func (c Client) FindMetrics(q string) ([]string, error) {
if err != nil {
return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("bad return from OpenTSDB: %d: %v", resp.StatusCode, resp)
}
defer func() { _ = resp.Body.Close() }()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("could not retrieve metric data from %q: %s", q, err)
@@ -130,12 +130,12 @@ func (c Client) FindSeries(metric string) ([]Meta, error) {
q := fmt.Sprintf("%s/api/search/lookup?m=%s&limit=%d", c.Addr, metric, c.Limit)
resp, err := c.c.Get(q)
if err != nil {
return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
return nil, fmt.Errorf("failed to set GET request to %q: %s", q, err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("bad return from OpenTSDB: %d: %v", resp.StatusCode, resp)
}
defer func() { _ = resp.Body.Close() }()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("could not retrieve series data from %q: %s", q, err)
@@ -185,7 +185,6 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
if err != nil {
return Metric{}, fmt.Errorf("failed to send GET request to %q: %s", q, err)
}
defer func() { _ = resp.Body.Close() }()
/*
There are three potential failures here, none of which should kill the entire
migration run:
@@ -197,6 +196,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
log.Printf("bad response code from OpenTSDB query %v for %q...skipping", resp.StatusCode, q)
return Metric{}, nil
}
defer func() { _ = resp.Body.Close() }()
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Println("couldn't read response body from OpenTSDB query...skipping")
@@ -239,20 +239,27 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
In all "bad" cases, we don't end the migration, we just don't process that particular message
*/
if len(output) < 1 {
// no results returned...return an empty object without error
return Metric{}, nil
}
if len(output) > 1 {
return Metric{}, fmt.Errorf("unexpected number of series returned: %d for query %q; expected 1", len(output), q)
// multiple series returned for a single query. We can't process this right, so...
return Metric{}, nil
}
if len(output[0].AggregateTags) > 0 {
return Metric{}, fmt.Errorf("aggregate tags %v present in response for query %q; series may be suppressed", output[0].AggregateTags, q)
// This failure means we've suppressed potential series somehow...
return Metric{}, nil
}
data := Metric{}
data.Metric = output[0].Metric
data.Tags = output[0].Tags
/*
We evaluate data for correctness before formatting the actual values
to skip a little bit of time if the series has invalid formatting
*/
data, err = modifyData(data, c.Normalize)
if err != nil {
return Metric{}, fmt.Errorf("failed to convert metric data for query %q: %w", q, err)
return Metric{}, nil
}
/*

View File

@@ -32,7 +32,7 @@ func convertDuration(duration string) (time.Duration, error) {
var err error
var timeValue int
if strings.HasSuffix(duration, "y") {
timeValue, err = strconv.Atoi(strings.TrimSuffix(duration, "y"))
timeValue, err = strconv.Atoi(strings.Trim(duration, "y"))
if err != nil {
return 0, fmt.Errorf("invalid time range: %q", duration)
}
@@ -42,7 +42,7 @@ func convertDuration(duration string) (time.Duration, error) {
return 0, fmt.Errorf("invalid time range: %q", duration)
}
} else if strings.HasSuffix(duration, "w") {
timeValue, err = strconv.Atoi(strings.TrimSuffix(duration, "w"))
timeValue, err = strconv.Atoi(strings.Trim(duration, "w"))
if err != nil {
return 0, fmt.Errorf("invalid time range: %q", duration)
}
@@ -52,7 +52,7 @@ func convertDuration(duration string) (time.Duration, error) {
return 0, fmt.Errorf("invalid time range: %q", duration)
}
} else if strings.HasSuffix(duration, "d") {
timeValue, err = strconv.Atoi(strings.TrimSuffix(duration, "d"))
timeValue, err = strconv.Atoi(strings.Trim(duration, "d"))
if err != nil {
return 0, fmt.Errorf("invalid time range: %q", duration)
}
@@ -95,9 +95,6 @@ func convertRetention(retention string, offset int64, msecTime bool) (Retention,
if !msecTime {
queryLength = queryLength / 1000
}
if queryLength <= 0 {
return Retention{}, fmt.Errorf("ttl %q resolves to non-positive query range %d; use a larger duration", chunks[2], queryLength)
}
queryRange := queryLength
// bump by the offset so we don't look at empty ranges any time offset > ttl
queryLength += offset
@@ -141,29 +138,16 @@ func convertRetention(retention string, offset int64, msecTime bool) (Retention,
2. we discover the actual size of each "chunk"
This is second division step
*/
divisor := queryRange / (rowLength * 4)
if divisor == 0 {
querySize = queryRange
} else {
querySize = queryRange / divisor
}
querySize = int64(queryRange / (queryRange / (rowLength * 4)))
} else {
/*
Unless the aggTime (how long a range of data we're requesting per individual point)
is greater than the row size. Then we'll need to use that to determine
how big each individual query should be
*/
divisor := queryRange / (aggTime * 4)
if divisor == 0 {
querySize = queryRange
} else {
querySize = queryRange / divisor
}
querySize = int64(queryRange / (queryRange / (aggTime * 4)))
}
if querySize <= 0 {
return Retention{}, fmt.Errorf("computed non-positive querySize=%d for retention %q; check parameters", querySize, retention)
}
var timeChunks []TimeRange
var i int64
for i = offset; i <= queryLength; i = i + querySize {

View File

@@ -262,7 +262,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
case "/api/v1/export":
exportRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.ExportHandler(startTime, w, r); err != nil {
exportErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
@@ -271,7 +270,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
case "/api/v1/export/csv":
exportCSVRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.ExportCSVHandler(startTime, w, r); err != nil {
exportCSVErrors.Inc()
httpserver.Errorf(w, r, "%s", err)
@@ -280,7 +278,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
case "/api/v1/export/native":
exportNativeRequests.Inc()
httpserver.EnableCORS(w, r)
if err := prometheus.ExportNativeHandler(startTime, w, r); err != nil {
exportNativeErrors.Inc()
httpserver.Errorf(w, r, "%s", err)

View File

@@ -1223,7 +1223,11 @@ func getCommonParamsInternal(r *http.Request, startTime time.Time, requireNonEmp
if err != nil {
return nil, err
}
maxTS := int64(math.MaxInt64 / 1_000_000)
// Limit the `end` arg to the current time +2 days in the same way
// as it is limited during data ingestion.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/blob/ea06d2fd3ccbbb6aa4480ab3b04f7b671408be2a/lib/storage/table.go#L378
// This should fix possible timestamp overflow - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2669
maxTS := startTime.UnixNano()/1e6 + 2*24*3600*1000
if end > maxTS {
end = maxTS
}

View File

@@ -132,20 +132,9 @@ func (d *Deadline) String() string {
//
// {env="prod",team="devops",t1="v1",t2="v2"}
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
//
// Query args from URL path have precedence over post form args.
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
var tagFilters []storage.TagFilter
urlQueryValues := r.URL.Query()
getRequestParam := func(key string) []string {
// query request param must always take precedence over form values
// in order to simplify security enforcement policy for extra_label and extra_filters
if uv, ok := urlQueryValues[key]; ok {
return uv
}
return r.Form[key]
}
for _, match := range getRequestParam("extra_label") {
for _, match := range r.Form["extra_label"] {
tmp := strings.SplitN(match, "=", 2)
if len(tmp) != 2 {
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
@@ -159,8 +148,8 @@ func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
Value: []byte(tmp[1]),
})
}
extraFilters := append([]string{}, getRequestParam("extra_filters")...)
extraFilters = append(extraFilters, getRequestParam("extra_filters[]")...)
extraFilters := append([]string{}, r.Form["extra_filters"]...)
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
if len(extraFilters) == 0 {
if len(tagFilters) == 0 {
return nil, nil

View File

@@ -20,7 +20,6 @@ func TestGetExtraTagFilters(t *testing.T) {
}
return &http.Request{
Form: q,
URL: &url.URL{RawQuery: q.Encode()},
}
}
f := func(t *testing.T, r *http.Request, want []string, wantErr bool) {
@@ -80,24 +79,6 @@ func TestGetExtraTagFilters(t *testing.T) {
nil,
false,
)
formValues, err := url.ParseQuery(`extra_label=env=prod&extra_label=job=vmsingle&extra_label=tenant=prod&extra_filters[]={foo="bar"}&extra_filters[]={tenant="prod"}`)
if err != nil {
t.Fatalf("BUG: cannot parse query: %s", err)
}
urlValues, err := url.ParseQuery(`extra_label=job=vmagent&extra_label=env=dev&extra_filters[]={tenant="dev"}`)
if err != nil {
t.Fatalf("BUG: cannot parse query: %s", err)
}
httpReqWithBothFormAndURLParams := &http.Request{
Form: formValues,
URL: &url.URL{
RawQuery: urlValues.Encode(),
},
}
f(t, httpReqWithBothFormAndURLParams,
[]string{`{tenant="dev",job="vmagent",env="dev"}`},
false)
}
func TestParseMetricSelectorSuccess(t *testing.T) {

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -37,9 +37,9 @@
<meta property="og:title" content="UI for VictoriaMetrics">
<meta property="og:url" content="https://victoriametrics.com/">
<meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data">
<script type="module" crossorigin src="./assets/index-C7gvW_Zn.js"></script>
<script type="module" crossorigin src="./assets/index-C24BPpD_.js"></script>
<link rel="modulepreload" crossorigin href="./assets/rolldown-runtime-COnpUsM8.js">
<link rel="modulepreload" crossorigin href="./assets/vendor-C8Kwp93_.js">
<link rel="modulepreload" crossorigin href="./assets/vendor-BWBgVCcr.js">
<link rel="stylesheet" crossorigin href="./assets/vendor-CnsZ1jie.css">
<link rel="stylesheet" crossorigin href="./assets/index-D2OEy8Ra.css">
</head>

View File

@@ -33,8 +33,6 @@ import (
var (
retentionPeriod = flagutil.NewRetentionDuration("retentionPeriod", "1M", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. "+
"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention. See also -retentionFilter")
futureRetention = flagutil.NewRetentionDuration("futureRetention", "2d", "Data with timestamps bigger than now+futureRetention is automatically deleted. "+
"The minimum futureRetention is 2 days. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention")
snapshotAuthKey = flagutil.NewPassword("snapshotAuthKey", "authKey, which must be passed in query string to /snapshot* pages. It overrides -httpAuth.*")
forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages. It overrides -httpAuth.*")
forceFlushAuthKey = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush pages. It overrides -httpAuth.*")
@@ -137,12 +135,7 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
mergeset.SetDataBlocksSparseCacheSize(cacheSizeIndexDBDataBlocksSparse.IntN())
if retentionPeriod.Duration() < 24*time.Hour {
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s. "+
"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention", retentionPeriod)
}
if futureRetention.Duration() < 2*24*time.Hour {
logger.Fatalf("-futureRetention cannot be smaller than 2 days; got %s. "+
"See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#retention", futureRetention)
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
}
if *idbPrefillStart > 23*time.Hour {
logger.Panicf("-storage.idbPrefillStart cannot exceed 23 hours; got %s", idbPrefillStart)
@@ -152,7 +145,6 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
WG = syncwg.WaitGroup{}
opts := storage.OpenOptions{
Retention: retentionPeriod.Duration(),
FutureRetention: futureRetention.Duration(),
MaxHourlySeries: getMaxHourlySeries(),
MaxDailySeries: getMaxDailySeries(),
DisablePerDayIndex: *disablePerDayIndex,
@@ -180,7 +172,6 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
writeStorageMetrics(w, strg)
})
metrics.RegisterSet(storageMetrics)
fs.RegisterPathFsMetrics(*DataPath)
}
var storageMetrics *metrics.Set

View File

@@ -1,4 +1,4 @@
FROM golang:1.26.3 AS build-web-stage
FROM golang:1.26.2 AS build-web-stage
COPY build /build
WORKDIR /build
@@ -6,7 +6,7 @@ COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/
FROM alpine:3.23.4
FROM alpine:3.23.3
USER root
COPY --from=build-web-stage /build/web-amd64 /app/web

File diff suppressed because it is too large Load Diff

View File

@@ -23,14 +23,14 @@
"classnames": "^2.5.1",
"dayjs": "^1.11.20",
"lodash.debounce": "^4.0.8",
"marked": "^18.0.2",
"preact": "^10.29.1",
"qs": "^6.15.1",
"marked": "^17.0.5",
"preact": "^10.29.0",
"qs": "^6.15.0",
"react-input-mask": "^2.0.4",
"react-router-dom": "^7.14.1",
"react-router-dom": "^7.13.2",
"uplot": "^1.6.32",
"vite": "^8.0.8",
"web-vitals": "^5.2.0"
"vite": "^8.0.7",
"web-vitals": "^5.1.0"
},
"devDependencies": {
"@eslint/eslintrc": "^3.3.5",
@@ -39,24 +39,24 @@
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/preact": "^3.2.4",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "^25.6.0",
"@types/node": "^25.5.0",
"@types/qs": "^6.15.0",
"@types/react": "^19.2.14",
"@types/react-input-mask": "^3.0.6",
"@types/react-router-dom": "^5.3.3",
"@typescript-eslint/eslint-plugin": "^8.58.2",
"@typescript-eslint/parser": "^8.58.2",
"@typescript-eslint/eslint-plugin": "^8.57.2",
"@typescript-eslint/parser": "^8.57.2",
"cross-env": "^10.1.0",
"eslint": "^9.39.2",
"eslint-plugin-react": "^7.37.5",
"eslint-plugin-unused-imports": "^4.4.1",
"globals": "^17.5.0",
"globals": "^17.4.0",
"http-proxy-middleware": "^3.0.5",
"jsdom": "^29.0.2",
"postcss": "^8.5.10",
"sass-embedded": "^1.99.0",
"typescript": "^6.0.2",
"vitest": "^4.1.4"
"jsdom": "^29.0.1",
"postcss": "^8.5.8",
"sass-embedded": "^1.98.0",
"typescript": "^5.9.3",
"vitest": "^4.1.1"
},
"browserslist": {
"production": [

View File

@@ -1,7 +1,7 @@
import { useMemo } from "preact/compat";
import "./style.scss";
import { Alert as APIAlert, Group } from "../../../types";
import { Link } from "react-router-dom";
import { Alert as APIAlert } from "../../../types";
import { createSearchParams } from "react-router-dom";
import Button from "../../Main/Button/Button";
import Badges, { BadgeColor } from "../Badges";
import { formatEventTime } from "../helpers";
@@ -9,14 +9,12 @@ import {
SearchIcon,
} from "../../Main/Icons";
import CodeExample from "../../Main/CodeExample/CodeExample";
import router from "../../../router";
interface BaseAlertProps {
item: APIAlert;
group?: Group;
}
const BaseAlert = ({ item, group }: BaseAlertProps) => {
const BaseAlert = ({ item }: BaseAlertProps) => {
const query = item?.expression;
const alertLabels = item?.labels || {};
const alertLabelsItems = useMemo(() => {
@@ -26,19 +24,13 @@ const BaseAlert = ({ item, group }: BaseAlertProps) => {
}]));
}, [alertLabels]);
const queryLink = useMemo(() => {
if (!group?.interval) return;
const params = new URLSearchParams({
const openQueryLink = () => {
const params = {
"g0.expr": query,
"g0.end_time": item.activeAt,
// Interval is the Group's evaluation interval in float seconds as present in the file. See: /app/vmalert/rule/web.go
"g0.step_input": `${group.interval}s`,
"g0.relative_time": "none",
});
return `${router.home}?${params.toString()}`;
}, [query, item.activeAt, group?.interval]);
"g0.end_time": ""
};
window.open(`#/?${createSearchParams(params).toString()}`, "_blank", "noopener noreferrer");
};
return (
<div className="vm-explore-alerts-alert-item">
@@ -53,22 +45,15 @@ const BaseAlert = ({ item, group }: BaseAlertProps) => {
style={{ "text-align": "end" }}
colSpan={2}
>
{queryLink && (
<Link
to={queryLink}
target={"_blank"}
rel="noreferrer"
>
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
>
<span className="vm-button-text">Run query</span>
</Button>
</Link>
)}
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
onClick={openQueryLink}
>
<span className="vm-button-text">Run query</span>
</Button>
</td>
</tr>
<tr>

View File

@@ -1,21 +1,19 @@
import { useMemo } from "preact/compat";
import "./style.scss";
import { Group, Rule as APIRule } from "../../../types";
import { useNavigate, Link } from "react-router-dom";
import { Rule as APIRule } from "../../../types";
import { useNavigate, createSearchParams } from "react-router-dom";
import { SearchIcon, DetailsIcon } from "../../Main/Icons";
import Button from "../../Main/Button/Button";
import Alert from "../../Main/Alert/Alert";
import Badges, { BadgeColor } from "../Badges";
import { formatDuration, formatEventTime } from "../helpers";
import CodeExample from "../../Main/CodeExample/CodeExample";
import router from "../../../router";
interface BaseRuleProps {
item: APIRule;
group?: Group;
}
const BaseRule = ({ item, group }: BaseRuleProps) => {
const BaseRule = ({ item }: BaseRuleProps) => {
const query = item?.query;
const navigate = useNavigate();
const openAlertLink = (id: string) => {
@@ -35,19 +33,13 @@ const BaseRule = ({ item, group }: BaseRuleProps) => {
}]));
}, [ruleLabels]);
const queryLink = useMemo(() => {
if (!group?.interval) return;
const params = new URLSearchParams({
const openQueryLink = () => {
const params = {
"g0.expr": query,
"g0.end_time": item.lastEvaluation,
// Interval is the Group's evaluation interval in float seconds as present in the file. See: /app/vmalert/rule/web.go
"g0.step_input": `${group.interval}s`,
"g0.relative_time": "none",
});
return `${router.home}?${params.toString()}`;
}, [query, item.lastEvaluation, group?.interval]);
"g0.end_time": ""
};
window.open(`#/?${createSearchParams(params).toString()}`, "_blank", "noopener noreferrer");
};
return (
<div className="vm-explore-alerts-rule-item">
@@ -62,22 +54,15 @@ const BaseRule = ({ item, group }: BaseRuleProps) => {
style={{ "text-align": "end" }}
colSpan={2}
>
{queryLink && (
<Link
to={queryLink}
target={"_blank"}
rel="noreferrer"
>
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
>
<span className="vm-button-text">Run query</span>
</Button>
</Link>
)}
<Button
size="small"
variant="outlined"
color="gray"
startIcon={<SearchIcon />}
onClick={openQueryLink}
>
<span className="vm-button-text">Run query</span>
</Button>
</td>
</tr>
<tr>

View File

@@ -2,16 +2,15 @@ import { FC } from "preact/compat";
import ItemHeader from "../ItemHeader";
import Accordion from "../../Main/Accordion/Accordion";
import "./style.scss";
import { Group, Rule as APIRule } from "../../../types";
import { Rule as APIRule } from "../../../types";
import BaseRule from "../BaseRule";
interface RuleProps {
states: Record<string, number>;
rule: APIRule;
group: Group;
}
const Rule: FC<RuleProps> = ({ states, rule, group }) => {
const Rule: FC<RuleProps> = ({ states, rule }) => {
const state = Object.keys(states).length > 0 ? Object.keys(states)[0] : "ok";
return (
<div className={`vm-explore-alerts-rule vm-badge-item ${state.replace(" ", "-")}`}>
@@ -26,10 +25,7 @@ const Rule: FC<RuleProps> = ({ states, rule, group }) => {
name={rule.name}
/>}
>
<BaseRule
item={rule}
group={group}
/>
<BaseRule item={rule} />
</Accordion>
</div>
);

View File

@@ -50,6 +50,7 @@ const RulesHeader = ({
label="Rule type"
placeholder="Please select rule type"
onChange={onChangeRuleType}
autofocus={!!types.length && !isMobile}
includeAll
searchable
/>

View File

@@ -17,7 +17,7 @@ export const formatDuration = (raw: number) => {
export const formatEventTime = (raw: string) => {
const t = dayjs(raw);
return t.year() <= 1 ? "Never" : t.tz().format("DD MMM YYYY HH:mm:ss");
return t.year() <= 1 ? "Never" : t.format("DD MMM YYYY HH:mm:ss");
};
export const getStates = (rule: Rule) => {

View File

@@ -2,11 +2,10 @@ import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import { useFetchItem } from "./hooks/useFetchItem";
import "./style.scss";
import { Alert as APIAlert, Group as APIGroup } from "../../types";
import { Alert as APIAlert } from "../../types";
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
import BaseAlert from "../../components/ExploreAlerts/BaseAlert";
import Modal from "../../components/Main/Modal/Modal";
import { useFetchGroup } from "./hooks/useFetchGroup";
interface ExploreAlertProps {
groupId: string;
@@ -18,19 +17,10 @@ interface ExploreAlertProps {
const ExploreAlert = ({ groupId, id, mode, onClose }: ExploreAlertProps) => {
const {
item,
isLoading: isLoadingItem,
error: errorItem,
isLoading,
error,
} = useFetchItem<APIAlert>({ groupId, id, mode });
const {
group,
isLoading: isLoadingGroup,
error: errorGroup,
} = useFetchGroup<APIGroup>({ id: groupId });
const error = errorItem || errorGroup;
const isLoading = isLoadingItem || isLoadingGroup;
if (isLoading) return (
<Spinner />
);
@@ -61,12 +51,7 @@ const ExploreAlert = ({ groupId, id, mode, onClose }: ExploreAlertProps) => {
onClose={onClose}
>
<div className="vm-explore-alerts">
{item ? (
<BaseAlert
item={item}
group={group}
/>
) : (
{item && (<BaseAlert item={item} />) || (
<Alert variant="info">{noItemFound}</Alert>
)}
</div>

View File

@@ -2,12 +2,11 @@ import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import { useFetchItem } from "./hooks/useFetchItem";
import "./style.scss";
import { Group as APIGroup, Rule as APIRule } from "../../types";
import { Rule as APIRule } from "../../types";
import ItemHeader from "../../components/ExploreAlerts/ItemHeader";
import BaseRule from "../../components/ExploreAlerts/BaseRule";
import Modal from "../../components/Main/Modal/Modal";
import { getStates } from "../../components/ExploreAlerts/helpers";
import { useFetchGroup } from "./hooks/useFetchGroup";
interface ExploreRuleProps {
groupId: string;
@@ -19,19 +18,10 @@ interface ExploreRuleProps {
const ExploreRule = ({ groupId, id, mode, onClose }: ExploreRuleProps) => {
const {
item,
isLoading: isLoadingItem,
error: errorItem,
isLoading,
error,
} = useFetchItem<APIRule>({ groupId, id, mode });
const {
group,
isLoading: isLoadingGroup,
error: errorGroup,
} = useFetchGroup<APIGroup>({ id: groupId });
const error = errorItem || errorGroup;
const isLoading = isLoadingItem || isLoadingGroup;
if (isLoading) return (
<Spinner />
);
@@ -59,12 +49,7 @@ const ExploreRule = ({ groupId, id, mode, onClose }: ExploreRuleProps) => {
onClose={onClose}
>
<div className="vm-explore-alerts">
{item ? (
<BaseRule
item={item}
group={group}
/>
) : (
{item && (<BaseRule item={item} />) || (
<Alert variant="info">{noItemFound}</Alert>
)}
</div>

View File

@@ -132,7 +132,7 @@ const ExploreRules: FC = () => {
newParams.set("page_num", "1");
setSearchParams(newParams);
const changes = getChanges(title, states);
setStates(changes.length === allStates.length ? [] : changes);
setStates(changes.length == allStates.length ? [] : changes);
}, [states, searchParams]);
const handleChangeRuleType = useCallback((title: string) => {
@@ -186,7 +186,6 @@ const ExploreRules: FC = () => {
<Rule
key={`rule-${rule.id}`}
rule={rule}
group={group}
states={getStates(rule)}
/>
))}

View File

@@ -1,4 +1,4 @@
import { FC, ReactNode, useState } from "react";
import { FC, useState } from "react";
import { TopQuery } from "../../../types";
import JsonView from "../../../components/Views/JsonView/JsonView";
import { CodeIcon, TableIcon } from "../../../components/Main/Icons";
@@ -8,18 +8,10 @@ import "./style.scss";
import classNames from "classnames";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
export interface TopQueryColumn {
title?: string;
tooltip?: string;
key: keyof TopQuery;
sortBy?: keyof TopQuery;
format?: (row: TopQuery) => ReactNode;
}
export interface TopQueryPanelProps {
rows: TopQuery[],
title?: string,
columns: TopQueryColumn[],
columns: {title?: string, key: (keyof TopQuery), sortBy?: (keyof TopQuery)}[],
defaultOrderBy?: keyof TopQuery,
}
const tabs = ["table", "JSON"].map((t, i) => ({

View File

@@ -3,7 +3,7 @@ import { TopQuery } from "../../../types";
import { getComparator, stableSort } from "../../../components/Table/helpers";
import { TopQueryPanelProps } from "../TopQueryPanel/TopQueryPanel";
import classNames from "classnames";
import { ArrowDropDownIcon, CopyIcon, InfoOutlinedIcon, PlayCircleOutlineIcon } from "../../../components/Main/Icons";
import { ArrowDropDownIcon, CopyIcon, PlayCircleOutlineIcon } from "../../../components/Main/Icons";
import Button from "../../../components/Main/Button/Button";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import { Link } from "react-router-dom";
@@ -35,40 +35,26 @@ const TopQueryTable:FC<TopQueryPanelProps> = ({ rows, columns, defaultOrderBy })
<table className="vm-table">
<thead className="vm-table-header">
<tr className="vm-table__row vm-table__row_header">
{columns.map((col) => {
const sortKey = col.sortBy || col.key;
return (
<th
className="vm-table-cell vm-table-cell_header vm-table-cell_sort"
onClick={createSortHandler(sortKey)}
key={col.key}
>
<div className="vm-table-cell__content">
{col.title || col.key}
{col.tooltip && (
<Tooltip
placement="top-center"
title={col.tooltip}
>
<span className="vm-top-queries-table__info-icon">
<InfoOutlinedIcon/>
</span>
</Tooltip>
)}
<div
className={classNames({
"vm-table__sort-icon": true,
"vm-table__sort-icon_active": orderBy === sortKey,
"vm-table__sort-icon_desc": orderDir === "desc" && orderBy === sortKey
})}
>
<ArrowDropDownIcon/>
</div>
{columns.map((col) => (
<th
className="vm-table-cell vm-table-cell_header vm-table-cell_sort"
onClick={createSortHandler(col.sortBy || col.key)}
key={col.key}
>
<div className="vm-table-cell__content">
{col.title || col.key}
<div
className={classNames({
"vm-table__sort-icon": true,
"vm-table__sort-icon_active": orderBy === col.key,
"vm-table__sort-icon_desc": orderDir === "desc" && orderBy === col.key
})}
>
<ArrowDropDownIcon/>
</div>
</th>
);
})}
</div>
</th>
))}
<th className="vm-table-cell vm-table-cell_header"/> {/* empty cell for actions */}
</tr>
</thead>
@@ -83,7 +69,7 @@ const TopQueryTable:FC<TopQueryPanelProps> = ({ rows, columns, defaultOrderBy })
className="vm-table-cell"
key={col.key}
>
{col.format?.(row) ?? row[col.key] ?? "-"}
{row[col.key] || "-"}
</td>
))}
<td className="vm-table-cell vm-table-cell_no-padding">

View File

@@ -34,7 +34,7 @@ const processResponse = (data: TopQueriesData) => {
target.forEach(t => {
const timeRange = getDurationFromMilliseconds(t.timeRangeSeconds*1000);
t.url = getQueryUrl(t, timeRange);
t.timeRange = timeRange || "instant";
t.timeRange = timeRange;
});
});

View File

@@ -1,77 +0,0 @@
import { useMemo } from "react";
import { TopQueryColumn } from "../TopQueryPanel/TopQueryPanel";
import { humanizeSeconds } from "../../../utils/time";
import { formatBytes } from "../../../utils/bytes";
type UseTopQueriesColumns = {
maxLifetime: string;
};
export const useTopQueriesColumns = ({ maxLifetime }: UseTopQueriesColumns) => {
return useMemo(() => {
const queryCol: TopQueryColumn = {
key: "query"
};
const timeRangeCol: TopQueryColumn = {
key: "timeRange",
sortBy: "timeRangeSeconds",
title: "range",
tooltip: "The time range between start and end of the query request. 'instant' means the query was executed at a single point in time without a time range"
};
const countCol: TopQueryColumn = {
key: "count",
tooltip: `The number of times the query was executed over the last ${maxLifetime}`,
};
const topBySumDuration: TopQueryColumn[] = [
queryCol,
{
key: "sumDurationSeconds",
title: "duration",
tooltip: `Cumulative time spent executing the query across all its invocations over the last ${maxLifetime}`,
format: (row) => humanizeSeconds(row.sumDurationSeconds)
},
timeRangeCol,
countCol,
];
const topByAvgDuration: TopQueryColumn[] = [
queryCol,
{
key: "avgDurationSeconds",
title: "duration",
tooltip: `Average time spent executing the query over the last ${maxLifetime}`,
format: (row) => humanizeSeconds(row.avgDurationSeconds)
},
timeRangeCol,
countCol,
];
const topByCount: TopQueryColumn[] = [
queryCol,
timeRangeCol,
countCol,
];
const topByAvgMemoryUsage: TopQueryColumn[] = [
queryCol,
{
key: "avgMemoryBytes",
title: "memory",
tooltip: `Average memory used during query execution over the last ${maxLifetime}`,
format: (row) => formatBytes(row.avgMemoryBytes)
},
timeRangeCol,
countCol,
];
return {
topBySumDuration,
topByAvgDuration,
topByCount,
topByAvgMemoryUsage,
};
}, [maxLifetime]);
};

View File

@@ -15,7 +15,6 @@ import "./style.scss";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import classNames from "classnames";
import useStateSearchParams from "../../hooks/useStateSearchParams";
import { useTopQueriesColumns } from "./hooks/useTopQueriesColumns";
const exampleDuration = "30ms, 15s, 3d4h, 1y2w";
@@ -24,7 +23,6 @@ const TopQueries: FC = () => {
const [topN, setTopN] = useStateSearchParams(10, "topN");
const [maxLifetime, setMaxLifetime] = useStateSearchParams("10m", "maxLifetime");
const columns = useTopQueriesColumns({ maxLifetime });
const { data, error, loading, fetch } = useFetchTopQueries({ topN, maxLifetime });
@@ -147,33 +145,52 @@ const TopQueries: FC = () => {
{error && <Alert variant="error">{error}</Alert>}
{data && (
{data && (<>
<div className="vm-top-queries-panels">
<TopQueryPanel
title="Queries with most summary time to execute"
rows={data.topBySumDuration}
columns={columns.topBySumDuration}
defaultOrderBy="sumDurationSeconds"
title={"Queries with most summary time to execute"}
columns={[
{ key: "query" },
{ key: "sumDurationSeconds", title: "sum duration, sec" },
{ key: "timeRange", sortBy: "timeRangeSeconds", title: "query time interval" },
{ key: "count" }
]}
defaultOrderBy={"sumDurationSeconds"}
/>
<TopQueryPanel
title="Most heavy queries"
rows={data.topByAvgDuration}
columns={columns.topByAvgDuration}
defaultOrderBy="avgDurationSeconds"
title={"Most heavy queries"}
columns={[
{ key: "query" },
{ key: "avgDurationSeconds", title: "avg duration, sec" },
{ key: "timeRange", sortBy: "timeRangeSeconds", title: "query time interval" },
{ key: "count" }
]}
defaultOrderBy={"avgDurationSeconds"}
/>
<TopQueryPanel
title="Most frequently executed queries"
rows={data.topByCount}
columns={columns.topByCount}
title={"Most frequently executed queries"}
columns={[
{ key: "query" },
{ key: "timeRange", sortBy: "timeRangeSeconds", title: "query time interval" },
{ key: "count" }
]}
/>
<TopQueryPanel
title="Queries with most memory to execute"
rows={data.topByAvgMemoryUsage}
columns={columns.topByAvgMemoryUsage}
defaultOrderBy="avgMemoryBytes"
title={"Queries with most memory to execute"}
columns={[
{ key: "query" },
{ key: "avgMemoryBytes", title: "avg memory usage, bytes" },
{ key: "timeRange", sortBy: "timeRangeSeconds", title: "query time interval" },
{ key: "count" }
]}
defaultOrderBy={"avgMemoryBytes"}
/>
</div>
)}
</>)}
</div>
);
};

View File

@@ -1,19 +1,5 @@
@use "src/styles/variables" as *;
.vm-top-queries-table {
&__info-icon {
display: inline-flex;
align-items: center;
color: $color-text-secondary;
margin-left: 4px;
svg {
width: 14px;
height: 14px;
}
}
}
.vm-top-queries {
display: grid;
align-items: flex-start;

View File

@@ -1,47 +0,0 @@
import { describe, expect, it } from "vitest";
import { formatBytes } from "./bytes";
describe("formatBytes", () => {
it("returns null for invalid values", () => {
expect(formatBytes(-1)).toBeNull();
expect(formatBytes(Number.NaN)).toBeNull();
expect(formatBytes(Number.POSITIVE_INFINITY)).toBeNull();
expect(formatBytes(Number.NEGATIVE_INFINITY)).toBeNull();
});
it("formats zero bytes", () => {
expect(formatBytes(0)).toBe("0 B");
});
it("formats bytes", () => {
expect(formatBytes(0.5)).toBe("0.5 B");
expect(formatBytes(1)).toBe("1 B");
expect(formatBytes(512)).toBe("512 B");
expect(formatBytes(1023)).toBe("1023 B");
});
it("formats kilobytes", () => {
expect(formatBytes(1024)).toBe("1 KB");
expect(formatBytes(1536)).toBe("1.5 KB");
});
it("formats megabytes", () => {
expect(formatBytes(1024 ** 2)).toBe("1 MB");
expect(formatBytes(2.5 * 1024 ** 2)).toBe("2.5 MB");
});
it("formats gigabytes, terabytes and petabytes", () => {
expect(formatBytes(1024 ** 3)).toBe("1 GB");
expect(formatBytes(1024 ** 4)).toBe("1 TB");
expect(formatBytes(1024 ** 5)).toBe("1 PB");
});
it("caps values above PB to PB unit", () => {
expect(formatBytes(1024 ** 6)).toBe("1024 PB");
});
it("rounds to two decimals", () => {
expect(formatBytes(1234)).toBe("1.21 KB");
expect(formatBytes(1234567)).toBe("1.18 MB");
});
});

View File

@@ -1,14 +0,0 @@
const LOG_1024 = Math.log(1024);
const UNITS = ["B", "KB", "MB", "GB", "TB", "PB"] as const;
export const formatBytes = (bytes: number): string | null => {
if (!Number.isFinite(bytes) || bytes < 0) return null;
if (bytes === 0) return "0 B";
const unitIndex = Math.min(
Math.max(Math.floor(Math.log(bytes) / LOG_1024), 0),
UNITS.length - 1
);
return `${parseFloat((bytes / 1024 ** unitIndex).toFixed(2))} ${UNITS[unitIndex]}`;
};

View File

@@ -1,18 +1,47 @@
import { ArrayRGB } from "../types";
export const baseContrastColors = [
"#e6194b", // red
"#4363d8", // blue
"#3cb44b", // green
"#911eb4", // purple
"#f58231", // orange
"#f032e6", // magenta
"#c8a200", // dark yellow
"#a65628", // brown
"#42d4f4", // cyan
"#a9a9a9", // gray
"#e54040",
"#32a9dc",
"#2ee329",
"#7126a1",
"#e38f0f",
"#3d811a",
"#ffea00",
"#2d2d2d",
"#da42a6",
"#a44e0c",
];
export const hexToRGB = (hex: string): string => {
if (hex.length != 7) return "0, 0, 0";
const r = parseInt(hex.slice(1, 3), 16);
const g = parseInt(hex.slice(3, 5), 16);
const b = parseInt(hex.slice(5, 7), 16);
return `${r}, ${g}, ${b}`;
};
export const getColorFromString = (text: string): string => {
const SEED = 16777215;
const FACTOR = 49979693;
let b = 1;
let d = 0;
let f = 1;
if (text.length > 0) {
for (let i = 0; i < text.length; i++) {
text[i].charCodeAt(0) > d && (d = text[i].charCodeAt(0));
f = parseInt(String(SEED / d));
b = (b + text[i].charCodeAt(0) * f * FACTOR) % SEED;
}
}
let hex = ((b * text.length) % SEED).toString(16);
hex = hex.padEnd(6, hex);
return `#${hex}`;
};
export const getContrastColor = (value: string) => {
let hex = value.replace("#", "").trim();
@@ -41,109 +70,3 @@ export const generateGradient = (start: ArrayRGB, end: ArrayRGB, steps: number)
}
return gradient.map(c => `rgb(${c})`);
};
const clamp = (n: number, min: number, max: number) => Math.min(max, Math.max(min, n));
const hexToRgb = (hex: string) => {
let value = hex.replace("#", "").trim();
if (value.length === 3) {
value = value.split("").map((c) => c + c).join("");
}
if (!/^[0-9a-fA-F]{6}$/.test(value)) {
throw new Error("Invalid HEX color.");
}
return {
r: parseInt(value.slice(0, 2), 16),
g: parseInt(value.slice(2, 4), 16),
b: parseInt(value.slice(4, 6), 16),
};
};
const rgbToHex = (r: number, g: number, b: number) =>
`#${[r, g, b].map((v) => clamp(Math.round(v), 0, 255).toString(16).padStart(2, "0")).join("")}`;
// Convert 0-255 r/g/b channels to HSL with h in degrees [0, 360) and
// s/l as percentages [0, 100]. Achromatic input yields h = 0, s = 0.
const rgbToHsl = (r: number, g: number, b: number) => {
  const rn = r / 255;
  const gn = g / 255;
  const bn = b / 255;
  const max = Math.max(rn, gn, bn);
  const min = Math.min(rn, gn, bn);
  const l = (max + min) / 2;
  const d = max - min;
  let h = 0;
  let s = 0;
  if (d !== 0) {
    s = d / (1 - Math.abs(2 * l - 1));
    // Hue sector depends on which channel is dominant.
    if (max === rn) h = ((gn - bn) / d) % 6;
    else if (max === gn) h = (bn - rn) / d + 2;
    else h = (rn - gn) / d + 4;
    h *= 60;
    if (h < 0) h += 360;
  }
  return { h, s: s * 100, l: l * 100 };
};
// Convert HSL (h in degrees, s/l as percentages) back to 0-255 r/g/b
// channels. Channel values are returned unrounded.
const hslToRgb = (h: number, s: number, l: number) => {
  const sat = s / 100;
  const lig = l / 100;
  const c = (1 - Math.abs(2 * lig - 1)) * sat; // chroma
  const x = c * (1 - Math.abs((h / 60) % 2 - 1)); // intermediate channel
  const m = lig - c / 2; // lightness offset
  // Pick the (r, g, b) ordering for the 60-degree hue sector.
  let rgb1: [number, number, number];
  if (h < 60) rgb1 = [c, x, 0];
  else if (h < 120) rgb1 = [x, c, 0];
  else if (h < 180) rgb1 = [0, c, x];
  else if (h < 240) rgb1 = [0, x, c];
  else if (h < 300) rgb1 = [x, 0, c];
  else rgb1 = [c, 0, x];
  const [r1, g1, b1] = rgb1;
  return {
    r: (r1 + m) * 255,
    g: (g1 + m) * 255,
    b: (b1 + m) * 255,
  };
};
// Produce a deterministic variation of a base hex color by shifting its
// saturation/lightness according to the variant index (cycling through
// four fixed adjustments; variant 0 keeps the color unchanged up to the
// clamping of s into [35, 85] and l into [35, 70]).
const varyColor = (hex: string, variant: number) => {
  const adjustments = [
    { ds: 0, dl: 0 },
    { ds: -20, dl: -16 },
    { ds: -16, dl: +16 },
    { ds: +14, dl: -20 },
  ];
  const { r, g, b } = hexToRgb(hex);
  const { h, s, l } = rgbToHsl(r, g, b);
  const { ds, dl } = adjustments[variant % adjustments.length];
  const rgb = hslToRgb(h, clamp(s + ds, 35, 85), clamp(l + dl, 35, 70));
  return rgbToHex(rgb.r, rgb.g, rgb.b);
};
// Map a series index to a color: walk the base palette, and once it is
// exhausted start a new cycle that both rotates the palette by one and
// applies the next varyColor variant, keeping colors distinguishable.
export const getSeriesColor = (index: number) => {
  const total = baseContrastColors.length;
  const cycle = Math.floor(index / total);
  const base = baseContrastColors[(index % total + cycle) % total];
  return varyColor(base, cycle);
};

View File

@@ -2,7 +2,7 @@ import { MetricBase, MetricResult } from "../../api/types";
import uPlot, { Series as uPlotSeries } from "uplot";
import { getNameForMetric, promValueToNumber } from "../metric";
import { HideSeriesArgs, LegendItemType, SeriesItem } from "../../types";
import { getSeriesColor } from "../color";
import { baseContrastColors, getColorFromString } from "../color";
import { getMathStats } from "../math";
import { formatPrettyNumber } from "./helpers";
import { drawPoints } from "./scatter";
@@ -17,10 +17,11 @@ export const extractFields = (metric: MetricBase["metric"]): string => {
export const getSeriesItemContext = (data: MetricResult[], hideSeries: string[], alias: string[], showPoints?: boolean, isRawQuery?: boolean) => {
const colorState: {[key: string]: string} = {};
const maxColors = Math.min(data.length, baseContrastColors.length);
for (let i = 0; i < data.length; i++) {
for (let i = 0; i < maxColors; i++) {
const label = getNameForMetric(data[i], alias[data[i].group - 1]);
colorState[label] = getSeriesColor(i);
colorState[label] = baseContrastColors[i];
}
return (d: MetricResult): SeriesItem => {
@@ -31,7 +32,7 @@ export const getSeriesItemContext = (data: MetricResult[], hideSeries: string[],
label,
hasAlias: Boolean(aliasValue),
width: 1.4,
stroke: colorState[label],
stroke: colorState[label] || getColorFromString(label),
points: getPointsSeries(showPoints, isRawQuery),
spanGaps: false,
freeFormFields: d.metric,

View File

@@ -15,12 +15,13 @@
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true,
"module": "esnext",
"moduleResolution": "bundler",
"moduleResolution": "node",
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
"jsxImportSource": "preact",
"downlevelIteration": true,
"noUnusedLocals": true,
"paths": {
"react": ["./node_modules/preact/compat/"],
@@ -31,8 +32,5 @@
},
"include": [
"src"
],
"exclude": [
"scripts/**/*.ts"
]
}

View File

@@ -2,8 +2,6 @@ package apptest
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
@@ -14,10 +12,6 @@ import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
otlppb "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
"github.com/golang/snappy"
)
// Client is used for interacting with the apps over the network.
@@ -39,41 +33,37 @@ func (c *Client) CloseConnections() {
c.httpCli.CloseIdleConnections()
}
// Get sends an HTTP GET request, returns
// Get sends a HTTP GET request, returns
// the response body and status code to the caller.
func (c *Client) Get(t *testing.T, url string, headers http.Header) (string, int) {
func (c *Client) Get(t *testing.T, url string) (string, int) {
t.Helper()
return c.do(t, http.MethodGet, url, nil, headers)
return c.do(t, http.MethodGet, url, "", nil)
}
// Post sends an HTTP POST request, returns
// Post sends a HTTP POST request, returns
// the response body and status code to the caller.
func (c *Client) Post(t *testing.T, url string, data []byte, headers http.Header) (string, int) {
func (c *Client) Post(t *testing.T, url, contentType string, data []byte) (string, int) {
t.Helper()
return c.do(t, http.MethodPost, url, data, headers)
return c.do(t, http.MethodPost, url, contentType, data)
}
// PostForm sends an HTTP POST request containing the POST-form data with attached getHeaders, returns
// PostForm sends a HTTP POST request containing the POST-form data, returns
// the response body and status code to the caller.
func (c *Client) PostForm(t *testing.T, url string, data url.Values, headers http.Header) (string, int) {
func (c *Client) PostForm(t *testing.T, url string, data url.Values) (string, int) {
t.Helper()
if headers == nil {
headers = make(http.Header)
}
headers.Set("Content-Type", "application/x-www-form-urlencoded")
return c.Post(t, url, []byte(data.Encode()), headers)
return c.Post(t, url, "application/x-www-form-urlencoded", []byte(data.Encode()))
}
// Delete sends an HTTP DELETE request and returns the response body and status code
// Delete sends a HTTP DELETE request and returns the response body and status code
// to the caller.
func (c *Client) Delete(t *testing.T, url string) (string, int) {
t.Helper()
return c.do(t, http.MethodDelete, url, nil, nil)
return c.do(t, http.MethodDelete, url, "", nil)
}
// do prepares an HTTP request, sends it to the server, receives the response
// do prepares a HTTP request, sends it to the server, receives the response
// from the server, returns the response body and status code to the caller.
func (c *Client) do(t *testing.T, method, url string, data []byte, headers http.Header) (string, int) {
func (c *Client) do(t *testing.T, method, url, contentType string, data []byte) (string, int) {
t.Helper()
req, err := http.NewRequest(method, url, bytes.NewReader(data))
@@ -81,7 +71,9 @@ func (c *Client) do(t *testing.T, method, url string, data []byte, headers http.
t.Fatalf("could not create a HTTP request: %v", err)
}
req.Header = headers
if len(contentType) > 0 {
req.Header.Add("Content-Type", contentType)
}
res, err := c.httpCli.Do(req)
if err != nil {
t.Fatalf("could not send HTTP request: %v", err)
@@ -111,35 +103,6 @@ func (c *Client) Write(t *testing.T, address string, data []string) {
}
}
// getClusterPath returns path in cluster's URL format.
// Based on QueryOpts, it will either put tenant ID into URL
// or will skip it if tenant is set via HTTP headers.
func getClusterPath(addr, prefix, suffix string, o QueryOpts) string {
if o.Tenant != "" {
// QueryOpts.Tenant has priority over headers
return tenantViaURL(addr, prefix, o.Tenant, suffix)
}
h := o.getHeaders()
if h.Get("AccountID") != "" || h.Get("ProjectID") != "" {
return tenantViaHeaders(addr, prefix, suffix)
}
// tenant is missing in QueryOpts and in HTTP headers. Falling back to default 0:0 tenant in URL
return tenantViaURL(addr, prefix, "0:0", suffix)
}
// tenantViaURL returns path in cluster's URL format with tenant specified in URL
func tenantViaURL(addr, prefix, tenant, suffix string) string {
return fmt.Sprintf("http://%s/%s/%s/%s", addr, prefix, tenant, suffix)
}
// tenantViaHeaders returns path in cluster's URL format where tenant is omitted in URL
// Only supported if -enableMultitenancyViaHeaders is specified
func tenantViaHeaders(addr, prefix, suffix string) string {
return fmt.Sprintf("http://%s/%s/%s", addr, prefix, suffix)
}
// readAllAndClose reads everything from the response body and then closes it.
func readAllAndClose(t *testing.T, responseBody io.ReadCloser) string {
t.Helper()
@@ -152,34 +115,27 @@ func readAllAndClose(t *testing.T, responseBody io.ReadCloser) string {
return string(b)
}
// metricsClient is used to retrieve the app's metrics.
// ServesMetrics is used to retrieve the app's metrics.
//
// This type is expected to be embedded by the apps that serve metrics.
type metricsClient struct {
metricsCli *Client
url string
}
func newMetricsClient(cli *Client, addr string) *metricsClient {
return &metricsClient{
metricsCli: cli,
url: fmt.Sprintf("http://%s/metrics", addr),
}
type ServesMetrics struct {
metricsURL string
cli *Client
}
// GetIntMetric retrieves the value of a metric served by an app at /metrics URL.
// The value is then converted to int.
func (c *metricsClient) GetIntMetric(t *testing.T, metricName string) int {
func (app *ServesMetrics) GetIntMetric(t *testing.T, metricName string) int {
t.Helper()
return int(c.GetMetric(t, metricName))
return int(app.GetMetric(t, metricName))
}
// GetMetric retrieves the value of a metric served by an app at /metrics URL.
func (c *metricsClient) GetMetric(t *testing.T, metricName string) float64 {
func (app *ServesMetrics) GetMetric(t *testing.T, metricName string) float64 {
t.Helper()
metrics, statusCode := c.metricsCli.Get(t, c.url, nil)
metrics, statusCode := app.cli.Get(t, app.metricsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -200,12 +156,12 @@ func (c *metricsClient) GetMetric(t *testing.T, metricName string) float64 {
// GetMetricsByPrefix retrieves the values of all metrics that start with given
// prefix.
func (c *metricsClient) GetMetricsByPrefix(t *testing.T, prefix string) []float64 {
func (app *ServesMetrics) GetMetricsByPrefix(t *testing.T, prefix string) []float64 {
t.Helper()
values := []float64{}
metrics, statusCode := c.metricsCli.Get(t, c.url, nil)
metrics, statusCode := app.cli.Get(t, app.metricsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -229,12 +185,12 @@ func (c *metricsClient) GetMetricsByPrefix(t *testing.T, prefix string) []float6
return values
}
func (c *metricsClient) GetMetricsByRegexp(t *testing.T, re *regexp.Regexp) []float64 {
func (app *ServesMetrics) GetMetricsByRegexp(t *testing.T, re *regexp.Regexp) []float64 {
t.Helper()
values := []float64{}
metrics, statusCode := c.metricsCli.Get(t, c.url, nil)
metrics, statusCode := app.cli.Get(t, app.metricsURL)
if statusCode != http.StatusOK {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
}
@@ -257,756 +213,3 @@ func (c *metricsClient) GetMetricsByRegexp(t *testing.T, re *regexp.Regexp) []fl
}
return values
}
// rpcRowsSentTotal sums all vminsert `vm_rpc_rows_sent_total` metric values
// (there is one per vmstorage) and returns the total as an integer.
func (c *metricsClient) rpcRowsSentTotal(t *testing.T) int {
	var sum float64
	for _, value := range c.GetMetricsByPrefix(t, "vm_rpc_rows_sent_total") {
		sum += value
	}
	return int(sum)
}
// vmselectClient groups the helpers used by tests for querying a vmselect
// instance.
type vmselectClient struct {
	// vmselectCli is the underlying HTTP client used for all requests.
	vmselectCli *Client
	// url builds the request URL for the given operation (e.g. "select",
	// "delete") and path; opts presumably carries tenant settings — verify
	// against the constructor that assigns this field.
	url func(op, path string, opts QueryOpts) string
	// metricNamesStatsResetURL is the endpoint used to reset metric names
	// usage stats.
	metricNamesStatsResetURL string
	// tenantsURL is the endpoint used to list tenants.
	tenantsURL string
}
// PrometheusAPIV1Export exports raw samples in JSON line format via the
// /prometheus/api/v1/export endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1export
func (c *vmselectClient) PrometheusAPIV1Export(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("match[]", query)
	params.Add("format", "promapi")
	exportURL := c.url("select", "prometheus/api/v1/export", opts)
	body, _ := c.vmselectCli.PostForm(t, exportURL, params, opts.Headers)
	return NewPrometheusAPIV1QueryResponse(t, body)
}
// PrometheusAPIV1ExportNative exports raw samples in native binary format
// via the /prometheus/api/v1/export/native endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1exportnative
func (c *vmselectClient) PrometheusAPIV1ExportNative(t *testing.T, query string, opts QueryOpts) []byte {
	t.Helper()
	params := opts.asURLValues()
	params.Add("match[]", query)
	params.Add("format", "promapi")
	exportURL := c.url("select", "prometheus/api/v1/export/native", opts)
	body, _ := c.vmselectCli.PostForm(t, exportURL, params, opts.Headers)
	return []byte(body)
}
// PrometheusAPIV1Query runs a PromQL/MetricsQL instant query via the
// /prometheus/api/v1/query endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1query
func (c *vmselectClient) PrometheusAPIV1Query(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("query", query)
	queryURL := c.url("select", "prometheus/api/v1/query", opts)
	body, _ := c.vmselectCli.PostForm(t, queryURL, params, opts.Headers)
	return NewPrometheusAPIV1QueryResponse(t, body)
}
// PrometheusAPIV1QueryRange runs a PromQL/MetricsQL range query via the
// /prometheus/api/v1/query_range endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1query_range
func (c *vmselectClient) PrometheusAPIV1QueryRange(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("query", query)
	rangeURL := c.url("select", "prometheus/api/v1/query_range", opts)
	body, _ := c.vmselectCli.PostForm(t, rangeURL, params, opts.Headers)
	return NewPrometheusAPIV1QueryResponse(t, body)
}
// PrometheusAPIV1Series lists the time series matching the query via the
// /prometheus/api/v1/series endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1series
func (c *vmselectClient) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("match[]", matchQuery)
	seriesURL := c.url("select", "prometheus/api/v1/series", opts)
	body, _ := c.vmselectCli.PostForm(t, seriesURL, params, opts.Headers)
	return NewPrometheusAPIV1SeriesResponse(t, body)
}
// PrometheusAPIV1SeriesCount returns the total number of time series via the
// /prometheus/api/v1/series/count endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1series
func (c *vmselectClient) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *PrometheusAPIV1SeriesCountResponse {
	t.Helper()
	countURL := c.url("select", "prometheus/api/v1/series/count", opts)
	body, _ := c.vmselectCli.PostForm(t, countURL, opts.asURLValues(), opts.Headers)
	return NewPrometheusAPIV1SeriesCountResponse(t, body)
}
// PrometheusAPIV1Labels returns the label names of the time series matching
// the query via the /prometheus/api/v1/labels endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1labels
func (c *vmselectClient) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1LabelsResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("match[]", matchQuery)
	labelsURL := c.url("select", "prometheus/api/v1/labels", opts)
	body, _ := c.vmselectCli.PostForm(t, labelsURL, params, opts.Headers)
	return NewPrometheusAPIV1LabelsResponse(t, body)
}
// PrometheusAPIV1LabelValues returns the values of labelName across the time
// series matching the query via the /prometheus/api/v1/label/.../values
// endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1labelvalues
func (c *vmselectClient) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQuery string, opts QueryOpts) *PrometheusAPIV1LabelValuesResponse {
	t.Helper()
	valuesURL := c.url("select", fmt.Sprintf("prometheus/api/v1/label/%s/values", labelName), opts)
	params := opts.asURLValues()
	params.Add("match[]", matchQuery)
	body, _ := c.vmselectCli.PostForm(t, valuesURL, params, opts.Headers)
	return NewPrometheusAPIV1LabelValuesResponse(t, body)
}
// PrometheusAPIV1Metadata fetches metadata for the given metric via the
// /prometheus/api/v1/metadata endpoint, limiting the result to limit entries.
func (c *vmselectClient) PrometheusAPIV1Metadata(t *testing.T, metric string, limit int, opts QueryOpts) *PrometheusAPIV1Metadata {
	t.Helper()
	params := opts.asURLValues()
	params.Add("metric", metric)
	params.Add("limit", strconv.Itoa(limit))
	metadataURL := c.url("select", "prometheus/api/v1/metadata", opts)
	body, _ := c.vmselectCli.PostForm(t, metadataURL, params, opts.Headers)
	return NewPrometheusAPIV1Metadata(t, body)
}
// PrometheusAPIV1AdminTSDBDeleteSeries deletes the series matching the query
// via /prometheus/api/v1/admin/tsdb/delete_series and fails the test unless
// the server responds with 204.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1admintsdbdelete_series
func (c *vmselectClient) PrometheusAPIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string, opts QueryOpts) {
	t.Helper()
	params := opts.asURLValues()
	params.Add("match[]", matchQuery)
	deleteURL := c.url("delete", "prometheus/api/v1/admin/tsdb/delete_series", opts)
	body, statusCode := c.vmselectCli.PostForm(t, deleteURL, params, opts.Headers)
	if statusCode != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, body)
	}
}
// PrometheusAPIV1StatusMetricNamesStats queries
// /prometheus/api/v1/status/metric_names_stats and returns the decoded
// metric usage stats for the given params.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#track-ingested-metrics-usage
func (c *vmselectClient) PrometheusAPIV1StatusMetricNamesStats(t *testing.T, limit, le, matchPattern string, opts QueryOpts) MetricNamesStatsResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("limit", limit)
	params.Add("le", le)
	params.Add("match_pattern", matchPattern)
	statsURL := c.url("select", "prometheus/api/v1/status/metric_names_stats", opts)
	body, statusCode := c.vmselectCli.PostForm(t, statsURL, params, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	var stats MetricNamesStatsResponse
	if err := json.Unmarshal([]byte(body), &stats); err != nil {
		t.Fatalf("could not unmarshal metric names stats response data:\n%s\n err: %v", body, err)
	}
	return stats
}
// PrometheusAPIV1StatusTSDB fetches the TSDB status for series matching the
// query on the given date via the /prometheus/api/v1/status/tsdb endpoint.
// Empty matchQuery/date/topN arguments are omitted from the request.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#tsdb-stats
func (c *vmselectClient) PrometheusAPIV1StatusTSDB(t *testing.T, matchQuery string, date string, topN string, opts QueryOpts) TSDBStatusResponse {
	t.Helper()
	params := opts.asURLValues()
	// Optional args are added only when non-empty.
	for _, arg := range []struct{ name, value string }{
		{"match[]", matchQuery},
		{"topN", topN},
		{"date", date},
	} {
		if arg.value != "" {
			params.Add(arg.name, arg.value)
		}
	}
	tsdbURL := c.url("select", "prometheus/api/v1/status/tsdb", opts)
	body, statusCode := c.vmselectCli.PostForm(t, tsdbURL, params, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	var status TSDBStatusResponse
	if err := json.Unmarshal([]byte(body), &status); err != nil {
		t.Fatalf("could not unmarshal tsdb status response data:\n%s\n err: %v", body, err)
	}
	// Sort for deterministic comparisons in tests.
	status.Sort()
	return status
}
// GraphiteMetricsIndex fetches the list of all metrics via the
// /graphite/metrics/index.json endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#metrics-api
func (c *vmselectClient) GraphiteMetricsIndex(t *testing.T, opts QueryOpts) GraphiteMetricsIndexResponse {
	t.Helper()
	indexURL := c.url("select", "graphite/metrics/index.json", opts)
	body, statusCode := c.vmselectCli.Get(t, indexURL, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	var index GraphiteMetricsIndexResponse
	if err := json.Unmarshal([]byte(body), &index); err != nil {
		t.Fatalf("could not unmarshal metrics index response data:\n%s\n err: %v", body, err)
	}
	return index
}
// GraphiteMetricsFind looks up metrics under the given path via the
// /graphite/metrics/find endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#metrics-api
// and https://graphite.readthedocs.io/en/latest/metrics_api.html#metrics-find
func (c *vmselectClient) GraphiteMetricsFind(t *testing.T, query string, opts QueryOpts) GraphiteMetricsFindResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("query", query)
	findURL := c.url("select", "graphite/metrics/find", opts)
	body, statusCode := c.vmselectCli.PostForm(t, findURL, params, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	var found GraphiteMetricsFindResponse
	if err := json.Unmarshal([]byte(body), &found); err != nil {
		t.Fatalf("could not unmarshal response data:\n%s\n err: %v", body, err)
	}
	return found
}
// GraphiteMetricsExpand expands the given query into matching metric paths
// via the /graphite/metrics/expand endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#metrics-api
// and https://graphite.readthedocs.io/en/latest/metrics_api.html#metrics-expand
func (c *vmselectClient) GraphiteMetricsExpand(t *testing.T, query string, opts QueryOpts) GraphiteMetricsExpandResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("query", query)
	expandURL := c.url("select", "graphite/metrics/expand", opts)
	body, statusCode := c.vmselectCli.PostForm(t, expandURL, params, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	var expanded GraphiteMetricsExpandResponse
	if err := json.Unmarshal([]byte(body), &expanded); err != nil {
		t.Fatalf("could not unmarshal response data:\n%s\n err: %v", body, err)
	}
	return expanded
}
// GraphiteRender fetches raw metric data for the target via the
// /graphite/render endpoint, requesting JSON output.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#render-api
// and https://graphite-api.readthedocs.io/en/latest/api.html#the-render-api-render
func (c *vmselectClient) GraphiteRender(t *testing.T, target string, opts QueryOpts) GraphiteRenderResponse {
	t.Helper()
	params := opts.asURLValues()
	params.Add("format", "json")
	params.Add("target", target)
	renderURL := c.url("select", "graphite/render", opts)
	body, statusCode := c.vmselectCli.PostForm(t, renderURL, params, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	var rendered GraphiteRenderResponse
	if err := json.Unmarshal([]byte(body), &rendered); err != nil {
		t.Fatalf("could not unmarshal response data:\n%s\n err: %v", body, err)
	}
	return rendered
}
// GraphiteTagsTagSeries registers Graphite tags for a single time series via
// the /graphite/tags/tagSeries endpoint. The server is expected to respond
// with 501 Not Implemented, and the helper asserts exactly that.
func (c *vmselectClient) GraphiteTagsTagSeries(t *testing.T, record string, opts QueryOpts) {
	t.Helper()
	params := opts.asURLValues()
	params.Add("path", record)
	tagURL := c.url("select", "graphite/tags/tagSeries", opts)
	_, statusCode := c.vmselectCli.PostForm(t, tagURL, params, opts.Headers)
	if statusCode != http.StatusNotImplemented {
		t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNotImplemented)
	}
}
// GraphiteTagsTagMultiSeries registers Graphite tags for multiple time
// series via the /graphite/tags/tagMultiSeries endpoint. The server is
// expected to respond with 501 Not Implemented, and the helper asserts
// exactly that.
func (c *vmselectClient) GraphiteTagsTagMultiSeries(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	params := opts.asURLValues()
	for _, record := range records {
		params.Add("path", record)
	}
	tagURL := c.url("select", "graphite/tags/tagMultiSeries", opts)
	_, statusCode := c.vmselectCli.PostForm(t, tagURL, params, opts.Headers)
	if statusCode != http.StatusNotImplemented {
		t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNotImplemented)
	}
}
// PrometheusAPIV1AdminStatusMetricNamesStatsReset resets the metric name
// usage stats via the
// /prometheus/api/v1/admin/status/metric_names_stats/reset endpoint and
// fails the test unless the server responds with 204.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#track-ingested-metrics-usage
func (c *vmselectClient) PrometheusAPIV1AdminStatusMetricNamesStatsReset(t *testing.T, opts QueryOpts) {
	t.Helper()
	body, statusCode := c.vmselectCli.PostForm(t, c.metricNamesStatsResetURL, opts.asURLValues(), opts.Headers)
	if statusCode != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, body)
	}
}
// APIV1AdminTenants lists the known tenants via the /admin/tenants endpoint.
func (c *vmselectClient) APIV1AdminTenants(t *testing.T, opts QueryOpts) *AdminTenantsResponse {
	t.Helper()
	body, statusCode := c.vmselectCli.Get(t, c.tenantsURL, opts.Headers)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, body)
	}
	tenants := &AdminTenantsResponse{}
	if err := json.Unmarshal([]byte(body), tenants); err != nil {
		t.Fatalf("could not unmarshal tenants response data:\n%s\n err: %v", body, err)
	}
	return tenants
}
// vminsertClient groups the helpers used by tests for ingesting data into a
// vminsert instance.
type vminsertClient struct {
	// vminsertCli is the underlying HTTP client used for all requests.
	vminsertCli *Client
	// url builds the request URL for the given operation (e.g. "insert")
	// and path; opts presumably carries tenant settings — verify against
	// the constructor that assigns this field.
	url func(op, path string, opts QueryOpts) string
	// openTSDBURL builds the URL used for OpenTSDB ingestion requests.
	openTSDBURL func(op, path string, opts QueryOpts) string
	// graphiteListenAddr is the address of the Graphite TCP listener.
	graphiteListenAddr string
	// sendBlocking runs send and presumably blocks until numRecordsToSend
	// records are observed as ingested — confirm against the implementation.
	sendBlocking func(t *testing.T, numRecordsToSend int, send func())
}
// PrometheusAPIV1ImportCSV ingests records in CSV format for the given
// tenant via the prometheus/api/v1/import/csv vminsert endpoint and fails
// the test unless the server responds with 204.
//
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
func (c *vminsertClient) PrometheusAPIV1ImportCSV(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	importURL := c.url("insert", "prometheus/api/v1/import/csv", opts)
	if qs := opts.asURLValues().Encode(); qs != "" {
		importURL += "?" + qs
	}
	payload := []byte(strings.Join(records, "\n"))
	headers := opts.getHeaders()
	headers.Set("Content-Type", "text/plain")
	c.sendBlocking(t, len(records), func() {
		_, statusCode := c.vminsertCli.Post(t, importURL, payload, headers)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// PrometheusAPIV1ImportNative ingests data in native binary format for the
// given tenant via the prometheus/api/v1/import/native vminsert endpoint and
// fails the test unless the server responds with 204.
//
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
func (c *vminsertClient) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts QueryOpts) {
	t.Helper()
	importURL := c.url("insert", "prometheus/api/v1/import/native", opts)
	if qs := opts.asURLValues().Encode(); qs != "" {
		importURL += "?" + qs
	}
	headers := opts.getHeaders()
	headers.Set("Content-Type", "text/plain")
	// The native payload is opaque here, so it counts as a single record.
	c.sendBlocking(t, 1, func() {
		_, statusCode := c.vminsertCli.Post(t, importURL, data, headers)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// PrometheusAPIV1Write ingests records in Prometheus remote-write format via
// the /prometheus/api/v1/write vminsert endpoint and fails the test unless
// the server responds with 204.
func (c *vminsertClient) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, opts QueryOpts) {
	t.Helper()
	writeURL := c.url("insert", "prometheus/api/v1/write", opts)
	payload := snappy.Encode(nil, wr.MarshalProtobuf(nil))
	// Metadata entries count as records only when metadata ingestion is on.
	recordsCount := len(wr.Timeseries)
	if prommetadata.IsEnabled() {
		recordsCount += len(wr.Metadata)
	}
	headers := opts.getHeaders()
	headers.Set("Content-Type", "application/x-protobuf")
	c.sendBlocking(t, recordsCount, func() {
		_, statusCode := c.vminsertCli.Post(t, writeURL, payload, headers)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// PrometheusAPIV1ImportPrometheus ingests records in Prometheus text
// exposition format for the given tenant via the
// /prometheus/api/v1/import/prometheus vminsert endpoint and fails the test
// unless the server responds with 204.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1importprometheus
func (c *vminsertClient) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	importURL := c.url("insert", "prometheus/api/v1/import/prometheus", opts)
	if qs := opts.asURLValues().Encode(); qs != "" {
		importURL += "?" + qs
	}
	payload := []byte(strings.Join(records, "\n"))
	// Count sample records and deduplicated metadata records separately:
	// "# HELP <name> ..." and "# TYPE <name> ..." lines for the same metric
	// name produce a single metadata record.
	sampleCount := 0
	metadataCount := 0
	seenMetadataNames := make(map[string]struct{})
	for _, record := range records {
		if !strings.HasPrefix(record, "# ") {
			sampleCount++
			continue
		}
		fields := strings.Split(record, " ")
		if len(fields) < 3 {
			t.Fatalf("BUG: unexpected metadata format=%q", record)
		}
		name := fields[2]
		if _, seen := seenMetadataNames[name]; seen {
			continue
		}
		seenMetadataNames[name] = struct{}{}
		metadataCount++
	}
	expectedRecords := sampleCount
	// Metadata records are ingested only when metadata support is enabled.
	if prommetadata.IsEnabled() {
		expectedRecords += metadataCount
	}
	headers := opts.getHeaders()
	headers.Set("Content-Type", "text/plain")
	c.sendBlocking(t, expectedRecords, func() {
		_, statusCode := c.vminsertCli.Post(t, importURL, payload, headers)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// InfluxWrite ingests records in Influx line format via the /influx/write
// endpoint and fails the test unless the server responds with 204.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#influxwrite
func (c *vminsertClient) InfluxWrite(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	writeURL := c.url("insert", "influx/write", opts)
	if qs := opts.asURLValues().Encode(); qs != "" {
		writeURL += "?" + qs
	}
	payload := []byte(strings.Join(records, "\n"))
	headers := opts.getHeaders()
	headers.Set("Content-Type", "text/plain")
	c.sendBlocking(t, len(records), func() {
		t.Helper()
		_, statusCode := c.vminsertCli.Post(t, writeURL, payload, headers)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// OpentelemetryV1Metrics ingests records in OpenTelemetry protocol format
// via the /opentelemetry/v1/metrics vminsert endpoint and fails the test
// unless the server responds with 200.
func (c *vminsertClient) OpentelemetryV1Metrics(t *testing.T, md otlppb.MetricsData, opts QueryOpts) {
	t.Helper()
	// Each metric counts as one record; when metadata ingestion is enabled,
	// every metadata entry attached to a metric counts as well.
	recordsCount := 0
	for _, rm := range md.ResourceMetrics {
		for _, sm := range rm.ScopeMetrics {
			recordsCount += len(sm.Metrics)
			for _, m := range sm.Metrics {
				if prommetadata.IsEnabled() {
					recordsCount += len(m.Metadata)
				}
			}
		}
	}
	metricsURL := c.url("insert", "opentelemetry/v1/metrics", opts)
	if qs := opts.asURLValues().Encode(); qs != "" {
		metricsURL += "?" + qs
	}
	payload := md.MarshalProtobuf(nil)
	headers := opts.getHeaders()
	headers.Set("Content-Type", "application/x-protobuf")
	c.sendBlocking(t, recordsCount, func() {
		_, statusCode := c.vminsertCli.Post(t, metricsURL, payload, headers)
		if statusCode != http.StatusOK {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
		}
	})
}
// OpenTSDBAPIPut is a test helper function that inserts a collection of
// records in OpenTSDB format for the given tenant by sending an HTTP POST
// request to /opentsdb/api/put vminsert endpoint.
//
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
func (c *vminsertClient) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	requestURL := c.openTSDBURL("insert", "opentsdb/api/put", opts)
	if qs := opts.asURLValues().Encode(); len(qs) > 0 {
		requestURL += "?" + qs
	}
	// The endpoint expects a JSON array, so wrap the comma-joined records
	// in brackets.
	body := []byte("[" + strings.Join(records, ",") + "]")
	hdrs := opts.getHeaders()
	hdrs.Set("Content-Type", "application/json")
	c.sendBlocking(t, len(records), func() {
		_, code := c.vminsertCli.Post(t, requestURL, body, hdrs)
		if code != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
		}
	})
}
// ZabbixConnectorHistory is a test helper function that inserts a
// collection of records in zabbixconnector format by sending a HTTP
// POST request to the /zabbixconnector/api/v1/history vminsert endpoint.
func (c *vminsertClient) ZabbixConnectorHistory(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	requestURL := c.url("insert", "zabbixconnector/api/v1/history", opts)
	if qs := opts.asURLValues().Encode(); len(qs) > 0 {
		requestURL += "?" + qs
	}
	// Records are newline-delimited JSON objects.
	body := []byte(strings.Join(records, "\n"))
	hdrs := opts.getHeaders()
	hdrs.Set("Content-Type", "application/json")
	c.sendBlocking(t, len(records), func() {
		_, code := c.vminsertCli.Post(t, requestURL, body, hdrs)
		if code != http.StatusOK {
			t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
		}
	})
}
// GraphiteWrite is a test helper function that sends a
// collection of records to graphiteListenAddr port.
//
// The QueryOpts argument is ignored: the Graphite plaintext protocol is a
// raw TCP write with no URL parameters or headers.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting
func (c *vminsertClient) GraphiteWrite(t *testing.T, records []string, _ QueryOpts) {
	t.Helper()
	c.vminsertCli.Write(t, c.graphiteListenAddr, records)
}
// vmstorageClient wraps a raw HTTP client together with the listen address of
// a vmstorage instance and provides helpers for its HTTP endpoints
// (force_flush, force_merge, snapshot management).
type vmstorageClient struct {
	// vmstorageCli performs the actual HTTP requests.
	vmstorageCli *Client
	// httpListenAddr is the host:port the vmstorage HTTP server listens on.
	httpListenAddr string
}
// ForceFlush is a test helper function that forces the flushing of inserted
// data, so it becomes available for searching immediately.
func (c *vmstorageClient) ForceFlush(t *testing.T) {
	t.Helper()

	_, code := c.vmstorageCli.Get(t, fmt.Sprintf("http://%s/internal/force_flush", c.httpListenAddr), nil)
	if code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// ForceMerge is a test helper function that forces the merging of parts.
func (c *vmstorageClient) ForceMerge(t *testing.T) {
	t.Helper()

	_, code := c.vmstorageCli.Get(t, fmt.Sprintf("http://%s/internal/force_merge", c.httpListenAddr), nil)
	if code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// SnapshotCreate creates a database snapshot by sending a query to the
// /snapshot/create endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (c *vmstorageClient) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
	t.Helper()

	data, code := c.vmstorageCli.Post(t, c.SnapshotCreateURL(), nil, nil)
	if code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", code, http.StatusOK, data)
	}
	res := &SnapshotCreateResponse{}
	if err := json.Unmarshal([]byte(data), res); err != nil {
		t.Fatalf("could not unmarshal snapshot create response: data=%q, err: %v", data, err)
	}
	return res
}
// SnapshotCreateURL returns the URL for creating snapshots.
func (c *vmstorageClient) SnapshotCreateURL() string {
	// Plain concatenation; produces the same string as fmt.Sprintf would.
	return "http://" + c.httpListenAddr + "/snapshot/create"
}
// APIV1AdminTSDBSnapshot creates a database snapshot by sending a query to the
// /api/v1/admin/tsdb/snapshot endpoint.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot.
func (c *vmstorageClient) APIV1AdminTSDBSnapshot(t *testing.T) *APIV1AdminTSDBSnapshotResponse {
	t.Helper()

	requestURL := fmt.Sprintf("http://%s/api/v1/admin/tsdb/snapshot", c.httpListenAddr)
	data, code := c.vmstorageCli.Post(t, requestURL, nil, nil)
	if code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", code, http.StatusOK, data)
	}
	res := &APIV1AdminTSDBSnapshotResponse{}
	if err := json.Unmarshal([]byte(data), res); err != nil {
		t.Fatalf("could not unmarshal prometheus snapshot create response: data=%q, err: %v", data, err)
	}
	return res
}
// SnapshotList lists existing database snapshots by sending a query to the
// /snapshot/list endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (c *vmstorageClient) SnapshotList(t *testing.T) *SnapshotListResponse {
	t.Helper()

	requestURL := fmt.Sprintf("http://%s/snapshot/list", c.httpListenAddr)
	data, code := c.vmstorageCli.Get(t, requestURL, nil)
	if code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", code, http.StatusOK, data)
	}
	res := &SnapshotListResponse{}
	if err := json.Unmarshal([]byte(data), res); err != nil {
		t.Fatalf("could not unmarshal snapshot list response: data=%q, err: %v", data, err)
	}
	return res
}
// SnapshotDelete deletes a snapshot by sending a query to the
// /snapshot/delete endpoint.
//
// Both 200 and 500 responses are accepted: callers inspect the decoded
// response body to distinguish success from a reported error.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (c *vmstorageClient) SnapshotDelete(t *testing.T, snapshotName string) *SnapshotDeleteResponse {
	t.Helper()

	requestURL := fmt.Sprintf("http://%s/snapshot/delete?snapshot=%s", c.httpListenAddr, snapshotName)
	data, code := c.vmstorageCli.Delete(t, requestURL)
	allowedStatusCodes := map[int]bool{
		http.StatusOK:                  true,
		http.StatusInternalServerError: true,
	}
	if !allowedStatusCodes[code] {
		t.Fatalf("unexpected status code: got %d, want %v, resp text=%q", code, allowedStatusCodes, data)
	}
	res := &SnapshotDeleteResponse{}
	if err := json.Unmarshal([]byte(data), res); err != nil {
		t.Fatalf("could not unmarshal snapshot delete response: data=%q, err: %v", data, err)
	}
	return res
}
// SnapshotDeleteAll deletes all snapshots by sending a query to the
// /snapshot/delete_all endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (c *vmstorageClient) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse {
	t.Helper()

	requestURL := fmt.Sprintf("http://%s/snapshot/delete_all", c.httpListenAddr)
	data, code := c.vmstorageCli.Post(t, requestURL, nil, nil)
	if code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", code, http.StatusOK, data)
	}
	res := &SnapshotDeleteAllResponse{}
	if err := json.Unmarshal([]byte(data), res); err != nil {
		t.Fatalf("could not unmarshal snapshot delete all response: data=%q, err: %v", data, err)
	}
	return res
}

View File

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"math"
"net/http"
"net/url"
"slices"
"sort"
@@ -27,16 +26,13 @@ type PrometheusQuerier interface {
PrometheusAPIV1LabelValues(t *testing.T, labelName, query string, opts QueryOpts) *PrometheusAPIV1LabelValuesResponse
PrometheusAPIV1ExportNative(t *testing.T, query string, opts QueryOpts) []byte
PrometheusAPIV1Metadata(t *testing.T, metric string, limit int, opts QueryOpts) *PrometheusAPIV1Metadata
PrometheusAPIV1StatusMetricNamesStats(t *testing.T, limit, le, matchPattern string, opts QueryOpts) MetricNamesStatsResponse
PrometheusAPIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string, opts QueryOpts)
APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string, opts QueryOpts)
// TODO(@rtm0): Prometheus does not provide this API. Either move it to a
// separate interface or rename this interface to allow for multiple querier
// types.
GraphiteMetricsIndex(t *testing.T, opts QueryOpts) GraphiteMetricsIndexResponse
GraphiteMetricsFind(t *testing.T, query string, opts QueryOpts) GraphiteMetricsFindResponse
GraphiteMetricsExpand(t *testing.T, query string, opts QueryOpts) GraphiteMetricsExpandResponse
GraphiteRender(t *testing.T, target string, opts QueryOpts) GraphiteRenderResponse
GraphiteTagsTagSeries(t *testing.T, record string, opts QueryOpts)
GraphiteTagsTagMultiSeries(t *testing.T, records []string, opts QueryOpts)
}
@@ -93,17 +89,6 @@ type QueryOpts struct {
LatencyOffset string
Format string
NoCache string
Headers http.Header
From string
Until string
StorageStep string
}
func (qos *QueryOpts) getHeaders() http.Header {
if qos.Headers == nil {
qos.Headers = make(http.Header)
}
return qos.Headers
}
func (qos *QueryOpts) asURLValues() url.Values {
@@ -129,13 +114,18 @@ func (qos *QueryOpts) asURLValues() url.Values {
addNonEmpty("latency_offset", qos.LatencyOffset)
addNonEmpty("format", qos.Format)
addNonEmpty("nocache", qos.NoCache)
addNonEmpty("from", qos.From)
addNonEmpty("until", qos.Until)
addNonEmpty("storage_step", qos.StorageStep)
return uv
}
// getTenant returns tenant with optional default value
func (qos *QueryOpts) getTenant() string {
if qos.Tenant == "" {
return "0"
}
return qos.Tenant
}
// PrometheusAPIV1QueryResponse is an inmemory representation of the
// /prometheus/api/v1/query or /prometheus/api/v1/query_range response.
type PrometheusAPIV1QueryResponse struct {
@@ -489,6 +479,10 @@ type TSDBStatusResponse struct {
Data TSDBStatusResponseData
}
// GraphiteMetricsIndexResponse is an in-memory representation of the json response
// returned by the /graphite/metrics/index.json endpoint.
type GraphiteMetricsIndexResponse = []string
// AdminTenantsResponse is an in-memory representation of the json response
// returned by the /api/v1/admin/tenants endpoint.
type AdminTenantsResponse struct {
@@ -538,32 +532,3 @@ func sortTSDBStatusResponseEntries(entries []TSDBStatusResponseEntry) {
return left.Count < right.Count
})
}
// GraphiteMetricsIndexResponse is an in-memory representation of the json response
// returned by the /graphite/metrics/index.json endpoint.
type GraphiteMetricsIndexResponse = []string
type GraphiteMetric struct {
Id string
Text string
AllowChildren int
Expandable int
Leaf int
}
// GraphiteMetricsIndexResponse is an in-memory representation of the json response
// returned by the /graphite/metrics/find endpoint.
type GraphiteMetricsFindResponse = []GraphiteMetric
// GraphiteMetricsExpandResponse is an in-memory representation of the json response
// returned by the /graphite/metrics/expand endpoint.
type GraphiteMetricsExpandResponse = []string
type GraphiteRenderedTarget struct {
Target string
Datapoints [][2]float64
}
// GraphiteRenderResponse is an in-memory representation of the json response
// returned by the /graphite/render endpoint.
type GraphiteRenderResponse = []GraphiteRenderedTarget

View File

@@ -6,7 +6,6 @@ import (
"os"
"path"
"path/filepath"
"slices"
"sync"
"testing"
"time"
@@ -170,18 +169,6 @@ func (tc *TestCase) MustStartVmagent(instance string, flags []string, promScrape
return app
}
// MustStartDefaultRWVmagent is a test helper function that starts an instance of
// vmagent with defaults suitable for remote-write tests.
func (tc *TestCase) MustStartDefaultRWVmagent(instance string, flags []string) *Vmagent {
tc.t.Helper()
defaultFlags := []string{
"-remoteWrite.flushInterval=50ms",
}
defaultFlags = slices.Concat(defaultFlags, flags)
return tc.MustStartVmagent(instance, defaultFlags, ``)
}
// Vmcluster represents a typical cluster setup: several vmstorage replicas, one
// vminsert, and one vmselect.
//

View File

@@ -28,7 +28,6 @@ func TestSingleBackupRestore(t *testing.T) {
return tc.MustStartVmsingle("vmsingle", []string{
"-storageDataPath=" + storageDataPath,
"-retentionPeriod=100y",
"-futureRetention=2y",
})
},
stopSUT: func() {
@@ -61,13 +60,11 @@ func TestClusterBackupRestore(t *testing.T) {
Vmstorage1Flags: []string{
"-storageDataPath=" + storage1DataPath,
"-retentionPeriod=100y",
"-futureRetention=2y",
},
Vmstorage2Instance: "vmstorage2",
Vmstorage2Flags: []string{
"-storageDataPath=" + storage2DataPath,
"-retentionPeriod=100y",
"-futureRetention=2y",
},
VminsertInstance: "vminsert",
VminsertFlags: []string{},
@@ -100,16 +97,10 @@ func TestClusterBackupRestore(t *testing.T) {
func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
t := tc.T()
type data struct {
samples []string
wantSeries []map[string]string
wantQueryResults []*apptest.QueryResult
}
genData := func(count int, prefix string, start, step int64) data {
recs := make([]string, count)
wantSeries := make([]map[string]string, count)
wantQueryResults := make([]*apptest.QueryResult, count)
genData := func(count int, prefix string, start, step int64) (recs []string, wantSeries []map[string]string, wantQueryResults []*apptest.QueryResult) {
recs = make([]string, count)
wantSeries = make([]map[string]string, count)
wantQueryResults = make([]*apptest.QueryResult, count)
for i := range count {
name := fmt.Sprintf("%s_%03d", prefix, i)
value := float64(i)
@@ -122,15 +113,7 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
Samples: []*apptest.Sample{{Timestamp: timestamp, Value: value}},
}
}
return data{recs, wantSeries, wantQueryResults}
}
concatData := func(d1, d2 data) data {
var d data
d.samples = slices.Concat(d1.samples, d2.samples)
d.wantSeries = slices.Concat(d1.wantSeries, d2.wantSeries)
d.wantQueryResults = slices.Concat(d1.wantQueryResults, d2.wantQueryResults)
return d
return recs, wantSeries, wantQueryResults
}
backupBaseDir, err := filepath.Abs(filepath.Join(tc.Dir(), "backups"))
@@ -207,20 +190,10 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
// Use the same number of metrics and time range for all the data ingestions
// below.
const numMetrics = 1000
// With 1000 metrics (one per minute), the time range spans 2 months.
start := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC).UnixMilli()
end := time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC).UnixMilli()
step := (end - start) / numMetrics
batch1 := genData(numMetrics, "batch1", start, step)
batch2 := genData(numMetrics, "batch2", start, step)
batches12 := concatData(batch1, batch2)
now := time.Now().UTC()
startFuture := time.Date(now.Year()+1, 1, 1, 0, 0, 0, 0, time.UTC).UnixMilli()
endFuture := time.Date(now.Year()+1, 3, 1, 0, 0, 0, 0, time.UTC).UnixMilli()
stepFuture := (endFuture - startFuture) / numMetrics
batch1Future := genData(numMetrics, "batch1", startFuture, stepFuture)
batch2Future := genData(numMetrics, "batch2", startFuture, stepFuture)
batches12Future := concatData(batch1Future, batch2Future)
// Verify backup/restore:
//
@@ -234,25 +207,24 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
// - Start vmsingle
// - Ensure that the queries return batch1 data only.
batch1Data, wantBatch1Series, wantBatch1QueryResults := genData(numMetrics, "batch1", start, step)
batch2Data, wantBatch2Series, wantBatch2QueryResults := genData(numMetrics, "batch2", start, step)
wantBatch12Series := slices.Concat(wantBatch1Series, wantBatch2Series)
wantBatch12QueryResults := slices.Concat(wantBatch1QueryResults, wantBatch2QueryResults)
sut := opts.startSUT()
sut.PrometheusAPIV1ImportPrometheus(t, batch1.samples, apptest.QueryOpts{})
sut.PrometheusAPIV1ImportPrometheus(t, batch1Future.samples, apptest.QueryOpts{})
sut.PrometheusAPIV1ImportPrometheus(t, batch1Data, apptest.QueryOpts{})
sut.ForceFlush(t)
assertSeries(sut, `{__name__=~"batch1.*"}`, start, end, batch1.wantSeries)
assertSeries(sut, `{__name__=~"batch1.*"}`, startFuture, endFuture, batch1Future.wantSeries)
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, step, batch1.wantQueryResults)
assertQueryResults(sut, `{__name__=~"batch1.*"}`, startFuture, endFuture, stepFuture, batch1Future.wantQueryResults)
assertSeries(sut, `{__name__=~"batch1.*"}`, start, end, wantBatch1Series)
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, step, wantBatch1QueryResults)
createBackup(sut, "batch1")
sut.PrometheusAPIV1ImportPrometheus(t, batch2.samples, apptest.QueryOpts{})
sut.PrometheusAPIV1ImportPrometheus(t, batch2Future.samples, apptest.QueryOpts{})
sut.PrometheusAPIV1ImportPrometheus(t, batch2Data, apptest.QueryOpts{})
sut.ForceFlush(t)
assertSeries(sut, `{__name__=~"batch(1|2).*"}`, start, end, batches12.wantSeries)
assertSeries(sut, `{__name__=~"batch(1|2).*"}`, startFuture, endFuture, batches12Future.wantSeries)
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, start, end, step, batches12.wantQueryResults)
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, startFuture, endFuture, stepFuture, batches12Future.wantQueryResults)
assertSeries(sut, `{__name__=~"batch(1|2).*"}`, start, end, wantBatch12Series)
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, start, end, step, wantBatch12QueryResults)
createBackup(sut, "batch12")
opts.stopSUT()
@@ -261,8 +233,6 @@ func testBackupRestore(tc *apptest.TestCase, opts testBackupRestoreOpts) {
sut = opts.startSUT()
assertSeries(sut, `{__name__=~"batch(1|2).*"}`, start, end, batch1.wantSeries)
assertSeries(sut, `{__name__=~"batch(1|2).*"}`, startFuture, endFuture, batch1Future.wantSeries)
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, start, end, step, batch1.wantQueryResults)
assertQueryResults(sut, `{__name__=~"batch(1|2).*"}`, startFuture, endFuture, stepFuture, batch1Future.wantQueryResults)
assertSeries(sut, `{__name__=~"batch1.*"}`, start, end, wantBatch1Series)
assertQueryResults(sut, `{__name__=~"batch1.*"}`, start, end, step, wantBatch1QueryResults)
}

View File

@@ -1,211 +0,0 @@
package tests
import (
"fmt"
"path/filepath"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
)
func TestSingleFutureTimestamps(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
opts := testFutureTimestampsOpts{
start: func() apptest.PrometheusWriteQuerier {
return tc.MustStartVmsingle("vmsingle", []string{
"-storageDataPath=" + filepath.Join(tc.Dir(), "vmsingle"),
"-retentionPeriod=100y",
"-futureRetention=100y",
})
},
stop: func() {
tc.StopApp("vmsingle")
},
}
testFutureTimestamps(tc, opts)
}
func TestClusterFutureTimestamps(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
opts := testFutureTimestampsOpts{
start: func() apptest.PrometheusWriteQuerier {
return tc.MustStartCluster(&apptest.ClusterOptions{
Vmstorage1Instance: "vmstorage1",
Vmstorage1Flags: []string{
"-storageDataPath=" + filepath.Join(tc.Dir(), "vmstorage1"),
"-retentionPeriod=100y",
"-futureRetention=100y",
},
Vmstorage2Instance: "vmstorage2",
Vmstorage2Flags: []string{
"-storageDataPath=" + filepath.Join(tc.Dir(), "vmstorage2"),
"-retentionPeriod=100y",
"-futureRetention=100y",
},
VminsertInstance: "vminsert",
VminsertFlags: []string{},
VmselectInstance: "vmselect",
VmselectFlags: []string{},
})
},
stop: func() {
tc.StopApp("vminsert")
tc.StopApp("vmselect")
tc.StopApp("vmstorage1")
tc.StopApp("vmstorage2")
},
}
testFutureTimestamps(tc, opts)
}
type testFutureTimestampsOpts struct {
start func() apptest.PrometheusWriteQuerier
stop func()
}
func testFutureTimestamps(tc *apptest.TestCase, opts testFutureTimestampsOpts) {
t := tc.T()
// assertSeries retrieves set of all metric names from the storage and
// compares it with the expected set.
assertSeries := func(app apptest.PrometheusQuerier, prefix string, start, end int64, want []map[string]string) {
t.Helper()
query := fmt.Sprintf(`{__name__=~"metric_%s.*"}`, prefix)
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected /api/v1/series response",
Got: func() any {
return app.PrometheusAPIV1Series(t, query, apptest.QueryOpts{
Start: fmt.Sprintf("%d", start),
End: fmt.Sprintf("%d", end),
}).Sort()
},
Want: &apptest.PrometheusAPIV1SeriesResponse{
Status: "success",
Data: want,
},
FailNow: true,
})
}
// assertSeries retrieves all data from the storage and compares it with the
// expected result.
assertQueryResults := func(app apptest.PrometheusQuerier, prefix string, start, end, step int64, want []*apptest.QueryResult) {
t.Helper()
query := fmt.Sprintf(`{__name__=~"metric_%s.*"}`, prefix)
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected /api/v1/query_range response",
Got: func() any {
return app.PrometheusAPIV1QueryRange(t, query, apptest.QueryOpts{
Start: fmt.Sprintf("%d", start),
End: fmt.Sprintf("%d", end),
Step: fmt.Sprintf("%dms", step),
MaxLookback: fmt.Sprintf("%dms", step-1),
NoCache: "1",
})
},
Want: &apptest.PrometheusAPIV1QueryResponse{
Status: "success",
Data: &apptest.QueryData{
ResultType: "matrix",
Result: want,
},
},
FailNow: true,
})
}
f := func(prefix string, startTime, endTime time.Time, wantEmpty bool) {
const numMetrics = 1000
start := startTime.UnixMilli()
end := endTime.UnixMilli()
step := (end - start) / numMetrics
data := genFutureTimestampsData(prefix, numMetrics, start, step)
if wantEmpty {
data.wantSeries = []map[string]string{}
data.wantQueryResults = []*apptest.QueryResult{}
}
// Ingest data and check query results.
sut := opts.start()
sut.PrometheusAPIV1ImportPrometheus(t, data.samples, apptest.QueryOpts{})
sut.ForceFlush(t)
assertSeries(sut, prefix, start, end, data.wantSeries)
assertQueryResults(sut, prefix, start, end, step, data.wantQueryResults)
// Ensure the queries work after restrart.
opts.stop()
sut = opts.start()
assertSeries(sut, prefix, start, end, data.wantSeries)
assertQueryResults(sut, prefix, start, end, step, data.wantQueryResults)
opts.stop()
}
now := time.Now().UTC()
retentionLimit := 100 * 365 * 24 * time.Hour
var start, end time.Time
start = time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, time.UTC)
end = time.Date(now.Year(), now.Month(), now.Day()+2, 0, 0, 0, 0, time.UTC)
f("future_1d", start, end, false)
start = time.Date(now.Year(), now.Month()+1, 1, 0, 0, 0, 0, time.UTC)
end = time.Date(now.Year(), now.Month()+2, 1, 0, 0, 0, 0, time.UTC)
f("future_1m", start, end, false)
start = time.Date(now.Year()+1, 1, 1, 0, 0, 0, 0, time.UTC)
end = time.Date(now.Year()+2, 1, 1, 0, 0, 0, 0, time.UTC)
f("future_1y", start, end, false)
start = now.Add(retentionLimit - 24*time.Hour)
end = now.Add(retentionLimit)
f("future_1d_before_limit", start, end, false)
start = now.Add(retentionLimit + time.Minute)
end = now.Add(retentionLimit + 24*time.Hour)
f("future_1d_beyond_limit", start, end, true)
}
type futureTimestampsData struct {
samples []string
wantSeries []map[string]string
wantQueryResults []*apptest.QueryResult
}
func genFutureTimestampsData(prefix string, numMetrics, start, step int64) futureTimestampsData {
samples := make([]string, numMetrics)
wantSeries := make([]map[string]string, numMetrics)
wantQueryResults := make([]*apptest.QueryResult, numMetrics)
for i := range numMetrics {
metricName := fmt.Sprintf("metric_%s_%04d", prefix, i)
labelName := fmt.Sprintf("label_%s_%04d", prefix, i)
labelValue := fmt.Sprintf("value_%s_%04d", prefix, i)
value := i
timestamp := start + i*step
samples[i] = fmt.Sprintf(`%s{%s="value", label="%s"} %d %d`, metricName, labelName, labelValue, value, timestamp)
wantSeries[i] = map[string]string{
"__name__": metricName,
labelName: "value",
"label": labelValue,
}
wantQueryResults[i] = &apptest.QueryResult{
Metric: map[string]string{
"__name__": metricName,
labelName: "value",
"label": labelValue,
},
Samples: []*apptest.Sample{{Timestamp: timestamp, Value: float64(value)}},
}
}
return futureTimestampsData{samples, wantSeries, wantQueryResults}
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
otlppb "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)
func TestSingleIngestionProtocols(t *testing.T) {
@@ -298,231 +297,6 @@ func TestSingleIngestionProtocols(t *testing.T) {
},
})
// opentelemetry metrics protocol
tsNano := uint64(1707123456700 * 1e6) // 2024-02-05T08:57:36.700Z
otlpData := otlppb.MetricsData{
ResourceMetrics: []*otlppb.ResourceMetrics{
{
Resource: &otlppb.Resource{
Attributes: []*otlppb.KeyValue{
{
Key: "foo",
Value: &otlppb.AnyValue{StringValue: new("bar")},
},
},
},
ScopeMetrics: []*otlppb.ScopeMetrics{
{
Scope: &otlppb.InstrumentationScope{
Name: new("otlp"),
Version: new("v1"),
Attributes: []*otlppb.KeyValue{
{
Key: "scope_attribute",
Value: &otlppb.AnyValue{IntValue: new(int64(100))},
},
},
},
Metrics: []*otlppb.Metric{
{
Name: "otlp_series_gauge",
Gauge: &otlppb.Gauge{
DataPoints: []*otlppb.NumberDataPoint{
{IntValue: new(int64(10)), TimeUnixNano: tsNano},
{IntValue: new(int64(5)), TimeUnixNano: tsNano, Attributes: []*otlppb.KeyValue{{Key: "bar", Value: &otlppb.AnyValue{StringValue: new("foo")}}}},
},
},
},
{
Name: "otlp_series_counter",
Sum: &otlppb.Sum{
DataPoints: []*otlppb.NumberDataPoint{
{IntValue: new(int64(30)), TimeUnixNano: tsNano, Attributes: []*otlppb.KeyValue{{Key: "bar", Value: &otlppb.AnyValue{StringValue: new("foo")}}}},
},
},
},
},
},
{
Scope: &otlppb.InstrumentationScope{
Name: new("otlp2"),
Version: new("v2"),
},
Metrics: []*otlppb.Metric{
{
Name: "otlp_series_histogram",
Histogram: &otlppb.Histogram{
DataPoints: []*otlppb.HistogramDataPoint{
{
Count: 15,
Sum: new(float64(100)),
ExplicitBounds: []float64{0.1, 0.5, 1.0, 5.0},
BucketCounts: []uint64{0, 5, 10, 0, 0},
TimeUnixNano: tsNano,
Attributes: []*otlppb.KeyValue{
{Key: "baz", Value: &otlppb.AnyValue{ArrayValue: &otlppb.ArrayValue{Values: []*otlppb.AnyValue{
{StringValue: new("foo")},
{IntValue: new(int64(100))},
}}}},
},
},
},
},
},
},
},
},
},
{
ScopeMetrics: []*otlppb.ScopeMetrics{
{
Metrics: []*otlppb.Metric{
{
Name: "otlp_series_summary",
Summary: &otlppb.Summary{
DataPoints: []*otlppb.SummaryDataPoint{
{
Attributes: []*otlppb.KeyValue{},
TimeUnixNano: tsNano,
Sum: 17.5,
Count: 2,
QuantileValues: []*otlppb.ValueAtQuantile{
{
Quantile: 0.1,
Value: 7.5,
},
{
Quantile: 0.5,
Value: 10.0,
},
},
},
},
},
},
},
},
},
},
},
}
sut.OpentelemetryV1Metrics(t, otlpData, apptest.QueryOpts{})
sut.ForceFlush(t)
f(sut, &opts{
query: `{__name__=~"otlp.+"}`,
wantMetrics: []map[string]string{
{
"__name__": "otlp_series_counter",
"foo": "bar",
"bar": "foo",
"scope.attributes.scope_attribute": "100",
"scope.name": "otlp",
"scope.version": "v1",
},
{
"__name__": "otlp_series_gauge",
"foo": "bar",
"bar": "foo",
"scope.attributes.scope_attribute": "100",
"scope.name": "otlp",
"scope.version": "v1",
},
{
"__name__": "otlp_series_gauge",
"foo": "bar",
"scope.attributes.scope_attribute": "100",
"scope.name": "otlp",
"scope.version": "v1",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "+Inf",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "0.1",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "0.5",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "1",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "5",
},
{
"__name__": "otlp_series_histogram_count",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
},
{
"__name__": "otlp_series_histogram_sum",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
},
{
"__name__": "otlp_series_summary",
"quantile": "0.1",
},
{
"__name__": "otlp_series_summary",
"quantile": "0.5",
},
{
"__name__": "otlp_series_summary_count",
},
{
"__name__": "otlp_series_summary_sum",
},
},
wantSamples: []*apptest.Sample{
{Timestamp: 1707123456700, Value: 30}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 5}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 0}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 5}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 100}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 7.5}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 2}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 17.5}, // 2024-02-05T08:57:36.700Z
},
})
}
func TestSingleCardinalityLimiter(t *testing.T) {
@@ -944,231 +718,6 @@ func TestClusterIngestionProtocols(t *testing.T) {
},
})
// opentelemetry metrics protocol
tsNano := uint64(1707123456700 * 1e6) // 2024-02-05T08:57:36.700Z
otlpData := otlppb.MetricsData{
ResourceMetrics: []*otlppb.ResourceMetrics{
{
Resource: &otlppb.Resource{
Attributes: []*otlppb.KeyValue{
{
Key: "foo",
Value: &otlppb.AnyValue{StringValue: new("bar")},
},
},
},
ScopeMetrics: []*otlppb.ScopeMetrics{
{
Scope: &otlppb.InstrumentationScope{
Name: new("otlp"),
Version: new("v1"),
Attributes: []*otlppb.KeyValue{
{
Key: "scope_attribute",
Value: &otlppb.AnyValue{IntValue: new(int64(100))},
},
},
},
Metrics: []*otlppb.Metric{
{
Name: "otlp_series_gauge",
Gauge: &otlppb.Gauge{
DataPoints: []*otlppb.NumberDataPoint{
{IntValue: new(int64(10)), TimeUnixNano: tsNano},
{IntValue: new(int64(5)), TimeUnixNano: tsNano, Attributes: []*otlppb.KeyValue{{Key: "bar", Value: &otlppb.AnyValue{StringValue: new("foo")}}}},
},
},
},
{
Name: "otlp_series_counter",
Sum: &otlppb.Sum{
DataPoints: []*otlppb.NumberDataPoint{
{IntValue: new(int64(30)), TimeUnixNano: tsNano, Attributes: []*otlppb.KeyValue{{Key: "bar", Value: &otlppb.AnyValue{StringValue: new("foo")}}}},
},
},
},
},
},
{
Scope: &otlppb.InstrumentationScope{
Name: new("otlp2"),
Version: new("v2"),
},
Metrics: []*otlppb.Metric{
{
Name: "otlp_series_histogram",
Histogram: &otlppb.Histogram{
DataPoints: []*otlppb.HistogramDataPoint{
{
Count: 15,
Sum: new(float64(100)),
ExplicitBounds: []float64{0.1, 0.5, 1.0, 5.0},
BucketCounts: []uint64{0, 5, 10, 0, 0},
TimeUnixNano: tsNano,
Attributes: []*otlppb.KeyValue{
{Key: "baz", Value: &otlppb.AnyValue{ArrayValue: &otlppb.ArrayValue{Values: []*otlppb.AnyValue{
{StringValue: new("foo")},
{IntValue: new(int64(100))},
}}}},
},
},
},
},
},
},
},
},
},
{
ScopeMetrics: []*otlppb.ScopeMetrics{
{
Metrics: []*otlppb.Metric{
{
Name: "otlp_series_summary",
Summary: &otlppb.Summary{
DataPoints: []*otlppb.SummaryDataPoint{
{
Attributes: []*otlppb.KeyValue{},
TimeUnixNano: tsNano,
Sum: 17.5,
Count: 2,
QuantileValues: []*otlppb.ValueAtQuantile{
{
Quantile: 0.1,
Value: 7.5,
},
{
Quantile: 0.5,
Value: 10.0,
},
},
},
},
},
},
},
},
},
},
},
}
vminsert.OpentelemetryV1Metrics(t, otlpData, apptest.QueryOpts{})
vmstorage.ForceFlush(t)
f(&opts{
query: `{__name__=~"otlp.+"}`,
wantMetrics: []map[string]string{
{
"__name__": "otlp_series_counter",
"foo": "bar",
"bar": "foo",
"scope.attributes.scope_attribute": "100",
"scope.name": "otlp",
"scope.version": "v1",
},
{
"__name__": "otlp_series_gauge",
"foo": "bar",
"bar": "foo",
"scope.attributes.scope_attribute": "100",
"scope.name": "otlp",
"scope.version": "v1",
},
{
"__name__": "otlp_series_gauge",
"foo": "bar",
"scope.attributes.scope_attribute": "100",
"scope.name": "otlp",
"scope.version": "v1",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "+Inf",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "0.1",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "0.5",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "1",
},
{
"__name__": "otlp_series_histogram_bucket",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
"le": "5",
},
{
"__name__": "otlp_series_histogram_count",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
},
{
"__name__": "otlp_series_histogram_sum",
"baz": `["foo",100]`,
"foo": "bar",
"scope.name": "otlp2",
"scope.version": "v2",
},
{
"__name__": "otlp_series_summary",
"quantile": "0.1",
},
{
"__name__": "otlp_series_summary",
"quantile": "0.5",
},
{
"__name__": "otlp_series_summary_count",
},
{
"__name__": "otlp_series_summary_sum",
},
},
wantSamples: []*apptest.Sample{
{Timestamp: 1707123456700, Value: 30}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 5}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 0}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 5}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 15}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 100}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 7.5}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 2}, // 2024-02-05T08:57:36.700Z
{Timestamp: 1707123456700, Value: 17.5}, // 2024-02-05T08:57:36.700Z
},
})
}
func TestClusterCardinalityLimiter(t *testing.T) {

View File

@@ -214,7 +214,7 @@ func testLegacyDeleteSeries(tc *at.TestCase, opts testLegacyDeleteSeriesOpts) {
newSUT := opts.startNewSUT()
assertSearchResults(newSUT, `{__name__=~".*"}`, start1, end1, "1d", want1)
newSUT.PrometheusAPIV1AdminTSDBDeleteSeries(t, `{__name__=~".*"}`, at.QueryOpts{})
newSUT.APIV1AdminTSDBDeleteSeries(t, `{__name__=~".*"}`, at.QueryOpts{})
wantNoResults := &want{
series: []map[string]string{},
queryResults: []*at.QueryResult{},
@@ -877,7 +877,7 @@ func testLegacyDowngrade(tc *at.TestCase, opts testLegacyDowngradeOpts) {
// Ingest legacy2 records, ensure the queries return only legacy2.
legacySUT = opts.startLegacySUT()
assertQueries(legacySUT, `{__name__=~".*"}`, wantLegacy1, numMetrics)
legacySUT.PrometheusAPIV1AdminTSDBDeleteSeries(t, `{__name__=~".*"}`, at.QueryOpts{})
legacySUT.APIV1AdminTSDBDeleteSeries(t, `{__name__=~".*"}`, at.QueryOpts{})
assertQueries(legacySUT, `{__name__=~".*"}`, wantEmpty, numMetrics)
legacySUT.PrometheusAPIV1ImportPrometheus(t, legacy2Data, at.QueryOpts{})
legacySUT.ForceFlush(t)
@@ -891,7 +891,7 @@ func testLegacyDowngrade(tc *at.TestCase, opts testLegacyDowngradeOpts) {
newSUT = opts.startNewSUT()
// series count includes deleted metrics
assertQueries(newSUT, `{__name__=~".*"}`, wantLegacy2New1, 3*numMetrics)
newSUT.PrometheusAPIV1AdminTSDBDeleteSeries(t, `{__name__=~".*"}`, at.QueryOpts{})
newSUT.APIV1AdminTSDBDeleteSeries(t, `{__name__=~".*"}`, at.QueryOpts{})
// series count includes deleted metrics
assertQueries(newSUT, `{__name__=~".*"}`, wantEmpty, 3*numMetrics)
opts.stopNewSUT()

View File

@@ -48,7 +48,7 @@ func TestSingleMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_3"},
},
}
got := sut.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
got := sut.APIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
if diff := cmp.Diff(expected, got); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
@@ -63,7 +63,7 @@ func TestSingleMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_3", QueryRequestsCount: 1},
},
}
got = sut.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
got = sut.APIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
if diff := cmp.Diff(expected, got); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
@@ -90,7 +90,7 @@ func TestSingleMetricNamesStats(t *testing.T) {
},
}
expectedStatsResponse.Sort()
gotStatus := sut.PrometheusAPIV1StatusTSDB(t, "", date, "", apptest.QueryOpts{})
gotStatus := sut.APIV1StatusTSDB(t, "", date, "", apptest.QueryOpts{})
if diff := cmp.Diff(expectedStatsResponse, gotStatus, tsdbMetricNameEntryCmpOpts); diff != "" {
t.Errorf("unexpected APIV1StatusTSDB response (-want, +got):\n%s", diff)
}
@@ -105,7 +105,7 @@ func TestSingleMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_3", QueryRequestsCount: 1},
},
}
got = sut.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
got = sut.APIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
if diff := cmp.Diff(expected, got); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
@@ -118,17 +118,17 @@ func TestSingleMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_3", QueryRequestsCount: 1},
},
}
got = sut.PrometheusAPIV1StatusMetricNamesStats(t, "", "2", "", apptest.QueryOpts{})
got = sut.APIV1StatusMetricNamesStats(t, "", "2", "", apptest.QueryOpts{})
if diff := cmp.Diff(expected, got); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// reset state and check empty request response
sut.PrometheusAPIV1AdminStatusMetricNamesStatsReset(t, apptest.QueryOpts{})
sut.APIV1AdminStatusMetricNamesStatsReset(t, apptest.QueryOpts{})
expected = apptest.MetricNamesStatsResponse{
Records: []apptest.MetricNamesStatsRecord{},
}
got = sut.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
got = sut.APIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{})
if diff := cmp.Diff(expected, got); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
@@ -158,7 +158,7 @@ func TestClusterMetricNamesStats(t *testing.T) {
fmt.Sprintf("-storageNode=%s,%s", vmstorage1.VmselectAddr(), vmstorage2.VmselectAddr()),
})
// verify empty stats
resp := vmselect.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: "0:0"})
resp := vmselect.MetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: "0:0"})
if len(resp.Records) != 0 {
t.Fatalf("unexpected resp Records: %d, want: %d", len(resp.Records), 0)
}
@@ -198,7 +198,7 @@ func TestClusterMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_3"},
},
}
gotStats := vmselect.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: tenantID})
gotStats := vmselect.MetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: tenantID})
if diff := cmp.Diff(expected, gotStats); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
@@ -216,7 +216,7 @@ func TestClusterMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_1", QueryRequestsCount: 3},
},
}
gotStats = vmselect.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: tenantID})
gotStats = vmselect.MetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: tenantID})
if diff := cmp.Diff(expected, gotStats); diff != "" {
t.Errorf("unexpected response tenant: %s (-want, +got):\n%s", tenantID, diff)
}
@@ -243,9 +243,9 @@ func TestClusterMetricNamesStats(t *testing.T) {
},
}
expectedStatsResponse.Sort()
gotStatus := vmselect.PrometheusAPIV1StatusTSDB(t, "", date, "", apptest.QueryOpts{Tenant: tenantID})
gotStatus := vmselect.APIV1StatusTSDB(t, "", date, "", apptest.QueryOpts{Tenant: tenantID})
if diff := cmp.Diff(expectedStatsResponse, gotStatus, tsdbMetricNameEntryCmpOpts); diff != "" {
t.Errorf("unexpected TSDB status for tenant %s (-want, +got):\n%s", tenantID, diff)
t.Errorf("unexpected APIV1StatusTSDB response tenant: %s (-want, +got):\n%s", tenantID, diff)
}
}
@@ -258,14 +258,14 @@ func TestClusterMetricNamesStats(t *testing.T) {
{MetricName: "metric_name_1", QueryRequestsCount: 9},
},
}
gotStats := vmselect.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: "multitenant"})
gotStats := vmselect.MetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: "multitenant"})
if diff := cmp.Diff(expected, gotStats); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// reset cache and check empty state
vmselect.PrometheusAPIV1AdminStatusMetricNamesStatsReset(t, apptest.QueryOpts{})
resp = vmselect.PrometheusAPIV1StatusMetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: "multitenant"})
vmselect.MetricNamesStatsReset(t, apptest.QueryOpts{})
resp = vmselect.MetricNamesStats(t, "", "", "", apptest.QueryOpts{Tenant: "multitenant"})
if len(resp.Records) != 0 {
t.Fatalf("want 0 records, got: %d", len(resp.Records))
}

View File

@@ -1,313 +0,0 @@
package tests
import (
"net/http"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
)
// TestClusterMultiTenantSelectViaHeaders verifies multitenancy driven by the
// AccountID/ProjectID HTTP headers across a minimal cluster: per-tenant
// ingestion and querying, the special "multitenant" account that fans out to
// all tenants, tenant population from vm_account_id/vm_project_id labels,
// per-tenant series deletion, and header-vs-URL-path tenant precedence.
func TestClusterMultiTenantSelectViaHeaders(t *testing.T) {
fs.MustRemoveDir(t.Name())
cmpOpt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
cmpSROpt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1SeriesResponse{}, "Status", "IsPartial")
tc := apptest.NewTestCase(t)
defer tc.Stop()
// One vmstorage plus vminsert/vmselect with multitenancy-via-headers enabled
// on both the write and the read path.
vmstorage := tc.MustStartVmstorage("vmstorage", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage",
"-retentionPeriod=100y",
})
vminsert := tc.MustStartVminsert("vminsert", []string{
"-storageNode=" + vmstorage.VminsertAddr(),
"-enableMultitenancyViaHeaders",
})
vmselect := tc.MustStartVmselect("vmselect", []string{
"-storageNode=" + vmstorage.VmselectAddr(),
"-search.tenantCacheExpireDuration=0",
"-enableMultitenancyViaHeaders",
})
// "multitenant" is a reserved account name that addresses all tenants at once.
multitenant := make(http.Header)
multitenant.Set("AccountID", "multitenant")
// test for empty tenants request
got := vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
Headers: multitenant,
Step: "5m",
Time: "2022-05-10T08:03:00.000Z",
})
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[]}}`)
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// ingest per tenant data and verify it with search
samples := []string{
`foo_bar 1.00 1652169600000`, // 2022-05-10T08:00:00Z
`foo_bar 2.00 1652169660000`, // 2022-05-10T08:01:00Z
`foo_bar 3.00 1652169720000`, // 2022-05-10T08:02:00Z
}
// Omitted AccountID/ProjectID default to 0, so these four header sets
// address tenants 1:1, 1:15, 2:0 and 0:3 respectively.
tenantHeaders := []map[string]string{
{"AccountID": "1", "ProjectID": "1"},
{"AccountID": "1", "ProjectID": "15"},
{"AccountID": "2"},
{"ProjectID": "3"},
}
instantCT := "2022-05-10T08:05:00.000Z" // 1652169900 Unix seconds
for _, headers := range tenantHeaders {
h := make(http.Header)
for k, v := range headers {
h.Set(k, v)
}
vminsert.PrometheusAPIV1ImportPrometheus(t, samples, apptest.QueryOpts{Headers: h})
vmstorage.ForceFlush(t)
// verify tenants are searchable via tenantID in headers
got := vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
Headers: h, Time: instantCT,
})
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[{"metric":{"__name__":"foo_bar"},"value":[1652169900,"3"]}]}}`)
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}
// verify all tenants searchable with multitenant header
// /api/v1/query
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result":[
{"metric":{"__name__":"foo_bar","vm_account_id":"0","vm_project_id":"3"},"value":[1652169900,"3"]},
{"metric":{"__name__":"foo_bar","vm_account_id":"1","vm_project_id": "1"},"value":[1652169900,"3"]},
{"metric":{"__name__":"foo_bar","vm_account_id":"1","vm_project_id":"15"},"value":[1652169900,"3"]},
{"metric":{"__name__":"foo_bar","vm_account_id":"2","vm_project_id":"0"},"value":[1652169900,"3"]}
]
}
}`,
)
got = vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
Headers: multitenant,
Time: instantCT,
})
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// /api/v1/query_range aggregated by tenant labels
query := "sum(foo_bar) by(vm_account_id,vm_project_id)"
got = vmselect.PrometheusAPIV1QueryRange(t, query, apptest.QueryOpts{
Headers: multitenant,
Start: "2022-05-10T07:59:00.000Z",
End: "2022-05-10T08:05:00.000Z",
Step: "1m",
})
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result": [
{"metric": {"vm_account_id": "0","vm_project_id":"3"}, "values": [[1652169600,"1"],[1652169660,"2"],[1652169720,"3"],[1652169780,"3"]]},
{"metric": {"vm_account_id": "1","vm_project_id":"1"}, "values": [[1652169600,"1"],[1652169660,"2"],[1652169720,"3"],[1652169780,"3"]]},
{"metric": {"vm_account_id": "1","vm_project_id":"15"}, "values": [[1652169600,"1"],[1652169660,"2"],[1652169720,"3"],[1652169780,"3"]]},
{"metric": {"vm_account_id": "2","vm_project_id":"0"}, "values": [[1652169600,"1"],[1652169660,"2"],[1652169720,"3"],[1652169780,"3"]]}
]
}
}`)
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// verify /api/v1/series response
wantSR := apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"1"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"15"},
{"__name__":"foo_bar", "vm_account_id":"2", "vm_project_id":"0"},
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"3"}
]
}`)
wantSR.Sort()
gotSR := vmselect.PrometheusAPIV1Series(t, "foo_bar", apptest.QueryOpts{
Headers: multitenant,
Start: "2022-05-10T08:03:00.000Z",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// test ingestion with multitenant header, tenants must be populated from labels
//
var tenantLabelsSamples = []string{
`foo_bar{vm_account_id="5"} 1.00 1652169720000`, // 2022-05-10T08:02:00Z'
`foo_bar{vm_project_id="10"} 2.00 1652169660000`, // 2022-05-10T08:01:00Z
`foo_bar{vm_account_id="5",vm_project_id="15"} 3.00 1652169720000`, // 2022-05-10T08:02:00Z
}
vminsert.PrometheusAPIV1ImportPrometheus(t, tenantLabelsSamples, apptest.QueryOpts{Headers: multitenant})
vmstorage.ForceFlush(t)
// /api/v1/query with query filters
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result":[
{"metric":{"__name__":"foo_bar","vm_account_id":"5","vm_project_id": "0"},"value":[1652169900,"1"]},
{"metric":{"__name__":"foo_bar","vm_account_id":"5","vm_project_id":"15"},"value":[1652169900,"3"]}
]
}
}`,
)
got = vmselect.PrometheusAPIV1Query(t, `foo_bar{vm_account_id="5"}`, apptest.QueryOpts{
Time: instantCT,
Headers: multitenant,
})
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// /api/v1/series with extra_filters
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"5", "vm_project_id":"15"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"15"}
]
}`)
wantSR.Sort()
gotSR = vmselect.PrometheusAPIV1Series(t, "foo_bar", apptest.QueryOpts{
Start: "2022-05-10T08:00:00.000Z",
End: "2022-05-10T08:30:00.000Z",
ExtraFilters: []string{`{vm_project_id="15"}`},
Headers: multitenant,
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// /api/v1/label/../value with extra_filters
wantVR := apptest.NewPrometheusAPIV1LabelValuesResponse(t,
`{"data": [
"5"
]
}`)
// matchQuery is ignored for /api/v1/label/<labelName>/values lookups with multitenant token
gotVR := vmselect.PrometheusAPIV1LabelValues(t, "vm_account_id", "xxx", apptest.QueryOpts{
Start: "2022-05-10T08:00:00.000Z",
End: "2022-05-10T08:30:00.000Z",
ExtraFilters: []string{`{vm_account_id="5"}`},
Headers: multitenant,
})
// NOTE(review): this Sort() on gotSR looks like a copy-paste leftover —
// the comparison below is on gotVR, and gotSR was already sorted above.
// Confirm and remove in a follow-up.
gotSR.Sort()
if diff := cmp.Diff(wantVR, gotVR, cmpopts.IgnoreFields(apptest.PrometheusAPIV1LabelValuesResponse{}, "Status", "IsPartial")); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Delete series from specific tenant
tenantID := make(http.Header)
tenantID.Set("AccountID", "5")
tenantID.Set("ProjectID", "15")
vmselect.PrometheusAPIV1AdminTSDBDeleteSeries(t, "foo_bar", apptest.QueryOpts{
Headers: tenantID,
})
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"3"},
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"10"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"1"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"15"},
{"__name__":"foo_bar", "vm_account_id":"2", "vm_project_id":"0"},
{"__name__":"foo_bar", "vm_account_id":"5", "vm_project_id":"0"}
]
}`)
wantSR.Sort()
gotSR = vmselect.PrometheusAPIV1Series(t, "foo_bar", apptest.QueryOpts{
Headers: multitenant,
Start: "2022-05-10T08:03:00.000Z",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Delete series for multitenant with tenant filter
vmselect.PrometheusAPIV1AdminTSDBDeleteSeries(t, `foo_bar{vm_account_id="1"}`, apptest.QueryOpts{
Headers: multitenant,
})
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"3"},
{"__name__":"foo_bar", "vm_account_id":"0", "vm_project_id":"10"},
{"__name__":"foo_bar", "vm_account_id":"2", "vm_project_id":"0"},
{"__name__":"foo_bar", "vm_account_id":"5", "vm_project_id":"0"}
]
}`)
wantSR.Sort()
gotSR = vmselect.PrometheusAPIV1Series(t, `foo_bar`, apptest.QueryOpts{
Headers: multitenant,
Start: "2022-05-10T08:03:00.000Z",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// The multitenancy tenants cache must stay untouched: tenants were supplied
// explicitly via headers, so no tenant discovery should have happened.
if got := vmselect.GetIntMetric(t, `vm_cache_requests_total{type="multitenancy/tenants"}`); got != 0 {
t.Errorf("unexpected multitenancy tenants cache requests; got %d; want 0", got)
}
if got := vmselect.GetIntMetric(t, `vm_cache_misses_total{type="multitenancy/tenants"}`); got != 0 {
t.Errorf("unexpected multitenancy tenants cache misses; got %d; want 0", got)
}
if got := vmselect.GetIntMetric(t, `vm_cache_entries{type="multitenancy/tenants"}`); got != 0 {
t.Errorf("unexpected multitenancy tenants cache entries; got %d; want 0", got)
}
// verify that tenant in path has priority over tenant specified in headers
// /api/v1/import/prometheus
tenantInHeader := make(http.Header)
tenantInHeader.Set("AccountID", "42")
tenantInPath := "112"
vminsert.PrometheusAPIV1ImportPrometheus(t, samples, apptest.QueryOpts{
// tenants in header and path clash - path should have higher priority on ingestion
// NOTE(review): consider using tenantInPath here instead of the duplicated literal "112".
Headers: tenantInHeader,
Tenant: "112",
})
vmstorage.ForceFlush(t)
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result":[
{"metric":{"__name__":"foo_bar"},"value":[1652169900,"3"]}
]
}
}`,
)
got = vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
// tenants in header and path clash - path should have higher priority on ingestion
Headers: multitenant,
Tenant: tenantInPath,
Time: instantCT,
})
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}

View File

@@ -192,7 +192,7 @@ func TestClusterMultiTenantSelect(t *testing.T) {
}
// Delete series from specific tenant
vmselect.PrometheusAPIV1AdminTSDBDeleteSeries(t, "foo_bar", apptest.QueryOpts{
vmselect.APIV1AdminTSDBDeleteSeries(t, "foo_bar", apptest.QueryOpts{
Tenant: "5:15",
})
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
@@ -215,7 +215,7 @@ func TestClusterMultiTenantSelect(t *testing.T) {
}
// Delete series for multitenant with tenant filter
vmselect.PrometheusAPIV1AdminTSDBDeleteSeries(t, `foo_bar{vm_account_id="1"}`, apptest.QueryOpts{
vmselect.APIV1AdminTSDBDeleteSeries(t, `foo_bar{vm_account_id="1"}`, apptest.QueryOpts{
Tenant: "multitenant",
})

View File

@@ -1,186 +0,0 @@
package tests
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"sort"
"strconv"
"strings"
"testing"
)
// openTSDBPoint is a single data point served by the mock OpenTSDB server.
type openTSDBPoint struct {
Metric string // metric name, e.g. "test.cpu"
Tags map[string]string // tag key/value pairs identifying the series
Timestamp int64 // sample timestamp; the callers in this repo pass Unix seconds
Value float64 // sample value
}
// openTSDBMockServer implements the minimal subset of the OpenTSDB HTTP API
// used by vmctl opentsdb: /api/suggest, /api/search/lookup, /api/query.
type openTSDBMockServer struct {
server *httptest.Server // in-process HTTP server; started by newOpenTSDBMockServer
points []openTSDBPoint // fixed data set served by all endpoints
}
// newOpenTSDBMockServer starts an in-process httptest server that serves the
// given points through the three OpenTSDB endpoints vmctl relies on.
// Callers must invoke close() when done.
func newOpenTSDBMockServer(t *testing.T, points []openTSDBPoint) *openTSDBMockServer {
	t.Helper()
	mock := openTSDBMockServer{points: points}
	router := http.NewServeMux()
	router.HandleFunc("/api/suggest", mock.handleSuggest)
	router.HandleFunc("/api/search/lookup", mock.handleLookup)
	router.HandleFunc("/api/query", mock.handleQuery)
	mock.server = httptest.NewServer(router)
	return &mock
}
// close shuts down the mock server and releases its listener.
func (s *openTSDBMockServer) close() { s.server.Close() }
// httpAddr returns the base URL of the mock server (http://127.0.0.1:<port>).
func (s *openTSDBMockServer) httpAddr() string { return s.server.URL }
// handleSuggest serves https://opentsdb.net/docs/build/html/api_http/suggest.html
// It returns the distinct metric names whose name contains the optional q=
// substring, in order of first appearance, as a JSON array of strings.
func (s *openTSDBMockServer) handleSuggest(w http.ResponseWriter, r *http.Request) {
	query := r.URL.Query().Get("q")
	emitted := make(map[string]bool, len(s.points))
	var names []string
	for _, point := range s.points {
		name := point.Metric
		// An empty q matches everything; otherwise require a substring match.
		matches := query == "" || strings.Contains(name, query)
		if !matches || emitted[name] {
			continue
		}
		emitted[name] = true
		names = append(names, name)
	}
	_ = json.NewEncoder(w).Encode(names)
}
// handleLookup serves https://opentsdb.net/docs/build/html/api_http/search/lookup.html
// It returns one entry per distinct tag set stored for the metric named by m=.
func (s *openTSDBMockServer) handleLookup(w http.ResponseWriter, r *http.Request) {
	metric := r.URL.Query().Get("m")
	// seriesMeta mirrors a single element of the "results" array.
	type seriesMeta struct {
		Metric string            `json:"metric"`
		Tags   map[string]string `json:"tags"`
	}
	var results []seriesMeta
	emitted := make(map[string]bool, len(s.points))
	for _, point := range s.points {
		if point.Metric != metric {
			continue
		}
		// Deduplicate series by their canonical tag-set identity.
		identity := tagsKey(point.Tags)
		if emitted[identity] {
			continue
		}
		emitted[identity] = true
		results = append(results, seriesMeta{Metric: point.Metric, Tags: point.Tags})
	}
	_ = json.NewEncoder(w).Encode(map[string]any{
		"type":    "LOOKUP",
		"metric":  metric,
		"results": results,
	})
}
// handleQuery serves https://opentsdb.net/docs/build/html/api_http/query/index.html
// It filters the stored points by metric name, tag filter and [start, end]
// time range (inclusive), groups them by distinct tag set, and returns one
// response object per group with its data points keyed by timestamp.
func (s *openTSDBMockServer) handleQuery(w http.ResponseWriter, r *http.Request) {
m := r.URL.Query().Get("m")
metric, tagFilter, ok := parseQuery(m)
if !ok {
http.Error(w, "bad query param", http.StatusBadRequest)
return
}
// start/end are expected as integer timestamps (same unit as point timestamps).
start, err := strconv.ParseInt(r.URL.Query().Get("start"), 10, 64)
if err != nil {
http.Error(w, "bad start param", http.StatusBadRequest)
return
}
end, err := strconv.ParseInt(r.URL.Query().Get("end"), 10, 64)
if err != nil {
http.Error(w, "bad end param", http.StatusBadRequest)
return
}
// resp mirrors one element of the OpenTSDB query response array.
type resp struct {
Metric string `json:"metric"`
Tags map[string]string `json:"tags"`
AggregateTags []string `json:"aggregateTags"`
Dps map[string]float64 `json:"dps"`
}
// Group matching points by canonical tag-set identity (see tagsKey).
grouped := make(map[string]*resp, len(s.points))
for _, p := range s.points {
if p.Metric != metric {
continue
}
if !matchTags(p.Tags, tagFilter) {
continue
}
if p.Timestamp < start || p.Timestamp > end {
continue
}
key := tagsKey(p.Tags)
if _, exists := grouped[key]; !exists {
grouped[key] = &resp{
Metric: p.Metric,
Tags: p.Tags,
AggregateTags: []string{},
Dps: map[string]float64{},
}
}
grouped[key].Dps[fmt.Sprintf("%d", p.Timestamp)] = p.Value
}
// Map iteration order is random, so the order of response entries is
// unspecified — clients must not depend on it.
out := make([]*resp, 0, len(grouped))
for _, v := range grouped {
out = append(out, v)
}
_ = json.NewEncoder(w).Encode(out)
}
// parseQuery parses the OpenTSDB m= query parameter.
// Format: "<agg>:<bucket>-<agg>-none:<metric>{k=v,k=v}"
// The tag block is optional; malformed k=v pairs are silently skipped.
// It returns ok=false when m does not contain at least three ':'-separated parts.
func parseQuery(m string) (string, map[string]string, bool) {
	pieces := strings.SplitN(m, ":", 3)
	if len(pieces) < 3 {
		return "", nil, false
	}
	metric, rawTags, _ := strings.Cut(pieces[2], "{")
	rawTags = strings.TrimSuffix(rawTags, "}")
	tags := make(map[string]string, 4)
	for _, pair := range strings.Split(rawTags, ",") {
		key, value, hasEq := strings.Cut(pair, "=")
		if !hasEq {
			continue
		}
		tags[key] = value
	}
	return metric, tags, true
}
// matchTags reports whether got satisfies every constraint in filter.
// A filter value of "*" is a wildcard that matches any value (including a
// missing key); an empty or nil filter matches everything.
func matchTags(got, filter map[string]string) bool {
	for key, want := range filter {
		if want != "*" && got[key] != want {
			return false
		}
	}
	return true
}
// tagsKey returns a canonical identity string for a tag set: the "k=v" pairs
// joined by commas in ascending key order. Two maps with equal contents
// always produce the same key; nil and empty maps yield "".
func tagsKey(tags map[string]string) string {
	keys := make([]string, 0, len(tags))
	for name := range tags {
		keys = append(keys, name)
	}
	// Sort by key (not by joined pair) so the ordering is stable regardless
	// of the bytes appearing in values.
	sort.Strings(keys)
	var b strings.Builder
	for i, name := range keys {
		if i > 0 {
			b.WriteByte(',')
		}
		b.WriteString(name)
		b.WriteByte('=')
		b.WriteString(tags[name])
	}
	return b.String()
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
// TestSingleVMAgentReloadConfigs verifies that vmagent reload new configurations on SIGHUP signal
@@ -30,12 +29,13 @@ func TestSingleVMAgentReloadConfigs(t *testing.T) {
relabelFilePath := fmt.Sprintf("%s/%s", t.TempDir(), "relabel_config.yaml")
fs.MustWriteSync(relabelFilePath, []byte(relabelingRules))
vmagent := tc.MustStartDefaultRWVmagent("vmagent", []string{
vmagent := tc.MustStartVmagent("vmagent", []string{
`-remoteWrite.flushInterval=50ms`,
`-remoteWrite.forcePromProto=true`,
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent",
fmt.Sprintf(`-remoteWrite.url=http://%s/api/v1/write`, vmsingle.HTTPAddr()),
fmt.Sprintf(`-remoteWrite.urlRelabelConfig=%s`, relabelFilePath),
})
}, ``)
checkResponse := func(query, expResponse string) {
t.Helper()
@@ -131,11 +131,12 @@ func testSingleVMAgentRemoteWrite(t *testing.T, forcePromProto bool) {
vmsingle := tc.MustStartDefaultVmsingle()
vmagent := tc.MustStartDefaultRWVmagent("vmagent", []string{
vmagent := tc.MustStartVmagent("vmagent", []string{
`-remoteWrite.flushInterval=50ms`,
fmt.Sprintf(`-remoteWrite.forcePromProto=%v`, forcePromProto),
fmt.Sprintf(`-remoteWrite.url=http://%s/api/v1/write`, vmsingle.HTTPAddr()),
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent",
})
}, ``)
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
@@ -179,11 +180,12 @@ func TestSingleVMAgentUnsupportedMediaTypeDropIfSnappy(t *testing.T) {
}))
defer remoteWriteSrv.Close()
vmagent := tc.MustStartDefaultRWVmagent("vmagent", []string{
vmagent := tc.MustStartVmagent("vmagent", []string{
`-remoteWrite.flushInterval=50ms`,
`-remoteWrite.forcePromProto=true`,
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent",
})
}, ``)
vmagent.APIV1ImportPrometheusNoWaitFlush(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
@@ -242,10 +244,11 @@ func TestSingleVMAgentDowngradeRemoteWriteProtocol(t *testing.T) {
}))
defer remoteWriteSrv.Close()
vmagent := tc.MustStartDefaultRWVmagent("vmagent", []string{
vmagent := tc.MustStartVmagent("vmagent", []string{
`-remoteWrite.flushInterval=50ms`,
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent",
})
}, ``)
// Send request encoded with `zstd`; it fails, gets repacked as `snappy`, and retries successfully.
vmagent.APIV1ImportPrometheus(t, []string{
@@ -290,7 +293,8 @@ func TestSingleVMAgentDropOnOverload(t *testing.T) {
}))
defer remoteWriteSrv2.Close()
vmagent := tc.MustStartDefaultRWVmagent("vmagent", []string{
vmagent := tc.MustStartVmagent("vmagent", []string{
`-remoteWrite.flushInterval=50ms`,
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv2.URL),
"-remoteWrite.disableOnDiskQueue=true",
@@ -306,7 +310,7 @@ func TestSingleVMAgentDropOnOverload(t *testing.T) {
// It improves the test stability on resource-constrained runners.
// Should be bigger than retries * period
"-remoteWrite.retryMinInterval=3s",
})
}, ``)
const (
retries = 20
@@ -392,12 +396,13 @@ func TestSingleVMAgentCardinalityLimiter(t *testing.T) {
defer remoteWriteSrv.Close()
// Verify hourly limit is applied
vmagent := tc.MustStartDefaultRWVmagent("vmagent-hourly", []string{
vmagent := tc.MustStartVmagent("vmagent-hourly", []string{
`-remoteWrite.flushInterval=50ms`,
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
"-remoteWrite.maxRowsPerBlock=1",
"-remoteWrite.maxHourlySeries=1",
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent-hourly",
})
}, ``)
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
@@ -426,12 +431,13 @@ func TestSingleVMAgentCardinalityLimiter(t *testing.T) {
)
// Daily limits
vmagent2 := tc.MustStartDefaultRWVmagent("vmagent-daily", []string{
vmagent2 := tc.MustStartVmagent("vmagent-daily", []string{
`-remoteWrite.flushInterval=50ms`,
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
"-remoteWrite.maxRowsPerBlock=1",
"-remoteWrite.maxDailySeries=1",
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent-daily",
})
}, ``)
vmagent2.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
@@ -460,13 +466,14 @@ func TestSingleVMAgentCardinalityLimiter(t *testing.T) {
)
// test running with unlimited tracker
vmagent3 := tc.MustStartDefaultRWVmagent("vmagent-unlimited", []string{
vmagent3 := tc.MustStartVmagent("vmagent-unlimited", []string{
`-remoteWrite.flushInterval=50ms`,
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
"-remoteWrite.maxRowsPerBlock=10",
"-remoteWrite.maxDailySeries=-1",
"-remoteWrite.maxHourlySeries=-1",
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent-unlimited",
})
}, ``)
metrics := make([]string, 0, 100)
for i := range 100 {
@@ -505,139 +512,3 @@ func TestSingleVMAgentCardinalityLimiter(t *testing.T) {
t.Fatalf("unexpected vmagent_daily_series_limit_rows_dropped_total value: %d", v)
}
}
// TestClusterVMAgentForwardMetricsMetadata verifies that vmagent forwards
// Prometheus remote-write metric metadata to the cluster per tenant:
// metadata carrying an explicit AccountID lands in that tenant when written
// through the multitenant endpoint, an explicit tenant in the request URI
// takes precedence over the AccountID embedded in the metadata, and the
// multitenant read endpoint returns the union of all tenants' metadata.
func TestClusterVMAgentForwardMetricsMetadata(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
sut := tc.MustStartDefaultCluster()
vmagent := tc.MustStartDefaultRWVmagent("vmagent", []string{
`-remoteWrite.forcePromProto=true`,
`-enableMultitenantHandlers=true`,
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent",
fmt.Sprintf(`-remoteWrite.url=http://%s/insert/multitenant/prometheus/api/v1/write`, sut.Vminsert.HTTPAddr()),
})
prometheusRemoteWriteDataSet := prompb.WriteRequest{
Metadata: []prompb.MetricMetadata{
{MetricFamilyName: "metric_name_4", Help: "some help message", Type: prompb.MetricTypeSummary, AccountID: 100},
},
}
// Written via the multitenant endpoint, so AccountID=100 from the metadata
// itself selects the target tenant (100:0).
vmagent.PrometheusAPIV1Write(t, prometheusRemoteWriteDataSet, apptest.QueryOpts{Tenant: "multitenant"})
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected /api/v1/metadata response",
Got: func() any {
return sut.PrometheusAPIV1Metadata(t, ``, -1, apptest.QueryOpts{Tenant: "100:0"})
},
Want: &apptest.PrometheusAPIV1Metadata{
Status: "success",
Data: map[string][]apptest.MetadataEntry{
"metric_name_4": {{Help: "some help message", Type: "summary"}},
},
},
})
prometheusRemoteWriteDataSet = prompb.WriteRequest{
Metadata: []prompb.MetricMetadata{
{MetricFamilyName: "metric_name_6", Help: "some help message", Type: prompb.MetricTypeSummary, AccountID: 100},
},
}
// enforce tenant from request uri /insert/tenant_id/prometheus/api/v1/write
// (the AccountID=100 inside the metadata is overridden by tenant 500:500).
vmagent.PrometheusAPIV1Write(t, prometheusRemoteWriteDataSet, apptest.QueryOpts{Tenant: "500:500"})
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected /api/v1/metadata response",
Got: func() any {
return sut.PrometheusAPIV1Metadata(t, ``, -1, apptest.QueryOpts{Tenant: "500:500"})
},
Want: &apptest.PrometheusAPIV1Metadata{
Status: "success",
Data: map[string][]apptest.MetadataEntry{
"metric_name_6": {{Help: "some help message", Type: "summary"}},
},
},
})
// The multitenant read endpoint must see metadata from both tenants.
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected /api/v1/metadata response",
Got: func() any {
return sut.PrometheusAPIV1Metadata(t, ``, -1, apptest.QueryOpts{Tenant: "multitenant"})
},
Want: &apptest.PrometheusAPIV1Metadata{
Status: "success",
Data: map[string][]apptest.MetadataEntry{
"metric_name_4": {{Help: "some help message", Type: "summary"}},
"metric_name_6": {{Help: "some help message", Type: "summary"}},
},
},
})
}
// See https://docs.victoriametrics.com/victoriametrics/vmagent/#multitenancy
//
// TestSingleVMAgentMultitenancy verifies that vmagent attributes ingested rows
// to the right tenant whether the tenant comes from the URL path ("2", "2:2"),
// from AccountID/ProjectID headers, or from both (path wins), by checking the
// vmagent_tenant_inserted_rows_total counter per tenant.
func TestSingleVMAgentMultitenancy(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
// Remote-write sink that accepts and discards everything.
remoteWriteSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}))
defer remoteWriteSrv.Close()
vmagent := tc.MustStartDefaultRWVmagent("vmagent-multitenancy", []string{
fmt.Sprintf(`-remoteWrite.url=%s/api/v1/write`, remoteWriteSrv.URL),
"-remoteWrite.tmpDataPath=" + tc.Dir() + "/vmagent-multitenancy",
"-enableMultitenantHandlers",
"-enableMultitenancyViaHeaders",
})
// Tenant from the URL path: "2" means accountID=2, projectID defaults to 0.
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
}, apptest.QueryOpts{Tenant: "2"})
v := vmagent.GetIntMetric(t, `vmagent_tenant_inserted_rows_total{type="prometheus",accountID="2",projectID="0"}`)
if v != 1 {
t.Fatalf("expected vmagent_tenant_inserted_rows_total to have value 1 for accountID=2")
}
// Tenant from the URL path with an explicit project: "2:2".
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
}, apptest.QueryOpts{Tenant: "2:2"})
v = vmagent.GetIntMetric(t, `vmagent_tenant_inserted_rows_total{type="prometheus",accountID="2",projectID="2"}`)
if v != 1 {
t.Fatalf("expected vmagent_tenant_inserted_rows_total to have value 1 for accountID=2, projectID=2")
}
// Tenant from headers only: AccountID=3, ProjectID defaults to 0.
headers := make(http.Header)
headers.Set("AccountID", "3")
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
}, apptest.QueryOpts{Headers: headers})
v = vmagent.GetIntMetric(t, `vmagent_tenant_inserted_rows_total{type="prometheus",accountID="3",projectID="0"}`)
if v != 1 {
t.Fatalf("expected vmagent_tenant_inserted_rows_total to have value 1 for accountID=3, projectID=0")
}
// Tenant from both headers: AccountID=3, ProjectID=3.
// NOTE(review): the AccountID header is already "3" from the previous step,
// so this Set is redundant but harmless.
headers.Set("AccountID", "3")
headers.Set("ProjectID", "3")
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
}, apptest.QueryOpts{Headers: headers})
v = vmagent.GetIntMetric(t, `vmagent_tenant_inserted_rows_total{type="prometheus",accountID="3",projectID="3"}`)
if v != 1 {
t.Fatalf("expected vmagent_tenant_inserted_rows_total to have value 1 for accountID=3, projectID=3")
}
// tenants in header and path clash - path should have higher priority on ingestion
opts := apptest.QueryOpts{Headers: make(http.Header)}
opts.Headers.Set("AccountID", "4")
opts.Tenant = "5"
vmagent.APIV1ImportPrometheus(t, []string{
"foo_bar 1 1652169600000", // 2022-05-10T08:00:00Z
}, opts)
v = vmagent.GetIntMetric(t, `vmagent_tenant_inserted_rows_total{type="prometheus",accountID="5",projectID="0"}`)
if v != 1 {
t.Fatalf("expected vmagent_tenant_inserted_rows_total to have value 1 for accountID=5, projectID=0")
}
}

View File

@@ -1,167 +0,0 @@
package tests
import (
"fmt"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
)
// TestSingleVmctlOpenTSDBProtocol verifies that vmctl can migrate data from a
// mock OpenTSDB endpoint into a single-node VictoriaMetrics instance.
func TestSingleVmctlOpenTSDBProtocol(t *testing.T) {
	// Remove any leftover state from a previous run of this test.
	fs.MustRemoveDir(t.Name())
	tc := apptest.NewTestCase(t)
	defer tc.Stop()
	vmsingleDst := tc.MustStartDefaultVmsingle()
	vmAddr := fmt.Sprintf("http://%s/", vmsingleDst.HTTPAddr())
	// Generate 60 points at 1-minute intervals starting 2 hours ago.
	// This ensures data falls within vmctl's default query window (now - retention).
	baseTS := time.Now().Add(-2 * time.Hour).Truncate(time.Minute).Unix()
	points := make([]openTSDBPoint, 0, 60)
	for i := range 60 {
		points = append(points, openTSDBPoint{
			Metric:    "test.cpu",
			Tags:      map[string]string{"host": "h1", "env": "prod"},
			Timestamp: baseTS + int64(i*60),
			Value:     float64(i),
		})
	}
	otsdb := newOpenTSDBMockServer(t, points)
	defer otsdb.close()
	vmctlFlags := []string{
		`opentsdb`,
		`--otsdb-addr=` + otsdb.httpAddr(),
		`--vm-addr=` + vmAddr,
		// NOTE(review): aggregation "ssum" differs from the cluster test's
		// "sum" — confirm this is intended and not a typo.
		`--otsdb-retentions=ssum-1m-avg:1d:1d`,
		`--otsdb-filters=test`,
		`--otsdb-normalize`,
		`--disable-progress-bar=true`,
		`-s`,
	}
	// "test.cpu" is expected to be normalized into "test_cpu" in VictoriaMetrics.
	testOpenTSDBProtocol(tc, vmsingleDst, vmctlFlags, points, "test_cpu", baseTS)
}
// TestClusterVmctlOpenTSDBProtocol verifies that vmctl can migrate data from a
// mock OpenTSDB endpoint into a VictoriaMetrics cluster via vminsert.
func TestClusterVmctlOpenTSDBProtocol(t *testing.T) {
	// Remove any leftover state from a previous run of this test.
	fs.MustRemoveDir(t.Name())
	tc := apptest.NewTestCase(t)
	defer tc.Stop()
	cluster := tc.MustStartDefaultCluster()
	vmAddr := fmt.Sprintf("http://%s/", cluster.Vminsert.HTTPAddr())
	// Generate 60 points at 1-minute intervals starting 2 hours ago.
	baseTS := time.Now().Add(-2 * time.Hour).Truncate(time.Minute).Unix()
	points := make([]openTSDBPoint, 0, 60)
	for i := range 60 {
		points = append(points, openTSDBPoint{
			Metric:    "test.mem",
			Tags:      map[string]string{"host": "h1"},
			Timestamp: baseTS + int64(i*60),
			Value:     float64(i * 2),
		})
	}
	otsdb := newOpenTSDBMockServer(t, points)
	defer otsdb.close()
	vmctlFlags := []string{
		`opentsdb`,
		`--otsdb-addr=` + otsdb.httpAddr(),
		`--vm-addr=` + vmAddr,
		`--otsdb-retentions=sum-1m-avg:1d:1d`,
		`--otsdb-filters=test`,
		`--otsdb-normalize`,
		`--disable-progress-bar=true`,
		// Write into the default tenant of the cluster.
		`--vm-account-id=0`,
		`-s`,
	}
	// "test.mem" is expected to be normalized into "test_mem" in VictoriaMetrics.
	testOpenTSDBProtocol(tc, cluster, vmctlFlags, points, "test_mem", baseTS)
}
// testOpenTSDBProtocol runs vmctl with the given flags against the queried
// VictoriaMetrics setup and asserts that exactly the given points end up
// stored under vmMetricName. baseTS is the Unix timestamp (seconds) of the
// first generated point and is used to build the query time range.
func testOpenTSDBProtocol(
	tc *apptest.TestCase,
	queries apptest.PrometheusWriteQuerier,
	vmctlFlags []string,
	points []openTSDBPoint,
	vmMetricName string,
	baseTS int64,
) {
	t := tc.T()
	t.Helper()
	// Build dynamic time range covering all data points with 1-hour padding.
	queryStart := time.Unix(baseTS-3600, 0).UTC().Format(time.RFC3339)
	queryEnd := time.Unix(baseTS+7200, 0).UTC().Format(time.RFC3339)
	cmpOpt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
	// Sanity check: the database must be empty before the migration starts.
	got := queries.PrometheusAPIV1Query(t, `{__name__=~".*"}`, apptest.QueryOpts{
		Step: "5m",
		Time: queryStart,
	})
	want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[]}}`)
	if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
		t.Errorf("unexpected response (-want, +got):\n%s", diff)
	}
	tc.MustStartVmctl("vmctl", vmctlFlags)
	queries.ForceFlush(t)
	expected := buildExpectedOpenTSDBResult(points, vmMetricName)
	// Retry the export until all migrated samples become visible.
	tc.Assert(&apptest.AssertOptions{
		Retries: 300,
		Msg:     `unexpected metrics stored via opentsdb protocol`,
		Got: func() any {
			r := queries.PrometheusAPIV1Export(t, fmt.Sprintf(`{__name__=%q}`, vmMetricName), apptest.QueryOpts{
				Start: queryStart,
				End:   queryEnd,
			})
			r.Sort()
			return r.Data.Result
		},
		Want: expected,
		CmpOpts: []cmp.Option{
			cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType"),
		},
	})
}
// buildExpectedOpenTSDBResult converts the mock OpenTSDB points into the
// query results expected from VictoriaMetrics after migration: points are
// grouped by their full label set (metric name plus tags) and timestamps are
// converted from seconds to milliseconds.
func buildExpectedOpenTSDBResult(points []openTSDBPoint, vmMetricName string) []*apptest.QueryResult {
	byLabels := make(map[string]*apptest.QueryResult)
	for _, pt := range points {
		labels := map[string]string{"__name__": vmMetricName}
		for name, value := range pt.Tags {
			labels[name] = value
		}
		k := tagsKey(labels)
		qr, ok := byLabels[k]
		if !ok {
			qr = &apptest.QueryResult{Metric: labels}
			byLabels[k] = qr
		}
		// OpenTSDB timestamps are in seconds; query results use milliseconds.
		qr.Samples = append(qr.Samples, &apptest.Sample{
			Timestamp: pt.Timestamp * 1000,
			Value:     pt.Value,
		})
	}
	results := make([]*apptest.QueryResult, 0, len(byLabels))
	for _, qr := range byLabels {
		results = append(results, qr)
	}
	// Wrap into a response solely to reuse its deterministic Sort().
	resp := apptest.PrometheusAPIV1QueryResponse{
		Data: &apptest.QueryData{Result: results},
	}
	resp.Sort()
	return resp.Data.Result
}

View File

@@ -10,20 +10,15 @@ import (
"syscall"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/golang/snappy"
)
// Vmagent holds the state of a vmagent app and provides vmagent-specific functions
type Vmagent struct {
*app
*metricsClient
*ServesMetrics
httpListenAddr string
cli *Client
httpListenAddr string
apiV1ImportPrometheusURL string
}
// StartVmagent starts an instance of vmagent with the given flags. It also
@@ -48,10 +43,13 @@ func StartVmagent(instance string, flags []string, cli *Client, promScrapeConfig
}
return &Vmagent{
app: app,
metricsClient: newMetricsClient(cli, stderrExtracts[0]),
httpListenAddr: stderrExtracts[0],
cli: cli,
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[0]),
cli: cli,
},
httpListenAddr: stderrExtracts[0],
apiV1ImportPrometheusURL: fmt.Sprintf("http://%s/api/v1/import/prometheus", stderrExtracts[0]),
}, nil
}
@@ -78,39 +76,16 @@ func (app *Vmagent) APIV1ImportPrometheus(t *testing.T, records []string, opts Q
// Flushing may still be in progress on the function return.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1importprometheus
func (app *Vmagent) APIV1ImportPrometheusNoWaitFlush(t *testing.T, records []string, opts QueryOpts) {
func (app *Vmagent) APIV1ImportPrometheusNoWaitFlush(t *testing.T, records []string, _ QueryOpts) {
t.Helper()
data := []byte(strings.Join(records, "\n"))
headers := opts.getHeaders()
headers.Set("Content-Type", "text/plain")
url := getVMAgentInsertPath(app.httpListenAddr, "prometheus/api/v1/import/prometheus", opts)
_, statusCode := app.cli.Post(t, url, data, headers)
_, statusCode := app.cli.Post(t, app.apiV1ImportPrometheusURL, "text/plain", data)
if statusCode != http.StatusNoContent {
t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
}
}
// getVMAgentInsertPath returns the URL used for writes.
// A non-empty QueryOpts.Tenant yields a cluster-like ingestion path and takes
// precedence over tenant HTTP headers; tenant headers alone select the
// multitenant insert path; otherwise the single-node (tenantless) path is used.
func getVMAgentInsertPath(addr, suffix string, opts QueryOpts) string {
	if opts.Tenant != "" {
		// QueryOpts.Tenant has priority over headers.
		return fmt.Sprintf("http://%s/insert/%s/%s", addr, opts.Tenant, suffix)
	}
	hdrs := opts.getHeaders()
	if hdrs.Get("AccountID") != "" || hdrs.Get("ProjectID") != "" {
		// vmagent accepts tenant IDs via HTTP headers only when both
		// -enableMultitenantHandlers and -enableMultitenancyViaHeaders are set.
		// See https://docs.victoriametrics.com/victoriametrics/vmagent/#multitenancy
		return fmt.Sprintf("http://%s/insert/%s", addr, suffix)
	}
	// No tenant in QueryOpts or in HTTP headers: single-node (no tenants) path.
	return fmt.Sprintf("http://%s/%s", addr, suffix)
}
// RemoteWriteRequestsRetriesCountTotal sums up the total retries for remote write requests.
func (app *Vmagent) RemoteWriteRequestsRetriesCountTotal(t *testing.T) int {
total := 0.0
@@ -181,28 +156,6 @@ func (app *Vmagent) ReloadRelabelConfigs(t *testing.T) {
t.Fatalf("relabel configs were not reloaded after SIGHUP signal; previous total: %f, current total: %f", prevTotal, currTotal)
}
// PrometheusAPIV1Write is a test helper function that inserts a
// collection of records in Prometheus remote-write format by sending a HTTP
// POST request to /prometheus/api/v1/write vmagent endpoint.
func (app *Vmagent) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, opts QueryOpts) {
	t.Helper()
	url := getVMAgentInsertPath(app.httpListenAddr, "prometheus/api/v1/write", opts)
	// Remote-write payloads are protobuf-marshaled and snappy-compressed.
	data := snappy.Encode(nil, wr.MarshalProtobuf(nil))
	recordsCount := len(wr.Timeseries)
	if prommetadata.IsEnabled() {
		// Metadata entries count as additional records when metadata support is on.
		recordsCount += len(wr.Metadata)
	}
	headers := opts.getHeaders()
	headers.Set("Content-Type", "application/x-protobuf")
	// sendBlocking waits until vmagent reports the data forwarded via remote write.
	app.sendBlocking(t, recordsCount, func() {
		_, statusCode := app.cli.Post(t, url, data, headers)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// HTTPAddr returns the address at which the vmagent process is listening
// for http connections.
func (app *Vmagent) HTTPAddr() string {
@@ -221,22 +174,16 @@ func (app *Vmagent) HTTPAddr() string {
// If it is, then the data has been sent to vmstorage.
//
// Unreliable if the records are inserted concurrently.
func (app *Vmagent) sendBlocking(t *testing.T, _ int, send func()) {
func (app *Vmagent) sendBlocking(t *testing.T, numRecordsToSend int, send func()) {
t.Helper()
currRowsSentCount := app.remoteWriteRequestsTotal(t)
send()
const (
retries = 20
period = 100 * time.Millisecond
)
// TODO: properly account wantRowsSentCount
// currently vmagent doesn't expose per time-series write information
// so we can only account number of blocks sent via remote write protocol
// it should be suitable for tests purpose
wantRowsSentCount := currRowsSentCount + 1
wantRowsSentCount := app.remoteWriteRequestsTotal(t) + numRecordsToSend
for range retries {
if app.remoteWriteRequestsTotal(t) >= wantRowsSentCount {
return

View File

@@ -1,6 +1,7 @@
package apptest
import (
"fmt"
"io"
"regexp"
"syscall"
@@ -16,7 +17,7 @@ var httpBuilitinListenAddrRE = regexp.MustCompile(`pprof handlers are exposed at
// functions.
type Vmauth struct {
*app
*metricsClient
*ServesMetrics
httpListenAddr string
configFilePath string
@@ -44,8 +45,11 @@ func StartVmauth(instance string, flags []string, cli *Client, configFilePath st
}
return &Vmauth{
app: app,
metricsClient: newMetricsClient(cli, stderrExtracts[0]),
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[0]),
cli: cli,
},
httpListenAddr: stderrExtracts[0],
configFilePath: configFilePath,
cli: cli,

View File

@@ -3,21 +3,30 @@ package apptest
import (
"fmt"
"io"
"net/http"
"regexp"
"strings"
"testing"
"time"
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
// Vminsert holds the state of a vminsert app and provides vminsert-specific
// functions.
type Vminsert struct {
*app
*metricsClient
*vminsertClient
*ServesMetrics
httpListenAddr string
clusternativeListenAddr string
graphiteListenAddr string
openTSDBListenAddr string
cli *Client
}
// storageNodes returns the storage node addresses passed to vminsert via
@@ -63,26 +72,17 @@ func StartVminsert(instance string, flags []string, cli *Client, output io.Write
return nil, err
}
metricsClient := newMetricsClient(cli, stderrExtracts[0])
return &Vminsert{
app: app,
metricsClient: metricsClient,
vminsertClient: &vminsertClient{
vminsertCli: cli,
url: func(op, path string, opts QueryOpts) string {
return getClusterPath(stderrExtracts[0], op, path, opts)
},
openTSDBURL: func(op, path string, opts QueryOpts) string {
return getClusterPath(stderrExtracts[3], op, path, opts)
},
graphiteListenAddr: stderrExtracts[2],
sendBlocking: func(t *testing.T, numRecordsToSend int, send func()) {
t.Helper()
sendBlocking(t, metricsClient, numRecordsToSend, send)
},
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[0]),
cli: cli,
},
httpListenAddr: stderrExtracts[0],
clusternativeListenAddr: stderrExtracts[1],
graphiteListenAddr: stderrExtracts[2],
openTSDBListenAddr: stderrExtracts[3],
cli: cli,
}, nil
}
@@ -98,6 +98,199 @@ func (app *Vminsert) HTTPAddr() string {
return app.httpListenAddr
}
// InfluxWrite is a test helper function that inserts a
// collection of records in Influx line format by sending a HTTP
// POST request to /influx/write vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#influxwrite
func (app *Vminsert) InfluxWrite(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/influx/write", app.httpListenAddr, opts.getTenant())
	// Append query args (extra_label, timestamps, etc.) only when present.
	uv := opts.asURLValues()
	uvs := uv.Encode()
	if len(uvs) > 0 {
		url += "?" + uvs
	}
	// Records are newline-separated Influx line-protocol entries.
	data := []byte(strings.Join(records, "\n"))
	// Block until the inserted rows are reported as sent to vmstorage.
	app.sendBlocking(t, len(records), func() {
		_, statusCode := app.cli.Post(t, url, "text/plain", data)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// GraphiteWrite is a test helper function that sends a
// collection of records to graphiteListenAddr port.
//
// Note: unlike the HTTP-based helpers, this writes directly to the TCP
// listener and does not wait for the rows to reach vmstorage.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting
func (app *Vminsert) GraphiteWrite(t *testing.T, records []string, _ QueryOpts) {
	t.Helper()
	app.cli.Write(t, app.graphiteListenAddr, records)
}
// PrometheusAPIV1ImportCSV is a test helper function that inserts a collection
// of records in CSV format for the given tenant by sending an HTTP POST
// request to prometheus/api/v1/import/csv vminsert endpoint.
//
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
func (app *Vminsert) PrometheusAPIV1ImportCSV(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/csv", app.httpListenAddr, opts.getTenant())
	// Append query args (e.g. format/extra_label) only when present.
	uv := opts.asURLValues()
	uvs := uv.Encode()
	if len(uvs) > 0 {
		url += "?" + uvs
	}
	data := []byte(strings.Join(records, "\n"))
	// Block until the inserted rows are reported as sent to vmstorage.
	app.sendBlocking(t, len(records), func() {
		_, statusCode := app.cli.Post(t, url, "text/plain", data)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// PrometheusAPIV1ImportNative is a test helper function that inserts a collection
// of records in Native format for the given tenant by sending an HTTP POST
// request to prometheus/api/v1/import/native vminsert endpoint.
//
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
func (app *Vminsert) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/native", app.httpListenAddr, opts.getTenant())
	uv := opts.asURLValues()
	uvs := uv.Encode()
	if len(uvs) > 0 {
		url += "?" + uvs
	}
	// The native payload is opaque here, so the record count is unknown;
	// wait for at least one row to be reported as sent to vmstorage.
	app.sendBlocking(t, 1, func() {
		_, statusCode := app.cli.Post(t, url, "text/plain", data)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// OpenTSDBAPIPut is a test helper function that inserts a collection of
// records in OpenTSDB format for the given tenant by sending an HTTP POST
// request to /opentsdb/api/put vminsert endpoint.
//
// Note that the request goes to the dedicated OpenTSDB HTTP listener
// (openTSDBListenAddr), not to the main HTTP listener.
//
// See https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
func (app *Vminsert) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/opentsdb/api/put", app.openTSDBListenAddr, opts.getTenant())
	uv := opts.asURLValues()
	uvs := uv.Encode()
	if len(uvs) > 0 {
		url += "?" + uvs
	}
	// Records are JSON objects; wrap them into a single JSON array payload.
	data := []byte("[" + strings.Join(records, ",") + "]")
	// Block until the inserted rows are reported as sent to vmstorage.
	app.sendBlocking(t, len(records), func() {
		_, statusCode := app.cli.Post(t, url, "application/json", data)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// PrometheusAPIV1Write is a test helper function that inserts a
// collection of records in Prometheus remote-write format by sending a HTTP
// POST request to /prometheus/api/v1/write vminsert endpoint.
func (app *Vminsert) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/write", app.httpListenAddr, opts.getTenant())
	// Remote-write payloads are protobuf-marshaled and snappy-compressed.
	data := snappy.Encode(nil, wr.MarshalProtobuf(nil))
	recordsCount := len(wr.Timeseries)
	if prommetadata.IsEnabled() {
		// Metadata entries count as additional records when metadata support is on.
		recordsCount += len(wr.Metadata)
	}
	// Block until the inserted rows are reported as sent to vmstorage.
	app.sendBlocking(t, recordsCount, func() {
		_, statusCode := app.cli.Post(t, url, "application/x-protobuf", data)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// PrometheusAPIV1ImportPrometheus is a test helper function that inserts a
// collection of records in Prometheus text exposition format for the given
// tenant by sending a HTTP POST request to
// /prometheus/api/v1/import/prometheus vminsert endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1importprometheus
func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/prometheus", app.httpListenAddr, opts.getTenant())
	uv := opts.asURLValues()
	uvs := uv.Encode()
	if len(uvs) > 0 {
		url += "?" + uvs
	}
	data := []byte(strings.Join(records, "\n"))
	var recordsCount int
	var metadataRecords int
	uniqueMetadataMetricNames := make(map[string]struct{})
	for _, record := range records {
		// Metric metadata has the following format:
		//   # HELP importprometheus_series ...
		//   # TYPE importprometheus_series ...
		// Both lines for the same metric name result in a single metadata record.
		if strings.HasPrefix(record, "# ") {
			metadataItems := strings.Split(record, " ")
			// The metric name lives at index 2 ("#", "HELP"/"TYPE", <name>, ...),
			// so at least 3 space-separated items are required. The previous
			// check (< 2) allowed two-item records and then panicked on the
			// metadataItems[2] access below.
			if len(metadataItems) < 3 {
				t.Fatalf("BUG: unexpected metadata format=%q", record)
			}
			metricName := metadataItems[2]
			// Count each metric name only once even if both HELP and TYPE are present.
			if _, ok := uniqueMetadataMetricNames[metricName]; ok {
				continue
			}
			uniqueMetadataMetricNames[metricName] = struct{}{}
			metadataRecords++
			continue
		}
		recordsCount++
	}
	if prommetadata.IsEnabled() {
		recordsCount += metadataRecords
	}
	// Block until the inserted rows are reported as sent to vmstorage.
	app.sendBlocking(t, recordsCount, func() {
		_, statusCode := app.cli.Post(t, url, "text/plain", data)
		if statusCode != http.StatusNoContent {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusNoContent)
		}
	})
}
// ZabbixConnectorHistory is a test helper function that inserts a
// collection of records in zabbixconnector format by sending a HTTP
// POST request to /zabbixconnector/api/v1/history vmsingle endpoint.
func (app *Vminsert) ZabbixConnectorHistory(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()
	url := fmt.Sprintf("http://%s/insert/%s/zabbixconnector/api/v1/history", app.httpListenAddr, opts.getTenant())
	uv := opts.asURLValues()
	uvs := uv.Encode()
	if len(uvs) > 0 {
		url += "?" + uvs
	}
	data := []byte(strings.Join(records, "\n"))
	// Block until the inserted rows are reported as sent to vmstorage.
	app.sendBlocking(t, len(records), func() {
		_, statusCode := app.cli.Post(t, url, "application/json", data)
		// Unlike other insert endpoints, this one responds with 200 OK.
		if statusCode != http.StatusOK {
			t.Fatalf("unexpected status code: got %d, want %d", statusCode, http.StatusOK)
		}
	})
}
// String returns the string representation of the vminsert app state.
func (app *Vminsert) String() string {
return fmt.Sprintf("{app: %s httpListenAddr: %q}", app.app, app.httpListenAddr)
@@ -113,10 +306,13 @@ func (app *Vminsert) String() string {
// Waiting is implemented a retrieving the value of `vm_rpc_rows_sent_total`
// metric and checking whether it is equal or greater than the wanted value.
// If it is, then the data has been sent to vmstorage.
func sendBlocking(t *testing.T, c *metricsClient, numRecordsToSend int, send func()) {
//
// Unreliable if the records are inserted concurrently.
// TODO(rtm0): Put sending and waiting into a critical section to make reliable?
func (app *Vminsert) sendBlocking(t *testing.T, numRecordsToSend int, send func()) {
t.Helper()
wantRowsSentCount := c.rpcRowsSentTotal(t) + numRecordsToSend
wantRowsSentCount := app.rpcRowsSentTotal(t) + numRecordsToSend
send()
@@ -125,7 +321,7 @@ func sendBlocking(t *testing.T, c *metricsClient, numRecordsToSend int, send fun
period = 100 * time.Millisecond
)
for range retries {
d := c.rpcRowsSentTotal(t)
d := app.rpcRowsSentTotal(t)
if d >= wantRowsSentCount {
return
}
@@ -133,3 +329,14 @@ func sendBlocking(t *testing.T, c *metricsClient, numRecordsToSend int, send fun
}
t.Fatalf("timed out while waiting for inserted rows to be sent to vmstorage")
}
// rpcRowsSentTotal retrieves the values of all vminsert
// `vm_rpc_rows_sent_total` metrics (there will be one for each vmstorage) and
// returns their integer sum.
func (app *Vminsert) rpcRowsSentTotal(t *testing.T) int {
	var sum float64
	for _, value := range app.GetMetricsByPrefix(t, "vm_rpc_rows_sent_total") {
		sum += value
	}
	return int(sum)
}

View File

@@ -1,17 +1,20 @@
package apptest
import (
"encoding/json"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"testing"
)
// Vmselect holds the state of a vmselect app and provides vmselect-specific
// functions.
type Vmselect struct {
*app
*metricsClient
*vmselectClient
*ServesMetrics
httpListenAddr string
clusternativeListenAddr string
@@ -38,15 +41,10 @@ func StartVmselect(instance string, flags []string, cli *Client, output io.Write
}
return &Vmselect{
app: app,
metricsClient: newMetricsClient(cli, stderrExtracts[0]),
vmselectClient: &vmselectClient{
vmselectCli: cli,
url: func(op, path string, opts QueryOpts) string {
return getClusterPath(stderrExtracts[0], op, path, opts)
},
metricNamesStatsResetURL: fmt.Sprintf("http://%s/admin/api/v1/admin/status/metric_names_stats/reset", stderrExtracts[0]),
tenantsURL: fmt.Sprintf("http://%s/admin/tenants", stderrExtracts[0]),
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[0]),
cli: cli,
},
httpListenAddr: stderrExtracts[0],
clusternativeListenAddr: stderrExtracts[1],
@@ -66,6 +64,298 @@ func (app *Vmselect) HTTPAddr() string {
return app.httpListenAddr
}
// PrometheusAPIV1Export is a test helper function that performs the export of
// raw samples in JSON line format by sending a HTTP POST request to
// /prometheus/api/v1/export vmselect endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1export
func (app *Vmselect) PrometheusAPIV1Export(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()
	exportURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/export", app.httpListenAddr, opts.getTenant())
	values := opts.asURLValues()
	values.Add("match[]", query)
	// format=promapi makes the response parseable as a query response.
	values.Add("format", "promapi")
	// The HTTP status code is deliberately ignored; parsing fails loudly instead.
	res, _ := app.cli.PostForm(t, exportURL, values)
	return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1ExportNative is a test helper function that performs the export of
// raw samples in native binary format by sending an HTTP POST request to
// /prometheus/api/v1/export/native vmselect endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1exportnative
func (app *Vmselect) PrometheusAPIV1ExportNative(t *testing.T, query string, opts QueryOpts) []byte {
	t.Helper()
	exportURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/export/native", app.httpListenAddr, opts.getTenant())
	values := opts.asURLValues()
	values.Add("match[]", query)
	values.Add("format", "promapi")
	// The raw response body is returned as-is; status code is not checked.
	res, _ := app.cli.PostForm(t, exportURL, values)
	return []byte(res)
}
// PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
// instant query by sending a HTTP POST request to /prometheus/api/v1/query
// vmselect endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1query
func (app *Vmselect) PrometheusAPIV1Query(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()
	queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query", app.httpListenAddr, opts.getTenant())
	// Query parameters (time, step, etc.) come from opts.
	values := opts.asURLValues()
	values.Add("query", query)
	res, _ := app.cli.PostForm(t, queryURL, values)
	return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1QueryRange is a test helper function that performs
// PromQL/MetricsQL range query by sending a HTTP POST request to
// /prometheus/api/v1/query_range vmselect endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1query_range
func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()
	queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query_range", app.httpListenAddr, opts.getTenant())
	// Range parameters (start, end, step) come from opts.
	values := opts.asURLValues()
	values.Add("query", query)
	res, _ := app.cli.PostForm(t, queryURL, values)
	return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1Series sends a query to a /prometheus/api/v1/series endpoint
// and returns the list of time series that match the query.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1series
func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse {
	t.Helper()
	seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series", app.httpListenAddr, opts.getTenant())
	values := opts.asURLValues()
	values.Add("match[]", matchQuery)
	// Status code is not checked; response parsing reports malformed replies.
	res, _ := app.cli.PostForm(t, seriesURL, values)
	return NewPrometheusAPIV1SeriesResponse(t, res)
}
// PrometheusAPIV1SeriesCount sends a query to a /prometheus/api/v1/series/count endpoint
// and returns the total number of time series.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1series
func (app *Vmselect) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *PrometheusAPIV1SeriesCountResponse {
	t.Helper()
	seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series/count", app.httpListenAddr, opts.getTenant())
	// No match filter: the endpoint counts all series for the tenant.
	values := opts.asURLValues()
	res, _ := app.cli.PostForm(t, seriesURL, values)
	return NewPrometheusAPIV1SeriesCountResponse(t, res)
}
// PrometheusAPIV1Labels sends a query to a /prometheus/api/v1/labels endpoint
// and returns the label names list of time series that match the query.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1labels
func (app *Vmselect) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1LabelsResponse {
	t.Helper()
	values := opts.asURLValues()
	values.Add("match[]", matchQuery)
	queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/labels", app.httpListenAddr, opts.getTenant())
	res, _ := app.cli.PostForm(t, queryURL, values)
	return NewPrometheusAPIV1LabelsResponse(t, res)
}
// PrometheusAPIV1LabelValues sends a query to a /prometheus/api/v1/label/.../values endpoint
// and returns the values of labelName across time series that match the query.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1labelvalues
func (app *Vmselect) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQuery string, opts QueryOpts) *PrometheusAPIV1LabelValuesResponse {
	t.Helper()
	values := opts.asURLValues()
	values.Add("match[]", matchQuery)
	// labelName is interpolated into the URL path, not passed as a form value.
	queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/label/%s/values", app.httpListenAddr, opts.getTenant(), labelName)
	res, _ := app.cli.PostForm(t, queryURL, values)
	return NewPrometheusAPIV1LabelValuesResponse(t, res)
}
// PrometheusAPIV1Metadata sends a query to a /prometheus/api/v1/metadata endpoint
// and returns the results. metric filters by metric name; limit caps the
// number of returned entries.
func (app *Vmselect) PrometheusAPIV1Metadata(t *testing.T, metric string, limit int, opts QueryOpts) *PrometheusAPIV1Metadata {
	t.Helper()
	values := opts.asURLValues()
	values.Add("metric", metric)
	values.Add("limit", strconv.Itoa(limit))
	queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/metadata", app.httpListenAddr, opts.getTenant())
	res, _ := app.cli.PostForm(t, queryURL, values)
	return NewPrometheusAPIV1Metadata(t, res)
}
// APIV1AdminTSDBDeleteSeries deletes the series that match the query by sending
// a request to /api/v1/admin/tsdb/delete_series.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1admintsdbdelete_series
func (app *Vmselect) APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string, opts QueryOpts) {
	t.Helper()
	// Deletion goes through the /delete/<tenant>/... URL prefix.
	queryURL := fmt.Sprintf("http://%s/delete/%s/prometheus/api/v1/admin/tsdb/delete_series", app.httpListenAddr, opts.getTenant())
	values := opts.asURLValues()
	values.Add("match[]", matchQuery)
	res, statusCode := app.cli.PostForm(t, queryURL, values)
	if statusCode != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
	}
}
// MetricNamesStats sends a query to a /select/tenant/prometheus/api/v1/status/metric_names_stats endpoint
// and returns the statistics response for given params.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#track-ingested-metrics-usage
func (app *Vmselect) MetricNamesStats(t *testing.T, limit, le, matchPattern string, opts QueryOpts) MetricNamesStatsResponse {
	t.Helper()
	values := opts.asURLValues()
	// All three filters are passed even when empty; the server treats empty
	// values as "no filter".
	values.Add("limit", limit)
	values.Add("le", le)
	values.Add("match_pattern", matchPattern)
	queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/status/metric_names_stats", app.httpListenAddr, opts.getTenant())
	res, statusCode := app.cli.PostForm(t, queryURL, values)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
	}
	var resp MetricNamesStatsResponse
	if err := json.Unmarshal([]byte(res), &resp); err != nil {
		t.Fatalf("could not unmarshal series response data:\n%s\n err: %v", res, err)
	}
	return resp
}
// MetricNamesStatsReset sends a query to a /admin/api/v1/status/metric_names_stats/reset endpoint
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#track-ingested-metrics-usage
func (app *Vmselect) MetricNamesStatsReset(t *testing.T, opts QueryOpts) {
	t.Helper()
	values := opts.asURLValues()
	// Reset is a tenant-less admin endpoint; opts contributes only form values.
	queryURL := fmt.Sprintf("http://%s/admin/api/v1/admin/status/metric_names_stats/reset", app.httpListenAddr)
	res, statusCode := app.cli.PostForm(t, queryURL, values)
	if statusCode != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusNoContent, res)
	}
}
// APIV1StatusTSDB sends a query to a /prometheus/api/v1/status/tsdb
// endpoint and returns the parsed, deterministically-sorted TSDB stats.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#tsdb-stats
func (app *Vmselect) APIV1StatusTSDB(t *testing.T, matchQuery string, date string, topN string, opts QueryOpts) TSDBStatusResponse {
	t.Helper()
	seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/status/tsdb", app.httpListenAddr, opts.getTenant())
	values := opts.asURLValues()
	// Skip empty optional parameters so the server applies its defaults.
	addNonEmpty := func(name, value string) {
		if len(value) == 0 {
			return
		}
		values.Add(name, value)
	}
	addNonEmpty("match[]", matchQuery)
	addNonEmpty("topN", topN)
	addNonEmpty("date", date)
	res, statusCode := app.cli.PostForm(t, seriesURL, values)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
	}
	var status TSDBStatusResponse
	if err := json.Unmarshal([]byte(res), &status); err != nil {
		t.Fatalf("could not unmarshal tsdb status response data:\n%s\n err: %v", res, err)
	}
	// Sort for stable comparisons in tests.
	status.Sort()
	return status
}
// GraphiteMetricsIndex sends a query to a /graphite/metrics/index.json
// endpoint and returns the parsed metrics index.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#metrics-api
func (app *Vmselect) GraphiteMetricsIndex(t *testing.T, opts QueryOpts) GraphiteMetricsIndexResponse {
	t.Helper()
	seriesURL := fmt.Sprintf("http://%s/select/%s/graphite/metrics/index.json", app.httpListenAddr, opts.getTenant())
	res, statusCode := app.cli.Get(t, seriesURL)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
	}
	var index GraphiteMetricsIndexResponse
	if err := json.Unmarshal([]byte(res), &index); err != nil {
		t.Fatalf("could not unmarshal metrics index response data:\n%s\n err: %v", res, err)
	}
	return index
}
// GraphiteTagsTagSeries is a test helper function that registers Graphite tags
// for a single time series by sending a HTTP POST request to the
// /graphite/tags/tagSeries vmselect endpoint.
//
// The endpoint is expected to respond with 501 Not Implemented.
func (app *Vmselect) GraphiteTagsTagSeries(t *testing.T, record string, opts QueryOpts) {
	t.Helper()

	tagSeriesURL := fmt.Sprintf("http://%s/select/%s/graphite/tags/tagSeries", app.httpListenAddr, opts.getTenant())
	form := opts.asURLValues()
	form.Add("path", record)
	_, code := app.cli.PostForm(t, tagSeriesURL, form)
	if code != http.StatusNotImplemented {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNotImplemented)
	}
}
// GraphiteTagsTagMultiSeries is a test helper function that registers Graphite
// tags for multiple time series by sending a HTTP POST request to the
// /graphite/tags/tagMultiSeries vmselect endpoint.
//
// The endpoint is expected to respond with 501 Not Implemented.
func (app *Vmselect) GraphiteTagsTagMultiSeries(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	multiSeriesURL := fmt.Sprintf("http://%s/select/%s/graphite/tags/tagMultiSeries", app.httpListenAddr, opts.getTenant())
	form := opts.asURLValues()
	for _, record := range records {
		form.Add("path", record)
	}
	_, code := app.cli.PostForm(t, multiSeriesURL, form)
	if code != http.StatusNotImplemented {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNotImplemented)
	}
}
// APIV1AdminTenants sends a query to a /admin/tenants endpoint
// and returns the parsed list of tenants.
func (app *Vmselect) APIV1AdminTenants(t *testing.T) *AdminTenantsResponse {
	t.Helper()

	tenantsURL := fmt.Sprintf("http://%s/admin/tenants", app.httpListenAddr)
	res, statusCode := app.cli.Get(t, tenantsURL)
	if statusCode != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", statusCode, http.StatusOK, res)
	}
	// Unmarshal into an addressable value: the previous code passed a nil
	// *AdminTenantsResponse to json.Unmarshal, which always fails with
	// "json: Unmarshal(nil *AdminTenantsResponse)".
	var tenants AdminTenantsResponse
	if err := json.Unmarshal([]byte(res), &tenants); err != nil {
		t.Fatalf("could not unmarshal tenants response data:\n%s\n err: %v", res, err)
	}
	return &tenants
}
// String returns the string representation of the vmselect app state.
func (app *Vmselect) String() string {
return fmt.Sprintf("{app: %s httpListenAddr: %q}", app.app, app.httpListenAddr)

View File

@@ -1,25 +1,48 @@
package apptest
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"testing"
"time"
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
// Vmsingle holds the state of a vmsingle app and provides vmsingle-specific
// functions.
type Vmsingle struct {
*app
*metricsClient
*vmstorageClient
*vmselectClient
*vminsertClient
*ServesMetrics
storageDataPath string
httpListenAddr string
// vmstorage URLs.
forceFlushURL string
forceMergeURL string
// vminsert URLs.
influxLineWriteURL string
graphiteWriteAddr string
openTSDBHTTPURL string
prometheusAPIV1ImportPrometheusURL string
prometheusAPIV1WriteURL string
// vmselect URLs.
prometheusAPIV1ExportURL string
prometheusAPIV1ExportNativeURL string
prometheusAPIV1QueryURL string
prometheusAPIV1QueryRangeURL string
prometheusAPIV1SeriesURL string
}
// StartVmsingleAt starts an instance of vmsingle with the given flags. It also
@@ -46,39 +69,584 @@ func StartVmsingleAt(instance, binary string, flags []string, cli *Client, outpu
}
return &Vmsingle{
app: app,
metricsClient: newMetricsClient(cli, stderrExtracts[1]),
vmstorageClient: &vmstorageClient{
vmstorageCli: cli,
httpListenAddr: stderrExtracts[1],
},
vmselectClient: &vmselectClient{
vmselectCli: cli,
url: func(op, path string, opts QueryOpts) string {
return fmt.Sprintf("http://%s/%s", stderrExtracts[1], path)
},
metricNamesStatsResetURL: fmt.Sprintf("http://%s/api/v1/admin/status/metric_names_stats/reset", stderrExtracts[1]),
tenantsURL: "vmsingle-does-not-serve-tenants",
},
vminsertClient: &vminsertClient{
vminsertCli: cli,
url: func(_, path string, _ QueryOpts) string {
return fmt.Sprintf("http://%s/%s", stderrExtracts[1], path)
},
openTSDBURL: func(_, path string, _ QueryOpts) string {
return fmt.Sprintf("http://%s/%s", stderrExtracts[3], path)
},
graphiteListenAddr: stderrExtracts[2],
sendBlocking: func(t *testing.T, _ int, send func()) {
t.Helper()
send()
},
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[1]),
cli: cli,
},
storageDataPath: stderrExtracts[0],
httpListenAddr: stderrExtracts[1],
forceFlushURL: fmt.Sprintf("http://%s/internal/force_flush", stderrExtracts[1]),
forceMergeURL: fmt.Sprintf("http://%s/internal/force_merge", stderrExtracts[1]),
influxLineWriteURL: fmt.Sprintf("http://%s/influx/write", stderrExtracts[1]),
graphiteWriteAddr: stderrExtracts[2],
openTSDBHTTPURL: fmt.Sprintf("http://%s", stderrExtracts[3]),
prometheusAPIV1ImportPrometheusURL: fmt.Sprintf("http://%s/prometheus/api/v1/import/prometheus", stderrExtracts[1]),
prometheusAPIV1WriteURL: fmt.Sprintf("http://%s/prometheus/api/v1/write", stderrExtracts[1]),
prometheusAPIV1ExportURL: fmt.Sprintf("http://%s/prometheus/api/v1/export", stderrExtracts[1]),
prometheusAPIV1ExportNativeURL: fmt.Sprintf("http://%s/prometheus/api/v1/export/native", stderrExtracts[1]),
prometheusAPIV1QueryURL: fmt.Sprintf("http://%s/prometheus/api/v1/query", stderrExtracts[1]),
prometheusAPIV1QueryRangeURL: fmt.Sprintf("http://%s/prometheus/api/v1/query_range", stderrExtracts[1]),
prometheusAPIV1SeriesURL: fmt.Sprintf("http://%s/prometheus/api/v1/series", stderrExtracts[1]),
}, nil
}
// ForceFlush is a test helper function that forces the flushing of inserted
// data, so it becomes available for searching immediately.
func (app *Vmsingle) ForceFlush(t *testing.T) {
	t.Helper()

	if _, code := app.cli.Get(t, app.forceFlushURL); code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// ForceMerge is a test helper function that forces the merging of parts.
func (app *Vmsingle) ForceMerge(t *testing.T) {
	t.Helper()

	if _, code := app.cli.Get(t, app.forceMergeURL); code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// InfluxWrite is a test helper function that inserts a
// collection of records in Influx line format by sending a HTTP
// POST request to /influx/write vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#influxwrite
func (app *Vmsingle) InfluxWrite(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	writeURL := app.influxLineWriteURL
	if q := opts.asURLValues().Encode(); q != "" {
		writeURL += "?" + q
	}
	payload := []byte(strings.Join(records, "\n"))
	if _, code := app.cli.Post(t, writeURL, "text/plain", payload); code != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
	}
}
// GraphiteWrite is a test helper function that sends a collection of records
// to graphiteListenAddr port.
//
// The QueryOpts argument is accepted for interface parity with other write
// helpers but is not used by the Graphite write path.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting
func (app *Vmsingle) GraphiteWrite(t *testing.T, records []string, _ QueryOpts) {
	t.Helper()
	app.cli.Write(t, app.graphiteWriteAddr, records)
}
// PrometheusAPIV1ImportCSV is a test helper function that inserts a collection
// of records in CSV format for the given tenant by sending an HTTP POST
// request to /api/v1/import/csv vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-csv-data
func (app *Vmsingle) PrometheusAPIV1ImportCSV(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	importURL := fmt.Sprintf("http://%s/api/v1/import/csv", app.httpListenAddr)
	if q := opts.asURLValues().Encode(); q != "" {
		importURL += "?" + q
	}
	payload := []byte(strings.Join(records, "\n"))
	if _, code := app.cli.Post(t, importURL, "text/plain", payload); code != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
	}
}
// PrometheusAPIV1ImportNative is a test helper function that inserts a collection
// of records in native format for the given tenant by sending an HTTP POST
// request to /api/v1/import/native vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-native-format
func (app *Vmsingle) PrometheusAPIV1ImportNative(t *testing.T, data []byte, opts QueryOpts) {
	t.Helper()

	importURL := fmt.Sprintf("http://%s/api/v1/import/native", app.httpListenAddr)
	if q := opts.asURLValues().Encode(); q != "" {
		importURL += "?" + q
	}
	if _, code := app.cli.Post(t, importURL, "text/plain", data); code != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
	}
}
// OpenTSDBAPIPut is a test helper function that inserts a collection of
// records in OpenTSDB format for the given tenant by sending an HTTP POST
// request to /api/put vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-http
func (app *Vmsingle) OpenTSDBAPIPut(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	putURL := app.openTSDBHTTPURL + "/api/put"
	if q := opts.asURLValues().Encode(); q != "" {
		putURL += "?" + q
	}
	// The records are sent as a single JSON array.
	payload := []byte("[" + strings.Join(records, ",") + "]")
	if _, code := app.cli.Post(t, putURL, "text/plain", payload); code != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
	}
}
// PrometheusAPIV1Write is a test helper function that inserts a
// collection of records in Prometheus remote-write format by sending a HTTP
// POST request to /prometheus/api/v1/write vmsingle endpoint.
func (app *Vmsingle) PrometheusAPIV1Write(t *testing.T, wr prompb.WriteRequest, _ QueryOpts) {
	t.Helper()

	// The payload is a snappy-compressed protobuf-marshaled write request.
	payload := snappy.Encode(nil, wr.MarshalProtobuf(nil))
	if _, code := app.cli.Post(t, app.prometheusAPIV1WriteURL, "application/x-protobuf", payload); code != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
	}
}
// PrometheusAPIV1ImportPrometheus is a test helper function that inserts a
// collection of records in Prometheus text exposition format by sending a HTTP
// POST request to /prometheus/api/v1/import/prometheus vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1importprometheus
func (app *Vmsingle) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	importURL := app.prometheusAPIV1ImportPrometheusURL
	if q := opts.asURLValues().Encode(); q != "" {
		importURL += "?" + q
	}
	payload := []byte(strings.Join(records, "\n"))
	if _, code := app.cli.Post(t, importURL, "text/plain", payload); code != http.StatusNoContent {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNoContent)
	}
}
// PrometheusAPIV1Export is a test helper function that performs the export of
// raw samples in JSON line format by sending a HTTP POST request to
// /prometheus/api/v1/export vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1export
func (app *Vmsingle) PrometheusAPIV1Export(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()

	form := opts.asURLValues()
	form.Add("match[]", query)
	form.Add("format", "promapi")
	body, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportURL, form)
	return NewPrometheusAPIV1QueryResponse(t, body)
}
// PrometheusAPIV1ExportNative is a test helper function that performs the export of
// raw samples in native binary format by sending an HTTP POST request to
// /prometheus/api/v1/export/native vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1exportnative
func (app *Vmsingle) PrometheusAPIV1ExportNative(t *testing.T, query string, opts QueryOpts) []byte {
	// Fix: t.Helper() was called twice; one call is sufficient.
	t.Helper()

	values := opts.asURLValues()
	values.Add("match[]", query)
	values.Add("format", "promapi")
	res, _ := app.cli.PostForm(t, app.prometheusAPIV1ExportNativeURL, values)
	return []byte(res)
}
// PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
// instant query by sending a HTTP POST request to /prometheus/api/v1/query
// vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1query
func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()

	form := opts.asURLValues()
	form.Add("query", query)
	body, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, form)
	return NewPrometheusAPIV1QueryResponse(t, body)
}
// PrometheusAPIV1QueryRange is a test helper function that performs
// PromQL/MetricsQL range query by sending a HTTP POST request to
// /prometheus/api/v1/query_range vmsingle endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1query_range
func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
	t.Helper()

	form := opts.asURLValues()
	form.Add("query", query)
	body, _ := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, form)
	return NewPrometheusAPIV1QueryResponse(t, body)
}
// PrometheusAPIV1Series sends a query to a /prometheus/api/v1/series endpoint
// and returns the list of time series that match the query.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1series
func (app *Vmsingle) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse {
	t.Helper()

	form := opts.asURLValues()
	form.Add("match[]", matchQuery)
	body, _ := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, form)
	return NewPrometheusAPIV1SeriesResponse(t, body)
}
// PrometheusAPIV1SeriesCount sends a query to a /prometheus/api/v1/series/count endpoint
// and returns the total number of time series.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1series
func (app *Vmsingle) PrometheusAPIV1SeriesCount(t *testing.T, opts QueryOpts) *PrometheusAPIV1SeriesCountResponse {
	t.Helper()

	countURL := fmt.Sprintf("http://%s/prometheus/api/v1/series/count", app.httpListenAddr)
	body, _ := app.cli.PostForm(t, countURL, opts.asURLValues())
	return NewPrometheusAPIV1SeriesCountResponse(t, body)
}
// PrometheusAPIV1Labels sends a query to a /prometheus/api/v1/labels endpoint
// and returns the label names list of time series that match the query.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1labels
func (app *Vmsingle) PrometheusAPIV1Labels(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1LabelsResponse {
	t.Helper()

	labelsURL := fmt.Sprintf("http://%s/prometheus/api/v1/labels", app.httpListenAddr)
	form := opts.asURLValues()
	form.Add("match[]", matchQuery)
	body, _ := app.cli.PostForm(t, labelsURL, form)
	return NewPrometheusAPIV1LabelsResponse(t, body)
}
// PrometheusAPIV1LabelValues sends a query to a /prometheus/api/v1/label/.../values endpoint
// and returns the label names list of time series that match the query.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1labelvalues
func (app *Vmsingle) PrometheusAPIV1LabelValues(t *testing.T, labelName, matchQuery string, opts QueryOpts) *PrometheusAPIV1LabelValuesResponse {
	t.Helper()

	valuesURL := fmt.Sprintf("http://%s/prometheus/api/v1/label/%s/values", app.httpListenAddr, labelName)
	form := opts.asURLValues()
	form.Add("match[]", matchQuery)
	body, _ := app.cli.PostForm(t, valuesURL, form)
	return NewPrometheusAPIV1LabelValuesResponse(t, body)
}
// PrometheusAPIV1Metadata sends a query to a /prometheus/api/v1/metadata endpoint
// and returns the results.
func (app *Vmsingle) PrometheusAPIV1Metadata(t *testing.T, metric string, limit int, opts QueryOpts) *PrometheusAPIV1Metadata {
	t.Helper()

	metadataURL := fmt.Sprintf("http://%s/prometheus/api/v1/metadata", app.httpListenAddr)
	form := opts.asURLValues()
	form.Add("metric", metric)
	form.Add("limit", strconv.Itoa(limit))
	body, _ := app.cli.PostForm(t, metadataURL, form)
	return NewPrometheusAPIV1Metadata(t, body)
}
// APIV1AdminTSDBDeleteSeries deletes the series that match the query by sending
// a request to /api/v1/admin/tsdb/delete_series.
//
// See https://docs.victoriametrics.com/victoriametrics/url-examples/#apiv1admintsdbdelete_series
func (app *Vmsingle) APIV1AdminTSDBDeleteSeries(t *testing.T, matchQuery string, opts QueryOpts) {
	t.Helper()

	deleteURL := fmt.Sprintf("http://%s/api/v1/admin/tsdb/delete_series", app.httpListenAddr)
	form := opts.asURLValues()
	form.Add("match[]", matchQuery)
	body, code := app.cli.PostForm(t, deleteURL, form)
	if got, want := code, http.StatusNoContent; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
}
// GraphiteMetricsIndex sends a query to a /metrics/index.json
// endpoint and returns the parsed metrics index.
//
// See https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#metrics-api
func (app *Vmsingle) GraphiteMetricsIndex(t *testing.T, _ QueryOpts) GraphiteMetricsIndexResponse {
	t.Helper()

	indexURL := fmt.Sprintf("http://%s/metrics/index.json", app.httpListenAddr)
	body, code := app.cli.Get(t, indexURL)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	var index GraphiteMetricsIndexResponse
	if err := json.Unmarshal([]byte(body), &index); err != nil {
		t.Fatalf("could not unmarshal metrics index response data:\n%s\n err: %v", body, err)
	}
	return index
}
// GraphiteTagsTagSeries is a test helper function that registers Graphite tags
// for a single time series by sending a HTTP POST request to
// /graphite/tags/tagSeries vmsingle endpoint.
//
// The endpoint is expected to respond with 501 Not Implemented.
func (app *Vmsingle) GraphiteTagsTagSeries(t *testing.T, record string, opts QueryOpts) {
	t.Helper()

	tagSeriesURL := fmt.Sprintf("http://%s/graphite/tags/tagSeries", app.httpListenAddr)
	form := opts.asURLValues()
	form.Add("path", record)
	_, code := app.cli.PostForm(t, tagSeriesURL, form)
	if code != http.StatusNotImplemented {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNotImplemented)
	}
}
// GraphiteTagsTagMultiSeries is a test helper function that registers Graphite
// tags for multiple time series by sending a HTTP POST request to
// /graphite/tags/tagMultiSeries vmsingle endpoint.
//
// The endpoint is expected to respond with 501 Not Implemented.
func (app *Vmsingle) GraphiteTagsTagMultiSeries(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	multiSeriesURL := fmt.Sprintf("http://%s/graphite/tags/tagMultiSeries", app.httpListenAddr)
	form := opts.asURLValues()
	for _, record := range records {
		form.Add("path", record)
	}
	_, code := app.cli.PostForm(t, multiSeriesURL, form)
	if code != http.StatusNotImplemented {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusNotImplemented)
	}
}
// APIV1StatusMetricNamesStats sends a query to a /api/v1/status/metric_names_stats endpoint
// and returns the statistics response for given params.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#track-ingested-metrics-usage
func (app *Vmsingle) APIV1StatusMetricNamesStats(t *testing.T, limit, le, matchPattern string, opts QueryOpts) MetricNamesStatsResponse {
	t.Helper()

	statsURL := fmt.Sprintf("http://%s/api/v1/status/metric_names_stats", app.httpListenAddr)
	form := opts.asURLValues()
	form.Add("limit", limit)
	form.Add("le", le)
	form.Add("match_pattern", matchPattern)
	body, code := app.cli.PostForm(t, statsURL, form)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	var stats MetricNamesStatsResponse
	if err := json.Unmarshal([]byte(body), &stats); err != nil {
		t.Fatalf("could not unmarshal metric names stats response data:\n%s\n err: %v", body, err)
	}
	return stats
}
// APIV1AdminStatusMetricNamesStatsReset sends a query to a /api/v1/admin/status/metric_names_stats/reset endpoint
// in order to reset the metric names usage statistics.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#track-ingested-metrics-usage
func (app *Vmsingle) APIV1AdminStatusMetricNamesStatsReset(t *testing.T, opts QueryOpts) {
	t.Helper()

	resetURL := fmt.Sprintf("http://%s/api/v1/admin/status/metric_names_stats/reset", app.httpListenAddr)
	body, code := app.cli.PostForm(t, resetURL, opts.asURLValues())
	if got, want := code, http.StatusNoContent; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
}
// SnapshotCreate creates a database snapshot by sending a query to the
// /snapshot/create endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmsingle) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
	t.Helper()

	body, code := app.cli.Post(t, app.SnapshotCreateURL(), "", nil)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &SnapshotCreateResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot create response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotCreateURL returns the URL for creating snapshots.
func (app *Vmsingle) SnapshotCreateURL() string {
	return "http://" + app.httpListenAddr + "/snapshot/create"
}
// APIV1AdminTSDBSnapshot creates a database snapshot by sending a query to the
// /api/v1/admin/tsdb/snapshot endpoint.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot.
func (app *Vmsingle) APIV1AdminTSDBSnapshot(t *testing.T) *APIV1AdminTSDBSnapshotResponse {
	t.Helper()

	snapshotURL := fmt.Sprintf("http://%s/api/v1/admin/tsdb/snapshot", app.httpListenAddr)
	body, code := app.cli.Post(t, snapshotURL, "", nil)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &APIV1AdminTSDBSnapshotResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal prometheus snapshot create response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotList lists existing database snapshots by sending a query to the
// /snapshot/list endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmsingle) SnapshotList(t *testing.T) *SnapshotListResponse {
	t.Helper()

	listURL := fmt.Sprintf("http://%s/snapshot/list", app.httpListenAddr)
	body, code := app.cli.Get(t, listURL)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &SnapshotListResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot list response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotDelete deletes a snapshot by sending a query to the
// /snapshot/delete endpoint.
//
// Both 200 OK and 500 Internal Server Error responses are accepted; the
// parsed response body tells the outcomes apart.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmsingle) SnapshotDelete(t *testing.T, snapshotName string) *SnapshotDeleteResponse {
	t.Helper()

	deleteURL := fmt.Sprintf("http://%s/snapshot/delete?snapshot=%s", app.httpListenAddr, snapshotName)
	body, code := app.cli.Delete(t, deleteURL)
	wantStatusCodes := map[int]bool{
		http.StatusOK:                  true,
		http.StatusInternalServerError: true,
	}
	if !wantStatusCodes[code] {
		t.Fatalf("unexpected status code: got %d, want %v, resp text=%q", code, wantStatusCodes, body)
	}
	res := &SnapshotDeleteResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot delete response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotDeleteAll deletes all snapshots by sending a query to the
// /snapshot/delete_all endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmsingle) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse {
	t.Helper()

	deleteAllURL := fmt.Sprintf("http://%s/snapshot/delete_all", app.httpListenAddr)
	body, code := app.cli.Get(t, deleteAllURL)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &SnapshotDeleteAllResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot delete all response: data=%q, err: %v", body, err)
	}
	return res
}
// APIV1StatusTSDB sends a query to a /prometheus/api/v1/status/tsdb
// endpoint and returns the parsed TSDB stats. Empty matchQuery, date and topN
// arguments are omitted from the request.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#tsdb-stats
func (app *Vmsingle) APIV1StatusTSDB(t *testing.T, matchQuery string, date string, topN string, opts QueryOpts) TSDBStatusResponse {
	t.Helper()

	values := opts.asURLValues()
	for _, p := range []struct{ name, value string }{
		{"match[]", matchQuery},
		{"topN", topN},
		{"date", date},
	} {
		if p.value != "" {
			values.Add(p.name, p.value)
		}
	}
	statusURL := fmt.Sprintf("http://%s/prometheus/api/v1/status/tsdb", app.httpListenAddr)
	body, code := app.cli.PostForm(t, statusURL, values)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	var status TSDBStatusResponse
	if err := json.Unmarshal([]byte(body), &status); err != nil {
		t.Fatalf("could not unmarshal tsdb status response data:\n%s\n err: %v", body, err)
	}
	// Sort for deterministic comparison in tests.
	status.Sort()
	return status
}
// ZabbixConnectorHistory is a test helper function that inserts a
// collection of records in zabbixconnector format by sending a HTTP
// POST request to /zabbixconnector/api/v1/history vmsingle endpoint.
func (app *Vmsingle) ZabbixConnectorHistory(t *testing.T, records []string, opts QueryOpts) {
	t.Helper()

	historyURL := fmt.Sprintf("http://%s/zabbixconnector/api/v1/history", app.httpListenAddr)
	if q := opts.asURLValues().Encode(); q != "" {
		historyURL += "?" + q
	}
	payload := []byte(strings.Join(records, "\n"))
	if _, code := app.cli.Post(t, historyURL, "application/json", payload); code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// HTTPAddr returns the address at which the vminsert process is
// listening for incoming HTTP requests.
func (app *Vmsingle) HTTPAddr() string {

View File

@@ -1,10 +1,13 @@
package apptest
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"regexp"
"testing"
"time"
)
@@ -12,8 +15,7 @@ import (
// functions.
type Vmstorage struct {
*app
*metricsClient
*vmstorageClient
*ServesMetrics
storageDataPath string
httpListenAddr string
@@ -45,11 +47,10 @@ func StartVmstorageAt(instance, binary string, flags []string, cli *Client, outp
}
return &Vmstorage{
app: app,
metricsClient: newMetricsClient(cli, stderrExtracts[1]),
vmstorageClient: &vmstorageClient{
vmstorageCli: cli,
httpListenAddr: stderrExtracts[1],
app: app,
ServesMetrics: &ServesMetrics{
metricsURL: fmt.Sprintf("http://%s/metrics", stderrExtracts[1]),
cli: cli,
},
storageDataPath: stderrExtracts[0],
httpListenAddr: stderrExtracts[1],
@@ -70,6 +71,121 @@ func (app *Vmstorage) VmselectAddr() string {
return app.vmselectAddr
}
// ForceFlush is a test helper function that forces the flushing of inserted
// data, so it becomes available for searching immediately.
func (app *Vmstorage) ForceFlush(t *testing.T) {
	t.Helper()

	flushURL := fmt.Sprintf("http://%s/internal/force_flush", app.httpListenAddr)
	if _, code := app.cli.Get(t, flushURL); code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// ForceMerge is a test helper function that forces the merging of parts.
func (app *Vmstorage) ForceMerge(t *testing.T) {
	t.Helper()

	mergeURL := fmt.Sprintf("http://%s/internal/force_merge", app.httpListenAddr)
	if _, code := app.cli.Get(t, mergeURL); code != http.StatusOK {
		t.Fatalf("unexpected status code: got %d, want %d", code, http.StatusOK)
	}
}
// SnapshotCreate creates a database snapshot by sending a query to the
// /snapshot/create endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmstorage) SnapshotCreate(t *testing.T) *SnapshotCreateResponse {
	t.Helper()

	body, code := app.cli.Post(t, app.SnapshotCreateURL(), "", nil)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &SnapshotCreateResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot create response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotCreateURL returns the URL for creating snapshots.
func (app *Vmstorage) SnapshotCreateURL() string {
	return "http://" + app.httpListenAddr + "/snapshot/create"
}
// SnapshotList lists existing database snapshots by sending a query to the
// /snapshot/list endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmstorage) SnapshotList(t *testing.T) *SnapshotListResponse {
	t.Helper()

	listURL := fmt.Sprintf("http://%s/snapshot/list", app.httpListenAddr)
	body, code := app.cli.Get(t, listURL)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &SnapshotListResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot list response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotDelete deletes a snapshot by sending a query to the
// /snapshot/delete endpoint.
//
// Both 200 OK and 500 Internal Server Error responses are accepted; the
// parsed response body tells the outcomes apart.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmstorage) SnapshotDelete(t *testing.T, snapshotName string) *SnapshotDeleteResponse {
	t.Helper()

	deleteURL := fmt.Sprintf("http://%s/snapshot/delete?snapshot=%s", app.httpListenAddr, snapshotName)
	body, code := app.cli.Delete(t, deleteURL)
	wantStatusCodes := map[int]bool{
		http.StatusOK:                  true,
		http.StatusInternalServerError: true,
	}
	if !wantStatusCodes[code] {
		t.Fatalf("unexpected status code: got %d, want %v, resp text=%q", code, wantStatusCodes, body)
	}
	res := &SnapshotDeleteResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot delete response: data=%q, err: %v", body, err)
	}
	return res
}
// SnapshotDeleteAll deletes all snapshots by sending a query to the
// /snapshot/delete_all endpoint.
//
// See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-work-with-snapshots
func (app *Vmstorage) SnapshotDeleteAll(t *testing.T) *SnapshotDeleteAllResponse {
	t.Helper()

	deleteAllURL := fmt.Sprintf("http://%s/snapshot/delete_all", app.httpListenAddr)
	body, code := app.cli.Post(t, deleteAllURL, "", nil)
	if got, want := code, http.StatusOK; got != want {
		t.Fatalf("unexpected status code: got %d, want %d, resp text=%q", got, want, body)
	}
	res := &SnapshotDeleteAllResponse{}
	if err := json.Unmarshal([]byte(body), res); err != nil {
		t.Fatalf("could not unmarshal snapshot delete all response: data=%q, err: %v", body, err)
	}
	return res
}
// String returns the string representation of the vmstorage app state.
func (app *Vmstorage) String() string {
return fmt.Sprintf("{app: %s storageDataPath: %q httpListenAddr: %q vminsertAddr: %q vmselectAddr: %q}", []any{

View File

@@ -9093,20 +9093,18 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the rate of data rows ingested from write requests. There are two kinds of rows:\n1. Raw sample: each sample consists of a value and a timestamp, see https://docs.victoriametrics.com/victoriametrics/keyconcepts/#raw-samples. \n2. Metric metadata: refers to descriptive information about metrics. It can be disabled by setting `-enableMetadata=false`, see https://docs.victoriametrics.com/victoriametrics/vmagent/#metric-metadata.",
"description": "* `*` - unsupported query path\n* `/write` - insert into VM\n* `/metrics` - query VM system metrics\n* `/query` - query instant values\n* `/query_range` - query over a range of time\n* `/series` - match a certain label set\n* `/label/{}/values` - query a list of label values (variables mostly)",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9115,7 +9113,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9123,7 +9120,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9133,7 +9129,6 @@
"mode": "off"
}
},
"decimals": 2,
"links": [],
"mappings": [],
"min": 0,
@@ -9141,8 +9136,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9158,9 +9152,9 @@
"h": 8,
"w": 12,
"x": 0,
"y": 43
"y": 8385
},
"id": 227,
"id": 97,
"options": {
"legend": {
"calcs": [
@@ -9175,12 +9169,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9188,30 +9181,15 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_rows_inserted_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) by (type) > 0 ",
"expr": "sum(rate(vm_http_requests_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) by (path) > 0",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "sample: {{type}}",
"legendFormat": "__auto",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_metadata_rows_inserted_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) by (type) > 0 ",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "metadata: {{type}}",
"range": true,
"refId": "B"
}
],
"title": "Rows rate ($instance)",
"title": "Requests rate ($instance)",
"type": "timeseries"
},
{
@@ -9226,13 +9204,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -9241,7 +9217,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9249,7 +9224,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9267,8 +9241,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9304,7 +9277,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 43
"y": 8385
},
"id": 99,
"options": {
@@ -9321,12 +9294,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9364,20 +9336,17 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "* `*` - unsupported query path\n* `/write` - insert into VM\n* `/metrics` - query VM system metrics\n* `/query` - query instant values\n* `/query_range` - query over a range of time\n* `/series` - match a certain label set\n* `/label/{}/values` - query a list of label values (variables mostly)",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9386,7 +9355,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9394,116 +9362,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"links": [],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 51
},
"id": 97,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_http_requests_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) by (path) > 0",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Requests rate ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9526,8 +9384,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9542,8 +9399,8 @@
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 51
"x": 0,
"y": 8393
},
"id": 185,
"options": {
@@ -9560,12 +9417,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9591,6 +9447,7 @@
"exemplar": true,
"expr": "min(\n rate(process_cpu_seconds_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "min",
@@ -9606,6 +9463,7 @@
"exemplar": true,
"expr": "quantile(0.5,\n rate(process_cpu_seconds_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "median",
@@ -9628,13 +9486,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9643,7 +9499,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9651,7 +9506,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9674,8 +9528,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9690,8 +9543,8 @@
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 59
"x": 12,
"y": 8393
},
"id": 187,
"options": {
@@ -9708,12 +9561,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9739,6 +9591,7 @@
"exemplar": true,
"expr": "min(\n max_over_time(process_resident_memory_anon_bytes{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "min",
@@ -9754,6 +9607,7 @@
"exemplar": true,
"expr": "quantile(0.5,\n max_over_time(process_resident_memory_anon_bytes{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "median",
@@ -9764,115 +9618,6 @@
"title": "Memory (anon) usage % ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows when vmstorage node is unreachable for vminsert.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"links": [],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 59
},
"id": 114,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "vm_rpc_vmstorage_is_reachable{job=~\"$job\", instance=~\"$instance\"} != 1",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{instance}} => {{addr}}",
"range": true,
"refId": "A"
}
],
"title": "Storage reachability ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
@@ -9885,13 +9630,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9900,7 +9643,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9908,7 +9650,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9926,8 +9667,7 @@
"mode": "absolute",
"steps": [
{
"color": "transparent",
"value": 0
"color": "transparent"
},
{
"color": "red",
@@ -9943,7 +9683,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 67
"y": 8401
},
"id": 139,
"options": {
@@ -9959,12 +9699,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9990,20 +9729,18 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows network usage between vminserts and vmstorages.",
"description": "Shows when vmstorage node is unreachable for vminsert.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -10012,7 +9749,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -10020,7 +9756,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -10030,14 +9765,15 @@
"mode": "off"
}
},
"decimals": 0,
"links": [],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -10045,30 +9781,17 @@
}
]
},
"unit": "bps"
"unit": "short"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": "/read.*/"
},
"properties": [
{
"id": "custom.transform",
"value": "negative-Y"
}
]
}
]
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 67
"y": 8401
},
"id": 209,
"id": 114,
"options": {
"legend": {
"calcs": [
@@ -10077,17 +9800,14 @@
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -10095,28 +9815,16 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_read_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"expr": "vm_rpc_vmstorage_is_reachable{job=~\"$job\", instance=~\"$instance\"} != 1",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "read from vmstorage",
"legendFormat": "{{instance}} => {{addr}}",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_written_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "write to vmstorage",
"range": true,
"refId": "B"
}
],
"title": "Network usage: vmstorage ($instance)",
"title": "Storage reachability ($instance)",
"type": "timeseries"
},
{
@@ -10131,13 +9839,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -10146,7 +9852,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -10154,7 +9859,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -10170,8 +9874,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -10200,7 +9903,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 75
"y": 8409
},
"id": 208,
"options": {
@@ -10216,12 +9919,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -10244,6 +9946,7 @@
"editorMode": "code",
"expr": "sum(rate(vm_tcplistener_written_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "write to client",
"range": true,
@@ -10258,19 +9961,147 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows network usage between vminserts and vmstorages.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bps"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": "/read.*/"
},
"properties": [
{
"id": "custom.transform",
"value": "negative-Y"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8409
},
"id": 209,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
},
"tooltip": {
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_read_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "read from vmstorage",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_written_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "write to vmstorage",
"range": true,
"refId": "B"
}
],
"title": "Network usage: vmstorage ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -10279,7 +10110,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -10287,7 +10117,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -10305,8 +10134,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -10322,7 +10150,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 75
"y": 8417
},
"id": 88,
"options": {
@@ -10339,12 +10167,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {

View File

@@ -9094,20 +9094,18 @@
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows the rate of data rows ingested from write requests. There are two kinds of rows:\n1. Raw sample: each sample consists of a value and a timestamp, see https://docs.victoriametrics.com/victoriametrics/keyconcepts/#raw-samples. \n2. Metric metadata: refers to descriptive information about metrics. It can be disabled by setting `-enableMetadata=false`, see https://docs.victoriametrics.com/victoriametrics/vmagent/#metric-metadata.",
"description": "* `*` - unsupported query path\n* `/write` - insert into VM\n* `/metrics` - query VM system metrics\n* `/query` - query instant values\n* `/query_range` - query over a range of time\n* `/series` - match a certain label set\n* `/label/{}/values` - query a list of label values (variables mostly)",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9116,7 +9114,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9124,7 +9121,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9134,7 +9130,6 @@
"mode": "off"
}
},
"decimals": 2,
"links": [],
"mappings": [],
"min": 0,
@@ -9142,8 +9137,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9159,9 +9153,9 @@
"h": 8,
"w": 12,
"x": 0,
"y": 43
"y": 8385
},
"id": 227,
"id": 97,
"options": {
"legend": {
"calcs": [
@@ -9176,12 +9170,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9189,30 +9182,15 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_rows_inserted_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) by (type) > 0 ",
"expr": "sum(rate(vm_http_requests_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) by (path) > 0",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "sample: {{type}}",
"legendFormat": "__auto",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_metadata_rows_inserted_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) by (type) > 0 ",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "metadata: {{type}}",
"range": true,
"refId": "B"
}
],
"title": "Rows rate ($instance)",
"title": "Requests rate ($instance)",
"type": "timeseries"
},
{
@@ -9227,13 +9205,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
@@ -9242,7 +9218,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9250,7 +9225,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9268,8 +9242,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9305,7 +9278,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 43
"y": 8385
},
"id": 99,
"options": {
@@ -9322,12 +9295,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9365,20 +9337,17 @@
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "* `*` - unsupported query path\n* `/write` - insert into VM\n* `/metrics` - query VM system metrics\n* `/query` - query instant values\n* `/query_range` - query over a range of time\n* `/series` - match a certain label set\n* `/label/{}/values` - query a list of label values (variables mostly)",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9387,7 +9356,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9395,116 +9363,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"links": [],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 51
},
"id": 97,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_http_requests_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) by (path) > 0",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Requests rate ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9527,8 +9385,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9543,8 +9400,8 @@
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 51
"x": 0,
"y": 8393
},
"id": 185,
"options": {
@@ -9561,12 +9418,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9592,6 +9448,7 @@
"exemplar": true,
"expr": "min(\n rate(process_cpu_seconds_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "min",
@@ -9607,6 +9464,7 @@
"exemplar": true,
"expr": "quantile(0.5,\n rate(process_cpu_seconds_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "median",
@@ -9629,13 +9487,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9644,7 +9500,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9652,7 +9507,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9675,8 +9529,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -9691,8 +9544,8 @@
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 59
"x": 12,
"y": 8393
},
"id": 187,
"options": {
@@ -9709,12 +9562,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9740,6 +9592,7 @@
"exemplar": true,
"expr": "min(\n max_over_time(process_resident_memory_anon_bytes{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "min",
@@ -9755,6 +9608,7 @@
"exemplar": true,
"expr": "quantile(0.5,\n max_over_time(process_resident_memory_anon_bytes{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "median",
@@ -9765,115 +9619,6 @@
"title": "Memory (anon) usage % ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows when vmstorage node is unreachable for vminsert.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"links": [],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 59
},
"id": 114,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "vm_rpc_vmstorage_is_reachable{job=~\"$job\", instance=~\"$instance\"} != 1",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{instance}} => {{addr}}",
"range": true,
"refId": "A"
}
],
"title": "Storage reachability ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
@@ -9886,13 +9631,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -9901,7 +9644,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -9909,7 +9651,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -9927,8 +9668,7 @@
"mode": "absolute",
"steps": [
{
"color": "transparent",
"value": 0
"color": "transparent"
},
{
"color": "red",
@@ -9944,7 +9684,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 67
"y": 8401
},
"id": 139,
"options": {
@@ -9960,12 +9700,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -9991,20 +9730,18 @@
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows network usage between vminserts and vmstorages.",
"description": "Shows when vmstorage node is unreachable for vminsert.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -10013,7 +9750,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -10021,7 +9757,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -10031,14 +9766,15 @@
"mode": "off"
}
},
"decimals": 0,
"links": [],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -10046,30 +9782,17 @@
}
]
},
"unit": "bps"
"unit": "short"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": "/read.*/"
},
"properties": [
{
"id": "custom.transform",
"value": "negative-Y"
}
]
}
]
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 67
"y": 8401
},
"id": 209,
"id": 114,
"options": {
"legend": {
"calcs": [
@@ -10078,17 +9801,14 @@
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -10096,28 +9816,16 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_read_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"expr": "vm_rpc_vmstorage_is_reachable{job=~\"$job\", instance=~\"$instance\"} != 1",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "read from vmstorage",
"legendFormat": "{{instance}} => {{addr}}",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_written_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "write to vmstorage",
"range": true,
"refId": "B"
}
],
"title": "Network usage: vmstorage ($instance)",
"title": "Storage reachability ($instance)",
"type": "timeseries"
},
{
@@ -10132,13 +9840,11 @@
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -10147,7 +9853,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -10155,7 +9860,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -10171,8 +9875,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -10201,7 +9904,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 75
"y": 8409
},
"id": 208,
"options": {
@@ -10217,12 +9920,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
@@ -10245,6 +9947,7 @@
"editorMode": "code",
"expr": "sum(rate(vm_tcplistener_written_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "write to client",
"range": true,
@@ -10259,19 +9962,147 @@
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "Shows network usage between vminserts and vmstorages.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bps"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": "/read.*/"
},
"properties": [
{
"id": "custom.transform",
"value": "negative-Y"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8409
},
"id": 209,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true,
"sortBy": "Last *",
"sortDesc": true
},
"tooltip": {
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_read_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "read from vmstorage",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"editorMode": "code",
"expr": "sum(rate(vm_tcpdialer_written_bytes_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])) * 8 > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "write to vmstorage",
"range": true,
"refId": "B"
}
],
"title": "Network usage: vmstorage ($instance)",
"type": "timeseries"
},
{
"datasource": {
"type": "victoriametrics-metrics-datasource",
"uid": "$ds"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -10280,7 +10111,6 @@
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -10288,7 +10118,6 @@
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
@@ -10306,8 +10135,7 @@
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
"color": "green"
},
{
"color": "red",
@@ -10323,7 +10151,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 75
"y": 8417
},
"id": 88,
"options": {
@@ -10340,12 +10168,11 @@
"sortDesc": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "12.4.3",
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {

Some files were not shown because too many files have changed in this diff Show More