mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 16:59:40 +03:00

Compare commits: 1 commit, fs-paralle...debug-dock
Commit: 29858bc0bb

2  .github/ISSUE_TEMPLATE/question.yml  vendored
@@ -5,7 +5,7 @@ body:
  - type: textarea
    id: describe-the-component
    attributes:
      label: Is your question related to a specific component?
      label: Is your question request related to a specific component?
      placeholder: |
        VictoriaMetrics, vmagent, vmalert, vmui, etc...
    validations:

48  .github/scripts/lint-changelog-tip.sh  vendored

@@ -1,48 +0,0 @@
#!/usr/bin/env sh

set -e

CHANGELOG_FILE="docs/victoriametrics/changelog/CHANGELOG.md"

GITHUB_BASE_REF=${GITHUB_BASE_REF:-"master"}
GIT_REMOTE=${GIT_REMOTE:-"origin"}

git diff "${GIT_REMOTE}/${GITHUB_BASE_REF}"...HEAD -- $CHANGELOG_FILE > diff.txt
if ! grep -q "^+" diff.txt; then
  echo "No additions in CHANGELOG.md"
  exit 0
fi

ADDED_LINES=$(grep "^+\S" diff.txt | sed 's/^+//')

START_TIP=$(grep -n "^## tip" "$CHANGELOG_FILE" | head -1 | cut -d: -f1)
if [ -z "$START_TIP" ]; then
  echo "ERROR: ${CHANGELOG_FILE} does not contain a ## tip section"
  exit 1
fi

END_TIP=$(awk "NR>$START_TIP && /^## / {print NR; exit}" "${CHANGELOG_FILE}")
if [ -z "$END_TIP" ]; then
  END_TIP=$(wc -l < "$CHANGELOG_FILE")
fi

BAD=0
while IFS= read -r line; do
  # Grep exact line inside the file and get line numbers
  MATCHES=$(grep -n -F "$line" "$CHANGELOG_FILE" | cut -d: -f1)
  for m in $MATCHES; do
    if [ "$m" -lt "$START_TIP" ] || [ "$m" -gt "$END_TIP" ]; then
      echo "'$line' on line ${m} is outside ## tip section (lines ${START_TIP}-${END_TIP})"
      BAD=1
    fi
  done
done << EOF
$ADDED_LINES
EOF

if [ "$BAD" -ne 0 ]; then
  echo "CHANGELOG modifications must be placed inside the ## tip section."
  exit 1
fi

echo "CHANGELOG modifications are valid."
4  .github/workflows/build.yml  vendored

@@ -47,8 +47,6 @@ jobs:
            arch: arm
          - os: linux
            arch: ppc64le
          - os: linux
            arch: s390x
          - os: darwin
            arch: amd64
          - os: darwin
@@ -61,7 +59,7 @@ jobs:
            arch: amd64
    steps:
      - name: Code checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v5

      - name: Setup Go
        id: go

19  .github/workflows/changelog-linter.yml  vendored

@@ -1,19 +0,0 @@
name: 'changelog-linter'

on:
  pull_request:
    paths:
      - "docs/victoriametrics/changelog/CHANGELOG.md"

jobs:
  tip-lint:
    runs-on: 'ubuntu-latest'
    steps:
      - uses: 'actions/checkout@v6'
        with:
          # needed for proper diff
          fetch-depth: 0

      - name: 'Validate that changelog changes are under ## tip'
        run: |
          GITHUB_BASE_REF=${{ github.base_ref }} ./.github/scripts/lint-changelog-tip.sh

2  .github/workflows/check-commit-signed.yml  vendored

@@ -8,7 +8,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v6
        uses: actions/checkout@v5
        with:
          fetch-depth: 0 # we need full history for commit verification

2  .github/workflows/codeql-analysis-go.yml  vendored

@@ -29,7 +29,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        uses: actions/checkout@v5

      - name: Set up Go
        id: go

4  .github/workflows/docs.yaml  vendored
@@ -16,12 +16,12 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Code checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v5
        with:
          path: __vm

      - name: Checkout private code
        uses: actions/checkout@v6
        uses: actions/checkout@v5
        with:
          repository: VictoriaMetrics/vmdocs
          token: ${{ secrets.VM_BOT_GH_TOKEN }}

6  .github/workflows/test.yml  vendored
@@ -32,7 +32,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Code checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v5

      - name: Setup Go
        id: go
@@ -71,7 +71,7 @@ jobs:
    steps:
      - name: Code checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v5

      - name: Setup Go
        id: go
@@ -97,7 +97,7 @@ jobs:
    steps:
      - name: Code checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v5

      - name: Setup Go
        id: go

2  .github/workflows/vmui.yml  vendored
@@ -32,7 +32,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Code checkout
        uses: actions/checkout@v6
        uses: actions/checkout@v5

      - name: Setup Node
        uses: actions/setup-node@v6

42  Makefile
@@ -17,7 +17,7 @@ EXTRA_GO_BUILD_TAGS ?=
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
TAR_OWNERSHIP ?= --owner=1000 --group=1000

GOLANGCI_LINT_VERSION := 2.7.2
GOLANGCI_LINT_VERSION := 2.4.0

.PHONY: $(MAKECMDGOALS)

@@ -125,15 +125,6 @@ vmutils-linux-ppc64le: \
    vmrestore-linux-ppc64le \
    vmctl-linux-ppc64le

vmutils-linux-s390x: \
    vmagent-linux-s390x \
    vmalert-linux-s390x \
    vmalert-tool-linux-s390x \
    vmauth-linux-s390x \
    vmbackup-linux-s390x \
    vmrestore-linux-s390x \
    vmctl-linux-s390x

vmutils-darwin-amd64: \
    vmagent-darwin-amd64 \
    vmalert-darwin-amd64 \
@@ -266,7 +257,6 @@ release-victoria-metrics: \
    release-victoria-metrics-linux-amd64 \
    release-victoria-metrics-linux-arm \
    release-victoria-metrics-linux-arm64 \
    release-victoria-metrics-linux-s390x \
    release-victoria-metrics-darwin-amd64 \
    release-victoria-metrics-darwin-arm64 \
    release-victoria-metrics-freebsd-amd64 \
@@ -285,9 +275,6 @@ release-victoria-metrics-linux-arm:
release-victoria-metrics-linux-arm64:
    GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-linux-s390x:
    GOOS=linux GOARCH=s390x $(MAKE) release-victoria-metrics-goos-goarch

release-victoria-metrics-darwin-amd64:
    GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch

@@ -327,7 +314,6 @@ release-vmutils: \
    release-vmutils-linux-amd64 \
    release-vmutils-linux-arm64 \
    release-vmutils-linux-arm \
    release-vmutils-linux-s390x \
    release-vmutils-darwin-amd64 \
    release-vmutils-darwin-arm64 \
    release-vmutils-freebsd-amd64 \
@@ -346,9 +332,6 @@ release-vmutils-linux-arm64:
release-vmutils-linux-arm:
    GOOS=linux GOARCH=arm $(MAKE) release-vmutils-goos-goarch

release-vmutils-linux-s390x:
    GOOS=linux GOARCH=s390x $(MAKE) release-vmutils-goos-goarch

release-vmutils-darwin-amd64:
    GOOS=darwin GOARCH=amd64 $(MAKE) release-vmutils-goos-goarch

@@ -435,7 +418,7 @@ release-vmutils-windows-goarch: \
    vmctl-windows-$(GOARCH)-prod.exe

pprof-cpu:
    go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics $(PPROF_FILE)
    go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)

fmt:
    gofmt -l -w -s ./lib
@@ -471,23 +454,7 @@ integration-test:

apptest:
    $(MAKE) victoria-metrics vmagent vmalert vmauth vmctl vmbackup vmrestore
    go test ./apptest/... -skip="^Test(Cluster|Legacy).*"

integration-test-legacy: victoria-metrics vmbackup vmrestore
    OS=$$(uname | tr '[:upper:]' '[:lower:]'); \
    ARCH=$$(uname -m | tr '[:upper:]' '[:lower:]' | sed 's/x86_64/amd64/'); \
    VERSION=v1.132.0; \
    VMSINGLE=victoria-metrics-$${OS}-$${ARCH}-$${VERSION}.tar.gz; \
    VMCLUSTER=victoria-metrics-$${OS}-$${ARCH}-$${VERSION}-cluster.tar.gz; \
    URL=https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/$${VERSION}; \
    DIR=/tmp/$${VERSION}; \
    test -d $${DIR} || (mkdir $${DIR} && \
    curl --output-dir /tmp -LO $${URL}/$${VMSINGLE} && tar xzf /tmp/$${VMSINGLE} -C $${DIR} && \
    curl --output-dir /tmp -LO $${URL}/$${VMCLUSTER} && tar xzf /tmp/$${VMCLUSTER} -C $${DIR} \
    ); \
    VM_LEGACY_VMSINGLE_PATH=$${DIR}/victoria-metrics-prod \
    VM_LEGACY_VMSTORAGE_PATH=$${DIR}/vmstorage-prod \
    go test ./apptest/tests -run="^TestLegacySingle.*"
    go test ./apptest/... -skip="^TestCluster.*"

benchmark:
    GOEXPERIMENT=synctest go test -bench=. ./lib/...
@@ -516,8 +483,7 @@ app-local-windows-goarch:
    CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)

quicktemplate-gen: install-qtc
    qtc -dir=lib
    qtc -dir=app
    qtc

install-qtc:
    which qtc || go install github.com/valyala/quicktemplate/qtc@latest

@@ -27,9 +27,6 @@ victoria-metrics-linux-ppc64le-prod:
victoria-metrics-linux-386-prod:
    APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-386

victoria-metrics-linux-s390x-prod:
    APP_NAME=victoria-metrics $(MAKE) app-via-docker-linux-s390x

victoria-metrics-darwin-amd64-prod:
    APP_NAME=victoria-metrics $(MAKE) app-via-docker-darwin-amd64
@@ -10,11 +10,9 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
)

@@ -50,7 +48,6 @@ func selfScraper(scrapeInterval time.Duration) {
    var bb bytesutil.ByteBuffer
    var rows prometheus.Rows
    var metadataRows prometheus.MetadataRows
    var mrs []storage.MetricRow
    var labels []prompb.Label
    t := time.NewTicker(scrapeInterval)
@@ -60,12 +57,8 @@ func selfScraper(scrapeInterval time.Duration) {
    appmetrics.WritePrometheusMetrics(&bb)
    s := bytesutil.ToUnsafeString(bb.B)
    rows.Reset()
    // Parse metrics and optionally metadata when enabled
    if prommetadata.IsEnabled() {
        rows, metadataRows = prometheus.UnmarshalWithMetadata(rows, metadataRows, s, nil)
    } else {
        rows.UnmarshalWithErrLogger(s, nil)
    }
    // VictoriaMetrics components don't expose metadata yet, only need to parse samples
    rows.UnmarshalWithErrLogger(s, nil)
    mrs = mrs[:0]
    for i := range rows.Rows {
        r := &rows.Rows[i]
@@ -98,19 +91,6 @@ func selfScraper(scrapeInterval time.Duration) {
    if err := vmstorage.AddRows(mrs); err != nil {
        logger.Errorf("cannot store self-scraped metrics: %s", err)
    }
    if len(metadataRows.Rows) > 0 {
        mms := make([]metricsmetadata.Row, 0, len(metadataRows.Rows))
        for _, mm := range metadataRows.Rows {
            mms = append(mms, metricsmetadata.Row{
                MetricFamilyName: bytesutil.ToUnsafeBytes(mm.Metric),
                Help:             bytesutil.ToUnsafeBytes(mm.Help),
                Type:             mm.Type,
            })
        }
        if err := vmstorage.AddMetadataRows(mms); err != nil {
            logger.Errorf("cannot store self-scraped metrics metadata: %s", err)
        }
    }
}
for {
    select {
@@ -27,9 +27,6 @@ vmagent-linux-ppc64le-prod:
vmagent-linux-386-prod:
    APP_NAME=vmagent $(MAKE) app-via-docker-linux-386

vmagent-linux-s390x-prod:
    APP_NAME=vmagent $(MAKE) app-via-docker-linux-s390x

vmagent-darwin-amd64-prod:
    APP_NAME=vmagent $(MAKE) app-via-docker-darwin-amd64
@@ -27,7 +27,6 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/promremotewrite"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/vmimport"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/zabbixconnector"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@@ -75,7 +74,7 @@ var (
    "See also -opentsdbHTTPListenAddr.useProxyProtocol")
    opentsdbHTTPUseProxyProtocol = flag.Bool("opentsdbHTTPListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted "+
        "at -opentsdbHTTPListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt")
    configAuthKey = flagutil.NewPassword("configAuthKey", "Authorization key for accessing /config and /remotewrite-.*-config pages. It must be passed via authKey query arg. It overrides -httpAuth.*")
    configAuthKey = flagutil.NewPassword("configAuthKey", "Authorization key for accessing /config page. It must be passed via authKey query arg. It overrides -httpAuth.*")
    reloadAuthKey = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*")
    dryRun = flag.Bool("dryRun", false, "Whether to check config files without running vmagent. The following files are checked: "+
        "-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig, -remoteWrite.streamAggr.config . "+
@@ -112,6 +111,7 @@ func main() {
    flag.CommandLine.SetOutput(os.Stdout)
    flag.Usage = usage
    envflag.Parse()
    flagutil.ApplySecretFlags()
    remotewrite.InitSecretFlags()
    buildinfo.Init()
    logger.Init()
@@ -253,8 +253,6 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
    {"metric-relabel-debug", "debug metric relabeling"},
    {"api/v1/targets", "advanced information about discovered targets in JSON format"},
    {"config", "-promscrape.config contents"},
    {"remotewrite-relabel-config", "-remoteWrite.relabelConfig contents"},
    {"remotewrite-url-relabel-config", "-remoteWrite.urlRelabelConfig contents"},
    {"metrics", "available service metrics"},
    {"flags", "command-line flags"},
    {"-/reload", "reload configuration"},
@@ -351,17 +349,6 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
    }
    firehose.WriteSuccessResponse(w, r)
    return true
case "/zabbixconnector/api/v1/history":
    zabbixconnectorHistoryRequests.Inc()
    if err := zabbixconnector.InsertHandlerForHTTP(nil, r); err != nil {
        zabbixconnectorHistoryErrors.Inc()
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusBadRequest)
        fmt.Fprintf(w, `{"error":%q}`, err.Error())
        return true
    }
    w.WriteHeader(http.StatusOK)
    return true
case "/newrelic":
    newrelicCheckRequest.Inc()
    w.Header().Set("Content-Type", "application/json")
@@ -491,42 +478,6 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
    promscrape.WriteConfigData(&bb)
    fmt.Fprintf(w, `{"status":"success","data":{"yaml":%s}}`, stringsutil.JSONString(string(bb.B)))
    return true
case "/remotewrite-relabel-config":
    if !httpserver.CheckAuthFlag(w, r, configAuthKey) {
        return true
    }
    remoteWriteRelabelConfigRequests.Inc()
    w.Header().Set("Content-Type", "text/plain; charset=utf-8")
    remotewrite.WriteRelabelConfigData(w)
    return true
case "/api/v1/status/remotewrite-relabel-config":
    if !httpserver.CheckAuthFlag(w, r, configAuthKey) {
        return true
    }
    remoteWriteStatusRelabelConfigRequests.Inc()
    w.Header().Set("Content-Type", "application/json")
    var bb bytesutil.ByteBuffer
    remotewrite.WriteRelabelConfigData(&bb)
    fmt.Fprintf(w, `{"status":"success","data":{"yaml":%s}}`, stringsutil.JSONString(string(bb.B)))
    return true
case "/remotewrite-url-relabel-config":
    if !httpserver.CheckAuthFlag(w, r, configAuthKey) {
        return true
    }
    remoteWriteURLRelabelConfigRequests.Inc()
    w.Header().Set("Content-Type", "text/plain; charset=utf-8")
    remotewrite.WriteURLRelabelConfigData(w)
    return true
case "/api/v1/status/remotewrite-url-relabel-config":
    if !httpserver.CheckAuthFlag(w, r, configAuthKey) {
        return true
    }
    remoteWriteStatusURLRelabelConfigRequests.Inc()
    w.Header().Set("Content-Type", "application/json")
    var bb bytesutil.ByteBuffer
    remotewrite.WriteURLRelabelConfigData(&bb)
    fmt.Fprintf(w, `{"status":"success","data":{"yaml":%s}}`, stringsutil.JSONString(string(bb.B)))
    return true
case "/prometheus/-/reload", "/-/reload":
    if !httpserver.CheckAuthFlag(w, r, reloadAuthKey) {
        return true
@@ -656,17 +607,6 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
    }
    firehose.WriteSuccessResponse(w, r)
    return true
case "zabbixconnector/api/v1/history":
    zabbixconnectorHistoryRequests.Inc()
    if err := zabbixconnector.InsertHandlerForHTTP(at, r); err != nil {
        zabbixconnectorHistoryErrors.Inc()
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(http.StatusBadRequest)
        fmt.Fprintf(w, `{"error":%q}`, err.Error())
        return true
    }
    w.WriteHeader(http.StatusOK)
    return true
case "newrelic":
    newrelicCheckRequest.Inc()
    w.Header().Set("Content-Type", "application/json")
@@ -788,9 +728,6 @@ var (
    opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
    opentelemetryPushErrors   = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)

    zabbixconnectorHistoryRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/zabbixconnector/api/v1/history", protocol="zabbixconnector"}`)
    zabbixconnectorHistoryErrors   = metrics.NewCounter(`vmagent_http_request_errors_total{path="/zabbixconnector/api/v1/history", protocol="zabbixconnector"}`)

    newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
    newrelicWriteErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)

@@ -811,12 +748,6 @@ var (
    promscrapeConfigRequests       = metrics.NewCounter(`vmagent_http_requests_total{path="/config"}`)
    promscrapeStatusConfigRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/api/v1/status/config"}`)

    remoteWriteRelabelConfigRequests       = metrics.NewCounter(`vmagent_http_requests_total{path="/remotewrite-relabel-config"}`)
    remoteWriteStatusRelabelConfigRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/api/v1/status/remotewrite-relabel-config"}`)

    remoteWriteURLRelabelConfigRequests       = metrics.NewCounter(`vmagent_http_requests_total{path="/remotewrite-url-relabel-config"}`)
    remoteWriteStatusURLRelabelConfigRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/api/v1/status/remotewrite-url-relabel-config"}`)

    promscrapeConfigReloadRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/-/reload"}`)
)
@@ -78,7 +78,7 @@ func insertRows(at *auth.Token, rows []newrelic.Row, extraLabels []prompb.Label)
    if !remotewrite.TryPush(at, &ctx.WriteRequest) {
        return remotewrite.ErrQueueFullHTTPRetry
    }
    rowsInserted.Add(samplesCount)
    rowsInserted.Add(len(rows))
    if at != nil {
        rowsTenantInserted.Get(at).Add(samplesCount)
    }

@@ -25,7 +25,7 @@ var (
    rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="opentelemetry"}`)
)

// InsertHandlerForReader processes metrics from given reader.
// InsertHandler processes metrics from given reader.
func InsertHandlerForReader(at *auth.Token, r io.Reader, encoding string) error {
    return stream.ParseStream(r, encoding, nil, func(tss []prompb.TimeSeries, mms []prompb.MetricMetadata) error {
        return insertRows(at, tss, mms, nil)
@@ -15,6 +15,7 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/awsapi"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@@ -553,9 +554,9 @@ func getRetryDuration(retryAfterDuration, retryDuration, maxRetryDuration time.D
// For more details, see: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9417
func repackBlockFromZstdToSnappy(zstdBlock []byte) ([]byte, error) {
    plainBlock := make([]byte, 0, len(zstdBlock)*2)
    plainBlock, err := encoding.DecompressZSTD(plainBlock, zstdBlock)
    plainBlock, err := zstd.Decompress(plainBlock, zstdBlock)
    if err != nil {
        return nil, err
        return nil, fmt.Errorf("zstd: decompress: %s", err)
    }

    return snappy.Encode(nil, plainBlock), nil
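As background on this repack: the remote-write client decompresses a zstd block and re-encodes it as snappy before forwarding. A self-contained sketch of the same transformation using public libraries (github.com/klauspost/compress/zstd and github.com/golang/snappy stand in for the repository's internal lib/encoding/zstd wrapper):

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
	"github.com/klauspost/compress/zstd"
)

// repackZstdToSnappy decompresses a zstd-compressed block and re-encodes
// the plain bytes with snappy, mirroring the shape of the hunk above.
func repackZstdToSnappy(zstdBlock []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil)
	if err != nil {
		return nil, fmt.Errorf("zstd: init decoder: %w", err)
	}
	defer dec.Close()
	// Pre-sizing the destination to 2x the compressed length is a cheap
	// heuristic; DecodeAll grows the buffer as needed.
	plain, err := dec.DecodeAll(zstdBlock, make([]byte, 0, len(zstdBlock)*2))
	if err != nil {
		return nil, fmt.Errorf("zstd: decompress: %w", err)
	}
	return snappy.Encode(nil, plain), nil
}

func main() {
	enc, _ := zstd.NewWriter(nil)
	block := enc.EncodeAll([]byte("example remote-write payload"), nil)
	repacked, err := repackZstdToSnappy(block)
	if err != nil {
		panic(err)
	}
	plain, _ := snappy.Decode(nil, repacked)
	fmt.Printf("%s\n", plain)
}
```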
@@ -3,18 +3,15 @@ package remotewrite
import (
    "flag"
    "fmt"
    "io"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
    "go.yaml.in/yaml/v3"

    "github.com/VictoriaMetrics/metrics"
)
@@ -35,12 +32,9 @@ var (
    "See https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels")
)

var labelsGlobal []prompb.Label

var (
    labelsGlobal []prompb.Label

    remoteWriteRelabelConfigData    atomic.Pointer[[]byte]
    remoteWriteURLRelabelConfigData atomic.Pointer[[]interface{}]

    relabelConfigReloads      *metrics.Counter
    relabelConfigReloadErrors *metrics.Counter
    relabelConfigSuccess      *metrics.Gauge
@@ -73,42 +67,6 @@ func initRelabelConfigs() {
    }
}

// WriteRelabelConfigData writes -remoteWrite.relabelConfig contents to w
func WriteRelabelConfigData(w io.Writer) {
    p := remoteWriteRelabelConfigData.Load()
    if p == nil {
        // Nothing to write to w
        return
    }
    _, _ = w.Write(*p)
}

// WriteURLRelabelConfigData writes -remoteWrite.urlRelabelConfig contents to w
func WriteURLRelabelConfigData(w io.Writer) {
    p := remoteWriteURLRelabelConfigData.Load()
    if p == nil {
        // Nothing to write to w
        return
    }
    type urlRelabelCfg struct {
        Url           string      `yaml:"url"`
        RelabelConfig interface{} `yaml:"relabel_config"`
    }
    var cs []urlRelabelCfg
    for i, url := range *remoteWriteURLs {
        cfgData := (*p)[i]
        if !*showRemoteWriteURL {
            url = fmt.Sprintf("%d:secret-url", i+1)
        }
        cs = append(cs, urlRelabelCfg{
            Url:           url,
            RelabelConfig: cfgData,
        })
    }
    d, _ := yaml.Marshal(cs)
    _, _ = w.Write(d)
}

func reloadRelabelConfigs() {
    rcs := allRelabelConfigs.Load()
    if !rcs.isSet() {
@@ -132,42 +90,28 @@
func loadRelabelConfigs() (*relabelConfigs, error) {
    var rcs relabelConfigs
    if *relabelConfigPathGlobal != "" {
        global, rawCfg, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
        global, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
        if err != nil {
            return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %w", *relabelConfigPathGlobal, err)
        }
        remoteWriteRelabelConfigData.Store(&rawCfg)
        rcs.global = global
    }
    if len(*relabelConfigPaths) > len(*remoteWriteURLs) {
        return nil, fmt.Errorf("too many -remoteWrite.urlRelabelConfig args: %d; it mustn't exceed the number of -remoteWrite.url args: %d",
            len(*relabelConfigPaths), (len(*remoteWriteURLs)))
    }

    var urlRelabelCfgs []interface{}
    rcs.perURL = make([]*promrelabel.ParsedConfigs, len(*remoteWriteURLs))
    for i, path := range *relabelConfigPaths {
        if len(path) == 0 {
            urlRelabelCfgs = append(urlRelabelCfgs, nil)
            // Skip empty relabel config.
            continue
        }
        prc, rawCfg, err := promrelabel.LoadRelabelConfigs(path)
        prc, err := promrelabel.LoadRelabelConfigs(path)
        if err != nil {
            return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %w", path, err)
        }
        rcs.perURL[i] = prc

        var parsedCfg interface{}
        _ = yaml.Unmarshal(rawCfg, &parsedCfg)
        urlRelabelCfgs = append(urlRelabelCfgs, parsedCfg)
    }
    if len(*remoteWriteURLs) > len(*relabelConfigPaths) {
        // fill the urlRelabelCfgs with empty relabel configs if not set
        for i := len(*relabelConfigPaths); i < len(*remoteWriteURLs); i++ {
            urlRelabelCfgs = append(urlRelabelCfgs, nil)
        }
    }
    remoteWriteURLRelabelConfigData.Store(&urlRelabelCfgs)
    return &rcs, nil
}
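One detail worth noting in the removed code: the raw relabel configs are published through atomic.Pointer values, so HTTP handlers can read a consistent snapshot while reloads swap in fresh data. A generic sketch of that publish/read pattern (illustrative only, not the repository's API):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// configHolder publishes immutable snapshots of a parsed config.
// Writers Store a fresh pointer; readers Load whatever is current.
type configHolder[T any] struct {
	p atomic.Pointer[T]
}

func (h *configHolder[T]) Publish(cfg T) { h.p.Store(&cfg) }

func (h *configHolder[T]) Snapshot() (T, bool) {
	p := h.p.Load()
	if p == nil {
		var zero T
		return zero, false
	}
	return *p, true
}

func main() {
	var h configHolder[[]byte]
	if _, ok := h.Snapshot(); !ok {
		fmt.Println("no config loaded yet") // mirrors the nil check in WriteRelabelConfigData
	}
	h.Publish([]byte("raw yaml contents"))
	cfg, _ := h.Snapshot()
	fmt.Printf("%s\n", cfg)
}
```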
@@ -27,7 +27,6 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
    "github.com/VictoriaMetrics/metrics"
@@ -486,9 +485,6 @@ func tryPush(at *auth.Token, wr *prompb.WriteRequest, forceDropSamplesOnFailure
    matchIdxs.B = sas.Push(tssBlock, matchIdxs.B)
    if !*streamAggrGlobalKeepInput {
        tssBlock = dropAggregatedSeries(tssBlock, matchIdxs.B, *streamAggrGlobalDropInput)
    } else if *streamAggrGlobalDropInput {
        // if both keep_input and drop_input are true, we keep only the aggregated series
        tssBlock = dropUnaggregatedSeries(tssBlock, matchIdxs.B)
    }
    matchIdxsPool.Put(matchIdxs)
}
@@ -992,17 +988,7 @@ func (rwctx *remoteWriteCtx) TryPushTimeSeries(tss []prompb.TimeSeries, forceDro
    tss = append(*v, tss...)
    }
    tss = dropAggregatedSeries(tss, matchIdxs.B, rwctx.streamAggrDropInput)
    } else if rwctx.streamAggrDropInput {
        // if both keep_input and drop_input are true, we keep only the aggregated series
        if rctx == nil {
            rctx = getRelabelCtx()
            // Make a copy of tss before dropping aggregated series
            v = tssPool.Get().(*[]prompb.TimeSeries)
            tss = append(*v, tss...)
        }
        tss = dropUnaggregatedSeries(tss, matchIdxs.B)
    }

    matchIdxsPool.Put(matchIdxs)
}
if rwctx.deduplicator != nil {
@@ -1025,10 +1011,9 @@
    return false
}

var matchIdxsPool slicesutil.BufferPool[uint32]
var matchIdxsPool bytesutil.ByteBufferPool

// dropAggregatedSeries drops matched series, also the unmatched if dropInput is true.
func dropAggregatedSeries(src []prompb.TimeSeries, matchIdxs []uint32, dropInput bool) []prompb.TimeSeries {
func dropAggregatedSeries(src []prompb.TimeSeries, matchIdxs []byte, dropInput bool) []prompb.TimeSeries {
    dst := src[:0]
    if !dropInput {
        for i, match := range matchIdxs {
@@ -1043,20 +1028,6 @@ func dropAggregatedSeries(src []prompb.TimeSeries, matchIdxs []uint32, dropInput
    return dst
}

// dropUnaggregatedSeries drops unmatched series.
func dropUnaggregatedSeries(src []prompb.TimeSeries, matchIdxs []uint32) []prompb.TimeSeries {
    dst := src[:0]
    for i, match := range matchIdxs {
        if match == 0 {
            continue
        }
        dst = append(dst, src[i])
    }
    tail := src[len(dst):]
    clear(tail)
    return dst
}
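The removed dropUnaggregatedSeries illustrates a common Go idiom: filtering a slice in place over its own backing array, then clearing the tail so dropped elements don't pin memory. A standalone sketch of the idiom (generic, not tied to the prompb types):

```go
package main

import "fmt"

// filterInPlace keeps elements where keep[i] != 0, reusing src's backing
// array and clearing the tail so dropped values can be garbage-collected.
func filterInPlace[T any](src []T, keep []byte) []T {
	dst := src[:0]
	for i, k := range keep {
		if k == 0 {
			continue
		}
		dst = append(dst, src[i])
	}
	clear(src[len(dst):]) // zero the tail; the clear builtin requires Go 1.21+
	return dst
}

func main() {
	vals := []string{"a", "b", "c", "d"}
	fmt.Println(filterInPlace(vals, []byte{1, 0, 1, 0})) // [a c]
}
```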
func (rwctx *remoteWriteCtx) pushInternalTrackDropped(tss []prompb.TimeSeries) {
    if rwctx.tryPushTimeSeriesInternal(tss) {
        return

@@ -10,8 +10,6 @@ import (
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/consistenthash"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
@@ -59,8 +57,8 @@ func TestGetLabelsHash_Distribution(t *testing.T) {
    f(10)
}

func TestRemoteWriteContext_TryPushTimeSeries(t *testing.T) {
    f := func(streamAggrConfig, relabelConfig string, enableWindows bool, dedupInterval time.Duration, keepInput, dropInput bool, input string, expectedRowsPushedAfterRelabel, expectedPushedSample int) {
func TestRemoteWriteContext_TryPush_ImmutableTimeseries(t *testing.T) {
    f := func(streamAggrConfig, relabelConfig string, enableWindows bool, dedupInterval time.Duration, keepInput, dropInput bool, input string) {
        t.Helper()
        perURLRelabel, err := promrelabel.ParseRelabelConfigsData([]byte(relabelConfig))
        if err != nil {
@@ -73,16 +71,10 @@
        }
        allRelabelConfigs.Store(rcs)

        path := "fast-queue-write-test"
        fs.MustRemoveDir(path)
        fq := persistentqueue.MustOpenFastQueue(path, "test", 100, 0, false)
        defer fs.MustRemoveDir(path)
        defer fq.MustClose()

        pss := make([]*pendingSeries, 1)
        isVMProto := &atomic.Bool{}
        isVMProto.Store(true)
        pss[0] = newPendingSeries(fq, isVMProto, 0, 100)
        pss[0] = newPendingSeries(nil, isVMProto, 0, 100)
        rwctx := &remoteWriteCtx{
            idx:                 0,
            streamAggrKeepInput: keepInput,
@@ -91,8 +83,6 @@
            rowsPushedAfterRelabel: metrics.GetOrCreateCounter(`foo`),
            rowsDroppedByRelabel:   metrics.GetOrCreateCounter(`bar`),
        }
        defer metrics.UnregisterAllMetrics()

        if dedupInterval > 0 {
            rwctx.deduplicator = streamaggr.NewDeduplicator(nil, enableWindows, dedupInterval, nil, "dedup-global")
        }
@@ -114,27 +104,23 @@
        inputTss := prometheus.MustParsePromMetrics(input, offsetMsecs)
        expectedTss := make([]prompb.TimeSeries, len(inputTss))

        // check inputTss is not modified after TryPushTimeSeries
        // copy inputTss to make sure it is not mutated during TryPush call
        copy(expectedTss, inputTss)
        if !rwctx.TryPushTimeSeries(inputTss, false) {
            t.Fatalf("cannot push samples to rwctx")
        }

        if int(rwctx.rowsPushedAfterRelabel.Get()) != expectedRowsPushedAfterRelabel {
            t.Fatalf("unexpected number of rows after relabel; got %d; want %d", rwctx.rowsPushedAfterRelabel.Get(), expectedRowsPushedAfterRelabel)
        }

        if len(pss[0].wr.tss) != expectedPushedSample {
            t.Fatalf("unexpected number of pushed samples; got %d; want %d", len(pss[0].wr.tss), expectedPushedSample)
        }

        if !reflect.DeepEqual(expectedTss, inputTss) {
            t.Fatalf("unexpected samples;\ngot\n%v\nwant\n%v", inputTss, expectedTss)
        }
    }

    // relabeling
    f(``, `
    f(`
- interval: 1m
  outputs: [sum_samples]
- interval: 2m
  outputs: [count_series]
`, `
- action: keep
  source_labels: [env]
  regex: "dev"
@@ -143,66 +129,53 @@ metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`, 2, 2)

    // relabeling + aggregation
    f(`
- match: '{env="dev"}'
  interval: 1m
  outputs: [sum_samples]
`, `
- action: keep
  source_labels: [env]
  regex: ".*"
`, false, 0, false, false, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`, 4, 2)

    // aggregation + keepInput
    f(`
- match: '{env="dev"}'
  interval: 1m
  outputs: [sum_samples]
`, ``, false, 0, true, false, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`, 4, 4)

    // aggregation + dropInput
    f(`
- match: '{env="dev"}'
  interval: 1m
  outputs: [sum_samples]
`, ``, false, 0, false, true, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`, 4, 0)

    // aggregation + keepInput + dropInput
    f(`
- match: '{env="dev"}'
  interval: 1m
  outputs: [sum_samples]
`, ``, false, 0, true, true, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="bar"} 25
`, 3, 1)

    // aggregation + deduplication
`)
    f(``, ``, true, time.Hour, false, false, `
metric{env="dev"} 10
metric{env="foo"} 20
metric{env="dev"} 15
metric{env="foo"} 25
`, 4, 0)
`)
    f(``, `
- action: keep
  source_labels: [env]
  regex: "dev"
`, true, time.Hour, false, false, `
metric{env="dev"} 10
metric{env="bar"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`)
    f(``, `
- action: keep
  source_labels: [env]
  regex: "dev"
`, true, time.Hour, true, false, `
metric{env="test"} 10
metric{env="dev"} 20
metric{env="foo"} 15
metric{env="dev"} 25
`)
    f(``, `
- action: keep
  source_labels: [env]
  regex: "dev"
`, true, time.Hour, false, true, `
metric{env="foo"} 10
metric{env="dev"} 20
metric{env="foo"} 15
metric{env="dev"} 25
`)
    f(``, `
- action: keep
  source_labels: [env]
  regex: "dev"
`, true, time.Hour, true, true, `
metric{env="dev"} 10
metric{env="test"} 20
metric{env="dev"} 15
metric{env="bar"} 25
`)
}

func TestShardAmountRemoteWriteCtx(t *testing.T) {
@@ -18,12 +18,12 @@ var (
    streamAggrGlobalConfig = flag.String("streamAggr.config", "", "Optional path to file with stream aggregation config. "+
        "See https://docs.victoriametrics.com/victoriametrics/stream-aggregation/ . "+
        "See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval")
    streamAggrGlobalKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep input samples that match any rule in "+
        "-streamAggr.config. By default, matched raw samples are aggregated and dropped, while unmatched samples "+
        "are written to the remote storage. See also -streamAggr.dropInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
    streamAggrGlobalDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop input samples that not matching any rule in "+
        "-streamAggr.config. By default, only matched raw samples are dropped, while unmatched samples "+
        "are written to the remote storage. See also -streamAggr.keepInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
    streamAggrGlobalKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep all the input samples after the aggregation "+
        "with -streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
        "are written to remote storages write. See also -streamAggr.dropInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
    streamAggrGlobalDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation "+
        "with -remoteWrite.streamAggr.config. By default, only aggregates samples are dropped, while the remaining samples "+
        "are written to remote storages write. See also -streamAggr.keepInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
    streamAggrGlobalDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval on "+
        "aggregator before optional aggregation with -streamAggr.config . "+
        "See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/#deduplication")
@@ -43,11 +43,11 @@ var (
    streamAggrConfig = flagutil.NewArrayString("remoteWrite.streamAggr.config", "Optional path to file with stream aggregation config for the corresponding -remoteWrite.url. "+
        "See https://docs.victoriametrics.com/victoriametrics/stream-aggregation/ . "+
        "See also -remoteWrite.streamAggr.keepInput, -remoteWrite.streamAggr.dropInput and -remoteWrite.streamAggr.dedupInterval")
    streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop input samples that not matching any rule in "+
        "the corresponding -remoteWrite.streamAggr.config. By default, only matched raw samples are dropped, while unmatched samples "+
    streamAggrDropInput = flagutil.NewArrayBool("remoteWrite.streamAggr.dropInput", "Whether to drop all the input samples after the aggregation "+
        "with -remoteWrite.streamAggr.config at the corresponding -remoteWrite.url. By default, only aggregates samples are dropped, while the remaining samples "+
        "are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.keepInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
    streamAggrKeepInput = flagutil.NewArrayBool("remoteWrite.streamAggr.keepInput", "Whether to keep input samples that match any rule in "+
        "the corresponding -remoteWrite.streamAggr.config. By default, matched raw samples are aggregated and dropped, while unmatched samples "+
    streamAggrKeepInput = flagutil.NewArrayBool("remoteWrite.streamAggr.keepInput", "Whether to keep all the input samples after the aggregation "+
        "with -remoteWrite.streamAggr.config at the corresponding -remoteWrite.url. By default, only aggregates samples are dropped, while the remaining samples "+
        "are written to the corresponding -remoteWrite.url . See also -remoteWrite.streamAggr.dropInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
    streamAggrDedupInterval = flagutil.NewArrayDuration("remoteWrite.streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation "+
        "with -remoteWrite.streamAggr.config at the corresponding -remoteWrite.url. See also -dedup.minScrapeInterval and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/#deduplication")
@@ -1,80 +0,0 @@
package zabbixconnector

import (
    "net/http"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/zabbixconnector"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/zabbixconnector/stream"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
    "github.com/VictoriaMetrics/metrics"
)

var (
    rowsInserted       = metrics.NewCounter(`vmagent_rows_inserted_total{type="zabbixconnector"}`)
    rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="zabbixconnector"}`)
    rowsPerInsert      = metrics.NewHistogram(`vmagent_rows_per_insert{type="zabbixconnector"}`)
)

// InsertHandlerForHTTP processes remote write for ZabbixConnector POST /zabbixconnector/v1/history request.
func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
    extraLabels, err := protoparserutil.GetExtraLabels(req)
    if err != nil {
        return err
    }
    encoding := req.Header.Get("Content-Encoding")
    return stream.Parse(req.Body, encoding, func(rows []zabbixconnector.Row) error {
        return insertRows(at, rows, extraLabels)
    })
}

func insertRows(at *auth.Token, rows []zabbixconnector.Row, extraLabels []prompb.Label) error {
    ctx := common.GetPushCtx()
    defer common.PutPushCtx(ctx)

    rowsTotal := len(rows)
    tssDst := ctx.WriteRequest.Timeseries[:0]
    labels := ctx.Labels[:0]
    samples := ctx.Samples[:0]
    for i := range rows {
        r := &rows[i]

        labelsLen := len(labels)
        for j := range r.Tags {
            tag := &r.Tags[j]
            labels = append(labels, prompb.Label{
                Name:  bytesutil.ToUnsafeString(tag.Key),
                Value: bytesutil.ToUnsafeString(tag.Value),
            })
        }
        labels = append(labels, extraLabels...)

        samplesLen := len(samples)
        samples = append(samples, prompb.Sample{
            Value:     r.Value,
            Timestamp: r.Timestamp,
        })

        tssDst = append(tssDst, prompb.TimeSeries{
            Labels:  labels[labelsLen:],
            Samples: samples[samplesLen:],
        })
    }
    ctx.WriteRequest.Timeseries = tssDst
    ctx.Labels = labels
    ctx.Samples = samples
    if !remotewrite.TryPush(at, &ctx.WriteRequest) {
        return remotewrite.ErrQueueFullHTTPRetry
    }
    rowsInserted.Add(rowsTotal)
    if at != nil {
        rowsTenantInserted.Get(at).Add(rowsTotal)
    }
    rowsPerInsert.Update(float64(rowsTotal))
    return nil
}
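insertRows above appends all labels and samples into two shared slices and hands each time series sub-slices of them (labels[labelsLen:], samples[samplesLen:]), which avoids one allocation per series. A stripped-down sketch of that arena-style slicing with illustrative types (not the prompb ones):

```go
package main

import "fmt"

type label struct{ name, value string }

type series struct{ labels []label }

// buildSeries appends every row's labels into one shared arena slice and
// gives each series a window into it, so the growing arena is the main
// allocation. Note: if append reallocates, earlier windows keep pointing
// at the old backing array, which still holds their data — the same
// property the original code relies on.
func buildSeries(rows [][]label) []series {
	arena := make([]label, 0, 16)
	out := make([]series, 0, len(rows))
	for _, row := range rows {
		start := len(arena)
		arena = append(arena, row...)
		out = append(out, series{labels: arena[start:]})
	}
	return out
}

func main() {
	rows := [][]label{
		{{"env", "dev"}, {"job", "vmagent"}},
		{{"env", "prod"}},
	}
	for _, s := range buildSeries(rows) {
		fmt.Println(s.labels)
	}
}
```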
@@ -27,9 +27,6 @@ vmalert-tool-linux-ppc64le-prod:
vmalert-tool-linux-386-prod:
    APP_NAME=vmalert-tool $(MAKE) app-via-docker-linux-386

vmalert-tool-linux-s390x-prod:
    APP_NAME=vmalert-tool $(MAKE) app-via-docker-linux-s390x

vmalert-tool-darwin-amd64-prod:
    APP_NAME=vmalert-tool $(MAKE) app-via-docker-darwin-amd64
@@ -132,7 +132,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
    }
    labels[s[:n]] = s[n+1:]
}
err = notifier.Init(labels, externalURL)
_, err = notifier.Init(labels, externalURL)
if err != nil {
    logger.Fatalf("failed to init notifier: %v", err)
}
@@ -379,7 +379,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
    if len(g.Rules) == 0 {
        continue
    }
    errs := g.ExecOnce(context.Background(), rw, ts)
    errs := g.ExecOnce(context.Background(), func() []notifier.Notifier { return nil }, rw, ts)
    for err := range errs {
        if err != nil {
            checkErrs = append(checkErrs, fmt.Errorf("\nfailed to exec group: %q, time: %s, err: %w", g.Name,
@@ -27,9 +27,6 @@ vmalert-linux-ppc64le-prod:
vmalert-linux-386-prod:
    APP_NAME=vmalert $(MAKE) app-via-docker-linux-386

vmalert-linux-s390x-prod:
    APP_NAME=vmalert $(MAKE) app-via-docker-linux-s390x

vmalert-darwin-amd64-prod:
    APP_NAME=vmalert $(MAKE) app-via-docker-darwin-amd64
@@ -116,7 +116,7 @@ func TestParse_Failure(t *testing.T) {

    f([]string{"testdata/rules/rules_interval_bad.rules"}, "eval_offset should be smaller than interval")
    f([]string{"testdata/rules/rules0-bad.rules"}, "unexpected token")
    f([]string{"testdata/dir/rules0-bad.rules"}, "invalid annotations")
    f([]string{"testdata/dir/rules0-bad.rules"}, "error parsing annotation")
    f([]string{"testdata/dir/rules1-bad.rules"}, "duplicate in file")
    f([]string{"testdata/dir/rules2-bad.rules"}, "function \"unknown\" not defined")
    f([]string{"testdata/dir/rules3-bad.rules"}, "either `record` or `alert` must be set")
@@ -343,6 +343,7 @@ func TestGroupValidate_Failure(t *testing.T) {
        },
    },
}, true, "bad prometheus expr")

}

func TestGroupValidate_Success(t *testing.T) {
@@ -179,11 +179,11 @@ func (c *Client) Query(ctx context.Context, query string, ts time.Time) (Result,
    var parseFn func(resp *http.Response) (Result, error)
    switch c.dataSourceType {
    case datasourcePrometheus:
        parseFn = parsePrometheusInstantResponse
        parseFn = parsePrometheusResponse
    case datasourceGraphite:
        parseFn = parseGraphiteResponse
    case datasourceVLogs:
        parseFn = parseVLogsInstantResponse
        parseFn = parseVLogsResponse
    default:
        logger.Panicf("BUG: unsupported datasource type %q to parse query response", c.dataSourceType)
    }
@@ -239,9 +239,9 @@ func (c *Client) QueryRange(ctx context.Context, query string, start, end time.T
    var parseFn func(resp *http.Response) (Result, error)
    switch c.dataSourceType {
    case datasourcePrometheus:
        parseFn = parsePrometheusRangeResponse
        parseFn = parsePrometheusResponse
    case datasourceVLogs:
        parseFn = parseVLogsRangeResponse
        parseFn = parseVLogsResponse
    default:
        logger.Panicf("BUG: unsupported datasource type %q to parse query range response", c.dataSourceType)
    }

@@ -172,26 +172,17 @@ const (
    rtVector, rtMatrix, rScalar = "vector", "matrix", "scalar"
)

func parsePromResponse(resp *http.Response) (*promResponse, error) {
func parsePrometheusResponse(resp *http.Response) (res Result, err error) {
    r := &promResponse{}
    if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
        return nil, fmt.Errorf("failed to decode response: %w", err)
    if err = json.NewDecoder(resp.Body).Decode(r); err != nil {
        return res, fmt.Errorf("failed to decode response: %w", err)
    }
    if r.Status == statusError {
        return nil, fmt.Errorf("response error %q: %s", r.ErrorType, r.Error)
        return res, fmt.Errorf("response error %q: %s", r.ErrorType, r.Error)
    }
    if r.Status != statusSuccess {
        return nil, fmt.Errorf("unknown response status %q", r.Status)
        return res, fmt.Errorf("unknown response status %q", r.Status)
    }
    return r, nil
}

func parsePrometheusInstantResponse(resp *http.Response) (res Result, err error) {
    r, err := parsePromResponse(resp)
    if err != nil {
        return res, fmt.Errorf("failed to parse response: %w", err)
    }

    var parseFn func() ([]Metric, error)
    switch r.Data.ResultType {
    case rtVector:
@@ -200,6 +191,12 @@ func parsePrometheusInstantResponse(resp *http.Response) (res Result, err error)
        return res, fmt.Errorf("unmarshal err %w; \n %#v", err, string(r.Data.Result))
    }
    parseFn = pi.metrics
    case rtMatrix:
        var pr promRange
        if err := json.Unmarshal(r.Data.Result, &pr.Result); err != nil {
            return res, err
        }
        parseFn = pr.metrics
    case rScalar:
        var ps promScalar
        if err := json.Unmarshal(r.Data.Result, &ps); err != nil {
@@ -209,6 +206,7 @@ func parsePrometheusInstantResponse(resp *http.Response) (res Result, err error)
    default:
        return res, fmt.Errorf("unknown result type %q", r.Data.ResultType)
    }

    ms, err := parseFn()
    if err != nil {
        return res, err
@@ -224,34 +222,6 @@ func parsePrometheusInstantResponse(resp *http.Response) (res Result, err error)
    return res, nil
}

func parsePrometheusRangeResponse(resp *http.Response) (res Result, err error) {
    r, err := parsePromResponse(resp)
    if err != nil {
        return res, fmt.Errorf("failed to parse response: %w", err)
    }
    if r.Data.ResultType != rtMatrix {
        return res, fmt.Errorf("unexpected result type %q; expected result type %q", r.Data.ResultType, rtMatrix)
    }

    var pr promRange
    if err := json.Unmarshal(r.Data.Result, &pr.Result); err != nil {
        return res, err
    }
    ms, err := pr.metrics()
    if err != nil {
        return res, err
    }
    res = Result{Data: ms, IsPartial: r.IsPartial}
    if r.Stats.SeriesFetched != nil {
        intV, err := strconv.Atoi(*r.Stats.SeriesFetched)
        if err != nil {
            return res, fmt.Errorf("failed to convert stats.seriesFetched to int: %w", err)
        }
        res.SeriesFetched = &intV
    }
    return res, nil
}
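The net effect of this consolidation is that one parser now handles instant and range responses alike, switching on the resultType field instead of assuming vector (instant) or matrix (range). A tiny runnable Go example of that envelope-then-switch shape (hypothetical helper that only classifies the response, not the repository's full parser):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// promEnvelope mirrors the generic /api/v1/query* response shape that the
// unified parser above handles: one decoder, then a switch on resultType.
type promEnvelope struct {
	Status string `json:"status"`
	Data   struct {
		ResultType string          `json:"resultType"`
		Result     json.RawMessage `json:"result"`
	} `json:"data"`
}

func resultKind(body []byte) (string, error) {
	var r promEnvelope
	if err := json.Unmarshal(body, &r); err != nil {
		return "", fmt.Errorf("failed to decode response: %w", err)
	}
	if r.Status != "success" {
		return "", fmt.Errorf("unknown response status %q", r.Status)
	}
	switch r.Data.ResultType {
	case "vector", "matrix", "scalar":
		return r.Data.ResultType, nil
	default:
		return "", fmt.Errorf("unknown result type %q", r.Data.ResultType)
	}
}

func main() {
	body := []byte(`{"status":"success","data":{"resultType":"matrix","result":[]}}`)
	kind, err := resultKind(body)
	fmt.Println(kind, err) // matrix <nil>
}
```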
func (c *Client) setPrometheusInstantReqParams(r *http.Request, query string, timestamp time.Time) {
    if c.appendTypePrefix {
        r.URL.Path += "/prometheus"

@@ -65,23 +65,21 @@ func TestVMInstantQuery(t *testing.T) {
case 3:
    w.Write([]byte(`{"status":"unknown"}`))
case 4:
    w.Write([]byte(`{"status":"success","data":{"resultType":"vector"}}`))
    w.Write([]byte(`{"status":"success","data":{"resultType":"matrix"}}`))
case 5:
    w.Write([]byte(`{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"vm_rows"},"values":[[1583786142,"13763"]]}]}}`))
case 6:
    w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"vm_rows","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests","foo":"baz"},"value":[1583786140,"2000"]}]}}`))
case 7:
case 6:
    w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
case 8:
case 7:
    w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]},"stats":{"seriesFetched": "42"}}`))
case 9:
case 8:
    w.Write([]byte(`{"status":"success", "isPartial":true, "data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
}
})
mux.HandleFunc("/render", func(w http.ResponseWriter, _ *http.Request) {
    c++
    switch c {
    case 10:
    case 9:
        w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`))
    }
})
@@ -104,9 +102,9 @@
    t.Fatalf("failed to parse 'time' query param %q: %s", timeParam, err)
}
switch c {
case 11:
case 10:
    w.Write([]byte("[]"))
case 12:
case 11:
    w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"total","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"total","foo":"baz"},"value":[1583786140,"2000"]}]}}`))
}
})
@@ -125,7 +123,6 @@
ts := time.Now()

expErr := func(query, err string) {
    t.Helper()
    _, _, gotErr := pq.Query(ctx, query, ts)
    if gotErr == nil {
        t.Fatalf("expected %q got nil", err)
@@ -140,9 +137,8 @@
expErr(vmQuery, "response error")               // 2
expErr(vmQuery, "unknown response status")      // 3
expErr(vmQuery, "unexpected end of JSON input") // 4
expErr(vmQuery, "unknown result type") // 5

res, _, err := pq.Query(ctx, vmQuery, ts) // 6 - vector
res, _, err := pq.Query(ctx, vmQuery, ts) // 5 - vector
if err != nil {
    t.Fatalf("unexpected %s", err)
}
@@ -163,7 +159,7 @@
}
metricsEqual(t, res.Data, expected)

res, req, err := pq.Query(ctx, vmQuery, ts) // 7 - scalar
res, req, err := pq.Query(ctx, vmQuery, ts) // 6 - scalar
if err != nil {
    t.Fatalf("unexpected %s", err)
}
@@ -188,7 +184,7 @@
    res.SeriesFetched)
}

res, _, err = pq.Query(ctx, vmQuery, ts) // 8 - scalar with stats
res, _, err = pq.Query(ctx, vmQuery, ts) // 7 - scalar with stats
if err != nil {
    t.Fatalf("unexpected %s", err)
}
@@ -209,7 +205,7 @@
    *res.SeriesFetched)
}

res, _, err = pq.Query(ctx, vmQuery, ts) // 9
res, _, err = pq.Query(ctx, vmQuery, ts) // 8
if err != nil {
    t.Fatalf("unexpected %s", err)
}
@@ -220,7 +216,7 @@
// test graphite
gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})

res, _, err = gq.Query(ctx, queryRender, ts) // 10 - graphite
res, _, err = gq.Query(ctx, queryRender, ts) // 9 - graphite
if err != nil {
    t.Fatalf("unexpected %s", err)
}
@@ -240,9 +236,9 @@
vlogs := datasourceVLogs
pq = s.BuildWithParams(QuerierParams{DataSourceType: string(vlogs), EvaluationInterval: 15 * time.Second})

expErr(vlogsQuery, "error parsing response") // 11
expErr(vlogsQuery, "error parsing response") // 10

res, _, err = pq.Query(ctx, vlogsQuery, ts) // 12
res, _, err = pq.Query(ctx, vlogsQuery, ts) // 11
if err != nil {
    t.Fatalf("unexpected %s", err)
}
@@ -394,8 +390,6 @@ func TestVMRangeQuery(t *testing.T) {
switch c {
case 0:
    w.Write([]byte(`{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"vm_rows"},"values":[[1583786142,"13763"]]}]}}`))
case 1:
    w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[1583786142, "1"]}}`))
}
})
mux.HandleFunc("/select/logsql/stats_query_range", func(w http.ResponseWriter, r *http.Request) {
@@ -428,7 +422,7 @@
    t.Fatalf("expected 'step' query param to be 60s; got %q instead", step)
}
switch c {
case 2:
case 1:
    w.Write([]byte(`{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"total"},"values":[[1583786142,"10"]]}]}}`))
}
})
@@ -452,13 +446,13 @@

start, end := time.Now().Add(-time.Minute), time.Now()

res, err := pq.QueryRange(ctx, vmQuery, start, end) // case 0
res, err := pq.QueryRange(ctx, vmQuery, start, end)
if err != nil {
    t.Fatalf("unexpected %s", err)
}
m := res.Data
if len(m) != 1 {
    t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
    t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
}
expected := Metric{
    Labels: []prompb.Label{{Value: "vm_rows", Name: "__name__"}},
@@ -469,9 +463,6 @@
    t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
}

_, err = pq.QueryRange(ctx, vmQuery, start, end) // case 1
expectError(t, err, "unexpected result type")

// test unsupported graphite
gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})
@@ -40,28 +40,8 @@ func (c *Client) setVLogsRangeReqParams(r *http.Request, query string, start, en
c.setReqParams(r, query)
}

func parseVLogsInstantResponse(resp *http.Response) (res Result, err error) {
res, err = parsePrometheusInstantResponse(resp)
if err != nil {
return Result{}, err
}
for i := range res.Data {
m := &res.Data[i]
for j := range m.Labels {
// reserve the stats func result name with a new label `stats_result` instead of dropping it,
// since there could be multiple stats results in a single query, for instance:
// _time:5m | stats quantile(0.5, request_duration_seconds) p50, quantile(0.9, request_duration_seconds) p90
if m.Labels[j].Name == "__name__" {
m.Labels[j].Name = "stats_result"
break
}
}
}
return
}

func parseVLogsRangeResponse(resp *http.Response) (res Result, err error) {
res, err = parsePrometheusRangeResponse(resp)
func parseVLogsResponse(resp *http.Response) (res Result, err error) {
res, err = parsePrometheusResponse(resp)
if err != nil {
return Result{}, err
}

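The `stats_result` rename in the hunk above can be exercised in isolation. A minimal sketch, with hypothetical `Label`/`Metric` types standing in for the real `prompb.Label` and datasource result structs:

```go
package main

import "fmt"

// Label and Metric are simplified stand-ins for the types used by vmalert.
type Label struct{ Name, Value string }
type Metric struct{ Labels []Label }

// renameStatsResult mirrors the loop above: the `__name__` label produced by
// a VictoriaLogs stats function is kept under `stats_result` instead of being
// dropped, so several stats results from one query stay distinguishable.
func renameStatsResult(ms []Metric) {
	for i := range ms {
		for j := range ms[i].Labels {
			if ms[i].Labels[j].Name == "__name__" {
				ms[i].Labels[j].Name = "stats_result"
				break
			}
		}
	}
}

func main() {
	ms := []Metric{{Labels: []Label{{Name: "__name__", Value: "p50"}}}}
	renameStatsResult(ms)
	fmt.Println(ms[0].Labels[0]) // {stats_result p50}
}
```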
@@ -76,7 +76,7 @@ absolute path to all .tpl files in root.
`Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. `+
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
"In case of conflicts, original labels are kept with prefix 'exported_'.")
"In case of conflicts, original labels are kept with prefix `exported_`.")

dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.")
)
@@ -90,6 +90,7 @@ func main() {
flag.CommandLine.SetOutput(os.Stdout)
flag.Usage = usage
envflag.Parse()
flagutil.ApplySecretFlags()
remoteread.InitSecretFlags()
remotewrite.InitSecretFlags()
datasource.InitSecretFlags()
@@ -226,13 +227,14 @@ func newManager(ctx context.Context) (*manager, error) {
labels[s[:n]] = s[n+1:]
}

err = notifier.Init(labels, *externalURL)
nts, err := notifier.Init(labels, *externalURL)
if err != nil {
return nil, fmt.Errorf("failed to init notifier: %w", err)
}
manager := &manager{
groups: make(map[uint64]*rule.Group),
querierBuilder: q,
notifiers: nts,
labels: labels,
}
rw, err := remotewrite.Init(ctx)

@@ -96,10 +96,9 @@ groups:
querierBuilder: &datasource.FakeQuerier{},
groups: make(map[uint64]*rule.Group),
labels: map[string]string{},
notifiers: func() []notifier.Notifier { return []notifier.Notifier{&notifier.FakeNotifier{}} },
rw: &remotewrite.Client{},
}
_, cleanup := notifier.InitFakeNotifier()
defer cleanup()

syncCh := make(chan struct{})
sighupCh := procutil.NewSighupChan()

@@ -3,7 +3,6 @@ package main
import (
"context"
"fmt"
"strconv"
"sync"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
@@ -17,6 +16,7 @@ import (
// manager controls group states
type manager struct {
querierBuilder datasource.QuerierBuilder
notifiers func() []notifier.Notifier

rw remotewrite.RWClient
// remote read builder.
@@ -46,15 +46,13 @@ func (m *manager) ruleAPI(gID, rID uint64) (rule.ApiRule, error) {
m.groupsMu.RLock()
defer m.groupsMu.RUnlock()

group, ok := m.groups[gID]
g, ok := m.groups[gID]
if !ok {
return rule.ApiRule{}, fmt.Errorf("can't find group with id %d", gID)
}
g := group.ToAPI()
ruleID := strconv.FormatUint(rID, 10)
for _, r := range g.Rules {
if r.ID == ruleID {
return r, nil
if r.ID() == rID {
return r.ToAPI(), nil
}
}
return rule.ApiRule{}, fmt.Errorf("can't find rule with id %d in group %q", rID, g.Name)
@@ -65,20 +63,17 @@ func (m *manager) alertAPI(gID, aID uint64) (*rule.ApiAlert, error) {
m.groupsMu.RLock()
defer m.groupsMu.RUnlock()

group, ok := m.groups[gID]
g, ok := m.groups[gID]
if !ok {
return nil, fmt.Errorf("can't find group with id %d", gID)
}
g := group.ToAPI()
for _, r := range g.Rules {
if r.Type != rule.TypeAlerting {
ar, ok := r.(*rule.AlertingRule)
if !ok {
continue
}
alertID := strconv.FormatUint(aID, 10)
for _, a := range r.Alerts {
if a.ID == alertID {
return a, nil
}
if apiAlert := ar.AlertToAPI(aID); apiAlert != nil {
return apiAlert, nil
}
}
return nil, fmt.Errorf("can't find alert with id %d in group %q", aID, g.Name)
@@ -99,16 +94,17 @@ func (m *manager) close() {
}

func (m *manager) startGroup(ctx context.Context, g *rule.Group, restore bool) error {
m.wg.Add(1)
id := g.GetID()
g.Init()
m.wg.Go(func() {
go func() {
defer m.wg.Done()
if restore {
g.Start(ctx, m.rw, m.rr)
g.Start(ctx, m.notifiers, m.rw, m.rr)
} else {
g.Start(ctx, m.rw, nil)
g.Start(ctx, m.notifiers, m.rw, nil)
}
})

}()
m.groups[id] = g
return nil
}
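The two `sync.WaitGroup` styles that `startGroup` toggles between are equivalent; `WaitGroup.Go` (added in Go 1.25) simply wraps the classic `Add`/`Done` bookkeeping. A minimal sketch, assuming a Go 1.25+ toolchain and independent of vmalert:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Classic pattern: Add before starting the goroutine, Done inside it.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("classic worker")
	}()

	// Go 1.25+ pattern: wg.Go performs Add(1) and Done() internally.
	wg.Go(func() {
		fmt.Println("wg.Go worker")
	})

	wg.Wait()
}
```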
@@ -135,7 +131,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
if rrPresent && m.rw == nil {
return fmt.Errorf("config contains recording rules but `-remoteWrite.url` isn't set")
}
if arPresent && notifier.GetTargets() == nil {
if arPresent && m.notifiers == nil {
return fmt.Errorf("config contains alerting rules but neither `-notifier.url` nor `-notifier.config` nor `-notifier.blackhole` aren't set")
}

@@ -172,15 +168,15 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
if len(toUpdate) > 0 {
var wg sync.WaitGroup
for _, item := range toUpdate {
oldG := item.old
newG := item.new
wg.Go(func() {
// cancel evaluation so the Update will be applied as fast as possible.
// it is important to call InterruptEval before the update, because cancel fn
// can be re-assigned during the update.
oldG.InterruptEval()
oldG.UpdateWith(newG)
})
wg.Add(1)
// cancel evaluation so the Update will be applied as fast as possible.
// it is important to call InterruptEval before the update, because cancel fn
// can be re-assigned during the update.
item.old.InterruptEval()
go func(oldGroup *rule.Group, newGroup *rule.Group) {
oldGroup.UpdateWith(newGroup)
wg.Done()
}(item.old, item.new)
}
wg.Wait()
}

@@ -40,11 +40,10 @@ func TestManagerEmptyRulesDir(t *testing.T) {
// execution of configuration update.
// Should be executed with -race flag
func TestManagerUpdateConcurrent(t *testing.T) {
_, cleanup := notifier.InitFakeNotifier()
defer cleanup()
m := &manager{
groups: make(map[uint64]*rule.Group),
querierBuilder: &datasource.FakeQuerier{},
notifiers: func() []notifier.Notifier { return []notifier.Notifier{&notifier.FakeNotifier{}} },
}
paths := []string{
"config/testdata/dir/rules0-good.rules",
@@ -128,9 +127,8 @@ func TestManagerUpdate_Success(t *testing.T) {
m := &manager{
groups: make(map[uint64]*rule.Group),
querierBuilder: &datasource.FakeQuerier{},
notifiers: func() []notifier.Notifier { return []notifier.Notifier{&notifier.FakeNotifier{}} },
}
_, cleanup := notifier.InitFakeNotifier()
defer cleanup()

cfgInit := loadCfg(t, []string{initPath}, true, true)
if err := m.update(ctx, cfgInit, false); err != nil {
@@ -279,8 +277,7 @@ func TestManagerUpdate_Failure(t *testing.T) {
rw: rw,
}
if notifiers != nil {
_, cleanup := notifier.InitFakeNotifier()
defer cleanup()
m.notifiers = func() []notifier.Notifier { return notifiers }
}
err := m.update(context.Background(), []config.Group{cfg}, false)
if err == nil {

@@ -166,8 +166,8 @@ func templateAnnotations(annotations map[string]string, data AlertTplData, tmpl
ctmpl, _ := tmpl.Clone()
ctmpl = ctmpl.Option("missingkey=zero")
if err := templateAnnotation(&buf, builder.String(), tData, ctmpl, execute); err != nil {
r[key] = err.Error()
eg.Add(fmt.Errorf("(key: %q, value: %q): %w", key, text, err))
r[key] = text
eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
continue
}
r[key] = buf.String()
@@ -184,13 +184,13 @@ type tplData struct {
func templateAnnotation(dst io.Writer, text string, data tplData, tpl *textTpl.Template, execute bool) error {
tpl, err := tpl.Parse(text)
if err != nil {
return fmt.Errorf("error parsing template: %w", err)
return fmt.Errorf("error parsing annotation template: %w", err)
}
if !execute {
return nil
}
if err = tpl.Execute(dst, data); err != nil {
return fmt.Errorf("error evaluating template: %w", err)
return fmt.Errorf("error evaluating annotation template: %w", err)
}
return nil
}
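The parse/execute split in `templateAnnotation` follows the standard `text/template` flow; when `execute` is false it is effectively a validation-only pass. A self-contained sketch using the same `missingkey=zero` option seen above:

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// missingkey=zero substitutes the zero value for absent map keys
	// instead of failing the execution.
	tpl, err := template.New("annotation").
		Option("missingkey=zero").
		Parse(`value is {{.Value}}, missing is {{.Missing}}`)
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse error:", err)
		return
	}
	data := map[string]string{"Value": "42"}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		fmt.Fprintln(os.Stderr, "execute error:", err)
	}
}
```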
@@ -20,7 +20,7 @@ func TestAlertExecTemplate(t *testing.T) {
)
extLabels["cluster"] = extCluster
extLabels["dc"] = extDC
err := Init(extLabels, extURL)
_, err := Init(extLabels, extURL)
checkErr(t, err)

f := func(alert *Alert, annotations map[string]string, tplExpected map[string]string) {

@@ -3,7 +3,6 @@ package notifier
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -78,20 +77,12 @@ func (am *AlertManager) LastError() string {
}

// Send an alert or resolve message
func (am *AlertManager) Send(ctx context.Context, alerts []Alert, alertLabels [][]prompb.Label, headers map[string]string) error {
if len(alerts) != len(alertLabels) {
return fmt.Errorf("mismatched number of alerts and label sets after global alert relabeling")
}
func (am *AlertManager) Send(ctx context.Context, alerts []Alert, headers map[string]string) error {
am.metrics.alertsSent.Add(len(alerts))
startTime := time.Now()
err := am.send(ctx, alerts, alertLabels, headers)
err := am.send(ctx, alerts, headers)
am.metrics.alertsSendDuration.UpdateDuration(startTime)
if err != nil {
// the context can be cancelled on graceful shutdown
// or on group update. So no need to handle the error as usual.
if errors.Is(err, context.Canceled) {
return nil
}
am.metrics.alertsSendErrors.Add(len(alerts))
am.lastError = err.Error()
} else {
@@ -100,15 +91,12 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert, alertLabels []
return err
}

func (am *AlertManager) send(ctx context.Context, alerts []Alert, alertLabels [][]prompb.Label, headers map[string]string) error {
func (am *AlertManager) send(ctx context.Context, alerts []Alert, headers map[string]string) error {
b := &bytes.Buffer{}
alertsToSend := make([]Alert, 0, len(alerts))
lblss := make([][]prompb.Label, 0, len(alerts))
for i, a := range alerts {
lbls := alertLabels[i]
if am.relabelConfigs != nil {
lbls = am.relabelConfigs.Apply(lbls, 0)
}
for _, a := range alerts {
lbls := a.applyRelabelingIfNeeded(am.relabelConfigs)
if len(lbls) == 0 {
continue
}
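The dropped-alert semantics in this hunk (an empty label set after relabeling means the alert is skipped) can be shown with a toy rule. `dropRule` below is a hypothetical stand-in for the real `promrelabel` application, mimicking an `action: keep` relabel config:

```go
package main

import "fmt"

type Label struct{ Name, Value string }

// dropRule is a toy relabeling step: it keeps only alerts whose `env`
// label equals "prod" and drops everything else by returning nil.
func dropRule(lbls []Label) []Label {
	for _, l := range lbls {
		if l.Name == "env" && l.Value == "prod" {
			return lbls
		}
	}
	return nil // empty result => alert is skipped by the sender
}

func main() {
	alerts := [][]Label{
		{{Name: "env", Value: "prod"}},
		{{Name: "env", Value: "dev"}},
	}
	var toSend [][]Label
	for _, lbls := range alerts {
		if out := dropRule(lbls); len(out) > 0 {
			toSend = append(toSend, out)
		}
	}
	fmt.Println("alerts to send:", len(toSend)) // 1
}
```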
@@ -11,7 +11,6 @@ import (
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)

@@ -146,11 +145,11 @@ func TestAlertManager_Send(t *testing.T) {
t.Fatalf("unexpected error: %s", err)
}

if err := am.Send(context.Background(), []Alert{{Labels: map[string]string{"a": "b"}}}, [][]prompb.Label{{{Name: "a", Value: "b"}}}, nil); err == nil {
if err := am.Send(context.Background(), []Alert{{Labels: map[string]string{"a": "b"}}}, nil); err == nil {
t.Fatalf("expected connection error got nil")
}

if err := am.Send(context.Background(), []Alert{{Labels: map[string]string{"a": "b"}}}, [][]prompb.Label{{{Name: "a", Value: "b"}}}, nil); err == nil {
if err := am.Send(context.Background(), []Alert{{Labels: map[string]string{"a": "b"}}}, nil); err == nil {
t.Fatalf("expected wrong http code error got nil")
}

@@ -161,7 +160,7 @@ func TestAlertManager_Send(t *testing.T) {
End: time.Now().UTC(),
Labels: map[string]string{"alertname": "alert0"},
Annotations: map[string]string{"a": "b", "c": "d"},
}}, [][]prompb.Label{{{Name: "alertname", Value: "alert0"}}}, map[string]string{headerKey: "bar"}); err != nil {
}}, map[string]string{headerKey: "bar"}); err != nil {
t.Fatalf("unexpected error %s", err)
}

@@ -175,7 +174,7 @@ func TestAlertManager_Send(t *testing.T) {
Name: "alert2",
Labels: map[string]string{"rule": "test", "tenant": "1"},
},
}, [][]prompb.Label{{{Name: "rule", Value: "test"}, {Name: "tenant", Value: "0"}}, {{Name: "rule", Value: "test"}, {Name: "tenant", Value: "1"}}}, map[string]string{headerKey: "bar"}); err != nil {
}, map[string]string{headerKey: "bar"}); err != nil {
t.Fatalf("unexpected error %s", err)
}

@@ -188,7 +187,7 @@ func TestAlertManager_Send(t *testing.T) {
Name: "alert2",
Labels: map[string]string{},
},
}, [][]prompb.Label{{{Name: "rule", Value: "test"}}, {{}}}, map[string]string{}); err != nil {
}, map[string]string{}); err != nil {
t.Fatalf("unexpected error %s", err)
}

@@ -27,9 +27,15 @@ type Config struct {
// PathPrefix is added to URL path before adding alertManagerPath value
PathPrefix string `yaml:"path_prefix,omitempty"`

ConsulSDConfigs []ConsulSDConfigs `yaml:"consul_sd_configs,omitempty"`
DNSSDConfigs []DNSSDConfigs `yaml:"dns_sd_configs,omitempty"`
StaticConfigs []StaticConfig `yaml:"static_configs,omitempty"`
// ConsulSDConfigs contains list of settings for service discovery via Consul
// see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
// DNSSDConfigs contains list of settings for service discovery via DNS.
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config
DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs,omitempty"`

// StaticConfigs contains list of static targets
StaticConfigs []StaticConfig `yaml:"static_configs,omitempty"`

// HTTPClientConfig contains HTTP configuration for Notifier clients
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
@@ -56,29 +62,14 @@ type Config struct {
parsedAlertRelabelConfigs *promrelabel.ParsedConfigs
}

// staticConfig contains list of static targets in the following form:
// StaticConfig contains list of static targets in the following form:
//
// targets:
// [ - '<host>' ]
type StaticConfig struct {
Targets []string `yaml:"targets"`
// HTTPClientConfig contains HTTP configuration for the Targets
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
AlertRelabelConfigs []promrelabel.RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
}

// ConsulSDConfigs contains list of settings for service discovery via Consul,
// see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
type ConsulSDConfigs struct {
consul.SDConfig `yaml:",inline"`
AlertRelabelConfigs []promrelabel.RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
}

// DNSSDConfigs contains list of settings for service discovery via DNS,
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config
type DNSSDConfigs struct {
dns.SDConfig `yaml:",inline"`
AlertRelabelConfigs []promrelabel.RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -104,31 +95,6 @@ func (cfg *Config) UnmarshalYAML(unmarshal func(any) error) error {
}
cfg.parsedAlertRelabelConfigs = arCfg

for _, s := range cfg.StaticConfigs {
if len(s.AlertRelabelConfigs) > 0 {
_, err := promrelabel.ParseRelabelConfigs(s.AlertRelabelConfigs)
if err != nil {
return fmt.Errorf("failed to parse alert_relabel_configs in static_config: %w", err)
}
}
}
for _, s := range cfg.ConsulSDConfigs {
if len(s.AlertRelabelConfigs) > 0 {
_, err := promrelabel.ParseRelabelConfigs(s.AlertRelabelConfigs)
if err != nil {
return fmt.Errorf("failed to parse alert_relabel_configs in consul_sd_config: %w", err)
}
}
}
for _, s := range cfg.DNSSDConfigs {
if len(s.AlertRelabelConfigs) > 0 {
_, err := promrelabel.ParseRelabelConfigs(s.AlertRelabelConfigs)
if err != nil {
return fmt.Errorf("failed to parse alert_relabel_configs in dns_sd_config: %w", err)
}
}
}

b, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Errorf("failed to marshal configuration for checksum: %w", err)

@@ -35,6 +35,4 @@ func TestParseConfig_Failure(t *testing.T) {

f("testdata/unknownFields.bad.yaml", "unknown field")
f("non-existing-file", "error reading")
f("testdata/consul.bad.yaml", "failed to parse alert_relabel_configs in consul_sd_config")
f("testdata/dns.bad.yaml", "failed to parse alert relabeling config")
}

@@ -8,7 +8,6 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dns"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
@@ -29,7 +28,11 @@ type configWatcher struct {
targets map[TargetType][]Target
}

func newWatcher(cfg *Config, gen AlertURLGenerator) (*configWatcher, error) {
func newWatcher(path string, gen AlertURLGenerator) (*configWatcher, error) {
cfg, err := parseConfig(path)
if err != nil {
return nil, err
}
cw := &configWatcher{
cfg: cfg,
wg: sync.WaitGroup{},
@@ -85,15 +88,18 @@ func (cw *configWatcher) reload(path string) error {
return cw.start()
}

func (cw *configWatcher) add(typeK TargetType, interval time.Duration, targetsFn getTargets) error {
targetMetadata, errors := getTargetMetadata(targetsFn, cw.cfg)
func (cw *configWatcher) add(typeK TargetType, interval time.Duration, labelsFn getLabels) error {
targetMetadata, errors := getTargetMetadata(labelsFn, cw.cfg)
for _, err := range errors {
return fmt.Errorf("failed to init notifier for %q: %w", typeK, err)
}

cw.updateTargets(typeK, targetMetadata, cw.cfg, cw.genFn)

cw.wg.Go(func() {
cw.wg.Add(1)
go func() {
defer cw.wg.Done()

ticker := time.NewTicker(interval)
defer ticker.Stop()

@@ -103,77 +109,62 @@ func (cw *configWatcher) add(typeK TargetType, interval time.Duration, targetsFn
return
case <-ticker.C:
}
targetMetadata, errors := getTargetMetadata(targetsFn, cw.cfg)
targetMetadata, errors := getTargetMetadata(labelsFn, cw.cfg)
for _, err := range errors {
logger.Errorf("failed to init notifier for %q: %w", typeK, err)
}
cw.updateTargets(typeK, targetMetadata, cw.cfg, cw.genFn)
}
})
}()
return nil
}

type targetMetadata struct {
*promutil.Labels
alertRelabelConfigs *promrelabel.ParsedConfigs
}

func getTargetMetadata(targetsFn getTargets, cfg *Config) (map[string]targetMetadata, []error) {
metaLabelsList, alertRelabelCfgs, err := targetsFn()
func getTargetMetadata(labelsFn getLabels, cfg *Config) (map[string]*promutil.Labels, []error) {
metaLabels, err := labelsFn()
if err != nil {
return nil, []error{fmt.Errorf("failed to get labels: %w", err)}
}
targetMts := make(map[string]targetMetadata, len(metaLabelsList))
targetMetadata := make(map[string]*promutil.Labels, len(metaLabels))
var errors []error
duplicates := make(map[string]struct{})
for i := range metaLabelsList {
metaLabels := metaLabelsList[i]
alertRelabelCfg := alertRelabelCfgs[i]
for _, labels := range metaLabels {
target := labels.Get("__address__")
u, processedLabels, err := parseLabels(target, labels, cfg)
if err != nil {
errors = append(errors, err)
continue
}
if len(u) == 0 {
continue
}
// check for duplicated targets
// targets with same address but different alert_relabel_configs are still considered duplicates since it's mostly due to misconfiguration and could cause duplicated notifications.
if _, ok := duplicates[u]; ok {
if !*suppressDuplicateTargetErrors {
logger.Errorf("skipping duplicate target with identical address %q; "+
"make sure service discovery and relabeling is set up properly; "+
"original labels: %s; resulting labels: %s",
u, labels, processedLabels)
}
continue
}
duplicates[u] = struct{}{}
targetMts[u] = targetMetadata{
Labels: processedLabels,
alertRelabelConfigs: alertRelabelCfg,
}
for _, labels := range metaLabels {
target := labels.Get("__address__")
u, processedLabels, err := parseLabels(target, labels, cfg)
if err != nil {
errors = append(errors, err)
continue
}
if len(u) == 0 {
continue
}
if _, ok := duplicates[u]; ok { // check for duplicates
if !*suppressDuplicateTargetErrors {
logger.Errorf("skipping duplicate target with identical address %q; "+
"make sure service discovery and relabeling is set up properly; "+
"original labels: %s; resulting labels: %s",
u, labels, processedLabels)
}
continue
}
duplicates[u] = struct{}{}
targetMetadata[u] = processedLabels
}
return targetMts, errors
return targetMetadata, errors
}

type getTargets func() ([][]*promutil.Labels, []*promrelabel.ParsedConfigs, error)
type getLabels func() ([]*promutil.Labels, error)

func (cw *configWatcher) start() error {
if len(cw.cfg.StaticConfigs) > 0 {
var targets []Target
for i, cfg := range cw.cfg.StaticConfigs {
alertRelabelConfig, _ := promrelabel.ParseRelabelConfigs(cw.cfg.StaticConfigs[i].AlertRelabelConfigs)
for _, cfg := range cw.cfg.StaticConfigs {
httpCfg := mergeHTTPClientConfigs(cw.cfg.HTTPClientConfig, cfg.HTTPClientConfig)
for _, target := range cfg.Targets {
address, labels, err := parseLabels(target, nil, cw.cfg)
if err != nil {
return fmt.Errorf("failed to parse labels for target %q: %w", target, err)
}
notifier, err := NewAlertManager(address, cw.genFn, httpCfg, alertRelabelConfig, cw.cfg.Timeout.Duration())
notifier, err := NewAlertManager(address, cw.genFn, httpCfg, cw.cfg.parsedAlertRelabelConfigs, cw.cfg.Timeout.Duration())
if err != nil {
return fmt.Errorf("failed to init alertmanager for addr %q: %w", address, err)
}
@@ -187,20 +178,17 @@ func (cw *configWatcher) start() error {
}

if len(cw.cfg.ConsulSDConfigs) > 0 {
err := cw.add(TargetConsul, *consul.SDCheckInterval, func() ([][]*promutil.Labels, []*promrelabel.ParsedConfigs, error) {
var labels [][]*promutil.Labels
var alertRelabelConfigs []*promrelabel.ParsedConfigs
err := cw.add(TargetConsul, *consul.SDCheckInterval, func() ([]*promutil.Labels, error) {
var labels []*promutil.Labels
for i := range cw.cfg.ConsulSDConfigs {
alertRelabelConfig, _ := promrelabel.ParseRelabelConfigs(cw.cfg.ConsulSDConfigs[i].AlertRelabelConfigs)
sdc := &cw.cfg.ConsulSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)
if err != nil {
return nil, nil, fmt.Errorf("got labels err: %w", err)
return nil, fmt.Errorf("got labels err: %w", err)
}
labels = append(labels, targetLabels)
alertRelabelConfigs = append(alertRelabelConfigs, alertRelabelConfig)
labels = append(labels, targetLabels...)
}
return labels, alertRelabelConfigs, nil
return labels, nil
})
if err != nil {
return fmt.Errorf("failed to start consulSD discovery: %w", err)
@@ -208,21 +196,17 @@ func (cw *configWatcher) start() error {
}

if len(cw.cfg.DNSSDConfigs) > 0 {
err := cw.add(TargetDNS, *dns.SDCheckInterval, func() ([][]*promutil.Labels, []*promrelabel.ParsedConfigs, error) {
var labels [][]*promutil.Labels
var alertRelabelConfigs []*promrelabel.ParsedConfigs
err := cw.add(TargetDNS, *dns.SDCheckInterval, func() ([]*promutil.Labels, error) {
var labels []*promutil.Labels
for i := range cw.cfg.DNSSDConfigs {
alertRelabelConfig, _ := promrelabel.ParseRelabelConfigs(cw.cfg.DNSSDConfigs[i].AlertRelabelConfigs)
sdc := &cw.cfg.DNSSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)
if err != nil {
return nil, nil, fmt.Errorf("got labels err: %w", err)
return nil, fmt.Errorf("got labels err: %w", err)
}
labels = append(labels, targetLabels)
alertRelabelConfigs = append(alertRelabelConfigs, alertRelabelConfig)

labels = append(labels, targetLabels...)
}
return labels, alertRelabelConfigs, nil
return labels, nil
})
if err != nil {
return fmt.Errorf("failed to start DNSSD discovery: %w", err)
@@ -256,30 +240,30 @@ func (cw *configWatcher) setTargets(key TargetType, targets []Target) {
cw.targetsMu.Unlock()
}

func (cw *configWatcher) updateTargets(key TargetType, targetMts map[string]targetMetadata, cfg *Config, genFn AlertURLGenerator) {
func (cw *configWatcher) updateTargets(key TargetType, targetMetadata map[string]*promutil.Labels, cfg *Config, genFn AlertURLGenerator) {
cw.targetsMu.Lock()
defer cw.targetsMu.Unlock()
oldTargets := cw.targets[key]
var updatedTargets []Target
for _, ot := range oldTargets {
if _, ok := targetMts[ot.Addr()]; !ok {
if _, ok := targetMetadata[ot.Addr()]; !ok {
// if target not exists in currentTargets, close it
ot.Close()
} else {
updatedTargets = append(updatedTargets, ot)
delete(targetMts, ot.Addr())
delete(targetMetadata, ot.Addr())
}
}
// create new resources for the new targets
for addr, metadata := range targetMts {
am, err := NewAlertManager(addr, genFn, cfg.HTTPClientConfig, metadata.alertRelabelConfigs, cfg.Timeout.Duration())
for addr, labels := range targetMetadata {
am, err := NewAlertManager(addr, genFn, cfg.HTTPClientConfig, cfg.parsedAlertRelabelConfigs, cfg.Timeout.Duration())
if err != nil {
logger.Errorf("failed to init %s notifier with addr %q: %w", key, addr, err)
continue
}
updatedTargets = append(updatedTargets, Target{
Notifier: am,
Labels: metadata.Labels,
Labels: labels,
})
}
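The duplicate-target handling in `getTargetMetadata` above is a plain first-wins dedupe over addresses. A minimal sketch of the same idea, detached from the discovery machinery:

```go
package main

import "fmt"

// dedupe mirrors the duplicates map in getTargetMetadata: the first target
// with a given address wins, and later ones are reported as duplicates.
func dedupe(addrs []string) (unique, dups []string) {
	seen := make(map[string]struct{})
	for _, a := range addrs {
		if _, ok := seen[a]; ok {
			dups = append(dups, a)
			continue
		}
		seen[a] = struct{}{}
		unique = append(unique, a)
	}
	return unique, dups
}

func main() {
	u, d := dedupe([]string{"am1:9093", "am2:9093", "am1:9093"})
	fmt.Println(u, d) // [am1:9093 am2:9093] [am1:9093]
}
```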
@@ -7,7 +7,6 @@ import (
"net/http/httptest"
"os"
"sync"
"sync/atomic"
"testing"
"time"

@@ -29,11 +28,7 @@ static_configs:
- localhost:9093
- localhost:9094
`)
cfg, err := parseConfig(f.Name())
if err != nil {
t.Fatalf("failed to parse config: %s", err)
}
cw, err := newWatcher(cfg, nil)
cw, err := newWatcher(f.Name(), nil)
if err != nil {
t.Fatalf("failed to start config watcher: %s", err)
}
@@ -88,64 +83,33 @@ consul_sd_configs:
- server: %s
services:
- alertmanager
- server: %s
services:
- alertmanager
alert_relabel_configs:
- target_label: "foo"
replacement: "tar"
`, consulSDServer.URL, consulSDServer.URL))
`, consulSDServer.URL))

cfg, err := parseConfig(consulSDFile.Name())
if err != nil {
t.Fatalf("failed to parse config: %s", err)
}
cw, err := newWatcher(cfg, nil)
cw, err := newWatcher(consulSDFile.Name(), nil)
if err != nil {
t.Fatalf("failed to start config watcher: %s", err)
}
defer cw.mustStop()

if len(cw.notifiers()) != 3 {
t.Fatalf("expected to get 3 notifiers; got %d", len(cw.notifiers()))
if len(cw.notifiers()) != 2 {
t.Fatalf("expected to get 2 notifiers; got %d", len(cw.notifiers()))
}

expAddr1 := fmt.Sprintf("https://%s/proxy/api/v2/alerts", fakeConsulService1)
expAddr2 := fmt.Sprintf("https://%s/proxy/api/v2/alerts", fakeConsulService2)
expAddr3 := fmt.Sprintf("https://%s/proxy/api/v2/alerts", fakeConsulService3)

n1, n2, n3 := cw.notifiers()[0], cw.notifiers()[1], cw.notifiers()[2]
n1, n2 := cw.notifiers()[0], cw.notifiers()[1]
if n1.Addr() != expAddr1 {
t.Fatalf("exp address %q; got %q", expAddr1, n1.Addr())
}
if n2.Addr() != expAddr2 {
t.Fatalf("exp address %q; got %q", expAddr2, n2.Addr())
}
if n3.Addr() != expAddr3 {
t.Fatalf("exp address %q; got %q", expAddr3, n3.Addr())
}

if n1.(*AlertManager).relabelConfigs.String() != "" {
t.Fatalf("unexpected relabel configs: %q", n1.(*AlertManager).relabelConfigs.String())
}
if n2.(*AlertManager).relabelConfigs.String() != "" {
t.Fatalf("unexpected relabel configs: %q", n2.(*AlertManager).relabelConfigs.String())
}
if n3.(*AlertManager).relabelConfigs.String() != "- target_label: foo\n replacement: tar\n" {
t.Fatalf("unexpected relabel configs: %q", n3.(*AlertManager).relabelConfigs.String())
}

f := func() bool { return len(cw.notifiers()) == 1 }
if !waitFor(f, time.Second) {
t.Fatalf("expected to get 1 notifiers; got %d", len(cw.notifiers()))
}
n3 = cw.notifiers()[0]
if n3.Addr() != expAddr3 {
t.Fatalf("exp address %q; got %q", expAddr3, n3.Addr())
}
if n3.(*AlertManager).relabelConfigs.String() != "- target_label: foo\n replacement: tar\n" {
t.Fatalf("unexpected relabel configs: %q", n3.(*AlertManager).relabelConfigs.String())
}
}

// TestConfigWatcherReloadConcurrent supposed to test concurrent
@@ -200,11 +164,7 @@ consul_sd_configs:
"unknownFields.bad.yaml",
}

cfg, err := parseConfig(paths[0])
if err != nil {
t.Fatalf("failed to parse config: %s", err)
}
cw, err := newWatcher(cfg, nil)
cw, err := newWatcher(paths[0], nil)
if err != nil {
t.Fatalf("failed to start config watcher: %s", err)
}
@@ -242,11 +202,10 @@ func checkErr(t *testing.T, err error) {
const (
fakeConsulService1 = "127.0.0.1:9093"
fakeConsulService2 = "127.0.0.1:9095"
fakeConsulService3 = "127.0.0.1:9097"
)

func newFakeConsulServer() *httptest.Server {
var requestCount atomic.Int32
requestCount := 0
mux := http.NewServeMux()
mux.HandleFunc("/v1/agent/self", func(rw http.ResponseWriter, _ *http.Request) {
rw.Write([]byte(`{"Config": {"Datacenter": "dc1"}}`))
@@ -261,7 +220,7 @@ func newFakeConsulServer() *httptest.Server {
}`))
})
mux.HandleFunc("/v1/health/service/alertmanager", func(rw http.ResponseWriter, _ *http.Request) {
if requestCount.Load() == 0 {
if requestCount == 0 {
rw.Header().Set("X-Consul-Index", "1")
rw.Write([]byte(`
[
@@ -401,7 +360,7 @@ func newFakeConsulServer() *httptest.Server {
}
]`))
}
requestCount.Add(1)
requestCount++
})

return httptest.NewServer(mux)
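The `requestCount` change in this hunk swaps a plain `int` for `atomic.Int32`; handlers registered on an `http.ServeMux` can run on concurrent goroutines, so the unsynchronized counter is a data race under `go test -race`. A minimal sketch of the safe variant:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
)

func main() {
	var requestCount atomic.Int32
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(rw http.ResponseWriter, _ *http.Request) {
		// Load/Add are safe even when the handler runs on many goroutines.
		if requestCount.Load() == 0 {
			rw.Write([]byte("first request"))
		}
		requestCount.Add(1)
	})
	srv := httptest.NewServer(mux)
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("requests served:", requestCount.Load()) // 1
}
```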
@@ -5,8 +5,6 @@ import (
"fmt"
"sync"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)

// FakeNotifier is a mock notifier
@@ -17,19 +15,6 @@ type FakeNotifier struct {
counter int
}

// InitFakeNotifier initializes global notifier to FakeNotifier,
// and returns a cleanup function to restore the original getActiveNotifiers.
func InitFakeNotifier() (*FakeNotifier, func()) {
originalGetActiveNotifiers := getActiveNotifiers
fn := &FakeNotifier{}
getActiveNotifiers = func() []Notifier {
return []Notifier{fn}
}
return fn, func() {
getActiveNotifiers = originalGetActiveNotifiers
}
}

// Close does nothing
func (*FakeNotifier) Close() {}

@@ -42,7 +27,7 @@ func (*FakeNotifier) LastError() string {
func (*FakeNotifier) Addr() string { return "" }

// Send sets alerts and increases counter
func (fn *FakeNotifier) Send(_ context.Context, alerts []Alert, _ [][]prompb.Label, _ map[string]string) error {
func (fn *FakeNotifier) Send(_ context.Context, alerts []Alert, _ map[string]string) error {
fn.Lock()
defer fn.Unlock()
fn.counter += len(alerts)

@@ -1,22 +1,17 @@
package notifier

import (
"context"
"flag"
"fmt"
"net/url"
"strconv"
"strings"
"sync"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
)

@@ -101,25 +96,11 @@ func InitAlertURLGeneratorFn(externalURL *url.URL, externalAlertSource string, v
return nil
}

var (
// getActiveNotifiers returns the current list of Notifier objects.
getActiveNotifiers func() []Notifier
// globalRelabelCfg stores the parsed alert relabeling config from the config file if there is
globalRelabelCfg *promrelabel.ParsedConfigs

// cw holds a configWatcher for configPath configuration file
// configWatcher provides a list of Notifier objects discovered
// from static config or via service discovery.
// cw is not nil only if configPath is provided.
cw *configWatcher

// externalLabels is a global variable for holding external labels configured via flags
// It is supposed to be inited via Init function only.
externalLabels map[string]string
// externalURL is a global variable for holding external URL value configured via flag
// It is supposed to be inited via Init function only.
externalURL string
)
// cw holds a configWatcher for configPath configuration file
// configWatcher provides a list of Notifier objects discovered
// from static config or via service discovery.
// cw is not nil only if configPath is provided.
var cw *configWatcher

// Reload checks the changes in configPath configuration file
// and applies changes if any.
@@ -130,62 +111,66 @@ func Reload() error {
return cw.reload(*configPath)
}

var staticNotifiersFn func() []Notifier

var (
// externalLabels is a global variable for holding external labels configured via flags
// It is supposed to be inited via Init function only.
externalLabels map[string]string
// externalURL is a global variable for holding external URL value configured via flag
// It is supposed to be inited via Init function only.
externalURL string
)

// Init returns a function for retrieving actual list of Notifier objects.
// Init works in two mods:
// - configuration via flags (for backward compatibility). Is always static
// and don't support live reloads.
// - configuration via file. Supports live reloads and service discovery.
//
// Init returns an error if both mods are used.
func Init(extLabels map[string]string, extURL string) error {
func Init(extLabels map[string]string, extURL string) (func() []Notifier, error) {
externalURL = extURL
externalLabels = extLabels
_, err := url.Parse(externalURL)
if err != nil {
return fmt.Errorf("failed to parse external URL: %w", err)
return nil, fmt.Errorf("failed to parse external URL: %w", err)
}

if *blackHole {
if len(*addrs) > 0 || *configPath != "" {
return fmt.Errorf("only one of -notifier.blackhole, -notifier.url and -notifier.config flags must be specified")
return nil, fmt.Errorf("only one of -notifier.blackhole, -notifier.url and -notifier.config flags must be specified")
}
notifier := newBlackHoleNotifier()
getActiveNotifiers = func() []Notifier {
staticNotifiersFn = func() []Notifier {
return []Notifier{notifier}
}
return nil
return staticNotifiersFn, nil
}

if *configPath == "" && len(*addrs) == 0 {
return nil
return nil, nil
}
if *configPath != "" && len(*addrs) > 0 {
return fmt.Errorf("only one of -notifier.config or -notifier.url flags must be specified")
return nil, fmt.Errorf("only one of -notifier.config or -notifier.url flags must be specified")
}

if len(*addrs) > 0 {
notifiers, err := notifiersFromFlags(AlertURLGeneratorFn)
if err != nil {
return fmt.Errorf("failed to create notifier from flag values: %w", err)
return nil, fmt.Errorf("failed to create notifier from flag values: %w", err)
}
getActiveNotifiers = func() []Notifier {
staticNotifiersFn = func() []Notifier {
return notifiers
}
return nil
return staticNotifiersFn, nil
}

cfg, err := parseConfig(*configPath)
cw, err = newWatcher(*configPath, AlertURLGeneratorFn)
if err != nil {
return err
return nil, fmt.Errorf("failed to init config watcher: %w", err)
}
if cfg.AlertRelabelConfigs != nil {
globalRelabelCfg = cfg.parsedAlertRelabelConfigs
}
cw, err = newWatcher(cfg, AlertURLGeneratorFn)
if err != nil {
return fmt.Errorf("failed to init config watcher: %w", err)
}
getActiveNotifiers = cw.notifiers
return nil
return cw.notifiers, nil
}

// InitSecretFlags must be called after flag.Parse and before any logging
@@ -260,57 +245,23 @@ const (

// GetTargets returns list of static or discovered targets
// via notifier configuration.
//
// Must be called after Init.
func GetTargets() map[TargetType][]Target {
if getActiveNotifiers == nil {
return nil
}
var targets = make(map[TargetType][]Target)
// use cached targets from configWatcher instead of getActiveNotifiers for the extra target labels

if staticNotifiersFn != nil {
for _, ns := range staticNotifiersFn() {
targets[TargetStatic] = append(targets[TargetStatic], Target{
Notifier: ns,
})
}
}

if cw != nil {
cw.targetsMu.RLock()
for key, ns := range cw.targets {
targets[key] = append(targets[key], ns...)
}
cw.targetsMu.RUnlock()
return targets
}

// static notifiers don't have labels
for _, ns := range getActiveNotifiers() {
targets[TargetStatic] = append(targets[TargetStatic], Target{
Notifier: ns,
})
}
return targets
}

// Send sends alerts to all active notifiers
func Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) *vmalertutil.ErrGroup {
alertsToSend := make([]Alert, 0, len(alerts))
lblss := make([][]prompb.Label, 0, len(alerts))
// apply global relabel config first without modifying original alerts in alerts
for _, a := range alerts {
lbls := a.applyRelabelingIfNeeded(globalRelabelCfg)
if len(lbls) == 0 {
continue
}
alertsToSend = append(alertsToSend, a)
lblss = append(lblss, lbls)
}

errGr := new(vmalertutil.ErrGroup)
wg := sync.WaitGroup{}
activeNotifiers := getActiveNotifiers()
for i := range activeNotifiers {
nt := activeNotifiers[i]
wg.Go(func() {
if err := nt.Send(ctx, alertsToSend, lblss, notifierHeaders); err != nil {
errGr.Add(fmt.Errorf("failed to send alerts to addr %q: %w", nt.Addr(), err))
}
})
}
wg.Wait()
return errGr
}
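The fan-out in `Send` above is a standard pattern: one goroutine per notifier, with failures collected into a shared error group. A minimal sketch, where `errGroup` is a hypothetical stand-in for `vmalertutil.ErrGroup` reduced to a mutex-guarded slice:

```go
package main

import (
	"fmt"
	"sync"
)

// errGroup is a tiny concurrency-safe error collector.
type errGroup struct {
	mu   sync.Mutex
	errs []error
}

func (g *errGroup) Add(err error) {
	g.mu.Lock()
	g.errs = append(g.errs, err)
	g.mu.Unlock()
}

func main() {
	notifiers := []string{"am1:9093", "am2:9093", "am3:9093"}
	g := &errGroup{}
	var wg sync.WaitGroup
	for _, addr := range notifiers {
		wg.Add(1)
		go func(addr string) {
			defer wg.Done()
			// Pretend am2 is down; a real sender would do the HTTP call here.
			if addr == "am2:9093" {
				g.Add(fmt.Errorf("failed to send alerts to addr %q", addr))
			}
		}(addr)
	}
	wg.Wait()
	fmt.Println("errors:", len(g.errs)) // 1
}
```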
@@ -1,17 +1,11 @@
package notifier

import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
)

func TestInit(t *testing.T) {
@@ -20,13 +14,14 @@ func TestInit(t *testing.T) {

*addrs = flagutil.ArrayString{"127.0.0.1", "127.0.0.2"}

err := Init(nil, "")
fn, err := Init(nil, "")
if err != nil {
t.Fatalf("%s", err)
}

if len(getActiveNotifiers()) != 2 {
t.Fatalf("expected to get 2 notifiers; got %d", len(getActiveNotifiers()))
nfs := fn()
if len(nfs) != 2 {
t.Fatalf("expected to get 2 notifiers; got %d", len(nfs))
}

targets := GetTargets()
@@ -59,7 +54,7 @@ func TestInitNegative(t *testing.T) {
*configPath = path
*addrs = flagutil.ArrayString{addr}
*blackHole = bh
if err := Init(nil, ""); err == nil {
if _, err := Init(nil, ""); err == nil {
t.Fatalf("expected to get error; got nil instead")
}
}
@@ -76,13 +71,14 @@ func TestBlackHole(t *testing.T) {

*blackHole = true

err := Init(nil, "")
fn, err := Init(nil, "")
if err != nil {
t.Fatalf("%s", err)
}

if len(getActiveNotifiers()) != 1 {
t.Fatalf("expected to get 1 notifier; got %d", len(getActiveNotifiers()))
nfs := fn()
if len(nfs) != 1 {
t.Fatalf("expected to get 1 notifier; got %d", len(nfs))
}

targets := GetTargets()
@@ -124,85 +120,3 @@ func TestGetAlertURLGenerator(t *testing.T) {
t.Fatalf("unexpected url want %s, got %s", exp, AlertURLGeneratorFn(testAlert))
}
}

func TestSendAlerts(t *testing.T) {
oldAlertURLGeneratorFn := AlertURLGeneratorFn
defer func() { AlertURLGeneratorFn = oldAlertURLGeneratorFn }()
AlertURLGeneratorFn = func(alert Alert) string {
return ""
}
mux := http.NewServeMux()
mux.HandleFunc("/", func(_ http.ResponseWriter, _ *http.Request) {
t.Fatalf("should not be called")
})
mux.HandleFunc(alertManagerPath, func(w http.ResponseWriter, r *http.Request) {
var a []struct {
Labels map[string]string `json:"labels"`
}
if err := json.NewDecoder(r.Body).Decode(&a); err != nil {
t.Fatalf("can not unmarshal data into alert %s", err)
}
if len(a) != 2 {
t.Fatalf("expected 2 alert in array got %d", len(a))
}
if len(a[0].Labels) != 4 {
t.Fatalf("expected 4 labels got %d", len(a[0].Labels))
}
if a[0].Labels["env"] != "prod" {
t.Fatalf("expected env label to be prod during relabeling, got %s", a[0].Labels["env"])
}
if a[0].Labels["c"] != "baz" {
t.Fatalf("expected c label to be baz during relabeling, got %s", a[0].Labels["c"])
}
if len(a[1].Labels) != 1 {
t.Fatalf("expected 1 labels got %d", len(a[1].Labels))
}
})
srv := httptest.NewServer(mux)
defer srv.Close()

f, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
defer fs.MustRemovePath(f.Name())

rawConfig := `
static_configs:
- targets:
- %s
alert_relabel_configs:
- source_labels: [b]
target_label: "c"
alert_relabel_configs:
- source_labels: [a]
target_label: "b"
- target_label: "env"
replacement: "prod"
`
config := fmt.Sprintf(rawConfig, srv.URL+alertManagerPath)
writeToFile(f.Name(), config)

oldConfigPath := configPath
defer func() { configPath = oldConfigPath }()
*configPath = f.Name()
err = Init(nil, "")
if err != nil {
t.Fatalf("unexpected error when parse notifier config: %s", err)
}

firingAlerts := []Alert{
{
Name: "alert1",
Labels: map[string]string{"a": "baz"},
},
{
Name: "alert2",
Labels: map[string]string{},
},
}
errG := Send(context.Background(), firingAlerts, nil)
if errG.Err() != nil {
t.Fatalf("unexpected error when sending alerts: %s", err)
}
}

@@ -1,17 +1,13 @@
package notifier

import (
"context"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
import "context"

// Notifier is a common interface for alert manager provider
type Notifier interface {
// Send sends the given list of alerts.
// Returns an error if fails to send the alerts.
// Must unblock if the given ctx is cancelled.
Send(ctx context.Context, alerts []Alert, alertLabels [][]prompb.Label, notifierHeaders map[string]string) error
Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) error
// Addr returns address where alerts are sent.
Addr() string
// LastError returns error, that occured during last attempt to send data

@@ -1,10 +1,6 @@
package notifier

import (
"context"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
import "context"

// blackHoleNotifier is a Notifier stub, used when no notifications need
// to be sent.
@@ -14,7 +10,7 @@ type blackHoleNotifier struct {
}

// Send will send no notifications, but increase the metric.
func (bh *blackHoleNotifier) Send(_ context.Context, alerts []Alert, _ [][]prompb.Label, _ map[string]string) error { //nolint:revive
func (bh *blackHoleNotifier) Send(_ context.Context, alerts []Alert, _ map[string]string) error { //nolint:revive
bh.metrics.alertsSent.Add(len(alerts))
return nil
}

@@ -5,7 +5,6 @@ import (
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
metricset "github.com/VictoriaMetrics/metrics"
)

@@ -17,7 +16,7 @@ func TestBlackHoleNotifier_Send(t *testing.T) {
Start: time.Now().UTC(),
End: time.Now().UTC(),
Annotations: map[string]string{"a": "b", "c": "d", "e": "f"},
}}, [][]prompb.Label{{}}, nil); err != nil {
}}, nil); err != nil {
t.Fatalf("unexpected error %s", err)
}

@@ -35,7 +34,7 @@ func TestBlackHoleNotifier_Close(t *testing.T) {
Start: time.Now().UTC(),
End: time.Now().UTC(),
Annotations: map[string]string{"a": "b", "c": "d", "e": "f"},
}}, [][]prompb.Label{{}}, nil); err != nil {
}}, nil); err != nil {
t.Fatalf("unexpected error %s", err)
}

19 app/vmalert/notifier/testdata/consul.bad.yaml vendored
@@ -1,19 +0,0 @@
consul_sd_configs:
- server: localhost:8500
scheme: http
services:
- alertmanager
alert_relabel_configs:
- action: keep
source_labels: [env]
regex: "prod"
- server: localhost:8500
services:
- consul
alert_relabel_configs:
- action: keep
source_labels: [env]
regex: "(abc"
alert_relabel_configs:
- target_label: "foo"
replacement: "aaa"
13 app/vmalert/notifier/testdata/dns.bad.yaml vendored
@@ -1,13 +0,0 @@
dns_sd_configs:
- names:
- cloudflare.com
type: 'A'
port: 9093
relabel_configs:
- source_labels: [__meta_dns_name]
replacement: '${1}'
target_label: dns_name
alert_relabel_configs:
- action: keep
source_labels: [env]
regex: "(abc"
15 app/vmalert/notifier/testdata/mixed.good.yaml vendored
@@ -2,19 +2,12 @@ static_configs:
- targets:
- localhost:9093
- localhost:9095
alert_relabel_configs:
- action: keep
source_labels: [env]
regex: "static"

consul_sd_configs:
- server: localhost:8500
scheme: http
services:
- alertmanager
alert_relabel_configs:
- action: keep
source_labels: [env]
regex: "consul"
- server: localhost:8500
services:
- consul
@@ -24,10 +17,6 @@ dns_sd_configs:
- cloudflare.com
type: 'A'
port: 9093
alert_relabel_configs:
- action: keep
source_labels: [env]
regex: "dns"

relabel_configs:
- source_labels: [__meta_consul_tags]
@@ -36,4 +25,4 @@ relabel_configs:
target_label: __scheme__
- source_labels: [__meta_dns_name]
replacement: '${1}'
target_label: dns_name
26 app/vmalert/notifier/testdata/static.good.yaml vendored
@@ -1,14 +1,22 @@
|
||||
headers:
|
||||
- 'CustomHeader: foo'
|
||||
|
||||
static_configs:
|
||||
- targets:
|
||||
- http://192.168.0.101:9093
|
||||
alert_relabel_configs:
|
||||
- target_label: "foo"
|
||||
replacement: "aaa"
|
||||
- localhost:9093
|
||||
- localhost:9095
|
||||
- https://localhost:9093/test/api/v2/alerts
|
||||
basic_auth:
|
||||
username: foo
|
||||
password: bar
|
||||
|
||||
- targets:
|
||||
- http://192.168.0.101:9093
|
||||
alert_relabel_configs:
|
||||
- target_label: "foo"
|
||||
replacement: "ccc"
|
||||
|
||||
- localhost:9096
|
||||
- localhost:9097
|
||||
basic_auth:
|
||||
username: foo
|
||||
password: baz
|
||||
|
||||
alert_relabel_configs:
|
||||
- target_label: "foo"
|
||||
replacement: "aaa"
|
||||
|
||||
@@ -14,9 +14,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with MetricsQL. It can be single node VictoriaMetrics or vmselect. "+
|
||||
"Remote read is used to restore alerts state. "+
|
||||
"This configuration makes sense only if vmalert was configured with '-remoteWrite.url' before and has been successfully persisted its state. "+
|
||||
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with MetricsQL. It can be single node VictoriaMetrics or vmselect."+
|
||||
"Remote read is used to restore alerts state."+
|
||||
"This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
|
||||
"Supports address in the form of IP address with a port (e.g., http://127.0.0.1:8428) or DNS SRV record. "+
|
||||
"See also '-remoteRead.disablePathAppend', '-remoteRead.showURL'.")
|
||||
|
||||
|
||||
@@ -173,8 +173,9 @@ func (c *Client) run(ctx context.Context) {
|
||||
|
||||
cancel()
|
||||
}
|
||||
|
||||
c.wg.Go(func() {
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
defer c.wg.Done()
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
@@ -196,7 +197,7 @@ func (c *Client) run(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}()
|
||||
}

var (

@@ -2,7 +2,6 @@ package rule

import (
	"context"
	"errors"
	"fmt"
	"hash/fnv"
	"math"
@@ -247,6 +246,16 @@ func (ar *AlertingRule) GetAlerts() []*notifier.Alert {
	return alerts
}

// GetAlert returns alert if id exists
func (ar *AlertingRule) GetAlert(id uint64) *notifier.Alert {
	ar.alertsMu.RLock()
	defer ar.alertsMu.RUnlock()
	if ar.alerts == nil {
		return nil
	}
	return ar.alerts[id]
}

func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string, args ...any) {
	if !ar.Debug {
		return
@@ -312,11 +321,6 @@ type labelSet struct {
// On k conflicts in origin set, the original value is preferred and copied
// to processed with `exported_%k` key. The copy happens only if passed v isn't equal to origin[k] value.
func (ls *labelSet) add(k, v string) {
	// do not add label with empty value, since it has no meaning.
	// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
	if v == "" {
		return
	}
	ls.processed[k] = v
	ov, ok := ls.origin[k]
	if !ok {
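The guard removed above dropped labels with empty values before they entered the processed set. A standalone sketch of that behavior, with a plain map standing in for labelSet:

package main

import "fmt"

// add copies k=v into the set, skipping empty values: in Prometheus
// semantics a label with an empty value is the same as no label at all.
func add(set map[string]string, k, v string) {
	if v == "" {
		return
	}
	set[k] = v
}

func main() {
	set := map[string]string{}
	add(set, "env", "prod")
	add(set, "empty_label", "") // silently dropped
	fmt.Println(set)            // map[env:prod]
}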
@@ -351,6 +355,9 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
		Value: m.Values[0],
		Expr:  ar.Expr,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to expand labels: %w", err)
	}
	for k, v := range extraLabels {
		ls.add(k, v)
	}
@@ -361,7 +368,7 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
	if !*disableAlertGroupLabel && ar.GroupName != "" {
		ls.add(alertGroupNameLabel, ar.GroupName)
	}
	return ls, err
	return ls, nil
}

// execRange executes alerting rule on the given time range similarly to exec.
@@ -454,7 +461,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr

	defer func() {
		ar.state.add(curState)
		if curState.Err != nil && !errors.Is(curState.Err, context.Canceled) {
		if curState.Err != nil {
			ar.metrics.errors.Inc()
		}
	}()
@@ -477,9 +484,8 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
	for i, m := range res.Data {
		ls, err := ar.expandLabelTemplates(m, qFn)
		if err != nil {
			// only set error in current state, but do not break alert processing
			curState.Err = err
			logger.Errorf("got templating error in rule %s: %q", ar.Name, err)
			return nil, curState.Err
		}
		at := ts
		alertID := hash(ls.processed)
@@ -491,9 +497,8 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
		}
		as, err := ar.expandAnnotationTemplates(m, qFn, at, ls)
		if err != nil {
			// only set error in current state, but do not break alert processing
			curState.Err = err
			logger.Errorf("got templating error in rule %s: %q", ar.Name, err)
			return nil, curState.Err
		}
		expandedLabels[i] = ls
		expandedAnnotations[i] = as
@@ -602,7 +607,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
func (ar *AlertingRule) expandLabelTemplates(m datasource.Metric, qFn templates.QueryFn) (*labelSet, error) {
	ls, err := ar.toLabels(m, qFn)
	if err != nil {
		return ls, fmt.Errorf("failed to expand label templates: %s", err)
		return nil, fmt.Errorf("failed to expand label templates: %s", err)
	}
	return ls, nil
}
@@ -620,7 +625,7 @@ func (ar *AlertingRule) expandAnnotationTemplates(m datasource.Metric, qFn templ
	}
	as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
	if err != nil {
		return as, fmt.Errorf("failed to expand annotation templates: %s", err)
		return nil, fmt.Errorf("failed to expand annotation templates: %s", err)
	}
	return as, nil
}

@@ -827,9 +827,12 @@ func TestGroup_Restore(t *testing.T) {
	fg := NewGroup(config.Group{Name: "TestRestore", Rules: rules}, fqr, time.Second, nil)
	fg.Init()
	wg := sync.WaitGroup{}
	wg.Go(func() {
		fg.Start(context.Background(), nil, fqr)
	})
	wg.Add(1)
	go func() {
		nts := func() []notifier.Notifier { return []notifier.Notifier{&notifier.FakeNotifier{}} }
		fg.Start(context.Background(), nts, nil, fqr)
		wg.Done()
	}()
	fg.Close()
	wg.Wait()

@@ -1370,10 +1373,8 @@ func TestAlertingRule_ToLabels(t *testing.T) {

	ar := &AlertingRule{
		Labels: map[string]string{
			"instance":      "override", // this should override instance with new value
			"group":         "vmalert",  // this shouldn't have effect since value in metric is equal
			"invalid_label": "{{ .Values.mustRuntimeFail }}",
			"empty_label":   "", // this should be dropped
			"instance": "override", // this should override instance with new value
			"group":    "vmalert",  // this shouldn't have effect since value in metric is equal
		},
		Expr: "sum(vmalert_alerting_rules_error) by(instance, group, alertname) > 0",
		Name: "AlertingRulesError",
@@ -1381,11 +1382,10 @@ func TestAlertingRule_ToLabels(t *testing.T) {
	}

	expectedOriginLabels := map[string]string{
		"instance":      "0.0.0.0:8800",
		"group":         "vmalert",
		"alertname":     "ConfigurationReloadFailure",
		"alertgroup":    "vmalert",
		"invalid_label": `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
		"instance":   "0.0.0.0:8800",
		"group":      "vmalert",
		"alertname":  "ConfigurationReloadFailure",
		"alertgroup": "vmalert",
	}

	expectedProcessedLabels := map[string]string{
@@ -1395,12 +1395,11 @@ func TestAlertingRule_ToLabels(t *testing.T) {
		"exported_alertname": "ConfigurationReloadFailure",
		"group":              "vmalert",
		"alertgroup":         "vmalert",
		"invalid_label":      `error evaluating template: template: :1:268: executing "" at <.Values.mustRuntimeFail>: can't evaluate field Values in type notifier.tplData`,
	}

	ls, err := ar.toLabels(metric, nil)
	if err == nil || !strings.Contains(err.Error(), "error evaluating template") {
		t.Fatalf("unexpected error %q", err.Error())
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	if !reflect.DeepEqual(ls.origin, expectedOriginLabels) {

@@ -18,6 +18,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/vmalertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
@@ -38,8 +39,6 @@ var (
	disableAlertGroupLabel = flag.Bool("disableAlertgroupLabel", false, "Whether to disable adding group's Name as label to generated alerts and time series.")
	remoteReadLookBack     = flag.Duration("remoteRead.lookback", time.Hour, "Lookback defines how far to look into past for alerts timeseries. "+
		"For example, if lookback=1h then range from now() to now()-1h will be scanned.")
	maxStartDelay = flag.Duration("group.maxStartDelay", 5*time.Minute, "Defines the max delay before starting the group evaluation. Group's start is artificially delayed for random duration on interval"+
		" [0..min(--group.maxStartDelay, group.interval)]. This helps smoothing out the load on the configured datasource, so evaluations aren't executed too close to each other.")
)

// Group is an entity for grouping rules
@@ -331,13 +330,13 @@ func (g *Group) Init() {
}

// Start starts group's evaluation
func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasource.QuerierBuilder) {
func (g *Group) Start(ctx context.Context, nts func() []notifier.Notifier, rw remotewrite.RWClient, rr datasource.QuerierBuilder) {
	defer func() { close(g.finishedCh) }()
	evalTS := time.Now()
	// sleep random duration to spread group rules evaluation
	// over maxStartDelay to reduce the load on datasource.
	// over time to reduce the load on datasource.
	if !SkipRandSleepOnGroupStart {
		sleepBeforeStart := g.delayBeforeStart(evalTS, *maxStartDelay)
		sleepBeforeStart := delayBeforeStart(evalTS, g.GetID(), g.Interval, g.EvalOffset)
		g.infof("will start in %v", sleepBeforeStart)

		sleepTimer := time.NewTimer(sleepBeforeStart)
@@ -369,6 +368,7 @@ func (g *Group) Start(ctx context.Context, rw remotewrite.RWClient, rr datasourc

	e := &executor{
		Rw:              rw,
		Notifiers:       nts,
		notifierHeaders: g.NotifierHeaders,
	}

@@ -475,31 +475,20 @@ func (g *Group) UpdateWith(newGroup *Group) {
	g.updateCh <- newGroup
}

// delayBeforeStart returns duration for delaying the evaluation start
// based on given ts and Group settings. The delay can't exceed maxDelay.
// maxDelay is ignored if g.EvalOffset != nil.
//
// Delaying is important to smooth out the load on the datasource when all groups start at the same time.
// delayBeforeStart calculates delay based on Group ID, so all groups will start at different moments of time.
func (g *Group) delayBeforeStart(ts time.Time, maxDelay time.Duration) time.Duration {
	if g.EvalOffset != nil {
		// if offset is specified, ignore the maxDelay and return a duration aligned with offset
		currentOffsetPoint := ts.Truncate(g.Interval).Add(*g.EvalOffset)
// if offset is specified, delayBeforeStart returns a duration to help aligning timestamp with offset;
// otherwise, it returns a random duration between [0..interval] based on group key.
func delayBeforeStart(ts time.Time, key uint64, interval time.Duration, offset *time.Duration) time.Duration {
	if offset != nil {
		currentOffsetPoint := ts.Truncate(interval).Add(*offset)
		if currentOffsetPoint.Before(ts) {
			// wait until the next offset point
			return currentOffsetPoint.Add(g.Interval).Sub(ts)
			return currentOffsetPoint.Add(interval).Sub(ts)
		}
		return currentOffsetPoint.Sub(ts)
	}

	// otherwise, return a random duration between [0..min(interval, maxDelay)] based on group ID
	interval := g.Interval
	if interval > maxDelay {
		// artificially limit interval, so groups with big intervals could start sooner.
		interval = maxDelay
	}
	var randSleep time.Duration
	randSleep = time.Duration(float64(interval) * (float64(g.GetID()) / (1 << 64)))
	randSleep = time.Duration(float64(interval) * (float64(key) / (1 << 64)))
	sleepOffset := time.Duration(ts.UnixNano() % interval.Nanoseconds())
	if randSleep < sleepOffset {
		randSleep += interval
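Both variants of delayBeforeStart map a group key to a fixed phase inside the interval and wait until that phase next occurs, so groups start at distinct, stable moments. A runnable sketch of the idea; the final subtraction is cut off by the hunk above, so its placement here is an assumption:

package main

import (
	"fmt"
	"time"
)

// delay maps a group key to a fixed phase inside interval and returns
// the time left until that phase next occurs.
func delay(ts time.Time, key uint64, interval time.Duration) time.Duration {
	phase := time.Duration(float64(interval) * (float64(key) / (1 << 64)))
	elapsed := time.Duration(ts.UnixNano() % interval.Nanoseconds())
	if phase < elapsed {
		phase += interval // this cycle's phase already passed; wait for the next one
	}
	return phase - elapsed
}

func main() {
	ts := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(delay(ts, 1<<63, 5*time.Minute)) // 2m30s: the key sits mid-range
}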
@@ -561,13 +550,15 @@ func (g *Group) Replay(start, end time.Time, rw remotewrite.RWClient, maxDataPoi
	if !disableProgressBar {
		bar = pb.StartNew(iterations * len(g.Rules))
	}
	for i := range g.Rules {
		rule := g.Rules[i]
	for _, r := range g.Rules {
		sem <- struct{}{}
		wg.Go(func() {
			res <- replayRuleRange(rule, ri, bar, rw, replayRuleRetryAttempts, ruleEvaluationConcurrency)
		wg.Add(1)
		go func(r Rule, ri rangeIterator) {
			// pass ri as a copy, so it can be modified within the replayRuleRange
			res <- replayRuleRange(r, ri, bar, rw, replayRuleRetryAttempts, ruleEvaluationConcurrency)
			<-sem
		})
			wg.Done()
		}(r, ri)
	}

	wg.Wait()
@@ -597,10 +588,10 @@ func replayRuleRange(r Rule, ri rangeIterator, bar *pb.ProgressBar, rw remotewri
	res := make(chan int, int(ri.end.Sub(ri.start)/ri.step)+1)
	for ri.next() {
		sem <- struct{}{}
		start := ri.s
		end := ri.e
		wg.Go(func() {
			n, err := replayRule(r, start, end, rw, replayRuleRetryAttempts)
		wg.Add(1)

		go func(s, e time.Time) {
			n, err := replayRule(r, s, e, rw, replayRuleRetryAttempts)
			if err != nil {
				logger.Fatalf("rule %q: %s", r, err)
			}
@@ -609,7 +600,8 @@ func replayRuleRange(r Rule, ri rangeIterator, bar *pb.ProgressBar, rw remotewri
			}
			res <- n
			<-sem
		})
			wg.Done()
		}(ri.s, ri.e)
	}
	wg.Wait()
	close(res)
@@ -623,9 +615,10 @@ func replayRuleRange(r Rule, ri rangeIterator, bar *pb.ProgressBar, rw remotewri
}

// ExecOnce evaluates all the rules under group for once with given timestamp.
func (g *Group) ExecOnce(ctx context.Context, rw remotewrite.RWClient, evalTS time.Time) chan error {
func (g *Group) ExecOnce(ctx context.Context, nts func() []notifier.Notifier, rw remotewrite.RWClient, evalTS time.Time) chan error {
	e := &executor{
		Rw:              rw,
		Notifiers:       nts,
		notifierHeaders: g.NotifierHeaders,
	}
	if len(g.Rules) < 1 {
@@ -700,6 +693,7 @@ func (g *Group) getEvalDelay() time.Duration {

// executor contains group's notify and rw configs
type executor struct {
	Notifiers       func() []notifier.Notifier
	notifierHeaders map[string]string

	Rw remotewrite.RWClient
@@ -720,13 +714,14 @@ func (e *executor) execConcurrently(ctx context.Context, rules []Rule, ts time.T
	sem := make(chan struct{}, concurrency)
	go func() {
		wg := sync.WaitGroup{}
		for i := range rules {
			rule := rules[i]
		for _, r := range rules {
			sem <- struct{}{}
			wg.Go(func() {
				res <- e.exec(ctx, rule, ts, resolveDuration, limit)
			wg.Add(1)
			go func(r Rule) {
				res <- e.exec(ctx, r, ts, resolveDuration, limit)
				<-sem
			})
				wg.Done()
			}(r)
		}
		wg.Wait()
		close(res)
@@ -780,6 +775,17 @@ func (e *executor) exec(ctx context.Context, r Rule, ts time.Time, resolveDurati
		return nil
	}

	errGr := notifier.Send(ctx, alerts, e.notifierHeaders)
	wg := sync.WaitGroup{}
	errGr := new(vmalertutil.ErrGroup)
	for _, nt := range e.Notifiers() {
		wg.Add(1)
		go func(nt notifier.Notifier) {
			if err := nt.Send(ctx, alerts, e.notifierHeaders); err != nil {
				errGr.Add(fmt.Errorf("rule %q: failed to send alerts to addr %q: %w", r, nt.Addr(), err))
			}
			wg.Done()
		}(nt)
	}
	wg.Wait()
	return errGr.Err()
}

@@ -262,7 +262,7 @@ func TestUpdateDuringRandSleep(t *testing.T) {
		updateCh: make(chan *Group),
	}
	g.Init()
	go g.Start(context.Background(), nil, nil)
	go g.Start(context.Background(), nil, nil, nil)

	rule1 := AlertingRule{
		Name: "jobDown",
@@ -346,8 +346,7 @@ func TestGroupStart(t *testing.T) {
	}

	fs := &datasource.FakeQuerier{}
	fn, cleanup := notifier.InitFakeNotifier()
	defer cleanup()
	fn := &notifier.FakeNotifier{}

	const evalInterval = time.Millisecond
	g := NewGroup(groups[0], fs, evalInterval, map[string]string{"cluster": "east-1"})
@@ -396,7 +395,7 @@
	fs.Add(m2)
	g.Init()
	go func() {
		g.Start(context.Background(), nil, fs)
		g.Start(context.Background(), func() []notifier.Notifier { return []notifier.Notifier{fn} }, nil, fs)
		close(finished)
	}()

@@ -473,10 +472,15 @@ func TestFaultyNotifier(t *testing.T) {
	r := newTestAlertingRule("instant", 0)
	r.q = fq

	fn, cleanup := notifier.InitFakeNotifier()
	defer cleanup()

	e := &executor{}
	fn := &notifier.FakeNotifier{}
	e := &executor{
		Notifiers: func() []notifier.Notifier {
			return []notifier.Notifier{
				&notifier.FaultyNotifier{},
				fn,
			}
		},
	}
	delay := 5 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), delay)
	defer cancel()
@@ -549,7 +553,7 @@ func TestCloseWithEvalInterruption(t *testing.T) {
	g := NewGroup(groups[0], fq, evalInterval, nil)
	g.Init()

	go g.Start(context.Background(), nil, nil)
	go g.Start(context.Background(), nil, nil, nil)

	time.Sleep(evalInterval * 20)

@@ -567,10 +571,9 @@

func TestGroupStartDelay(t *testing.T) {
	g := &Group{}
	g.id = uint64(math.MaxUint64 / 10)
	// interval of 5min and key generate a static delay of 30s
	g.Interval = time.Minute * 5
	maxDelay := time.Minute * 5
	key := uint64(math.MaxUint64 / 10)

	f := func(atS, expS string) {
		t.Helper()
@@ -582,7 +585,7 @@ func TestGroupStartDelay(t *testing.T) {
		if err != nil {
			t.Fatal(err)
		}
		delay := g.delayBeforeStart(at, maxDelay)
		delay := delayBeforeStart(at, key, g.Interval, g.EvalOffset)
		gotStart := at.Add(delay)
		if expTS != gotStart {
			t.Fatalf("expected to get %v; got %v instead", expTS, gotStart)
@@ -603,15 +606,6 @@ func TestGroupStartDelay(t *testing.T) {
	f("2023-01-01T00:01:00.000+00:00", "2023-01-01T00:03:00.000+00:00")
	f("2023-01-01T00:03:30.000+00:00", "2023-01-01T00:08:00.000+00:00")
	f("2023-01-01T00:08:00.000+00:00", "2023-01-01T00:08:00.000+00:00")

	maxDelay = time.Minute * 1
	g.EvalOffset = nil

	// test group with maxDelay, and offset disabled
	f("2023-01-01T00:00:00.000+00:00", "2023-01-01T00:00:06.000+00:00")
	f("2023-01-01T00:00:01.000+00:00", "2023-01-01T00:00:06.000+00:00")
	f("2023-01-01T00:00:06.100+00:00", "2023-01-01T00:01:06.000+00:00")
	f("2023-01-01T00:00:11.000+00:00", "2023-01-01T00:01:06.000+00:00")
}

func TestGetPrometheusReqTimestamp(t *testing.T) {

@@ -2,7 +2,6 @@ package rule

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"
@@ -198,7 +197,7 @@ func (rr *RecordingRule) exec(ctx context.Context, ts time.Time, limit int) ([]p

	defer func() {
		rr.state.add(curState)
		if curState.Err != nil && !errors.Is(curState.Err, context.Canceled) {
		if curState.Err != nil {
			rr.metrics.errors.Inc()
		}
	}()
@@ -237,8 +236,7 @@ func (rr *RecordingRule) exec(ctx context.Context, ts time.Time, limit int) ([]p
				Labels: stringToLabels(k),
				Samples: []prompb.Sample{
					{Value: decimal.StaleNaN, Timestamp: ts.UnixNano() / 1e6},
				},
			})
			}})
		}
		rr.lastEvaluation = curEvaluation
		return tss, nil
@@ -293,11 +291,6 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric) prompb.TimeSeries {
	}
	// add extra labels configured by user
	for k := range rr.Labels {
		// do not add label with empty value, since it has no meaning.
		// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9984
		if rr.Labels[k] == "" {
			continue
		}
		existingLabel := promrelabel.GetLabelByName(m.Labels, k)
		if existingLabel != nil { // there is a conflict between extra and existing label
			if existingLabel.Value == rr.Labels[k] {

@@ -209,6 +209,15 @@ func (ar *AlertingRule) AlertsToAPI() []*ApiAlert {
	return alerts
}

// AlertToAPI generates apiAlert object from alert by its id(hash)
func (ar *AlertingRule) AlertToAPI(id uint64) *ApiAlert {
	a := ar.GetAlert(id)
	if a == nil {
		return nil
	}
	return NewAlertAPI(ar, a)
}

// NewAlertAPI creates apiAlert for notifier.Alert
func NewAlertAPI(ar *AlertingRule, a *notifier.Alert) *ApiAlert {
	aa := &ApiAlert{

@@ -34,12 +34,11 @@ body {
  padding-top: 4.5rem;
}

.vm-group {
.group-items {
  cursor: pointer;
  padding: 5px;
  margin-top: 5px;
  position: relative;
  display: none;
}

.btn svg, .dropdown-item svg {
@@ -56,22 +55,14 @@ body {
  height: 38px;
}

.vm-item:not(.vm-found) {
  display: none;
.group-items:not(:has(.sub-item:not(.d-none))) {
  display: none !important;
}

.vm-group:has(.vm-item:is(.vm-found)), .vm-group:is(.vm-found) {
  display: flex;
}

.vm-group:hover {
.group-items:hover {
  background-color: #f8f9fa!important;
}

.vm-group:is(.vm-found) .vm-item {
  display: table-row;
}

.table {
  table-layout: fixed;
}
@@ -120,9 +111,3 @@ textarea.curl-area {
.w-60 {
  width: 60%;
}

.annotations {
  white-space: pre-wrap;
  color: gray;
  word-wrap: break-word;
}

@@ -65,34 +65,32 @@ function getParamURL(key) {
  return url.searchParams.get(key)
}

function matchText(search, item) {
  const text = item.innerText.toLowerCase();
  return text.indexOf(search) >= 0;
}

function filterRules(searchPhrase) {
  document.querySelectorAll('.vm-group').forEach((group) => {
    if (!searchPhrase) {
      group.classList.add('vm-found');
      return;
    }
    for (const item of group.querySelectorAll('.vm-group-search')) {
      if (matchText(searchPhrase, item)) {
        group.classList.add('vm-found');
        return;
  document.querySelectorAll('.sub-items').forEach((rules) => {
    let found = false;
    rules.querySelectorAll('.sub-item').forEach((rule) => {
      if (searchPhrase) {
        const ruleName = rule.innerText.toLowerCase();
        const matches = []
        const hasValue = ruleName.indexOf(searchPhrase) >= 0;
        rule.querySelectorAll('.label').forEach((label) => {
          const text = label.innerText.toLowerCase();
          if (text.indexOf(searchPhrase) >= 0) {
            matches.push(text);
          }
        });
        if (!matches.length && !hasValue) {
          rule.classList.add('d-none');
          return;
        }
      }
    }
    group.classList.remove('vm-found');
    for (const item of group.querySelectorAll('.vm-item')) {
      if (matchText(searchPhrase, item)) {
        item.classList.add('vm-found');
        continue;
      }
      if (Array.from(item.querySelectorAll('.label')).find(l => matchText(searchPhrase, l))) {
        item.classList.add('vm-found');
        continue;
      }
      item.classList.remove('vm-found');
      rule.classList.remove('d-none');
      found = true;
    });
    if (found && searchPhrase || !searchPhrase) {
      rules.classList.remove('d-none');
    } else {
      rules.classList.add('d-none');
    }
  });
}

@@ -485,12 +485,6 @@ func templateFuncs() textTpl.FuncMap {

	/* Helpers */

	// now returns the Unix timestamp in seconds at the time of the template evaluation.
	// For example: {{ (now | toTime).Sub $activeAt }} will return the duration the alert has been active.
	"now": func() float64 {
		return float64(time.Now().Unix())
	},

	// Converts a list of objects to a map with keys arg0, arg1 etc.
	// This is intended to allow multiple arguments to be passed to templates.
	"args": func(args ...any) map[string]any {
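The now helper documented above pairs with toTime in templates. A sketch that exercises the documented expression outside vmalert; the toTime stand-in below is hypothetical, not the project's implementation:

package main

import (
	"os"
	"text/template"
	"time"
)

func main() {
	// Stand-ins for the documented helpers: "now" returns Unix seconds as
	// float64 (as above); "toTime" here is a minimal assumed inverse.
	funcs := template.FuncMap{
		"now":    func() float64 { return float64(time.Now().Unix()) },
		"toTime": func(v float64) time.Time { return time.Unix(int64(v), 0) },
	}
	tpl := template.Must(template.New("t").Funcs(funcs).Parse(
		`alert active for {{ (now | toTime).Sub .ActiveAt }}`))
	data := struct{ ActiveAt time.Time }{ActiveAt: time.Now().Add(-90 * time.Second)}
	_ = tpl.Execute(os.Stdout, data) // prints e.g. "alert active for 1m30s"
}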
@@ -412,18 +412,18 @@ func (rh *requestHandler) groupAlerts() []rule.GroupAlerts {
	defer rh.m.groupsMu.RUnlock()

	var gAlerts []rule.GroupAlerts
	for _, group := range rh.m.groups {
	for _, g := range rh.m.groups {
		var alerts []*rule.ApiAlert
		g := group.ToAPI()
		for _, r := range g.Rules {
			if r.Type != rule.TypeAlerting {
			a, ok := r.(*rule.AlertingRule)
			if !ok {
				continue
			}
			alerts = append(alerts, r.Alerts...)
			alerts = append(alerts, a.AlertsToAPI()...)
		}
		if len(alerts) > 0 {
			gAlerts = append(gAlerts, rule.GroupAlerts{
				Group:  g,
				Group:  g.ToAPI(),
				Alerts: alerts,
			})
		}
@@ -444,12 +444,12 @@ func (rh *requestHandler) listAlerts(rf *rulesFilter) ([]byte, error) {
		if !rf.matchesGroup(group) {
			continue
		}
		g := group.ToAPI()
		for _, r := range g.Rules {
			if r.Type != rule.TypeAlerting {
		for _, r := range group.Rules {
			a, ok := r.(*rule.AlertingRule)
			if !ok {
				continue
			}
			lr.Data.Alerts = append(lr.Data.Alerts, r.Alerts...)
			lr.Data.Alerts = append(lr.Data.Alerts, a.AlertsToAPI()...)
		}
	}


@@ -114,17 +114,14 @@
{%= Controls(prefix, currentIcon, currentText, icons, filters, true) %}
{% if len(groups) > 0 %}
	{% for _, g := range groups %}
		<div id="group-{%s g.ID %}" class="w-100 border-0 flex-column vm-group{% if g.Unhealthy > 0 %} alert-danger{% endif %}">
		<div id="group-{%s g.ID %}" class="d-flex w-100 border-0 flex-column group-items{% if g.Unhealthy > 0 %} alert-danger{% endif %}">
			<span class="d-flex justify-content-between">
				<a
					class="vm-group-search"
					href="#group-{%s g.ID %}"
				>{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%f.0 g.Interval %}s) #</a>
				<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%f.0 g.Interval %}s) #</a>
				<span
					class="flex-grow-1 d-flex justify-content-end"
					role="button"
					data-bs-toggle="collapse"
					data-bs-target="#item-{%s g.ID %}"
					data-bs-target="#sub-{%s g.ID %}"
				>
					<span class="d-flex gap-2">
						{% if g.Unhealthy > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d g.Unhealthy %}</span> {% endif %}
@@ -137,9 +134,9 @@
				class="d-flex flex-column row-gap-2 mb-2"
				role="button"
				data-bs-toggle="collapse"
				data-bs-target="#item-{%s g.ID %}"
				data-bs-target="#sub-{%s g.ID %}"
			>
				<span class="fs-6 text-start vm-group-search w-100 fw-lighter">{%s g.File %}</span>
				<span class="fs-6 text-start w-100 fw-lighter">{%s g.File %}</span>
				{% if len(g.Params) > 0 %}
					<span class="fs-6 text-start w-100 d-flex justify-content-between fw-lighter">
						<span>Extra params</span>
@@ -161,7 +158,7 @@
					</span>
				{% endif %}
			</span>
			<div class="collapse" id="item-{%s g.ID %}">
			<div class="collapse sub-items" id="sub-{%s g.ID %}">
				<table class="table table-striped table-hover table-sm">
					<thead>
						<tr>
@@ -172,7 +169,7 @@
					</thead>
					<tbody>
						{% for _, r := range g.Rules %}
							<tr class="vm-item{% if r.LastError != "" %} alert-danger{% endif %}">
							<tr class="sub-item{% if r.LastError != "" %} alert-danger{% endif %}">
								<td>
									<div class="row">
										<div class="col-12 mb-2">
@@ -209,12 +206,7 @@
									</div>
								</td>
								<td class="text-center">{%d r.LastSamples %}</td>
								<td class="text-center">{% if r.LastEvaluation.IsZero() %}
									Never
								{% else %}
									{%f.3 time.Since(r.LastEvaluation).Seconds() %}s ago
								{% endif %}
								</td>
								<td class="text-center">{%f.3 time.Since(r.LastEvaluation).Seconds() %}s ago</td>
							</tr>
						{% endfor %}
					</tbody>
@@ -249,14 +241,14 @@
		}
		sort.Strings(keys)
	%}
	<div class="w-100 flex-column vm-group alert-danger">
	<div class="d-flex w-100 flex-column group-items alert-danger">
		<span id="group-{%s g.ID %}" class="d-flex justify-content-between">
			<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %}</a>
			<span
				class="flex-grow-1 d-flex justify-content-end"
				role="button"
				data-bs-toggle="collapse"
				data-bs-target="#item-{%s g.ID %}"
				data-bs-target="#sub-{%s g.ID %}"
			>
				<span class="badge bg-danger" title="Number of active alerts">{%d len(ga.Alerts) %}</span>
			</span>
@@ -266,10 +258,10 @@
			class="fs-6 text-start w-100 fw-lighter"
			role="button"
			data-bs-toggle="collapse"
			data-bs-target="#item-{%s g.ID %}"
			data-bs-target="#sub-{%s g.ID %}"
		>{%s g.File %}</span>
		</span>
		<div class="collapse" id="item-{%s g.ID %}">
		<div class="collapse sub-items" id="sub-{%s g.ID %}">
			{% for _, ruleID := range keys %}
				{%code
					defaultAR := alertsByRule[ruleID][0]
@@ -280,7 +272,7 @@
					sort.Strings(labelKeys)
				%}
				<br>
				<div class="vm-item">
				<div class="sub-item">
					<b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})
					| <span><a target="_blank" href="{%s defaultAR.SourceLink %}">Source</a></span>
					<br>
@@ -345,20 +337,20 @@
			typeK, ns := keys[i], targets[notifier.TargetType(keys[i])]
			count := len(ns)
		%}
		<div class="w-100 flex-column vm-group">
		<div class="d-flex w-100 flex-column group-items">
			<span class="d-flex justify-content-between" id="group-{%s typeK %}">
				<a href="#group-{%s typeK %}">{%s typeK %} ({%d count %})</a>
				<span
					class="flex-grow-1"
					role="button"
					data-bs-toggle="collapse"
					data-bs-target="#item-{%s typeK %}"
					data-bs-target="#sub-{%s typeK %}"
				></span>
			</span>
			<div id="item-{%s typeK %}" class="collapse show">
			<div id="sub-{%s typeK %}" class="collapse show sub-items">
				<table class="table table-striped table-hover table-sm">
					<thead>
						<tr class="vm-item">
						<tr class="sub-item">
							<th scope="col">Labels</th>
							<th scope="col">Address</th>
						</tr>
@@ -443,7 +435,7 @@
	<div class="col">
		{% for _, k := range annotationKeys %}
			<b>{%s k %}:</b><br>
			<p class="annotations">{%s alert.Annotations[k] %}</p>
			<p>{%s alert.Annotations[k] %}</p>
		{% endfor %}
	</div>
</div>
@@ -557,7 +549,7 @@
	<div class="col">
		{% for _, k := range annotationKeys %}
			<b>{%s k %}:</b><br>
			<p class="annotations">{%s rule.Annotations[k] %}</p>
			<p>{%s rule.Annotations[k] %}</p>
		{% endfor %}
	</div>
</div>
@@ -602,11 +594,11 @@
<table class="table table-striped table-hover table-sm">
	<thead>
		<tr>
			<th scope="col" title="The time when the rule was executed">Updated at</th>
			<th scope="col" title="The time when event was created">Updated at</th>
			<th scope="col" class="w-10 text-center" title="How many series expression returns. Each series will represent an alert.">Series returned</th>
			{% if seriesFetchedEnabled %}<th scope="col" class="w-10 text-center" title="How many series were scanned by datasource during the evaluation">Series fetched</th>{% endif %}
			<th scope="col" class="w-10 text-center" title="How many seconds request took">Duration</th>
			<th scope="col" class="text-center" title="The time used in execution query request">Execution timestamp</th>
			<th scope="col" class="text-center" title="Time used for rule execution">Executed at</th>
			<th scope="col" class="text-center" title="cURL command with request example">cURL</th>
		</tr>
	</thead>

File diff suppressed because it is too large
@@ -23,9 +23,6 @@ func TestHandler(t *testing.T) {
		Timestamps: []int64{0},
	})
	m := &manager{groups: map[uint64]*rule.Group{}}
	_, cleanup := notifier.InitFakeNotifier()
	defer cleanup()

	var ar *rule.AlertingRule
	var rr *rule.RecordingRule
	var groupIDs []uint64
@@ -48,7 +45,7 @@ func TestHandler(t *testing.T) {
	}, fq, 1*time.Minute, nil)
	ar = g.Rules[0].(*rule.AlertingRule)
	rr = g.Rules[1].(*rule.RecordingRule)
	g.ExecOnce(context.Background(), nil, time.Time{})
	g.ExecOnce(context.Background(), func() []notifier.Notifier { return nil }, nil, time.Time{})
	id := g.CreateID()
	m.groups[id] = g
	groupIDs = append(groupIDs, id)
@@ -27,9 +27,6 @@ vmauth-linux-ppc64le-prod:
vmauth-linux-386-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-386

vmauth-linux-s390x-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-linux-s390x

vmauth-darwin-amd64-prod:
	APP_NAME=vmauth $(MAKE) app-via-docker-darwin-amd64

@@ -4,7 +4,6 @@ import (
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"flag"
	"fmt"
	"math"
@@ -95,8 +94,6 @@ type UserInfo struct {
	rt http.RoundTripper

	requests         *metrics.Counter
	requestErrors    *metrics.Counter
	backendRequests  *metrics.Counter
	backendErrors    *metrics.Counter
	requestsDuration *metrics.Summary
}
@@ -108,29 +105,13 @@ type HeadersConf struct {
	KeepOriginalHost *bool `yaml:"keep_original_host,omitempty"`
}

func (ui *UserInfo) beginConcurrencyLimit(ctx context.Context) error {
func (ui *UserInfo) beginConcurrencyLimit() error {
	select {
	case ui.concurrencyLimitCh <- struct{}{}:
		return nil
	default:
		ui.concurrencyLimitReached.Inc()

		// The per-user limit for the number of concurrent requests is reached.
		// Wait until the currently executed requests are finished, so the current request could be executed.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
		select {
		case ui.concurrencyLimitCh <- struct{}{}:
			return nil
		case <-ctx.Done():
			err := ctx.Err()
			if errors.Is(err, context.DeadlineExceeded) {
				return fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because %d concurrent requests from the user %s are executed",
					*maxQueueDuration, ui.getMaxConcurrentRequests(), ui.name())
			}

			return fmt.Errorf("cannot start executing the request because %d concurrent requests from the user %s are executed: %w",
				ui.getMaxConcurrentRequests(), ui.name(), err)
		}
		return fmt.Errorf("cannot handle more than %d concurrent requests from user %s", ui.getMaxConcurrentRequests(), ui.name())
	}
}
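The context-aware variant above queues a request when the per-user limit is hit instead of rejecting it immediately. The pattern in isolation, with a channel as counting semaphore and the wait bounded by the caller's deadline (illustrative sketch, not project code):

package main

import (
	"context"
	"fmt"
	"time"
)

// acquire reserves a slot in sem; when the limit is reached it keeps
// waiting until a slot frees up or the caller's deadline expires.
func acquire(ctx context.Context, sem chan struct{}) error {
	select {
	case sem <- struct{}{}:
		return nil
	default: // limit reached; fall through to the bounded wait
	}
	select {
	case sem <- struct{}{}:
		return nil
	case <-ctx.Done():
		return fmt.Errorf("no free slot within the queue duration: %w", ctx.Err())
	}
}

func main() {
	sem := make(chan struct{}, 1)
	sem <- struct{}{} // occupy the only slot
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(acquire(ctx, sem)) // times out: this is the 429 path
}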

@@ -146,18 +127,6 @@ func (ui *UserInfo) getMaxConcurrentRequests() int {
	return mcr
}

func (ui *UserInfo) stopHealthChecks() {
	if ui == nil {
		return
	}
	if ui.URLPrefix == nil {
		return
	}

	bus := ui.URLPrefix.bus.Load()
	bus.stopHealthChecks()
}

// Header is `Name: Value` http header, which must be added to the proxied request.
type Header struct {
	Name string
@@ -293,7 +262,7 @@ type URLPrefix struct {
	// the list of backend urls
	//
	// the list can be dynamically updated if `discover_backend_ips` option is set.
	bus atomic.Pointer[backendURLs]
	bus atomic.Pointer[[]*backendURL]

	// if this option is set, then backend ips for busOriginal are periodically re-discovered and put to bus.
	discoverBackendIPs bool
@@ -317,93 +286,21 @@ func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
	}
}

type backendURLs struct {
	healthChecksContext context.Context
	healthChecksCancel  func()
	healthChecksWG      sync.WaitGroup

	bus []*backendURL
}

func newBackendURLs() *backendURLs {
	ctx, cancel := context.WithCancel(context.Background())
	return &backendURLs{
		healthChecksContext: ctx,
		healthChecksCancel:  cancel,
	}
}

func (bus *backendURLs) add(u *url.URL) {
	bus.bus = append(bus.bus, &backendURL{
		url:                u,
		healthCheckContext: bus.healthChecksContext,
		healthCheckWG:      &bus.healthChecksWG,
	})
}

func (bus *backendURLs) stopHealthChecks() {
	bus.healthChecksCancel()
	bus.healthChecksWG.Wait()
}

type backendURL struct {
	broken atomic.Bool

	healthCheckContext context.Context
	healthCheckWG      *sync.WaitGroup

	brokenDeadline     atomic.Uint64
	concurrentRequests atomic.Int32

	url *url.URL
}

func (bu *backendURL) isBroken() bool {
	return bu.broken.Load()
	ct := fasttime.UnixTimestamp()
	return ct < bu.brokenDeadline.Load()
}

func (bu *backendURL) setBroken() {
	if bu.broken.CompareAndSwap(false, true) {
		bu.healthCheckWG.Add(1)
		go func() {
			defer bu.healthCheckWG.Done()
			bu.runHealthCheck()
			bu.broken.Store(false)
		}()
	}
}

func (bu *backendURL) runHealthCheck() {
	port := bu.url.Port()
	if port == "" {
		port = "80"
	}
	addr := net.JoinHostPort(bu.url.Hostname(), port)

	t := time.NewTicker(*failTimeout)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			// Verify network connectivity via TCP dial before marking backend healthy.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9997
			ctx, cancel := context.WithTimeout(bu.healthCheckContext, time.Second)
			c, err := netutil.Dialer.DialContext(ctx, "tcp", addr)
			cancel()
			if err != nil {
				if errors.Is(bu.healthCheckContext.Err(), context.Canceled) {
					return
				}
				logger.Warnf("ignoring the backend at %s for %s because of dial error: %s", addr, *failTimeout, err)
				continue
			}

			_ = c.Close()
			return
		case <-bu.healthCheckContext.Done():
			return
		}
	}
	deadline := fasttime.UnixTimestamp() + uint64((*failTimeout).Seconds())
	bu.brokenDeadline.Store(deadline)
}
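The health check above only clears the broken flag once a TCP dial to the backend succeeds. The probe reduced to a standalone sketch (a plain net.Dialer stands in for the project's netutil.Dialer):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// probe reports whether a TCP connection to addr can be opened within
// timeout - the same signal the check above waits for before clearing
// the broken flag.
func probe(ctx context.Context, addr string, timeout time.Duration) bool {
	dctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	var d net.Dialer
	c, err := d.DialContext(dctx, "tcp", addr)
	if err != nil {
		return false
	}
	_ = c.Close()
	return true
}

func main() {
	fmt.Println(probe(context.Background(), "127.0.0.1:1", time.Second)) // almost certainly false
}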

func (bu *backendURL) get() {
@@ -415,8 +312,8 @@ func (bu *backendURL) put() {
}

func (up *URLPrefix) getBackendsCount() int {
	bus := up.bus.Load()
	return len(bus.bus)
	pbus := up.bus.Load()
	return len(*pbus)
}

// getBackendURL returns the backendURL depending on the load balance policy.
@@ -427,15 +324,16 @@ func (up *URLPrefix) getBackendsCount() int {
func (up *URLPrefix) getBackendURL() *backendURL {
	up.discoverBackendAddrsIfNeeded()

	bus := up.bus.Load()
	if len(bus.bus) == 0 {
	pbus := up.bus.Load()
	bus := *pbus
	if len(bus) == 0 {
		return nil
	}

	if up.loadBalancingPolicy == "first_available" {
		return getFirstAvailableBackendURL(bus.bus)
		return getFirstAvailableBackendURL(bus)
	}
	return getLeastLoadedBackendURL(bus.bus, &up.n)
	return getLeastLoadedBackendURL(bus, &up.n)
}

func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
@@ -509,24 +407,25 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
	cancel()

	// generate new backendURLs for the resolved IPs
	busNew := newBackendURLs()
	var busNew []*backendURL
	for _, bu := range up.busOriginal {
		host := bu.Hostname()
		for _, addr := range hostToAddrs[host] {
			buCopy := *bu
			buCopy.Host = addr
			busNew.add(&buCopy)
			busNew = append(busNew, &backendURL{
				url: &buCopy,
			})
		}
	}

	bus := up.bus.Load()
	if areEqualBackendURLs(bus.bus, busNew.bus) {
	pbus := up.bus.Load()
	if areEqualBackendURLs(*pbus, busNew) {
		return
	}

	// Store new backend urls
	up.bus.Store(busNew)
	bus.stopHealthChecks()
	up.bus.Store(&busNew)
}

func areEqualBackendURLs(a, b []*backendURL) bool {
@@ -557,23 +456,20 @@ func getFirstAvailableBackendURL(bus []*backendURL) *backendURL {
	for i := 1; i < len(bus); i++ {
		if !bus[i].isBroken() {
			bu = bus[i]
			bu.get()
			return bu
			break
		}
	}
	return nil
	bu.get()
	return bu
}

// getLeastLoadedBackendURL returns a non-broken backendURL with the lowest number of concurrent requests.
// getLeastLoadedBackendURL returns the backendURL with the minimum number of concurrent requests.
//
// backendURL.put() must be called on the returned backendURL after the request is complete.
func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *backendURL {
	if len(bus) == 1 {
		// Fast path - return the only backend url.
		bu := bus[0]
		if bu.isBroken() {
			return nil
		}
		bu.get()
		return bu
	}
@@ -586,37 +482,27 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
		if bu.isBroken() {
			continue
		}

		// The Load() in front of CompareAndSwap() avoids CAS overhead for items with values bigger than 0.
		if bu.concurrentRequests.Load() == 0 && bu.concurrentRequests.CompareAndSwap(0, 1) {
			atomicCounter.CompareAndSwap(n+1, idx+1)
			// There is no need in the call bu.get(), because we already incremented bu.concurrentRequests above.
		if bu.concurrentRequests.Load() == 0 {
			// Fast path - return the backend with zero concurrently executed requests.
			// Do not use CompareAndSwap() instead of Load(), since it is much slower on systems with many CPU cores.
			bu.concurrentRequests.Add(1)
			return bu
		}
	}

	// Slow path - return the backend with the minimum number of concurrently executed requests.
	buMinIdx := n % uint32(len(bus))
	minRequests := bus[buMinIdx].concurrentRequests.Load()
	for i := uint32(1); i < uint32(len(bus)); i++ {
		idx := (n + i) % uint32(len(bus))
		bu := bus[idx]
	buMin := bus[n%uint32(len(bus))]
	minRequests := buMin.concurrentRequests.Load()
	for _, bu := range bus {
		if bu.isBroken() {
			continue
		}

		reqs := bu.concurrentRequests.Load()
		if reqs < minRequests || bus[buMinIdx].isBroken() {
			buMinIdx = idx
			minRequests = reqs
		if n := bu.concurrentRequests.Load(); n < minRequests || buMin.isBroken() {
			buMin = bu
			minRequests = n
		}
	}
	buMin := bus[buMinIdx]
	if buMin.isBroken() {
		return nil
	}
	buMin.get()
	atomicCounter.CompareAndSwap(n+1, buMinIdx+1)
	return buMin
}
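Stripped of the rotating counter and the CAS fast path, the selection above reduces to: skip broken backends, take the lowest in-flight count, reserve a slot. A simplified sketch; the tie-spreading rotation is deliberately omitted:

package main

import (
	"fmt"
	"sync/atomic"
)

type backend struct {
	name               string
	broken             atomic.Bool
	concurrentRequests atomic.Int32
}

// pick returns the non-broken backend with the fewest in-flight requests
// and reserves a slot on it; nil means every backend is broken.
func pick(bus []*backend) *backend {
	var best *backend
	var min int32
	for _, bu := range bus {
		if bu.broken.Load() {
			continue
		}
		if n := bu.concurrentRequests.Load(); best == nil || n < min {
			best, min = bu, n
		}
	}
	if best != nil {
		best.concurrentRequests.Add(1) // the caller must release the slot when done
	}
	return best
}

func main() {
	bus := []*backend{{name: "a"}, {name: "b"}, {name: "c"}}
	bus[0].concurrentRequests.Store(2)
	bus[2].broken.Store(true)
	fmt.Println(pick(bus).name) // "b": healthy and idle
}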

@@ -839,11 +725,6 @@ func reloadAuthConfigData(data []byte) (bool, error) {

	acPrev := authConfig.Load()
	if acPrev != nil {
		acPrev.UnauthorizedUser.stopHealthChecks()
		for i := range acPrev.Users {
			acPrev.Users[i].stopHealthChecks()
		}

		metrics.UnregisterSet(acPrev.ms, true)
	}
	metrics.RegisterSet(ac.ms)
@@ -890,8 +771,6 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
		return nil, fmt.Errorf("cannot parse metric_labels for unauthorized_user: %w", err)
	}
	ui.requests = ac.ms.NewCounter(`vmauth_unauthorized_user_requests_total` + metricLabels)
	ui.requestErrors = ac.ms.NewCounter(`vmauth_unauthorized_user_request_errors_total` + metricLabels)
	ui.backendRequests = ac.ms.NewCounter(`vmauth_unauthorized_user_request_backend_requests_total` + metricLabels)
	ui.backendErrors = ac.ms.NewCounter(`vmauth_unauthorized_user_request_backend_errors_total` + metricLabels)
	ui.requestsDuration = ac.ms.NewSummary(`vmauth_unauthorized_user_request_duration_seconds` + metricLabels)
	ui.concurrencyLimitCh = make(chan struct{}, ui.getMaxConcurrentRequests())
@@ -940,8 +819,6 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
		return nil, fmt.Errorf("cannot parse metric_labels: %w", err)
	}
	ui.requests = ac.ms.GetOrCreateCounter(`vmauth_user_requests_total` + metricLabels)
	ui.requestErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_errors_total` + metricLabels)
	ui.backendRequests = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_requests_total` + metricLabels)
	ui.backendErrors = ac.ms.GetOrCreateCounter(`vmauth_user_request_backend_errors_total` + metricLabels)
	ui.requestsDuration = ac.ms.GetOrCreateSummary(`vmauth_user_request_duration_seconds` + metricLabels)
	mcr := ui.getMaxConcurrentRequests()
@@ -1176,11 +1053,13 @@ func (up *URLPrefix) sanitizeAndInitialize() error {
	}

	// Initialize up.bus
	bus := newBackendURLs()
	for _, bu := range up.busOriginal {
		bus.add(bu)
	bus := make([]*backendURL, len(up.busOriginal))
	for i, bu := range up.busOriginal {
		bus[i] = &backendURL{
			url: bu,
		}
	}
	up.bus.Store(bus)
	up.bus.Store(&bus)

	return nil
}
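Both versions publish the backend list through an atomic pointer, so readers always see a consistent snapshot while re-discovery swaps in a new list. The idiom in isolation, with []string standing in for []*backendURL:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var bus atomic.Pointer[[]string]

	initial := []string{"http://b1:8428"}
	bus.Store(&initial)
	fmt.Println(*bus.Load()) // readers see a stable snapshot

	// A re-discovery goroutine can swap in a whole new list atomically;
	// in-flight readers keep using the snapshot they already loaded.
	fresh := []string{"http://b1:8428", "http://b2:8428"}
	bus.Store(&fresh)
	fmt.Println(*bus.Load())
}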

@@ -752,12 +752,10 @@ func TestGetLeastLoadedBackendURL(t *testing.T) {
	})
	up.loadBalancingPolicy = "least_loaded"

	pbus := up.bus.Load()
	bus := pbus.bus

	fn := func(ns ...int) {
		t.Helper()

		pbus := up.bus.Load()
		bus := *pbus
		for i, b := range bus {
			got := int(b.concurrentRequests.Load())
			exp := ns[i]
@@ -769,52 +767,45 @@ func TestGetLeastLoadedBackendURL(t *testing.T) {

	up.getBackendURL()
	fn(1, 0, 0)

	up.getBackendURL()
	fn(1, 1, 0)

	up.getBackendURL()
	fn(1, 1, 1)

	bus[1].put()
	bus[2].put()
	fn(1, 0, 0)
	up.getBackendURL()
	up.getBackendURL()
	fn(2, 2, 1)

	bus := up.bus.Load()
	pbus := *bus
	pbus[0].concurrentRequests.Add(2)
	pbus[2].concurrentRequests.Add(5)
	fn(4, 2, 6)

	up.getBackendURL()
	fn(1, 1, 0)
	fn(4, 3, 6)

	bus[1].put()
	up.getBackendURL()
	fn(1, 0, 1)
	fn(4, 4, 6)

	up.getBackendURL()
	fn(4, 5, 6)

	up.getBackendURL()
	fn(5, 5, 6)

	up.getBackendURL()
	fn(6, 5, 6)

	up.getBackendURL()
	fn(6, 6, 6)

	up.getBackendURL()
	fn(6, 6, 7)

	up.getBackendURL()
	up.getBackendURL()
	fn(1, 1, 2)

	bus[0].concurrentRequests.Add(2)
	bus[2].concurrentRequests.Add(2)
	fn(3, 1, 4)

	up.getBackendURL()
	fn(3, 2, 4)

	up.getBackendURL()
	fn(3, 3, 4)

	up.getBackendURL()
	fn(4, 3, 4)

	up.getBackendURL()
	fn(4, 4, 4)

	bus[0].put()
	bus[2].put()

	up.getBackendURL()
	fn(3, 4, 4)

	up.getBackendURL()
	fn(4, 4, 4)
	fn(7, 7, 7)
}

func TestBrokenBackend(t *testing.T) {
@@ -825,7 +816,7 @@ func TestBrokenBackend(t *testing.T) {
	})
	up.loadBalancingPolicy = "least_loaded"
	pbus := up.bus.Load()
	bus := pbus.bus
	bus := *pbus

	// explicitly mark one of the backends as broken
	bus[1].setBroken()
@@ -848,7 +839,7 @@ func TestDiscoverBackendIPsWithIPV6(t *testing.T) {

	up.discoverBackendAddrsIfNeeded()
	pbus := up.bus.Load()
	bus := pbus.bus
	bus := *pbus

	if len(bus) != 1 {
		t.Fatalf("expected url list to be of size 1; got %d instead", len(bus))
@@ -942,14 +933,16 @@ func mustParseURL(u string) *URLPrefix {
}

func mustParseURLs(us []string) *URLPrefix {
	bus := newBackendURLs()
	bus := make([]*backendURL, len(us))
	urls := make([]*url.URL, len(us))
	for i, u := range us {
		pu, err := url.Parse(u)
		if err != nil {
			panic(fmt.Errorf("BUG: cannot parse %q: %w", u, err))
		}
		bus.add(pu)
		bus[i] = &backendURL{
			url: pu,
		}
		urls[i] = pu
	}
	up := &URLPrefix{}
@@ -958,7 +951,7 @@ func mustParseURLs(us []string) *URLPrefix {
	} else {
		up.vOriginal = us
	}
	up.bus.Store(bus)
	up.bus.Store(&bus)
	up.busOriginal = urls
	return up
}
@@ -44,17 +44,12 @@ var (
		"See also -maxConcurrentRequests")
	idleConnTimeout = flag.Duration("idleConnTimeout", 50*time.Second, "The timeout for HTTP keep-alive connections to backend services. "+
		"It is recommended setting this value to values smaller than -http.idleConnTimeout set at backend services")
	responseTimeout = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")

	responseTimeout       = flag.Duration("responseTimeout", 5*time.Minute, "The timeout for receiving a response from backend")
	maxConcurrentRequests = flag.Int("maxConcurrentRequests", 1000, "The maximum number of concurrent requests vmauth can process. Other requests are rejected with "+
		"'429 Too Many Requests' http status code. See also -maxQueueDuration, -maxConcurrentPerUserRequests and -maxIdleConnsPerBackend command-line options")
		"'429 Too Many Requests' http status code. See also -maxConcurrentPerUserRequests and -maxIdleConnsPerBackend command-line options")
	maxConcurrentPerUserRequests = flag.Int("maxConcurrentPerUserRequests", 300, "The maximum number of concurrent requests vmauth can process per each configured user. "+
		"Other requests are rejected with '429 Too Many Requests' http status code. See also -maxQueueDuration and -maxConcurrentRequests command-line options "+
		"and max_concurrent_requests option in per-user config")
	maxQueueDuration = flag.Duration("maxQueueDuration", 10*time.Second, "The maximum duration the request waits for execution when the number of concurrently executed "+
		"requests reach -maxConcurrentRequests or -maxConcurrentPerUserRequests before returning '429 Too Many Requests' error. "+
		"This allows graceful handling of short spikes in the number of concurrent requests")

		"Other requests are rejected with '429 Too Many Requests' http status code. See also -maxConcurrentRequests command-line option and max_concurrent_requests option "+
		"in per-user config")
	reloadAuthKey        = flagutil.NewPassword("reloadAuthKey", "Auth key for /-/reload http endpoint. It must be passed via authKey query arg. It overrides -httpAuth.*")
	logInvalidAuthTokens = flag.Bool("logInvalidAuthTokens", false, "Whether to log requests with invalid auth tokens. "+
		`Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
@@ -156,6 +151,7 @@ func requestHandlerWithInternalRoutes(w http.ResponseWriter, r *http.Request) bo
}

func requestHandler(w http.ResponseWriter, r *http.Request) bool {

	ats := getAuthTokensFromRequest(r)
	if len(ats) == 0 {
		// Process requests for unauthorized users
@@ -212,45 +208,20 @@ func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {

	ui.requests.Inc()

	ctx, cancel := context.WithTimeout(r.Context(), *maxQueueDuration)
	defer cancel()

	// Limit the concurrency of requests to backends
	concurrencyLimitOnce.Do(concurrencyLimitInit)
	select {
	case concurrencyLimitCh <- struct{}{}:
		if err := ui.beginConcurrencyLimit(ctx); err != nil {
		if err := ui.beginConcurrencyLimit(); err != nil {
			handleConcurrencyLimitError(w, r, err)
			<-concurrencyLimitCh
			return
		}
	default:
		// The -maxConcurrentRequests are executed. Wait until some of the requests are finished,
		// so the current request could be executed.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10078
		select {
		case concurrencyLimitCh <- struct{}{}:
			if err := ui.beginConcurrencyLimit(ctx); err != nil {
				handleConcurrencyLimitError(w, r, err)
				<-concurrencyLimitCh
				return
			}
		case <-ctx.Done():
			err := ctx.Err()

			concurrentRequestsLimitReached.Inc()

			if errors.Is(err, context.DeadlineExceeded) {
				err = fmt.Errorf("cannot start executing the request during -maxQueueDuration=%s because -maxConcurrentRequests=%d concurrent requests are executed",
					*maxQueueDuration, cap(concurrencyLimitCh))
				handleConcurrencyLimitError(w, r, err)
				return
			}

			err = fmt.Errorf("cannot start executing the request because -maxConcurrentRequests=%d concurrent requests are executed: %w", cap(concurrencyLimitCh), err)
			handleConcurrencyLimitError(w, r, err)
			return
		}
		concurrentRequestsLimitReached.Inc()
		err := fmt.Errorf("cannot serve more than -maxConcurrentRequests=%d concurrent requests", cap(concurrencyLimitCh))
		handleConcurrencyLimitError(w, r, err)
		return
	}
	processRequest(w, r, ui)
	ui.endConcurrencyLimit()
@@ -314,18 +285,16 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
|
||||
return
|
||||
}
|
||||
bu.setBroken()
|
||||
ui.backendErrors.Inc()
|
||||
}
|
||||
err := &httpserver.ErrorWithStatusCode{
|
||||
Err: fmt.Errorf("all the %d backends for the user %q are unavailable", up.getBackendsCount(), ui.name()),
|
||||
StatusCode: http.StatusBadGateway,
|
||||
}
|
||||
httpserver.Errorf(w, r, "%s", err)
|
||||
ui.requestErrors.Inc()
|
||||
ui.backendErrors.Inc()
|
||||
}
|
||||
|
||||
func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url.URL, hc HeadersConf, retryStatusCodes []int, ui *UserInfo) (bool, bool) {
|
||||
ui.backendRequests.Inc()
|
||||
req := sanitizeRequestHeaders(r)
|
||||
|
||||
req.URL = targetURL
|
||||
@@ -341,21 +310,15 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
|
||||
|
||||
rtb, rtbOK := req.Body.(*readTrackingBody)
|
||||
res, err := ui.rt.RoundTrip(req)
|
||||
|
||||
if ctxErr := r.Context().Err(); ctxErr != nil {
|
||||
// Override the error returned by the RoundTrip with the context error if it isn't non-nil
|
||||
// This makes sure the proper logging for canceled and timed out requests - log the real cause of the error
|
||||
// instead of the random error, which could be returned from RoundTrip because of canceled or timed out request.
|
||||
err = ctxErr
|
||||
}
|
||||
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
// Do not retry canceled or timed-out requests
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
requestURI := httpserver.GetRequestURI(r)
logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying response body from %s: %s", remoteAddr, requestURI, targetURL, err)
if errors.Is(err, context.DeadlineExceeded) {
// Timed-out requests must be counted as errors, since this usually means that the backend is slow.
logger.Warnf("remoteAddr: %s; requestURI: %s; timeout while proxying the response from %s: %s", remoteAddr, requestURI, targetURL, err)
ui.backendErrors.Inc()
}
return false, false
}
@@ -367,7 +330,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
}
httpserver.Errorf(w, r, "%s", err)
ui.backendErrors.Inc()
ui.requestErrors.Inc()
return true, false
}
if netutil.IsTrivialNetworkError(err) {
@@ -375,11 +337,11 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
return false, true
}

// Request body wasn't read yet, this usually means that the backend isn't reachable; retry the request at another backend
// Retry the request if its body wasn't read yet. This usually means that the backend isn't reachable.
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
// NOTE: do not use httpserver.GetRequestURI
// it explicitly reads request body, which may fail retries.
logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed: %s, retrying the request at another backend", remoteAddr, req.URL, targetURL, err)
logger.Warnf("remoteAddr: %s; requestURI: %s; retrying the request to %s because of response error: %s", remoteAddr, req.URL, targetURL, err)
return false, false
}
if slices.Contains(retryStatusCodes, res.StatusCode) {
@@ -388,13 +350,12 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
// If we get an error from the retry_status_codes list, but cannot execute retry,
// we consider such a request an error as well.
err := &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("got response status code=%d from %s, but cannot retry the request at another backend, because the request has been already consumed",
Err: fmt.Errorf("got response status code=%d from %s, but cannot retry the request on another backend, because the request has been already consumed",
res.StatusCode, targetURL),
StatusCode: http.StatusServiceUnavailable,
}
httpserver.Errorf(w, r, "%s", err)
ui.backendErrors.Inc()
ui.requestErrors.Inc()
return true, false
}
// Retry requests at other backends if the response status code matches retryStatusCodes.
@@ -402,7 +363,7 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
remoteAddr := httpserver.GetQuotedRemoteAddr(r)
// NOTE: do not use httpserver.GetRequestURI
// it explicitly reads request body, which may fail retries.
logger.Warnf("remoteAddr: %s; requestURI: %s; request to %s failed, retrying the request at another backend because response status code=%d belongs to retry_status_codes=%d",
logger.Warnf("remoteAddr: %s; requestURI: %s; retrying the request to %s because response status code=%d belongs to retry_status_codes=%d",
remoteAddr, req.URL, targetURL, res.StatusCode, retryStatusCodes)
return false, false
}
@@ -418,7 +379,6 @@ func tryProcessingRequest(w http.ResponseWriter, r *http.Request, targetURL *url
requestURI := httpserver.GetRequestURI(r)

logger.Warnf("remoteAddr: %s; requestURI: %s; error when proxying response body from %s: %s", remoteAddr, requestURI, targetURL, err)
ui.requestErrors.Inc()
return true, false
}
return true, false
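
For orientation, the two booleans returned by tryProcessingRequest let the caller decide between finishing and retrying at another backend. A stripped-down sketch of such a retry loop; the names and the exact meaning of the flags here are illustrative assumptions, not the vmauth implementation:

package main

import "fmt"

// tryFunc mimics the (done, mayRetry) shape: done means a response was already
// written to the client; mayRetry means the request is still safe to replay.
type tryFunc func(backend string) (done bool, mayRetry bool)

// proxyWithRetries walks the backend list until one handles the request.
func proxyWithRetries(backends []string, try tryFunc) bool {
	for _, b := range backends {
		done, mayRetry := try(b)
		if done {
			return true // response sent; nothing more to do
		}
		if !mayRetry {
			return false // request body consumed; retrying would duplicate it
		}
	}
	return false // all backends failed
}

func main() {
	attempts := 0
	ok := proxyWithRetries([]string{"b1", "b2", "b3"}, func(string) (bool, bool) {
		attempts++
		return attempts == 2, true // succeed on the second backend
	})
	fmt.Println(ok, attempts) // true 2
}

This is also why the hunks above avoid httpserver.GetRequestURI before a retry: reading the request body would make the request non-replayable.
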
@@ -629,13 +589,6 @@ func handleMissingAuthorizationError(w http.ResponseWriter) {
}

func handleConcurrencyLimitError(w http.ResponseWriter, r *http.Request, err error) {
ctx := r.Context()
if errors.Is(ctx.Err(), context.Canceled) {
// Do not return any response for the request canceled by the client,
// since the connection to the client is already closed.
return
}

w.Header().Add("Retry-After", "10")
err = &httpserver.ErrorWithStatusCode{
Err: err,
@@ -692,7 +645,6 @@ type zeroReader struct{}
func (r *zeroReader) Read(_ []byte) (int, error) {
return 0, io.EOF
}

func (r *zeroReader) Close() error {
return nil
}

@@ -31,9 +31,6 @@ vmbackup-linux-ppc64le-prod:
vmbackup-linux-386-prod:
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386

vmbackup-linux-s390x-prod:
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-s390x

vmbackup-darwin-amd64-prod:
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64


@@ -212,7 +212,7 @@ func newSrcFS() (*fslocal.FS, error) {
}

func newDstFS(ctx context.Context) (common.RemoteFS, error) {
fs, err := actions.NewRemoteFS(ctx, *dst, nil)
fs, err := actions.NewRemoteFS(ctx, *dst)
if err != nil {
return nil, fmt.Errorf("cannot parse `-dst`=%q: %w", *dst, err)
}
@@ -255,7 +255,7 @@ func newOriginFS(ctx context.Context) (common.OriginFS, error) {
if len(*origin) == 0 {
return &fsnil.FS{}, nil
}
fs, err := actions.NewRemoteFS(ctx, *origin, nil)
fs, err := actions.NewRemoteFS(ctx, *origin)
if err != nil {
return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
}
@@ -266,7 +266,7 @@ func newRemoteOriginFS(ctx context.Context) (common.RemoteFS, error) {
if len(*origin) == 0 {
return nil, fmt.Errorf("-origin cannot be empty when -snapshotName and -snapshot.createURL aren't set")
}
fs, err := actions.NewRemoteFS(ctx, *origin, nil)
fs, err := actions.NewRemoteFS(ctx, *origin)
if err != nil {
return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
}

@@ -27,9 +27,6 @@ vmctl-linux-ppc64le-prod:
vmctl-linux-386-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-linux-386

vmctl-linux-s390x-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-linux-s390x

vmctl-darwin-amd64-prod:
APP_NAME=vmctl $(MAKE) app-via-docker-darwin-amd64

@@ -689,15 +689,15 @@ var (
Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
Layout: time.RFC3339,
},
&cli.StringSliceFlag{
Name: remoteReadFilterLabel,
Usage: "Prometheus label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.",
DefaultText: "__name__",
&cli.StringFlag{
Name: remoteReadFilterLabel,
Usage: "Prometheus label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.",
Value: "__name__",
},
&cli.StringSliceFlag{
Name: remoteReadFilterLabelValue,
Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", remoteReadFilterLabelValue),
DefaultText: ".*",
&cli.StringFlag{
Name: remoteReadFilterLabelValue,
Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", remoteReadFilterLabelValue),
Value: ".*",
},
&cli.BoolFlag{
Name: remoteRead,

@@ -1,7 +1,6 @@
package main

import (
"context"
"fmt"
"io"
"log"
@@ -38,7 +37,7 @@ func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator st
}
}

func (ip *influxProcessor) run(ctx context.Context) error {
func (ip *influxProcessor) run() error {
series, err := ip.ic.Explore()
if err != nil {
return fmt.Errorf("explore query failed: %s", err)
@@ -48,7 +47,7 @@ func (ip *influxProcessor) run(ctx context.Context) error {
}

question := fmt.Sprintf("Found %d timeseries to import. Continue?", len(series))
if !prompt(ctx, question) {
if !prompt(question) {
return nil
}


@@ -103,7 +103,7 @@ func main() {
}

otsdbProcessor := newOtsdbProcessor(otsdbClient, importer, c.Int(otsdbConcurrency), c.Bool(globalVerbose))
return otsdbProcessor.run(ctx)
return otsdbProcessor.run()
},
},
{
@@ -164,7 +164,7 @@ func main() {
c.Bool(influxSkipDatabaseLabel),
c.Bool(influxPrometheusMode),
c.Bool(globalVerbose))
return processor.run(ctx)
return processor.run()
},
},
{
@@ -192,14 +192,6 @@ func main() {
return fmt.Errorf("failed to create transport for -%s=%q: %s", remoteReadSrcAddr, addr, err)
}

// Backwards compatible default values if none provided by user
rrLabelNames := c.StringSlice(remoteReadFilterLabel)
rrLabelValues := c.StringSlice(remoteReadFilterLabelValue)
if len(rrLabelNames) == 0 && len(rrLabelValues) == 0 {
rrLabelNames = []string{"__name__"}
rrLabelValues = []string{".*"}
}

rr, err := remoteread.NewClient(remoteread.Config{
Addr: addr,
Transport: tr,
@@ -208,8 +200,8 @@ func main() {
Timeout: c.Duration(remoteReadHTTPTimeout),
UseStream: c.Bool(remoteReadUseStream),
Headers: c.String(remoteReadHeaders),
LabelNames: rrLabelNames,
LabelValues: rrLabelValues,
LabelName: c.String(remoteReadFilterLabel),
LabelValue: c.String(remoteReadFilterLabelValue),
DisablePathAppend: c.Bool(remoteReadDisablePathAppend),
})
if err != nil {
@@ -279,7 +271,7 @@ func main() {
cc: c.Int(promConcurrency),
isVerbose: c.Bool(globalVerbose),
}
return pp.run(ctx)
return pp.run()
},
},
{

@@ -1,7 +1,6 @@
package main

import (
"context"
"fmt"
"log"
"sync"
@@ -38,7 +37,7 @@ func newOtsdbProcessor(oc *opentsdb.Client, im *vm.Importer, otsdbcc int, verbos
}
}

func (op *otsdbProcessor) run(ctx context.Context) error {
func (op *otsdbProcessor) run() error {
log.Println("Loading all metrics from OpenTSDB for filters: ", op.oc.Filters)
var metrics []string
for _, filter := range op.oc.Filters {
@@ -54,7 +53,7 @@ func (op *otsdbProcessor) run(ctx context.Context) error {
}

question := fmt.Sprintf("Found %d metrics to import. Continue?", len(metrics))
if !prompt(ctx, question) {
if !prompt(question) {
return nil
}
op.im.ResetStats()

@@ -1,7 +1,6 @@
package main

import (
"context"
"fmt"
"log"
"sync"
@@ -31,7 +30,7 @@ type prometheusProcessor struct {
isVerbose bool
}

func (pp *prometheusProcessor) run(ctx context.Context) error {
func (pp *prometheusProcessor) run() error {
blocks, err := pp.cl.Explore()
if err != nil {
return fmt.Errorf("explore failed: %s", err)
@@ -40,7 +39,7 @@ func (pp *prometheusProcessor) run(ctx context.Context) error {
return fmt.Errorf("found no blocks to import")
}
question := fmt.Sprintf("Found %d blocks to import. Continue?", len(blocks))
if !prompt(ctx, question) {
if !prompt(question) {
return nil
}


@@ -47,7 +47,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {

question := fmt.Sprintf("Selected time range %q - %q will be split into %d ranges according to %q step. Continue?",
rrp.filter.timeStart.String(), rrp.filter.timeEnd.String(), len(ranges), rrp.filter.chunk)
if !prompt(ctx, question) {
if !prompt(question) {
return nil
}


@@ -11,15 +11,14 @@ import (
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb/chunkenc"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

const (
@@ -64,9 +63,9 @@ type Config struct {
UseStream bool
// Headers optional HTTP headers to send with each request to the corresponding remote storage
Headers string
// LabelNames, LabelValues stands for label=~value pair used for read requests.
// LabelName, LabelValue stands for label=~value pair used for read requests.
// Is optional.
LabelNames, LabelValues []string
LabelName, LabelValue string
}

// Filter defines a list of filters applied to requested data
@@ -95,22 +94,12 @@ func NewClient(cfg Config) (*Client, error) {
return nil, err
}

var matchers []*prompb.LabelMatcher
if len(cfg.LabelNames) > 0 || len(cfg.LabelValues) > 0 {
if len(cfg.LabelNames) != len(cfg.LabelValues) {
return nil, fmt.Errorf("the number of label names and label values must be the same")
}

for i := range cfg.LabelNames {
if cfg.LabelNames[i] == "" {
return nil, fmt.Errorf("label name cannot be empty")
}
matcher := &prompb.LabelMatcher{
Type: prompb.LabelMatcher_RE,
Name: cfg.LabelNames[i],
Value: cfg.LabelValues[i],
}
matchers = append(matchers, matcher)
var m *prompb.LabelMatcher
if cfg.LabelName != "" && cfg.LabelValue != "" {
m = &prompb.LabelMatcher{
Type: prompb.LabelMatcher_RE,
Name: cfg.LabelName,
Value: cfg.LabelValue,
}
}

@@ -127,7 +116,7 @@ func NewClient(cfg Config) (*Client, error) {
password: cfg.Password,
useStream: cfg.UseStream,
headers: headers,
matchers: matchers,
matchers: []*prompb.LabelMatcher{m},
}

return c, nil

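
The hunk above collapses the multi-matcher LabelNames/LabelValues slices into a single LabelName/LabelValue pair. For comparison, the removed validate-then-build loop boils down to the following pattern; a sketch assuming only the prompb types from github.com/prometheus/prometheus/prompb:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/prompb"
)

// buildMatchers turns parallel name/value slices into regexp label matchers.
func buildMatchers(names, values []string) ([]*prompb.LabelMatcher, error) {
	if len(names) != len(values) {
		return nil, fmt.Errorf("the number of label names and label values must be the same")
	}
	matchers := make([]*prompb.LabelMatcher, 0, len(names))
	for i := range names {
		if names[i] == "" {
			return nil, fmt.Errorf("label name cannot be empty")
		}
		matchers = append(matchers, &prompb.LabelMatcher{
			Type:  prompb.LabelMatcher_RE, // label=~value semantics
			Name:  names[i],
			Value: values[i],
		})
	}
	return matchers, nil
}

func main() {
	ms, err := buildMatchers([]string{"__name__"}, []string{".*"})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(ms), "matcher(s) built")
}

Note that after this change, matchers: []*prompb.LabelMatcher{m} can contain a nil element when neither flag is set; the slice-based version simply produced an empty slice in that case.
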
@@ -2,7 +2,6 @@ package main

import (
"bufio"
"context"
"fmt"
"os"
"strings"
@@ -16,7 +15,7 @@ const barTpl = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "
// isSilent should be initialized in main
var isSilent bool

func prompt(ctx context.Context, question string) bool {
func prompt(question string) bool {
if isSilent {
return true
}
@@ -26,32 +25,15 @@ func prompt(ctx context.Context, question string) bool {
}
reader := bufio.NewReader(os.Stdin)
fmt.Print(question, " [Y/n] ")

answerCh := make(chan string, 1)
errCh := make(chan error, 1)

go func() {
answer, err := reader.ReadString('\n')
if err != nil {
errCh <- err
return
}
answerCh <- answer
}()

select {
case <-ctx.Done():
fmt.Println("\nCanceled.")
return false
case err := <-errCh:
answer, err := reader.ReadString('\n')
if err != nil {
panic(err)
case answer := <-answerCh:
answer = strings.TrimSpace(strings.ToLower(answer))
if answer == "" || answer == "yes" || answer == "y" {
return true
}
return false
}
answer = strings.TrimSpace(strings.ToLower(answer))
if answer == "" || answer == "yes" || answer == "y" {
return true
}
return false
}
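
The branch removed above is the standard way to make a blocking stdin read cancelable: perform the read in a goroutine and select on the context. A standalone sketch of that pattern (promptYesNo is an illustrative name, not the vmctl function):

package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"
	"time"
)

// promptYesNo asks question on stdin and honors ctx cancellation.
func promptYesNo(ctx context.Context, question string) bool {
	fmt.Print(question, " [Y/n] ")
	answerCh := make(chan string, 1)
	errCh := make(chan error, 1)
	go func() {
		// The blocking read runs off the main goroutine so ctx can win the select.
		answer, err := bufio.NewReader(os.Stdin).ReadString('\n')
		if err != nil {
			errCh <- err
			return
		}
		answerCh <- answer
	}()
	select {
	case <-ctx.Done():
		fmt.Println("\nCanceled.")
		return false
	case err := <-errCh:
		panic(err)
	case answer := <-answerCh:
		answer = strings.TrimSpace(strings.ToLower(answer))
		return answer == "" || answer == "yes" || answer == "y"
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	fmt.Println("continue:", promptYesNo(ctx, "Continue?"))
}

The trade-off is a leaked goroutine when ctx fires while the read is still blocked on stdin; the simplified version kept by the diff avoids that at the cost of not being cancelable.
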

func wrapErr(vmErr *vm.ImportError, verbose bool) error {

@@ -79,7 +79,7 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
return fmt.Errorf("failed to get tenants: %w", err)
}
question := fmt.Sprintf("The following tenants were discovered: %s.\n Continue?", tenants)
if !prompt(ctx, question) {
if !prompt(question) {
return nil
}
}
@@ -233,7 +233,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
// do not prompt for intercluster because there could be many tenants,
// and we don't want to interrupt the process when moving to the next tenant.
question := foundSeriesMsg + ". Continue?"
if !prompt(ctx, question) {
if !prompt(question) {
return nil
}
} else {

@@ -11,11 +11,9 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/ratelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeserieslimits"
)

@@ -52,9 +50,8 @@ var (
type InsertCtx struct {
Labels sortedLabels

mrs []storage.MetricRow
mms []metricsmetadata.Row
metricNameBuf []byte
mrs []storage.MetricRow
metricNamesBuf []byte

relabelCtx relabel.Ctx
streamAggrCtx streamAggrCtx
@@ -76,13 +73,8 @@ func (ctx *InsertCtx) Reset(rowsLen int) {
}
mrs = slicesutil.SetLength(mrs, rowsLen)
ctx.mrs = mrs[:0]
mms := ctx.mms
for i := range mms {
cleanMetricMetadata(&mms[i])
}
ctx.mms = mms[:0]

ctx.metricNameBuf = ctx.metricNameBuf[:0]
ctx.metricNamesBuf = ctx.metricNamesBuf[:0]
ctx.relabelCtx.Reset()
ctx.streamAggrCtx.Reset()
ctx.skipStreamAggr = false
@@ -92,20 +84,11 @@ func cleanMetricRow(mr *storage.MetricRow) {
mr.MetricNameRaw = nil
}

func cleanMetricMetadata(mm *metricsmetadata.Row) {
mm.MetricFamilyName = nil
mm.Unit = nil
mm.Help = nil
mm.Type = 0
mm.ProjectID = 0
mm.AccountID = 0
}

func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompb.Label) []byte {
start := len(ctx.metricNameBuf)
ctx.metricNameBuf = append(ctx.metricNameBuf, prefix...)
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf, labels)
metricNameRaw := ctx.metricNameBuf[start:]
start := len(ctx.metricNamesBuf)
ctx.metricNamesBuf = append(ctx.metricNamesBuf, prefix...)
ctx.metricNamesBuf = storage.MarshalMetricNameRaw(ctx.metricNamesBuf, labels)
metricNameRaw := ctx.metricNamesBuf[start:]
return metricNameRaw[:len(metricNameRaw):len(metricNameRaw)]
}

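
Both versions of marshalMetricNameRaw return metricNameRaw[:len:len], a full slice expression. Capping the capacity matters: it prevents a later append on the returned slice from silently overwriting bytes that belong to the shared buffer. A tiny self-contained demonstration of the difference:

package main

import "fmt"

func main() {
	buf := []byte("abcdef")
	uncapped := buf[:3]       // len 3, cap 6: appends write into buf's backing array
	capped := buf[:3:3]       // len 3, cap 3: appends must reallocate
	_ = append(uncapped, 'X') // clobbers buf[3]
	_ = append(capped, 'Y')   // leaves buf untouched
	fmt.Println(string(buf))  // prints "abcXef", not "abcYef"
}

Since the function hands out sub-slices of a long-lived arena (metricNameBuf/metricNamesBuf), the capacity cap is what keeps callers from corrupting neighboring metric names.
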
@@ -160,7 +143,7 @@ func (ctx *InsertCtx) addRow(metricNameRaw []byte, timestamp int64, value float6
mr.MetricNameRaw = metricNameRaw
mr.Timestamp = timestamp
mr.Value = value
if len(ctx.metricNameBuf) > 16*1024*1024 {
if len(ctx.metricNamesBuf) > 16*1024*1024 {
if err := ctx.FlushBufs(); err != nil {
return err
}
@@ -168,55 +151,6 @@ func (ctx *InsertCtx) addRow(metricNameRaw []byte, timestamp int64, value float6
return nil
}

// WriteMetadata writes given prometheus protobuf metadata into the storage.
func (ctx *InsertCtx) WriteMetadata(mmpbs []prompb.MetricMetadata) error {
if len(mmpbs) == 0 {
return nil
}
mms := ctx.mms
mms = slicesutil.SetLength(mms, len(mmpbs))
for idx, mmpb := range mmpbs {
mm := &mms[idx]
mm.MetricFamilyName = bytesutil.ToUnsafeBytes(mmpb.MetricFamilyName)
mm.Help = bytesutil.ToUnsafeBytes(mmpb.Help)
mm.Type = mmpb.Type
mm.Unit = bytesutil.ToUnsafeBytes(mmpb.Unit)
}

err := vmstorage.AddMetadataRows(mms)
if err != nil {
return &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("cannot store metrics metadata: %w", err),
StatusCode: http.StatusServiceUnavailable,
}
}
return nil
}

// WritePromMetadata writes given prometheus metric metadata into the storage
func (ctx *InsertCtx) WritePromMetadata(mmps []prometheus.Metadata) error {
if len(mmps) == 0 {
return nil
}
mms := ctx.mms
mms = slicesutil.SetLength(mms, len(mmps))
for idx, mmpb := range mmps {
mm := &mms[idx]
mm.MetricFamilyName = bytesutil.ToUnsafeBytes(mmpb.Metric)
mm.Help = bytesutil.ToUnsafeBytes(mmpb.Help)
mm.Type = mmpb.Type
}

err := vmstorage.AddMetadataRows(mms)
if err != nil {
return &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("cannot store prometheus metrics metadata: %w", err),
StatusCode: http.StatusServiceUnavailable,
}
}
return nil
}

// AddLabelBytes adds (name, value) label to ctx.Labels.
//
// name and value must exist until ctx.Labels is used.
@@ -287,7 +221,7 @@ func (ctx *InsertCtx) FlushBufs() error {
}
}

func (ctx *InsertCtx) dropAggregatedRows(matchIdxs []uint32) {
func (ctx *InsertCtx) dropAggregatedRows(matchIdxs []byte) {
dst := ctx.mrs[:0]
src := ctx.mrs
if !*streamAggrDropInput {
@@ -305,4 +239,4 @@ func (ctx *InsertCtx) dropAggregatedRows(matchIdxs []uint32) {
ctx.mrs = dst
}

var matchIdxsPool slicesutil.BufferPool[uint32]
var matchIdxsPool bytesutil.ByteBufferPool

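
The last pair above swaps a typed slicesutil.BufferPool[uint32] back to bytesutil.ByteBufferPool. The generic shape of such a pool is a thin wrapper over sync.Pool; a minimal sketch of the pattern, not the VictoriaMetrics implementation:

package main

import (
	"fmt"
	"sync"
)

// bufferPool recycles []T slices to avoid per-request allocations.
type bufferPool[T any] struct {
	p sync.Pool
}

// Get returns a recycled slice (possibly nil) with zero length.
func (bp *bufferPool[T]) Get() []T {
	if v := bp.p.Get(); v != nil {
		return v.([]T)
	}
	return nil
}

// Put resets the slice length and returns it to the pool,
// keeping the backing array for reuse.
func (bp *bufferPool[T]) Put(buf []T) {
	bp.p.Put(buf[:0])
}

func main() {
	var pool bufferPool[uint32]
	buf := append(pool.Get(), 1, 2, 3)
	fmt.Println(len(buf), cap(buf) >= 3)
	pool.Put(buf)
}

Production pools usually store a pointer to the slice (or a wrapper struct) rather than the slice value itself, to avoid an interface allocation on every Put; the sketch trades that optimization for brevity.
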
@@ -13,7 +13,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/streamaggr"
"github.com/VictoriaMetrics/metrics"
@@ -23,11 +22,11 @@ var (
streamAggrConfig = flag.String("streamAggr.config", "", "Optional path to file with stream aggregation config. "+
"See https://docs.victoriametrics.com/victoriametrics/stream-aggregation/ . "+
"See also -streamAggr.keepInput, -streamAggr.dropInput and -streamAggr.dedupInterval")
streamAggrKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep input samples that match any rule in -streamAggr.config. "+
"By default, matched raw samples are aggregated and dropped, while unmatched samples are written to the remote storage. "+
streamAggrKeepInput = flag.Bool("streamAggr.keepInput", false, "Whether to keep all the input samples after the aggregation with -streamAggr.config. "+
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
"See also -streamAggr.dropInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
streamAggrDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop input samples that don't match any rule in -streamAggr.config. "+
"By default, only matched raw samples are dropped, while unmatched samples are written to the remote storage."+
streamAggrDropInput = flag.Bool("streamAggr.dropInput", false, "Whether to drop all the input samples after the aggregation with -streamAggr.config. "+
"By default, only aggregated samples are dropped, while the remaining samples are stored in the database. "+
"See also -streamAggr.keepInput and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/")
streamAggrDedupInterval = flag.Duration("streamAggr.dedupInterval", 0, "Input samples are de-duplicated with this interval before optional aggregation with -streamAggr.config . "+
"See also -streamAggr.dropInputLabels and -dedup.minScrapeInterval and https://docs.victoriametrics.com/victoriametrics/stream-aggregation/#deduplication")
@@ -190,7 +189,7 @@ func (ctx *streamAggrCtx) Reset() {
ctx.buf = ctx.buf[:0]
}

func (ctx *streamAggrCtx) push(mrs []storage.MetricRow, matchIdxs []uint32) []uint32 {
func (ctx *streamAggrCtx) push(mrs []storage.MetricRow, matchIdxs []byte) []byte {
mn := &ctx.mn
tss := ctx.tss
labels := ctx.labels
@@ -249,7 +248,7 @@ func (ctx *streamAggrCtx) push(mrs []storage.MetricRow, matchIdxs []uint32) []ui
if sas.IsEnabled() {
matchIdxs = sas.Push(tss, matchIdxs)
} else if deduplicator != nil {
matchIdxs = slicesutil.SetLength(matchIdxs, len(tss))
matchIdxs = bytesutil.ResizeNoCopyMayOverallocate(matchIdxs, len(tss))
for i := range matchIdxs {
matchIdxs[i] = 1
}

@@ -27,7 +27,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/promremotewrite"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/zabbixconnector"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
@@ -232,17 +231,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
firehose.WriteSuccessResponse(w, r)
return true
case "zabbixconnector/api/v1/history":
zabbixconnectorHistoryRequests.Inc()
if err := zabbixconnector.InsertHandlerForHTTP(r); err != nil {
zabbixconnectorHistoryErrors.Inc()
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, `{"error":%q}`, err.Error())
return true
}
w.WriteHeader(http.StatusAccepted)
return true
case "/newrelic":
newrelicCheckRequest.Inc()
w.Header().Set("Content-Type", "application/json")
@@ -435,9 +423,6 @@ var (
opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)
opentelemetryPushErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/v1/metrics", protocol="opentelemetry"}`)

zabbixconnectorHistoryRequests = metrics.NewCounter(`vm_http_requests_total{path="/zabbixconnector/api/v1/history", protocol="zabbixconnector"}`)
zabbixconnectorHistoryErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/zabbixconnector/api/v1/history", protocol="zabbixconnector"}`)

newrelicWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)
newrelicWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/newrelic/infra/v2/metrics/events/bulk", protocol="newrelic"}`)

|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/firehose"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
|
||||
@@ -15,9 +14,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentelemetry"}`)
|
||||
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentelemetry"}`)
|
||||
metadataInserted = metrics.NewCounter(`vm_metadata_rows_inserted_total{type="opentelemetry"}`)
|
||||
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentelemetry"}`)
|
||||
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentelemetry"}`)
|
||||
)
|
||||
|
||||
// InsertHandler processes opentelemetry metrics.
|
||||
@@ -35,12 +33,12 @@ func InsertHandler(req *http.Request) error {
|
||||
return fmt.Errorf("json encoding isn't supported for opentelemetry format. Use protobuf encoding")
|
||||
}
|
||||
}
|
||||
return stream.ParseStream(req.Body, encoding, processBody, func(tss []prompb.TimeSeries, mms []prompb.MetricMetadata) error {
|
||||
return insertRows(tss, mms, extraLabels)
|
||||
return stream.ParseStream(req.Body, encoding, processBody, func(tss []prompb.TimeSeries, _ []prompb.MetricMetadata) error {
|
||||
return insertRows(tss, extraLabels)
|
||||
})
|
||||
}
|
||||
|
||||
func insertRows(tss []prompb.TimeSeries, mms []prompb.MetricMetadata, extraLabels []prompb.Label) error {
|
||||
func insertRows(tss []prompb.TimeSeries, extraLabels []prompb.Label) error {
|
||||
ctx := common.GetInsertCtx()
|
||||
defer common.PutInsertCtx(ctx)
|
||||
|
||||
@@ -77,14 +75,5 @@ func insertRows(tss []prompb.TimeSeries, mms []prompb.MetricMetadata, extraLabel
|
||||
}
|
||||
rowsInserted.Add(rowsTotal)
|
||||
rowsPerInsert.Update(float64(rowsTotal))
|
||||
if err := ctx.FlushBufs(); err != nil {
|
||||
return fmt.Errorf("cannot flush metric bufs: %w", err)
|
||||
}
|
||||
if prommetadata.IsEnabled() {
|
||||
if err := ctx.WriteMetadata(mms); err != nil {
|
||||
return err
|
||||
}
|
||||
metadataInserted.Add(len(mms))
|
||||
}
|
||||
return nil
|
||||
return ctx.FlushBufs()
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
package prometheusimport

import (
"fmt"
"net/http"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
@@ -16,9 +15,8 @@ import (
)

var (
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="prometheus"}`)
metadataInserted = metrics.NewCounter(`vm_metadata_rows_inserted_total{type="prometheus"}`)
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="prometheus"}`)
)

// InsertHandler processes `/api/v1/import/prometheus` request.
@@ -32,14 +30,14 @@ func InsertHandler(req *http.Request) error {
return err
}
encoding := req.Header.Get("Content-Encoding")
return stream.Parse(req.Body, defaultTimestamp, encoding, true, prommetadata.IsEnabled(), func(rows []prometheus.Row, mms []prometheus.Metadata) error {
return insertRows(rows, mms, extraLabels)
return stream.Parse(req.Body, defaultTimestamp, encoding, true, prommetadata.IsEnabled(), func(rows []prometheus.Row, _ []prometheus.Metadata) error {
return insertRows(rows, extraLabels)
}, func(s string) {
httpserver.LogError(req, s)
})
}

func insertRows(rows []prometheus.Row, mms []prometheus.Metadata, extraLabels []prompb.Label) error {
func insertRows(rows []prometheus.Row, extraLabels []prompb.Label) error {
ctx := common.GetInsertCtx()
defer common.PutInsertCtx(ctx)

@@ -66,15 +64,5 @@ func insertRows(rows []prometheus.Row, mms []prometheus.Metadata, extraLabels []
}
rowsInserted.Add(len(rows))
rowsPerInsert.Update(float64(len(rows)))
if err := ctx.FlushBufs(); err != nil {
return fmt.Errorf("cannot flush metric bufs: %w", err)
}

if prommetadata.IsEnabled() {
if err := ctx.WritePromMetadata(mms); err != nil {
return err
}
metadataInserted.Add(len(mms))
}
return nil
return ctx.FlushBufs()
}

@@ -4,15 +4,13 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/metrics"
)

var (
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="promscrape"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="promscrape"}`)
metadataRowsInserted = metrics.NewCounter(`vm_metadata_rows_inserted_total{type="promscrape"}`)
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="promscrape"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="promscrape"}`)
)

const maxRowsPerBlock = 10000
@@ -43,13 +41,6 @@ func Push(wr *prompb.WriteRequest) {
}
push(ctx, tssBlock)
}
if prommetadata.IsEnabled() {
if err := ctx.WriteMetadata(wr.Metadata); err != nil {
logger.Errorf("cannot write promscrape metrics metadata to storage: %s", err)
} else {
metadataRowsInserted.Add(len(wr.Metadata))
}
}
}

func push(ctx *common.InsertCtx, tss []prompb.TimeSeries) {

@@ -1,12 +1,10 @@
package promremotewrite

import (
"fmt"
"net/http"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prommetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
@@ -14,9 +12,8 @@ import (
)

var (
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="promremotewrite"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="promremotewrite"}`)
metadataInserted = metrics.NewCounter(`vm_metadata_rows_inserted_total{type="promremotewrite"}`)
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="promremotewrite"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="promremotewrite"}`)
)

// InsertHandler processes remote write for prometheus.
@@ -26,12 +23,12 @@ func InsertHandler(req *http.Request) error {
return err
}
isVMRemoteWrite := req.Header.Get("Content-Encoding") == "zstd"
return stream.Parse(req.Body, isVMRemoteWrite, func(tss []prompb.TimeSeries, mms []prompb.MetricMetadata) error {
return insertRows(tss, mms, extraLabels)
return stream.Parse(req.Body, isVMRemoteWrite, func(tss []prompb.TimeSeries, _ []prompb.MetricMetadata) error {
return insertRows(tss, extraLabels)
})
}

func insertRows(timeseries []prompb.TimeSeries, mms []prompb.MetricMetadata, extraLabels []prompb.Label) error {
func insertRows(timeseries []prompb.TimeSeries, extraLabels []prompb.Label) error {
ctx := common.GetInsertCtx()
defer common.PutInsertCtx(ctx)

@@ -71,15 +68,5 @@ func insertRows(timeseries []prompb.TimeSeries, mms []prompb.MetricMetadata, ext
}
rowsInserted.Add(rowsTotal)
rowsPerInsert.Update(float64(rowsTotal))

if err := ctx.FlushBufs(); err != nil {
return fmt.Errorf("cannot flush metric bufs: %w", err)
}
if prommetadata.IsEnabled() {
if err := ctx.WriteMetadata(mms); err != nil {
return err
}
metadataInserted.Add(len(mms))
}
return nil
return ctx.FlushBufs()
}

@@ -86,7 +86,7 @@ func loadRelabelConfig() (*promrelabel.ParsedConfigs, error) {
if len(*relabelConfig) == 0 {
return nil, nil
}
pcs, _, err := promrelabel.LoadRelabelConfigs(*relabelConfig)
pcs, err := promrelabel.LoadRelabelConfigs(*relabelConfig)
if err != nil {
return nil, fmt.Errorf("error when reading -relabelConfig=%q: %w", *relabelConfig, err)
}

@@ -1,67 +0,0 @@
package zabbixconnector

import (
"net/http"

"github.com/VictoriaMetrics/metrics"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/zabbixconnector"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/zabbixconnector/stream"
)

var (
rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="zabbixconnector"}`)
rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="zabbixconnector"}`)
)

// InsertHandlerForHTTP processes remote write for ZabbixConnector POST /zabbixconnector/v1/history request.
func InsertHandlerForHTTP(req *http.Request) error {
extraLabels, err := protoparserutil.GetExtraLabels(req)
if err != nil {
return err
}
encoding := req.Header.Get("Content-Encoding")
return stream.Parse(req.Body, encoding, func(rows []zabbixconnector.Row) error {
return insertRows(rows, extraLabels)
})
}

func insertRows(rows []zabbixconnector.Row, extraLabels []prompb.Label) error {
ctx := common.GetInsertCtx()
defer common.PutInsertCtx(ctx)

rowsTotal := len(rows)
ctx.Reset(rowsTotal)
hasRelabeling := relabel.HasRelabeling()
for i := range rows {
r := &rows[i]
ctx.Labels = ctx.Labels[:0]
for k := range r.Tags {
t := &r.Tags[k]
ctx.AddLabelBytes(t.Key, t.Value)
}
for k := range extraLabels {
label := &extraLabels[k]
ctx.AddLabel(label.Name, label.Value)
}
if hasRelabeling {
ctx.ApplyRelabeling()
}
if len(ctx.Labels) == 0 {
// Skip metric without labels.
continue
}
ctx.SortLabelsIfNeeded()
if err := ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value); err != nil {
return err
}
}

rowsInserted.Add(rowsTotal)
rowsPerInsert.Update(float64(rowsTotal))
return ctx.FlushBufs()
}
@@ -31,9 +31,6 @@ vmrestore-linux-ppc64le-prod:
vmrestore-linux-386-prod:
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386

vmrestore-linux-s390x-prod:
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-s390x

vmrestore-darwin-amd64-prod:
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64


@@ -104,7 +104,7 @@ func newDstFS() (*fslocal.FS, error) {
}

func newSrcFS(ctx context.Context) (common.RemoteFS, error) {
fs, err := actions.NewRemoteFS(ctx, *src, nil)
fs, err := actions.NewRemoteFS(ctx, *src)
if err != nil {
return nil, fmt.Errorf("cannot parse `-src`=%q: %w", *src, err)
}

@@ -421,16 +421,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
w.WriteHeader(http.StatusNoContent)
return true
case "/api/v1/metadata":
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata
metadataRequests.Inc()
if err := prometheus.MetadataHandler(qt, startTime, w, r); err != nil {
metadataErrors.Inc()
httpserver.SendPrometheusError(w, r, err)
return true
}
return true

default:
return false
}
@@ -584,6 +574,12 @@ func handleStaticAndSimpleRequests(w http.ResponseWriter, r *http.Request, path
w.Header().Set("Content-Type", "application/json")
fmt.Fprint(w, `{"status":"success","data":{"notifiers":[]}}`)
return true
case "/api/v1/metadata":
// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata
metadataRequests.Inc()
w.Header().Set("Content-Type", "application/json")
fmt.Fprintf(w, "%s", `{"status":"success","data":{}}`)
return true
case "/api/v1/status/buildinfo":
buildInfoRequests.Inc()
w.Header().Set("Content-Type", "application/json")
@@ -712,9 +708,7 @@ var (
alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`)
notifiersRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/notifiers"}`)

metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`)
metadataErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/metadata"}`)

metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`)
buildInfoRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/buildinfo"}`)
queryExemplarsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query_exemplars"}`)

@@ -20,7 +20,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
)

var (
@@ -866,23 +865,6 @@ func LabelValues(qt *querytracer.Tracer, labelName string, sq *storage.SearchQue
return labelValues, nil
}

// GetMetricsMetadata returns time series metric names metadata for the given args
func GetMetricsMetadata(qt *querytracer.Tracer, limit int, metricName string) ([]*metricsmetadata.Row, error) {
qt = qt.NewChild("get metrics metadata: limit=%d, metric_name=%q", limit, metricName)
defer qt.Done()

metadata := vmstorage.Storage.GetMetadataRows(qt, limit, metricName)

sort.Slice(metadata, func(i, j int) bool {
return string(metadata[i].MetricFamilyName) < string(metadata[j].MetricFamilyName)
})
if limit > 0 && len(metadata) >= limit {
metadata = metadata[:limit]
}

return metadata, nil
}

// GraphiteTagValues returns tag values for the given tagName until the given deadline.
func GraphiteTagValues(qt *querytracer.Tracer, tagName, filter string, limit int, deadline searchutil.Deadline) ([]string, error) {
qt = qt.NewChild("get graphite tag values for tagName=%s, filter=%s, limit=%d", tagName, filter, limit)

@@ -1,35 +0,0 @@
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
) %}

{% stripspace %}
MetadataResponse generates response for /api/v1/metadata
See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata
{% func MetadataResponse( result []*metricsmetadata.Row, qt *querytracer.Tracer) %}
{
"status":"success",
"data": {
{% code
mapItems := len(result)
currentItem := 0
%}
{% for _, row := range result %}
"{%s string(row.MetricFamilyName) %}": [
{
"type": {%q= row.Type.String() %},
{% if len(row.Unit) > 0 -%}
"unit": {%q= string(row.Unit) %},
{% endif -%}
"help": {%q= string(row.Help) %}
}
]
{% if currentItem != mapItems-1 %},{% endif %}
{% code currentItem++ %}
{% endfor %}
}
{%= dumpQueryTrace(qt) %}
}
{% endfunc %}

{% endstripspace %}
@@ -1,108 +0,0 @@
// Code generated by qtc from "metadata_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vmselect/prometheus/metadata_response.qtpl:1
package prometheus

//line app/vmselect/prometheus/metadata_response.qtpl:1
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage/metricsmetadata"
)

// MetadataResponse generates response for /api/v1/metadata. See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata

//line app/vmselect/prometheus/metadata_response.qtpl:9
import (
qtio422016 "io"

qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/metadata_response.qtpl:9
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/metadata_response.qtpl:9
func StreamMetadataResponse(qw422016 *qt422016.Writer, result []*metricsmetadata.Row, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/metadata_response.qtpl:9
qw422016.N().S(`{"status":"success","data": {`)
//line app/vmselect/prometheus/metadata_response.qtpl:14
mapItems := len(result)
currentItem := 0

//line app/vmselect/prometheus/metadata_response.qtpl:17
for _, row := range result {
//line app/vmselect/prometheus/metadata_response.qtpl:17
qw422016.N().S(`"`)
//line app/vmselect/prometheus/metadata_response.qtpl:18
qw422016.E().S(string(row.MetricFamilyName))
//line app/vmselect/prometheus/metadata_response.qtpl:18
qw422016.N().S(`": [{"type":`)
//line app/vmselect/prometheus/metadata_response.qtpl:20
qw422016.N().Q(row.Type.String())
//line app/vmselect/prometheus/metadata_response.qtpl:20
qw422016.N().S(`,`)
//line app/vmselect/prometheus/metadata_response.qtpl:21
if len(row.Unit) > 0 {
//line app/vmselect/prometheus/metadata_response.qtpl:21
qw422016.N().S(`"unit":`)
//line app/vmselect/prometheus/metadata_response.qtpl:22
qw422016.N().Q(string(row.Unit))
//line app/vmselect/prometheus/metadata_response.qtpl:22
qw422016.N().S(`,`)
//line app/vmselect/prometheus/metadata_response.qtpl:23
}
//line app/vmselect/prometheus/metadata_response.qtpl:23
qw422016.N().S(`"help":`)
//line app/vmselect/prometheus/metadata_response.qtpl:24
qw422016.N().Q(string(row.Help))
//line app/vmselect/prometheus/metadata_response.qtpl:24
qw422016.N().S(`}]`)
//line app/vmselect/prometheus/metadata_response.qtpl:27
if currentItem != mapItems-1 {
//line app/vmselect/prometheus/metadata_response.qtpl:27
qw422016.N().S(`,`)
//line app/vmselect/prometheus/metadata_response.qtpl:27
}
//line app/vmselect/prometheus/metadata_response.qtpl:28
currentItem++

//line app/vmselect/prometheus/metadata_response.qtpl:29
}
//line app/vmselect/prometheus/metadata_response.qtpl:29
qw422016.N().S(`}`)
//line app/vmselect/prometheus/metadata_response.qtpl:31
streamdumpQueryTrace(qw422016, qt)
//line app/vmselect/prometheus/metadata_response.qtpl:31
qw422016.N().S(`}`)
//line app/vmselect/prometheus/metadata_response.qtpl:33
}

//line app/vmselect/prometheus/metadata_response.qtpl:33
func WriteMetadataResponse(qq422016 qtio422016.Writer, result []*metricsmetadata.Row, qt *querytracer.Tracer) {
//line app/vmselect/prometheus/metadata_response.qtpl:33
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/metadata_response.qtpl:33
StreamMetadataResponse(qw422016, result, qt)
//line app/vmselect/prometheus/metadata_response.qtpl:33
qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/metadata_response.qtpl:33
}

//line app/vmselect/prometheus/metadata_response.qtpl:33
func MetadataResponse(result []*metricsmetadata.Row, qt *querytracer.Tracer) string {
//line app/vmselect/prometheus/metadata_response.qtpl:33
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/metadata_response.qtpl:33
WriteMetadataResponse(qb422016, result, qt)
//line app/vmselect/prometheus/metadata_response.qtpl:33
qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/metadata_response.qtpl:33
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/metadata_response.qtpl:33
return qs422016
//line app/vmselect/prometheus/metadata_response.qtpl:33
}
@@ -56,7 +56,7 @@ var (
maxTSDBStatusSeries = flag.Int("search.maxTSDBStatusSeries", 10e6, "The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage")
maxSeriesLimit = flag.Int("search.maxSeries", 30e3, "The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage")
maxDeleteSeries = flag.Int("search.maxDeleteSeries", 1e6, "The maximum number of time series, which can be deleted using /api/v1/admin/tsdb/delete_series. This option allows limiting memory usage")
maxTSDBStatusTopNSeries = flag.Int("search.maxTSDBStatusTopNSeries", 1000, "The maximum value of 'topN' argument that can be passed to /api/v1/status/tsdb API. This option allows limiting memory usage. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#tsdb-stats")
maxTSDBStatusTopNSeries = flag.Int("search.maxTSDBStatusTopNSeries", 1000, "The maximum value of `topN` argument that can be passed to /api/v1/status/tsdb API. This option allows limiting memory usage. See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#tsdb-stats")
maxLabelsAPISeries = flag.Int("search.maxLabelsAPISeries", 1e6, "The maximum number of time series, which could be scanned when searching for the matching time series "+
"at /api/v1/labels and /api/v1/label/.../values. This option allows limiting memory usage and CPU usage. See also -search.maxLabelsAPIDuration, "+
"-search.maxTagKeys, -search.maxTagValues and -search.ignoreExtraFiltersAtLabelsAPI")
@@ -639,37 +639,6 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
return nil
}

// MetadataHandler processes /api/v1/metadata request.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata
func MetadataHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {

limit, err := httputil.GetInt(r, "limit")
if err != nil {
return err
}
if limit < 0 {
limit = 0
}

metricName := r.FormValue("metric")

metadata, err := netstorage.GetMetricsMetadata(qt, limit, metricName)
if err != nil {
return fmt.Errorf("cannot get metadata: %w", err)
}
qt.Done()
w.Header().Set("Content-Type", "application/json")
bw := bufferedwriter.Get(w)
defer bufferedwriter.Put(bw)
WriteMetadataResponse(bw, metadata, qt)
if err := bw.Flush(); err != nil {
return fmt.Errorf("cannot send metadata response to remote client: %w", err)
}

return nil
}

var labelsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/labels"}`)

// SeriesCountHandler processes /api/v1/series/count request.

@@ -49,7 +49,7 @@ var (
minWindowForInstantRollupOptimization = flag.Duration("search.minWindowForInstantRollupOptimization", time.Hour*3, "Enable cache-based optimization for repeated queries "+
"to /api/v1/query (aka instant queries), which contain rollup functions with lookbehind window exceeding the given value")
maxBinaryOpPushdownLabelValues = flag.Int("search.maxBinaryOpPushdownLabelValues", 100, "The maximum number of values for a label in the first expression that can be extracted as a common label filter and pushed down to the second expression in a binary operation. "+
"A larger value makes the pushed-down filter more complex but fewer time series will be returned. This flag is useful when selective label (e.g., 'instance') contains numerous values, and storage resources are abundant.")
"A larger value makes the pushed-down filter more complex but fewer time series will be returned. This flag is useful when selective label contains numerous values, for example `instance`, and storage resources are abundant.")
)

// The minimum number of points per timeseries for enabling time rounding.
@@ -1169,6 +1169,60 @@ func evalInstantRollup(qt *querytracer.Tracer, ec *EvalConfig, funcName string,
},
}
return evalExpr(qt, ec, be)
case "rate":
if iafc != nil {
if !strings.EqualFold(iafc.ae.Name, "sum") {
qt.Printf("do not apply instant rollup optimization for incremental aggregate %s()", iafc.ae.Name)
return evalAt(qt, timestamp, window)
}
qt.Printf("optimized calculation for sum(rate(m[d])) as (sum(increase(m[d])) / d)")
afe := expr.(*metricsql.AggrFuncExpr)
fe := afe.Args[0].(*metricsql.FuncExpr)
feIncrease := *fe
feIncrease.Name = "increase"
// copy RollupExpr to drop possible offset,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
newArg.Offset = nil
feIncrease.Args = []metricsql.Expr{newArg}
d := newArg.Window.Duration(ec.Step)
if d == 0 {
d = ec.Step
}
afeIncrease := *afe
afeIncrease.Args = []metricsql.Expr{&feIncrease}
be := &metricsql.BinaryOpExpr{
Op: "/",
KeepMetricNames: true,
Left: &afeIncrease,
Right: &metricsql.NumberExpr{
N: float64(d) / 1000,
},
}
return evalExpr(qt, ec, be)
}
qt.Printf("optimized calculation for instant rollup rate(m[d]) as (increase(m[d]) / d)")
fe := expr.(*metricsql.FuncExpr)
feIncrease := *fe
feIncrease.Name = "increase"
// copy RollupExpr to drop possible offset,
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/9762
newArg := copyRollupExpr(fe.Args[0].(*metricsql.RollupExpr))
newArg.Offset = nil
feIncrease.Args = []metricsql.Expr{newArg}
d := newArg.Window.Duration(ec.Step)
if d == 0 {
d = ec.Step
}
be := &metricsql.BinaryOpExpr{
Op: "/",
KeepMetricNames: fe.KeepMetricNames,
Left: &feIncrease,
Right: &metricsql.NumberExpr{
N: float64(d) / 1000,
},
}
return evalExpr(qt, ec, be)
case "max_over_time":
if iafc != nil {
if !strings.EqualFold(iafc.ae.Name, "max") {

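The rate optimization added above rewrites rate(m[d]) as increase(m[d]) / d. Since rate is the per-second average of increase over the lookbehind window, dividing by the window length in seconds is exact, and the N: float64(d) / 1000 term converts d from milliseconds to seconds. In numbers:

package main

import "fmt"

func main() {
	const dMillis = 300000.0 // 5m lookbehind window, expressed in milliseconds
	const increase = 900.0   // increase(m[5m]) at the evaluation timestamp
	// rate(m[5m]) == increase divided by the window length in seconds.
	rate := increase / (dMillis / 1000)
	fmt.Println(rate) // 3 (samples per second)
}

This is also why the offset is dropped from the copied RollupExpr: the division by d is independent of any offset, which is applied when the inner increase() is evaluated.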
Some files were not shown because too many files have changed in this diff.