Compare commits

...

1 Commits

Author SHA1 Message Date
Jayice
534c57b79a make scrape_series_added negative when the number of series decreased 2026-03-18 16:02:09 +08:00
6 changed files with 64 additions and 44 deletions

View File

@@ -30,6 +30,7 @@ See also [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-rel
* FEATURE: [dashboards/unused-metrics](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards/unused-metrics.json): add a new dashboard for exploring stored metrics based on [Cardinality Explorer](https://docs.victoriametrics.com/victoriametrics/#cardinality-explorer) and [ingested metrics usage API](https://docs.victoriametrics.com/victoriametrics/#track-ingested-metrics-usage). The dashboard requires [Infinity Grafana plugin](https://grafana.com/grafana/plugins/yesoreyeram-infinity-datasource/) to be installed. See [#10617](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/10617) for details.
* BUGFIX: `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): retry RPC by dialing a new connection instead of reusing a pooled one when the previous attempt fails with `io.EOF`, `broken pipe` or `reset by peer`. This reduces query failures caused by stale connections to restarted vmstorage nodes. See [#10314](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10314)
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): make `scrape_series_added` negative when the number of series exposed by the target decreases or when scraping the target fails. See [#10653](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10653).
## [v1.138.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.138.0)

View File

@@ -598,7 +598,7 @@ and attaches `instance`, `job` and other target-specific labels to these metrics
The limit can be set via `label_limit` option at [scrape_configs](https://docs.victoriametrics.com/victoriametrics/sd_configs/#scrape_configs).
This metric is exposed only if the `label_limit` is set.
* `scrape_series_added` - **an approximate** number of new [series](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#time-series) the given target generates during the current scrape.
* `scrape_series_added` - **an approximate** change in the number of exposed [series](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#time-series) at the given target during the current scrape.
This metric allows detecting targets (identified by `instance` label),
which lead to [high churn rate](https://docs.victoriametrics.com/victoriametrics/faq/#what-is-high-churn-rate).
For example, the following [MetricsQL query](https://docs.victoriametrics.com/victoriametrics/metricsql/) returns targets,
@@ -611,6 +611,7 @@ and attaches `instance`, `job` and other target-specific labels to these metrics
`vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line flag
or when it scrapes target with `no_stale_markers: true` option, e.g. when [staleness markers](#prometheus-staleness-markers) are disabled.
When `vmagent` starts, `scrape_series_added` increases for all targets, since every series is new to the freshly started `vmagent`.
* `scrape_series_limit` - the limit on the number of unique [series](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#time-series) the given target can expose according to [these docs](#cardinality-limiter).
This metric is exposed only if the series limit is set.

View File

@@ -577,7 +577,8 @@ func (sw *scrapeWork) processDataOneShot(scrapeTimestamp, realTimestamp int64, b
// The returned value for seriesAdded may be bigger than the real number of added series
// if some series were removed during relabeling.
// This is a trade-off between performance and accuracy.
seriesAdded = getSeriesAdded(lastScrapeStr, bodyString)
// The value can be negative if the number of exposed series decreased or the scrape failed.
seriesAdded = getSeriesDiff(lastScrapeStr, bodyString)
}
samplesDropped := 0
if sw.seriesLimitExceeded.Load() || !areIdenticalSeries {
@@ -688,7 +689,8 @@ func (sw *scrapeWork) processDataInStreamMode(scrapeTimestamp, realTimestamp int
// The returned value for seriesAdded may be bigger than the real number of added series
// if some series were removed during relabeling.
// This is a trade-off between performance and accuracy.
seriesAdded = getSeriesAdded(lastScrapeStr, bodyString)
// The value can be negative if the number of exposed series decreased or the scrape failed.
seriesAdded = getSeriesDiff(lastScrapeStr, bodyString)
}
responseSize := len(bodyString)
@@ -813,12 +815,9 @@ func (wc *writeRequestCtx) reset() {
var writeRequestCtxPool leveledWriteRequestCtxPool
func getSeriesAdded(lastScrape, currScrape string) int {
if currScrape == "" {
return 0
}
bodyString := parser.GetRowsDiff(currScrape, lastScrape)
return strings.Count(bodyString, "\n")
// getSeriesDiff returns the difference between the number of series in
// currScrape and the number of series in lastScrape.
//
// The result is positive when currScrape exposes series missing from
// lastScrape, and negative when series present in lastScrape disappeared
// from currScrape (e.g. the target exposes fewer series, or the scrape
// failed and currScrape is empty).
//
// Each row returned by parser.GetRowsDiff is newline-terminated, so the
// "\n" counts equal the number of added/deleted series.
func getSeriesDiff(lastScrape, currScrape string) int {
added, deleted := parser.GetRowsDiff(currScrape, lastScrape)
return strings.Count(added, "\n") - strings.Count(deleted, "\n")
}
func (sw *scrapeWork) initSeriesLimiter() {
@@ -876,7 +875,7 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
}
bodyString := lastScrape
if currScrape != "" {
bodyString = parser.GetRowsDiff(lastScrape, currScrape)
bodyString, _ = parser.GetRowsDiff(lastScrape, currScrape)
}
if bodyString != "" {
// Send stale markers in streaming mode in order to reduce memory usage

View File

@@ -485,10 +485,11 @@ func prevBackslashesCount(s string) int {
return n
}
// GetRowsDiff returns rows from s1, which are missing in s2.
//
// GetRowsDiff compares the rows in s1 with the rows in s2.
// The first returned string contains rows from s1, which are missing in s2.
// The second returned string contains rows from s2, which are missing in s1.
// The returned rows have default value 0 and have no timestamps.
func GetRowsDiff(s1, s2 string) string {
func GetRowsDiff(s1, s2 string) (string, string) {
li1 := getLinesIterator()
li2 := getLinesIterator()
defer func() {
@@ -497,33 +498,48 @@ func GetRowsDiff(s1, s2 string) string {
}()
li1.Init(s1)
li2.Init(s2)
if !li1.NextKey() {
return ""
var diff1, diff2 []byte
has1 := li1.NextKey()
has2 := li2.NextKey()
if !has1 && !has2 {
return "", ""
}
var diff []byte
if !li2.NextKey() {
diff = appendKeys(diff, li1)
return string(diff)
if !has1 {
diff2 = appendKeys(diff2, li2)
return "", string(diff2)
}
if !has2 {
diff1 = appendKeys(diff1, li1)
return string(diff1), ""
}
for {
switch bytes.Compare(li1.Key, li2.Key) {
case -1:
diff = appendKey(diff, li1.Key)
diff1 = appendKey(diff1, li1.Key)
if !li1.NextKey() {
return string(diff)
diff2 = appendKeys(diff2, li2)
return string(diff1), string(diff2)
}
case 1:
diff2 = appendKey(diff2, li2.Key)
if !li2.NextKey() {
diff1 = appendKeys(diff1, li1)
return string(diff1), string(diff2)
}
case 0:
if !li1.NextKey() {
return string(diff)
if !li2.NextKey() {
return string(diff1), string(diff2)
}
diff2 = appendKeys(diff2, li2)
return string(diff1), string(diff2)
}
if !li2.NextKey() {
diff = appendKeys(diff, li1)
return string(diff)
}
case 1:
if !li2.NextKey() {
diff = appendKeys(diff, li1)
return string(diff)
diff1 = appendKeys(diff1, li1)
return string(diff1), string(diff2)
}
}
}

View File

@@ -7,21 +7,24 @@ import (
)
func TestGetRowsDiff(t *testing.T) {
f := func(s1, s2, resultExpected string) {
f := func(s1, s2, addedExpected, deletedExpected string) {
t.Helper()
result := GetRowsDiff(s1, s2)
if result != resultExpected {
t.Fatalf("unexpected result for GetRowsDiff(%q, %q); got %q; want %q", s1, s2, result, resultExpected)
added, deleted := GetRowsDiff(s1, s2)
if added != addedExpected {
t.Fatalf("unexpected added result for GetRowsDiff(%q, %q); got %q; want %q", s1, s2, added, addedExpected)
}
if deleted != deletedExpected {
t.Fatalf("unexpected deleted result for GetRowsDiff(%q, %q); got %q; want %q", s1, s2, deleted, deletedExpected)
}
}
f("", "", "")
f("", "foo 1", "")
f(" ", "foo 1", "")
f("foo 123", "", "foo 0\n")
f("foo 123", "bar 3", "foo 0\n")
f("foo 123", "bar 3\nfoo 344", "")
f("foo{x=\"y\", z=\"a a a\"} 123", "bar 3\nfoo{x=\"y\", z=\"b b b\"} 344", "foo{x=\"y\",z=\"a a a\"} 0\n")
f("foo{bar=\"baz\"} 123\nx 3.4 5\ny 5 6", "x 34 342", "foo{bar=\"baz\"} 0\ny 0\n")
f("", "", "", "")
f("", "foo 1", "", "foo 0\n")
f(" ", "foo 1", "", "foo 0\n")
f("foo 123", "", "foo 0\n", "")
f("foo 123", "bar 3", "foo 0\n", "bar 0\n")
f("foo 123", "bar 3\nfoo 344", "", "bar 0\n")
f("foo{x=\"y\", z=\"a a a\"} 123", "bar 3\nfoo{x=\"y\", z=\"b b b\"} 344", "foo{x=\"y\",z=\"a a a\"} 0\n", "bar 0\nfoo{x=\"y\",z=\"b b b\"} 0\n")
f("foo{bar=\"baz\"} 123\nx 3.4 5\ny 5 6", "x 34 342", "foo{bar=\"baz\"} 0\ny 0\n", "")
}
func TestAreIdenticalSeriesFast(t *testing.T) {

View File

@@ -137,9 +137,9 @@ container_ulimits_soft{container="kube-scheduler",id="/kubelet/kubepods/burstabl
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
diff := GetRowsDiff(s2, s1)
if diff != "foo 0\n" {
panic(fmt.Errorf("unexpected diff; got %q; want %q", diff, "foo 0\n"))
added, _ := GetRowsDiff(s2, s1)
if added != "foo 0\n" {
panic(fmt.Errorf("unexpected diff; got %q; want %q", added, "foo 0\n"))
}
}
})