lib/promscrape: update last scrape result only when current scrape is successful

Previously, the last scrape result was unconditionally updated, despite possible scrape errors.

The commit updates the last scrape result only after a successful scrape. This properly accounts for the `scrape_series_added` metric and aligns its behavior with the same metric in Prometheus.

fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10653
This commit is contained in:
JAYICE
2026-04-06 23:14:47 +08:00
committed by GitHub
parent b3c03c023c
commit 0a256002e5
3 changed files with 28 additions and 2 deletions

View File

@@ -38,6 +38,8 @@ See also [LTS releases](https://docs.victoriametrics.com/victoriametrics/lts-rel
* BUGFIX: [vmauth](https://docs.victoriametrics.com/victoriametrics/vmauth/): align request body buffering flags - `maxRequestBodySizeToRetry` and `requestBufferSize` to the same `16KB` value. Allow disabling request buffering by setting `requestBufferSize=0`. See [#10675](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10675)
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): fix `scrape_series_added` metric to update only on successful scrapes, aligning its behavior with Prometheus. See [#10653](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10653).
## [v1.139.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.139.0)
Released at 2026-03-27

View File

@@ -613,6 +613,8 @@ and attaches `instance`, `job` and other target-specific labels to these metrics
`vmagent` sets `scrape_series_added` to zero when it runs with `-promscrape.noStaleMarkers` command-line flag
or when it scrapes target with `no_stale_markers: true` option, e.g. when [staleness markers](#prometheus-staleness-markers) are disabled.
Restarting `vmagent` can cause `scrape_series_added` to rise because all time series are new to a newly started `vmagent`.
* `scrape_series_limit` - the limit on the number of unique [series](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#time-series) the given target can expose according to [these docs](#cardinality-limiter).
This metric is exposed only if the series limit is set.

View File

@@ -244,6 +244,9 @@ type scrapeWork struct {
// successRequestsCount is the number of success requests during the last suppressScrapeErrorsDelay
successRequestsCount int
// lastScrapeSuccess indicates whether last scrape is success or not.
lastScrapeSuccess bool
}
// loadLastScrape appends last scrape response to dst and returns the result.
@@ -530,6 +533,8 @@ func (sw *scrapeWork) processDataOneShot(scrapeTimestamp, realTimestamp int64, b
areIdenticalSeries := areIdenticalSeries(cfg, lastScrapeStr, bodyString)
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
lastScrapeSuccess := sw.lastScrapeSuccess
if err != nil {
up = 0
scrapesFailed.Inc()
@@ -571,6 +576,9 @@ func (sw *scrapeWork) processDataOneShot(scrapeTimestamp, realTimestamp int64, b
if up == 0 {
bodyString = ""
sw.lastScrapeSuccess = false
} else {
sw.lastScrapeSuccess = true
}
seriesAdded := 0
if !areIdenticalSeries {
@@ -600,10 +608,15 @@ func (sw *scrapeWork) processDataOneShot(scrapeTimestamp, realTimestamp int64, b
sw.prevLabelsLen = len(wc.labels)
writeRequestCtxPool.Put(wc)
if !areIdenticalSeries {
if !areIdenticalSeries && (lastScrapeSuccess || err == nil) {
// Send stale markers for disappeared metrics with the real scrape timestamp
// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
sw.sendStaleSeries(lastScrapeStr, bodyString, realTimestamp, false)
}
if !areIdenticalSeries && err == nil {
// Only update last scrape result when the current scrape is successful.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10653.
sw.storeLastScrape(bodyString)
sw.lastScrapeLen = len(bodyString)
}
@@ -620,6 +633,7 @@ func (sw *scrapeWork) processDataInStreamMode(scrapeTimestamp, realTimestamp int
var maxLabelsLen atomic.Int64
maxLabelsLen.Store(int64(sw.prevLabelsLen))
lastScrapeSuccess := sw.lastScrapeSuccess
bbLastScrape := leveledbytebufferpool.Get(sw.lastScrapeLen)
bbLastScrape.B = sw.loadLastScrape(bbLastScrape.B)
@@ -682,6 +696,9 @@ func (sw *scrapeWork) processDataInStreamMode(scrapeTimestamp, realTimestamp int
up = 0
bodyString = ""
scrapesFailed.Inc()
sw.lastScrapeSuccess = false
} else {
sw.lastScrapeSuccess = true
}
seriesAdded := 0
if !areIdenticalSeries {
@@ -703,10 +720,15 @@ func (sw *scrapeWork) processDataInStreamMode(scrapeTimestamp, realTimestamp int
}
sw.pushAutoMetrics(am, scrapeTimestamp)
if !areIdenticalSeries {
if !areIdenticalSeries && (lastScrapeSuccess || err == nil) {
// Send stale markers for disappeared metrics with the real scrape timestamp
// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
sw.sendStaleSeries(lastScrapeStr, bodyString, realTimestamp, false)
}
if !areIdenticalSeries && err == nil {
// Only update last scrape result when the current scrape is successful.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10653.
sw.storeLastScrape(bodyString)
sw.lastScrapeLen = len(bodyString)
}