mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
Compare commits
155 Commits
debug/erro
...
pmm-6401-v
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e1668e7441 | ||
|
|
0d0469cc80 | ||
|
|
8d6d4e8033 | ||
|
|
b894f25f21 | ||
|
|
b6bae2f05f | ||
|
|
9e15858baf | ||
|
|
3f5b1084eb | ||
|
|
c2e9be96a7 | ||
|
|
a72dadb8f4 | ||
|
|
08219faf8d | ||
|
|
288620ca40 | ||
|
|
2847c84a7b | ||
|
|
6a64823581 | ||
|
|
b94e986710 | ||
|
|
a29565d1bd | ||
|
|
39332cfc5c | ||
|
|
d07d2811d4 | ||
|
|
206e451cae | ||
|
|
307034fc2f | ||
|
|
c149132b14 | ||
|
|
6dd7a90c7c | ||
|
|
dc5507754f | ||
|
|
c68663deee | ||
|
|
114a40e63f | ||
|
|
163f2a46fd | ||
|
|
375c46cb1f | ||
|
|
bb2d1128b8 | ||
|
|
479b9da827 | ||
|
|
62857fc30e | ||
|
|
253315b1fe | ||
|
|
efe6e30008 | ||
|
|
bc2512abdd | ||
|
|
a07f8017ba | ||
|
|
cf70b766eb | ||
|
|
b00732074c | ||
|
|
8df8c414de | ||
|
|
ce844238a4 | ||
|
|
452720c5dc | ||
|
|
bbca1740c1 | ||
|
|
e1c85395eb | ||
|
|
b348114dab | ||
|
|
bb54e34dc5 | ||
|
|
e0d0b9447e | ||
|
|
fae6e4fc85 | ||
|
|
e49bf9bc73 | ||
|
|
a142390014 | ||
|
|
bceb8082f6 | ||
|
|
276969500e | ||
|
|
030e3a63f2 | ||
|
|
1c5e0564af | ||
|
|
b8300338f0 | ||
|
|
660c3c7251 | ||
|
|
80ba07dc95 | ||
|
|
11ded82e60 | ||
|
|
558b390ebc | ||
|
|
343f444e87 | ||
|
|
16884c20c0 | ||
|
|
7d44cdd8ce | ||
|
|
5d2394ad9b | ||
|
|
8582fba4b1 | ||
|
|
b045f506f2 | ||
|
|
6197440bb9 | ||
|
|
966e9c227a | ||
|
|
edb2ab7d8e | ||
|
|
0ad887fd4d | ||
|
|
d5dde7f6b1 | ||
|
|
a54ca9bd8f | ||
|
|
3588687f84 | ||
|
|
687eb4ab00 | ||
|
|
b04fece006 | ||
|
|
d0c364d93d | ||
|
|
63c88d8ea2 | ||
|
|
dc6636e2b2 | ||
|
|
c13f1d99e0 | ||
|
|
079888f719 | ||
|
|
b68264b4f5 | ||
|
|
aed049f660 | ||
|
|
7fcc0a1ef0 | ||
|
|
48951073c4 | ||
|
|
d0dfcb72b4 | ||
|
|
4cf7a55808 | ||
|
|
d72fc60108 | ||
|
|
0b92e18047 | ||
|
|
aa8ea16160 | ||
|
|
f5e70f0ab9 | ||
|
|
9e10d5083e | ||
|
|
30c2d75815 | ||
|
|
0e80f3f45a | ||
|
|
6e3cbae0b3 | ||
|
|
a5583ddaff | ||
|
|
5db9e82e54 | ||
|
|
80676cf1fd | ||
|
|
ba4c49dde6 | ||
|
|
35e5e8ff1e | ||
|
|
4cdbc4642d | ||
|
|
23c0fb1efc | ||
|
|
441d3e4b3f | ||
|
|
a0ea5777f0 | ||
|
|
fb006fc6c0 | ||
|
|
8593358965 | ||
|
|
d0311b7fe5 | ||
|
|
4edd38a906 | ||
|
|
56054f4eb7 | ||
|
|
0ff0787797 | ||
|
|
f9c706e186 | ||
|
|
d74d22460c | ||
|
|
d1193c87a8 | ||
|
|
4f311e5827 | ||
|
|
142e6b6ecf | ||
|
|
1b4ef473b9 | ||
|
|
8beb1f9519 | ||
|
|
501fd8efd9 | ||
|
|
45f2ba2572 | ||
|
|
cb2342029e | ||
|
|
ff0088ceec | ||
|
|
afe6d2e736 | ||
|
|
e1a6262302 | ||
|
|
f000a10cd0 | ||
|
|
4aee6ef4c0 | ||
|
|
f4dfacd493 | ||
|
|
fb2d4e56ce | ||
|
|
36b748dfc7 | ||
|
|
c625dc5b96 | ||
|
|
e32620afa1 | ||
|
|
3f298272a8 | ||
|
|
7a473798b7 | ||
|
|
00ce906d97 | ||
|
|
41c9565aa1 | ||
|
|
56303aee5b | ||
|
|
8d8e2ccf5f | ||
|
|
8772cb617c | ||
|
|
65fbfc5cbc | ||
|
|
1b389674c0 | ||
|
|
98529e16ee | ||
|
|
1b112405a8 | ||
|
|
8bbc83e85e | ||
|
|
8349140744 | ||
|
|
4dc13754d8 | ||
|
|
83b7eb8ca6 | ||
|
|
e5ef3288dd | ||
|
|
e7f2907138 | ||
|
|
757c5cfbe0 | ||
|
|
317ddb84b9 | ||
|
|
2b1d0510fa | ||
|
|
40d2f6fee4 | ||
|
|
9fbb84d5c2 | ||
|
|
bdaa9a91f3 | ||
|
|
1a91da35be | ||
|
|
f85be226bb | ||
|
|
8df5a3c5f6 | ||
|
|
9d3eb3f4b8 | ||
|
|
2cd48959d4 | ||
|
|
8fc8874db4 | ||
|
|
ff1cbb524e | ||
|
|
a70df4bd83 |
@@ -28,8 +28,16 @@ var (
|
||||
"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
|
||||
dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+
|
||||
"Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse")
|
||||
downsamplingPeriods = flagutil.NewArray("downsampling.period", "Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs "+
|
||||
"to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details")
|
||||
)
|
||||
|
||||
// custom api help links [["/api","doc"]] without http.pathPrefix.
|
||||
var customAPIPathList = [][]string{
|
||||
{"/graph/explore", "explore metrics grafana page"},
|
||||
{"/graph/d/prometheus-advanced/advanced-data-exploration", "PMM grafana dashboard"},
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Write flags and help message to stdout, since it is easier to grep or pipe.
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
@@ -51,7 +59,10 @@ func main() {
|
||||
|
||||
logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr)
|
||||
startTime := time.Now()
|
||||
storage.SetDedupInterval(*minScrapeInterval)
|
||||
err := storage.SetDownsamplingPeriods(*downsamplingPeriods, *minScrapeInterval)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot parse -downsampling.period: %s", err)
|
||||
}
|
||||
vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
|
||||
vmselect.Init()
|
||||
vminsert.Init()
|
||||
@@ -100,6 +111,10 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
{"api/v1/status/top_queries", "top queries"},
|
||||
{"api/v1/status/active_queries", "active queries"},
|
||||
})
|
||||
for _, p := range customAPIPathList {
|
||||
p, doc := p[0], p[1]
|
||||
fmt.Fprintf(w, "<a href=%q>%s</a> - %s<br/>", p, p, doc)
|
||||
}
|
||||
return true
|
||||
}
|
||||
if vminsert.RequestHandler(w, r) {
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
@@ -267,6 +268,12 @@ var gomaxprocs = cgroup.AvailableCPUs()
|
||||
type packedTimeseries struct {
|
||||
metricName string
|
||||
brs []blockRef
|
||||
pd *promData
|
||||
}
|
||||
|
||||
type promData struct {
|
||||
values []float64
|
||||
timestamps []int64
|
||||
}
|
||||
|
||||
type unpackWorkItem struct {
|
||||
@@ -468,11 +475,46 @@ func (pts *packedTimeseries) Unpack(dst *Result, tbf *tmpBlocksFile, tr storage.
|
||||
if firstErr != nil {
|
||||
return firstErr
|
||||
}
|
||||
dedupInterval := storage.GetDedupInterval()
|
||||
if pts.pd != nil {
|
||||
// Add data from Prometheus to dst.
|
||||
// It usually has smaller timestamps than the data from sbs, so put it first.
|
||||
//
|
||||
// There is no need in check for fetchData, since this is already checked when initializing pts.pd.
|
||||
dst.Values = append(dst.Values, pts.pd.values...)
|
||||
dst.Timestamps = append(dst.Timestamps, pts.pd.timestamps...)
|
||||
}
|
||||
dedupInterval := storage.GetDedupInterval(tr.MinTimestamp)
|
||||
mergeSortBlocks(dst, sbs, dedupInterval)
|
||||
if pts.pd != nil {
|
||||
if !sort.IsSorted(dst) {
|
||||
sort.Sort(dst)
|
||||
}
|
||||
pts.pd = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sort.Interface implementation for Result
|
||||
|
||||
// Len implements sort.Interface
|
||||
func (r *Result) Len() int {
|
||||
return len(r.Timestamps)
|
||||
}
|
||||
|
||||
// Less implements sort.Interface
|
||||
func (r *Result) Less(i, j int) bool {
|
||||
timestamps := r.Timestamps
|
||||
return timestamps[i] < timestamps[j]
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface
|
||||
func (r *Result) Swap(i, j int) {
|
||||
timestamps := r.Timestamps
|
||||
values := r.Values
|
||||
timestamps[i], timestamps[j] = timestamps[j], timestamps[i]
|
||||
values[i], values[j] = values[j], values[i]
|
||||
}
|
||||
|
||||
func getSortBlock() *sortBlock {
|
||||
v := sbPool.Get()
|
||||
if v == nil {
|
||||
@@ -622,6 +664,14 @@ func GetLabelsOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) (
|
||||
labels[i] = "__name__"
|
||||
}
|
||||
}
|
||||
|
||||
// Merge labels obtained from Prometheus storage.
|
||||
promLabels, err := promdb.GetLabelNamesOnTimeRange(tr, deadline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain labels from Prometheus storage: %w", err)
|
||||
}
|
||||
labels = mergeStrings(labels, promLabels)
|
||||
|
||||
// Sort labels like Prometheus does
|
||||
sort.Strings(labels)
|
||||
return labels, nil
|
||||
@@ -687,11 +737,40 @@ func GetLabels(deadline searchutils.Deadline) ([]string, error) {
|
||||
labels[i] = "__name__"
|
||||
}
|
||||
}
|
||||
|
||||
// Merge labels obtained from Prometheus storage.
|
||||
promLabels, err := promdb.GetLabelNames(deadline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain labels from Prometheus storage: %w", err)
|
||||
}
|
||||
labels = mergeStrings(labels, promLabels)
|
||||
|
||||
// Sort labels like Prometheus does
|
||||
sort.Strings(labels)
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func mergeStrings(a, b []string) []string {
|
||||
if len(a) == 0 {
|
||||
return b
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return a
|
||||
}
|
||||
m := make(map[string]struct{}, len(a)+len(b))
|
||||
for _, s := range a {
|
||||
m[s] = struct{}{}
|
||||
}
|
||||
for _, s := range b {
|
||||
m[s] = struct{}{}
|
||||
}
|
||||
result := make([]string, 0, len(m))
|
||||
for s := range m {
|
||||
result = append(result, s)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// GetLabelValuesOnTimeRange returns label values for the given labelName on the given tr
|
||||
// until the given deadline.
|
||||
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
|
||||
@@ -706,6 +785,14 @@ func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during label values search on time range for labelName=%q: %w", labelName, err)
|
||||
}
|
||||
|
||||
// Merge label values obtained from Prometheus storage.
|
||||
promLabelValues, err := promdb.GetLabelValuesOnTimeRange(labelName, tr, deadline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain label values on time range for %q from Prometheus storage: %w", labelName, err)
|
||||
}
|
||||
labelValues = mergeStrings(labelValues, promLabelValues)
|
||||
|
||||
// Sort labelValues like Prometheus does
|
||||
sort.Strings(labelValues)
|
||||
return labelValues, nil
|
||||
@@ -749,6 +836,14 @@ func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string,
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
|
||||
}
|
||||
|
||||
// Merge label values obtained from Prometheus storage.
|
||||
promLabelValues, err := promdb.GetLabelValues(labelName, deadline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot obtain label values for %q from Prometheus storage: %w", labelName, err)
|
||||
}
|
||||
labelValues = mergeStrings(labelValues, promLabelValues)
|
||||
|
||||
// Sort labelValues like Prometheus does
|
||||
sort.Strings(labelValues)
|
||||
return labelValues, nil
|
||||
@@ -1087,6 +1182,26 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
|
||||
return nil, fmt.Errorf("cannot finalize temporary file: %w", err)
|
||||
}
|
||||
|
||||
// Fetch data from promdb.
|
||||
pm := make(map[string]*promData)
|
||||
err = promdb.VisitSeries(sq, fetchData, deadline, func(metricName []byte, values []float64, timestamps []int64) {
|
||||
pd := pm[string(metricName)]
|
||||
if pd == nil {
|
||||
if _, ok := m[string(metricName)]; !ok {
|
||||
orderedMetricNames = append(orderedMetricNames, string(metricName))
|
||||
}
|
||||
pd = &promData{}
|
||||
pm[string(metricName)] = pd
|
||||
}
|
||||
pd.values = append(pd.values, values...)
|
||||
pd.timestamps = append(pd.timestamps, timestamps...)
|
||||
})
|
||||
if err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
putStorageSearch(sr)
|
||||
return nil, fmt.Errorf("error when searching in Prometheus data: %w", err)
|
||||
}
|
||||
|
||||
var rss Results
|
||||
rss.tr = tr
|
||||
rss.fetchData = fetchData
|
||||
@@ -1096,6 +1211,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
|
||||
pts[i] = packedTimeseries{
|
||||
metricName: metricName,
|
||||
brs: m[metricName],
|
||||
pd: pm[metricName],
|
||||
}
|
||||
}
|
||||
rss.packedTimeseries = pts
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
@@ -104,6 +105,8 @@ func InitWithoutMetrics(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
|
||||
sizeBytes := tm.SmallSizeBytes + tm.BigSizeBytes
|
||||
logger.Infof("successfully opened storage %q in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
|
||||
*DataPath, time.Since(startTime).Seconds(), partsCount, blocksCount, rowsCount, sizeBytes)
|
||||
|
||||
promdb.Init(retentionPeriod.Msecs)
|
||||
}
|
||||
|
||||
// Storage is a storage.
|
||||
@@ -247,6 +250,7 @@ func Stop() {
|
||||
logger.Infof("gracefully closing the storage at %s", *DataPath)
|
||||
startTime := time.Now()
|
||||
WG.WaitAndBlock()
|
||||
promdb.MustClose()
|
||||
Storage.MustClose()
|
||||
logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())
|
||||
|
||||
|
||||
276
app/vmstorage/promdb/promdb.go
Normal file
276
app/vmstorage/promdb/promdb.go
Normal file
@@ -0,0 +1,276 @@
|
||||
package promdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/oklog/ulid"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
promstorage "github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
)
|
||||
|
||||
var prometheusDataPath = flag.String("prometheusDataPath", "", "Optional path to readonly historical Prometheus data")
|
||||
|
||||
var prometheusRetentionMsecs int64
|
||||
|
||||
// Init must be called after flag.Parse and before using the package.
|
||||
//
|
||||
// See also MustClose.
|
||||
func Init(retentionMsecs int64) {
|
||||
if promDB != nil {
|
||||
logger.Fatalf("BUG: it looks like MustOpenPromDB is called multiple times without MustClosePromDB call")
|
||||
}
|
||||
prometheusRetentionMsecs = retentionMsecs
|
||||
if *prometheusDataPath == "" {
|
||||
return
|
||||
}
|
||||
l := log.LoggerFunc(func(a ...interface{}) error {
|
||||
logger.Infof("%v", a)
|
||||
return nil
|
||||
})
|
||||
opts := tsdb.DefaultOptions()
|
||||
opts.RetentionDuration = retentionMsecs
|
||||
|
||||
// Set max block duration to 10% of retention period or 31 days
|
||||
// according to https://prometheus.io/docs/prometheus/latest/storage/#compaction
|
||||
maxBlockDuration := int64((31 * 24 * time.Hour) / time.Millisecond)
|
||||
if maxBlockDuration > retentionMsecs/10 {
|
||||
maxBlockDuration = retentionMsecs / 10
|
||||
}
|
||||
if maxBlockDuration < opts.MinBlockDuration {
|
||||
maxBlockDuration = opts.MinBlockDuration
|
||||
}
|
||||
opts.MaxBlockDuration = maxBlockDuration
|
||||
|
||||
// Custom delete function is needed, because Prometheus by default doesn't delete
|
||||
// blocks outside the retention if no new blocks are created with samples with the current timestamps.
|
||||
// See https://github.com/prometheus/prometheus/blob/997bb7134fcfd7279f250e183e78681e48a56aff/tsdb/db.go#L1116
|
||||
opts.BlocksToDelete = func(blocks []*tsdb.Block) map[ulid.ULID]struct{} {
|
||||
m := make(map[ulid.ULID]struct{})
|
||||
minRetentionTime := time.Now().Unix()*1000 - retentionMsecs
|
||||
for _, block := range blocks {
|
||||
meta := block.Meta()
|
||||
// delete block marked for deletion by compaction code.
|
||||
if meta.Compaction.Deletable {
|
||||
m[meta.ULID] = struct{}{}
|
||||
continue
|
||||
}
|
||||
if block.MaxTime() < minRetentionTime {
|
||||
m[meta.ULID] = struct{}{}
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
pdb, err := tsdb.Open(*prometheusDataPath, l, nil, opts)
|
||||
if err != nil {
|
||||
logger.Panicf("FATAL: cannot open Prometheus data at -prometheusDataPath=%q: %s", *prometheusDataPath, err)
|
||||
}
|
||||
promDB = pdb
|
||||
logger.Infof("successfully opened historical Prometheus data at -prometheusDataPath=%q with retentionMsecs=%d", *prometheusDataPath, retentionMsecs)
|
||||
}
|
||||
|
||||
// MustClose must be called on graceful shutdown.
|
||||
//
|
||||
// Package functionality cannot be used after this call.
|
||||
func MustClose() {
|
||||
if *prometheusDataPath == "" {
|
||||
return
|
||||
}
|
||||
if promDB == nil {
|
||||
logger.Panicf("BUG: it looks like MustClosePromDB is called without MustOpenPromDB call")
|
||||
}
|
||||
if err := promDB.Close(); err != nil {
|
||||
logger.Panicf("FATAL: cannot close promDB: %s", err)
|
||||
}
|
||||
promDB = nil
|
||||
logger.Infof("successfully closed historical Prometheus data at -prometheusDataPath=%q", *prometheusDataPath)
|
||||
}
|
||||
|
||||
var promDB *tsdb.DB
|
||||
|
||||
// GetLabelNamesOnTimeRange returns label names.
|
||||
func GetLabelNamesOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
|
||||
d := time.Unix(int64(deadline.Deadline()), 0)
|
||||
ctx, cancel := context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
q, err := promDB.Querier(ctx, tr.MinTimestamp, tr.MaxTimestamp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mustCloseQuerier(q)
|
||||
|
||||
names, _, err := q.LabelNames()
|
||||
// Make full copy of names, since they cannot be used after q is closed.
|
||||
names = copyStringsWithMemory(names)
|
||||
return names, err
|
||||
}
|
||||
|
||||
// GetLabelNames returns label names.
|
||||
func GetLabelNames(deadline searchutils.Deadline) ([]string, error) {
|
||||
tr := storage.TimeRange{
|
||||
MinTimestamp: 0,
|
||||
MaxTimestamp: time.Now().UnixNano() / 1e6,
|
||||
}
|
||||
return GetLabelNamesOnTimeRange(tr, deadline)
|
||||
}
|
||||
|
||||
// GetLabelValuesOnTimeRange returns values for the given labelName on the given tr.
|
||||
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
|
||||
d := time.Unix(int64(deadline.Deadline()), 0)
|
||||
ctx, cancel := context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
q, err := promDB.Querier(ctx, tr.MinTimestamp, tr.MaxTimestamp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mustCloseQuerier(q)
|
||||
|
||||
values, _, err := q.LabelValues(labelName)
|
||||
// Make full copy of values, since they cannot be used after q is closed.
|
||||
values = copyStringsWithMemory(values)
|
||||
return values, err
|
||||
}
|
||||
|
||||
// GetLabelValues returns values for the given labelName.
|
||||
func GetLabelValues(labelName string, deadline searchutils.Deadline) ([]string, error) {
|
||||
tr := storage.TimeRange{
|
||||
MinTimestamp: 0,
|
||||
MaxTimestamp: time.Now().UnixNano() / 1e6,
|
||||
}
|
||||
return GetLabelValuesOnTimeRange(labelName, tr, deadline)
|
||||
}
|
||||
|
||||
func copyStringsWithMemory(a []string) []string {
|
||||
result := make([]string, len(a))
|
||||
for i, s := range a {
|
||||
result[i] = string(append([]byte{}, s...))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SeriesVisitor is called by VisitSeries for each matching time series.
|
||||
//
|
||||
// The caller shouldn't hold references to metricName, values and timestamps after returning.
|
||||
type SeriesVisitor func(metricName []byte, values []float64, timestamps []int64)
|
||||
|
||||
// VisitSeries calls f for each series found in the pdb.
|
||||
//
|
||||
// If fetchData is false, then empty values and timestamps are passed to f.
|
||||
func VisitSeries(sq *storage.SearchQuery, fetchData bool, deadline searchutils.Deadline, f SeriesVisitor) error {
|
||||
if *prometheusDataPath == "" {
|
||||
return nil
|
||||
}
|
||||
d := time.Unix(int64(deadline.Deadline()), 0)
|
||||
ctx, cancel := context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
minTime, maxTime := getSearchTimeRange(sq)
|
||||
q, err := promDB.Querier(ctx, minTime, maxTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer mustCloseQuerier(q)
|
||||
var seriesSet []promstorage.SeriesSet
|
||||
for _, tf := range sq.TagFilterss {
|
||||
ms, err := convertTagFiltersToMatchers(tf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot convert tag filters to matchers: %w", err)
|
||||
}
|
||||
s := q.Select(false, nil, ms...)
|
||||
seriesSet = append(seriesSet, s)
|
||||
}
|
||||
ss := promstorage.NewMergeSeriesSet(seriesSet, promstorage.ChainedSeriesMerge)
|
||||
var (
|
||||
mn storage.MetricName
|
||||
metricName []byte
|
||||
values []float64
|
||||
timestamps []int64
|
||||
)
|
||||
for ss.Next() {
|
||||
s := ss.At()
|
||||
convertPromLabelsToMetricName(&mn, s.Labels())
|
||||
metricName = mn.SortAndMarshal(metricName[:0])
|
||||
values = values[:0]
|
||||
timestamps = timestamps[:0]
|
||||
if fetchData {
|
||||
it := s.Iterator()
|
||||
for it.Next() {
|
||||
ts, v := it.At()
|
||||
values = append(values, v)
|
||||
timestamps = append(timestamps, ts)
|
||||
}
|
||||
if err := it.Err(); err != nil {
|
||||
return fmt.Errorf("error when iterating Prometheus series: %w", err)
|
||||
}
|
||||
}
|
||||
f(metricName, values, timestamps)
|
||||
}
|
||||
return ss.Err()
|
||||
}
|
||||
|
||||
func getSearchTimeRange(sq *storage.SearchQuery) (int64, int64) {
|
||||
maxTime := sq.MaxTimestamp
|
||||
minTime := sq.MinTimestamp
|
||||
minRetentionTime := time.Now().Unix()*1000 - prometheusRetentionMsecs
|
||||
if maxTime < minRetentionTime {
|
||||
maxTime = minRetentionTime
|
||||
}
|
||||
if minTime < minRetentionTime {
|
||||
minTime = minRetentionTime
|
||||
}
|
||||
return minTime, maxTime
|
||||
}
|
||||
|
||||
func convertPromLabelsToMetricName(dst *storage.MetricName, labels []labels.Label) {
|
||||
dst.Reset()
|
||||
for _, label := range labels {
|
||||
if label.Name == "__name__" {
|
||||
dst.MetricGroup = append(dst.MetricGroup[:0], label.Value...)
|
||||
} else {
|
||||
dst.AddTag(label.Name, label.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func convertTagFiltersToMatchers(tfs []storage.TagFilter) ([]*labels.Matcher, error) {
|
||||
ms := make([]*labels.Matcher, 0, len(tfs))
|
||||
for _, tf := range tfs {
|
||||
var mt labels.MatchType
|
||||
if tf.IsNegative {
|
||||
if tf.IsRegexp {
|
||||
mt = labels.MatchNotRegexp
|
||||
} else {
|
||||
mt = labels.MatchNotEqual
|
||||
}
|
||||
} else {
|
||||
if tf.IsRegexp {
|
||||
mt = labels.MatchRegexp
|
||||
} else {
|
||||
mt = labels.MatchEqual
|
||||
}
|
||||
}
|
||||
key := string(tf.Key)
|
||||
if key == "" {
|
||||
key = "__name__"
|
||||
}
|
||||
value := string(tf.Value)
|
||||
m, err := labels.NewMatcher(mt, key, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ms = append(ms, m)
|
||||
}
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
func mustCloseQuerier(q promstorage.Querier) {
|
||||
if err := q.Close(); err != nil {
|
||||
logger.Panicf("FATAL: cannot close querier: %s", err)
|
||||
}
|
||||
}
|
||||
14
go.mod
14
go.mod
@@ -14,9 +14,16 @@ require (
|
||||
github.com/aws/aws-sdk-go v1.42.35
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/cheggaaa/pb/v3 v3.0.8
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/go-kit/kit v0.12.0
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/influxdata/influxdb v1.9.5
|
||||
github.com/klauspost/compress v1.14.1
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
|
||||
github.com/urfave/cli/v2 v2.3.0
|
||||
github.com/valyala/fastjson v1.6.3
|
||||
@@ -37,9 +44,6 @@ require (
|
||||
cloud.google.com/go/iam v0.1.0 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/go-kit/kit v0.12.0 // indirect
|
||||
github.com/go-kit/log v0.2.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
@@ -47,15 +51,11 @@ require (
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.11.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
|
||||
@@ -161,7 +161,8 @@ func (b *Block) deduplicateSamplesDuringMerge() {
|
||||
// Nothing to dedup.
|
||||
return
|
||||
}
|
||||
dedupInterval := GetDedupInterval()
|
||||
maxTimestamp := srcTimestamps[len(srcTimestamps)-1]
|
||||
dedupInterval := GetDedupInterval(maxTimestamp)
|
||||
if dedupInterval <= 0 {
|
||||
// Deduplication is disabled.
|
||||
return
|
||||
|
||||
@@ -1,27 +1,7 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// SetDedupInterval sets the deduplication interval, which is applied to raw samples during data ingestion and querying.
|
||||
//
|
||||
// De-duplication is disabled if dedupInterval is 0.
|
||||
//
|
||||
// This function must be called before initializing the storage.
|
||||
func SetDedupInterval(dedupInterval time.Duration) {
|
||||
globalDedupInterval = dedupInterval.Milliseconds()
|
||||
}
|
||||
|
||||
// GetDedupInterval returns the dedup interval in milliseconds, which has been set via SetDedupInterval.
|
||||
func GetDedupInterval() int64 {
|
||||
return globalDedupInterval
|
||||
}
|
||||
|
||||
var globalDedupInterval int64
|
||||
|
||||
func isDedupEnabled() bool {
|
||||
return globalDedupInterval > 0
|
||||
return len(downsamplingPeriods) > 0
|
||||
}
|
||||
|
||||
// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in millseconds.
|
||||
|
||||
123
lib/storage/downsampling.go
Normal file
123
lib/storage/downsampling.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
|
||||
// SetDownsamplingPeriods configures downsampling.
|
||||
//
|
||||
// The function must be called before opening or creating any storage.
|
||||
func SetDownsamplingPeriods(periods []string, dedupInterval time.Duration) error {
|
||||
dsps, err := parseDownsamplingPeriods(periods)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dedupIntervalMs := dedupInterval.Milliseconds()
|
||||
if dedupIntervalMs > 0 {
|
||||
if len(dsps) > 0 && dsps[len(dsps)-1].Offset == 0 {
|
||||
return fmt.Errorf("-dedup.minScrapeInterval=%s cannot be used if -downsampling.period=%s contains zero offset", dedupInterval, periods)
|
||||
}
|
||||
// Deduplication is a special case of downsampling with zero offset.
|
||||
dsps = append(dsps, DownsamplingPeriod{
|
||||
Offset: 0,
|
||||
Interval: dedupIntervalMs,
|
||||
})
|
||||
}
|
||||
downsamplingPeriods = dsps
|
||||
return nil
|
||||
}
|
||||
|
||||
// DownsamplingPeriod describes downsampling period
|
||||
type DownsamplingPeriod struct {
|
||||
// Offset in milliseconds from the current time when the downsampling with the given interval must be applied
|
||||
Offset int64
|
||||
// Interval for downsampling - only a single sample is left per each interval
|
||||
Interval int64
|
||||
}
|
||||
|
||||
// String implements interface
|
||||
func (dsp DownsamplingPeriod) String() string {
|
||||
offset := time.Duration(dsp.Offset) * time.Millisecond
|
||||
interval := time.Duration(dsp.Interval) * time.Millisecond
|
||||
return fmt.Sprintf("%s:%s", offset, interval)
|
||||
}
|
||||
|
||||
func (dsp *DownsamplingPeriod) parse(s string) error {
|
||||
idx := strings.Index(s, ":")
|
||||
if idx <= 0 {
|
||||
return fmt.Errorf("incorrect format for downsampling period: %s, want `offset:interval` format", s)
|
||||
}
|
||||
offsetStr, intervalStr := s[:idx], s[idx+1:]
|
||||
interval, err := metricsql.DurationValue(intervalStr, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("incorrect interval: %s format for downsampling interval: %s err: %w", intervalStr, s, err)
|
||||
}
|
||||
offset, err := metricsql.DurationValue(offsetStr, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("incorrect duration: %s format for downsampling offset: %s err: %w", offsetStr, s, err)
|
||||
}
|
||||
dsp.Interval = interval
|
||||
dsp.Offset = offset
|
||||
// sanity check
|
||||
if offset > 0 && interval > offset {
|
||||
return fmt.Errorf("downsampling interval=%d cannot exceed offset=%d", dsp.Interval, dsp.Offset)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var downsamplingPeriods []DownsamplingPeriod
|
||||
|
||||
// GetDedupInterval returns dedup interval, which must be applied to samples with the given timestamp.
|
||||
func GetDedupInterval(timestamp int64) int64 {
|
||||
dsp := getDownsamplingPeriod(timestamp)
|
||||
return dsp.Interval
|
||||
}
|
||||
|
||||
// getDownsamplingPeriod returns downsampling period, which must be used for the given timestamp
|
||||
func getDownsamplingPeriod(timestamp int64) DownsamplingPeriod {
|
||||
offset := int64(fasttime.UnixTimestamp())*1000 - timestamp
|
||||
for _, dsp := range downsamplingPeriods {
|
||||
if offset >= dsp.Offset {
|
||||
return dsp
|
||||
}
|
||||
}
|
||||
return DownsamplingPeriod{}
|
||||
}
|
||||
|
||||
func parseDownsamplingPeriods(periods []string) ([]DownsamplingPeriod, error) {
|
||||
if len(periods) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var dsps []DownsamplingPeriod
|
||||
for _, period := range periods {
|
||||
var dsp DownsamplingPeriod
|
||||
if err := dsp.parse(period); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse downsampling period %q: %w", period, err)
|
||||
}
|
||||
dsps = append(dsps, dsp)
|
||||
}
|
||||
sort.Slice(dsps, func(i, j int) bool {
|
||||
return dsps[i].Offset > dsps[j].Offset
|
||||
})
|
||||
dspPrev := dsps[0]
|
||||
// sanity checks.
|
||||
for _, dsp := range dsps[1:] {
|
||||
if dspPrev.Interval <= dsp.Interval {
|
||||
return nil, fmt.Errorf("prev downsampling interval %d must be bigger than the next interval %d", dspPrev.Interval, dsp.Interval)
|
||||
}
|
||||
if dspPrev.Offset == dsp.Offset {
|
||||
return nil, fmt.Errorf("duplicate downsampling offset: %d", dsp.Offset)
|
||||
}
|
||||
if dspPrev.Interval%dsp.Interval != 0 {
|
||||
return nil, fmt.Errorf("downsamping intervals must be multiples; prev: %d, current: %d", dspPrev.Interval, dsp.Interval)
|
||||
}
|
||||
dspPrev = dsp
|
||||
}
|
||||
return dsps, nil
|
||||
}
|
||||
62
lib/storage/downsampling_test.go
Normal file
62
lib/storage/downsampling_test.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseDownsamplingPeriodsFailure(t *testing.T) {
|
||||
f := func(name string, src []string) {
|
||||
t.Helper()
|
||||
t.Run(name, func(t *testing.T) {
|
||||
if _, err := parseDownsamplingPeriods(src); err == nil {
|
||||
t.Fatalf("want fail for input: %s", strings.Join(src, ","))
|
||||
}
|
||||
})
|
||||
}
|
||||
f("empty duration", []string{"15d"})
|
||||
f("empty interval", []string{":1m"})
|
||||
f("incorrect duration decrease", []string{"30d:15h", "60d:1h"})
|
||||
f("duplicate offset", []string{"30d:15h", "30d:1h"})
|
||||
f("duplicate interval", []string{"60d:1h", "30d:1h"})
|
||||
f("not multiple intervals", []string{"90d:12h", "60:9h", "30d:7h"})
|
||||
}
|
||||
|
||||
func TestParseDownsamplingPeriodsSuccess(t *testing.T) {
|
||||
f := func(name string, src []string, expected []DownsamplingPeriod) {
|
||||
t.Helper()
|
||||
t.Run(name, func(t *testing.T) {
|
||||
dsps, err := parseDownsamplingPeriods(src)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot parse downsampling configuration for: %s, err: %s", strings.Join(src, ","), err)
|
||||
}
|
||||
assertDownsamplingPeriods(t, expected, dsps)
|
||||
})
|
||||
}
|
||||
f("one period", []string{"30d:1m"}, []DownsamplingPeriod{
|
||||
{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
|
||||
})
|
||||
f("three periods", []string{"15d:30s", "30d:1m", "60d:15m"}, []DownsamplingPeriod{
|
||||
{Offset: 60 * 24 * 3600 * 1000, Interval: 15 * 60 * 1000},
|
||||
{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
|
||||
{Offset: 15 * 24 * 3600 * 1000, Interval: 30 * 1000},
|
||||
})
|
||||
f("with the same divider periods", []string{"15d:1m", "30d:7m", "60d:14m", "90d:28m"}, []DownsamplingPeriod{
|
||||
{Offset: 90 * 24 * 3600 * 1000, Interval: 28 * 60 * 1000},
|
||||
{Offset: 60 * 24 * 3600 * 1000, Interval: 14 * 60 * 1000},
|
||||
{Offset: 30 * 24 * 3600 * 1000, Interval: 7 * 60 * 1000},
|
||||
{Offset: 15 * 24 * 3600 * 1000, Interval: 60 * 1000},
|
||||
})
|
||||
}
|
||||
|
||||
func assertDownsamplingPeriods(t *testing.T, want, got []DownsamplingPeriod) {
|
||||
t.Helper()
|
||||
if len(want) != len(got) {
|
||||
t.Fatalf("len mismatch, want: %d, got: %d", len(want), len(got))
|
||||
}
|
||||
for i := 0; i < len(want); i++ {
|
||||
if want[i] != got[i] {
|
||||
t.Fatalf("want period: %s, got period: %s, idx: %d", want[i], got[i], i)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -358,6 +358,12 @@ func (mn *MetricName) String() string {
|
||||
return fmt.Sprintf("%s{%s}", mnCopy.MetricGroup, tagsStr)
|
||||
}
|
||||
|
||||
// SortAndMarshal sorts mn tags and then marshals them to dst.
//
// It is a convenience wrapper for Marshal, which requires tags to be sorted
// beforehand (see the comment on Marshal).
func (mn *MetricName) SortAndMarshal(dst []byte) []byte {
	mn.sortTags()
	return mn.Marshal(dst)
}
|
||||
|
||||
// Marshal appends marshaled mn to dst and returns the result.
|
||||
//
|
||||
// mn.sortTags must be called before calling this function
|
||||
|
||||
@@ -1086,7 +1086,7 @@ func (pt *partition) runFinalDedup() error {
|
||||
func (pt *partition) getRequiredDedupInterval() (int64, int64) {
|
||||
pws := pt.GetParts(nil)
|
||||
defer pt.PutParts(pws)
|
||||
dedupInterval := GetDedupInterval()
|
||||
dedupInterval := GetDedupInterval(pt.tr.MaxTimestamp)
|
||||
minDedupInterval := getMinDedupInterval(pws)
|
||||
return dedupInterval, minDedupInterval
|
||||
}
|
||||
@@ -1195,7 +1195,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro
|
||||
}
|
||||
bsrs = nil
|
||||
|
||||
ph.MinDedupInterval = GetDedupInterval()
|
||||
ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
|
||||
if err := ph.writeMinDedupInterval(tmpPartPath); err != nil {
|
||||
return fmt.Errorf("cannot store min dedup interval for part %q: %w", tmpPartPath, err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user