Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 08:36:55 +03:00)

Compare commits: debug/erro...pmm-6401-v (414 commits)
@@ -39,8 +39,16 @@ var (
 		"The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. "+
 		"Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
 		"Smaller intervals increase disk IO load. Minimum supported value is 1s")
+	downsamplingPeriods = flagutil.NewArrayString("downsampling.period", "Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs "+
+		"to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details")
 )

+// custom api help links [["/api","doc"]] without http.pathPrefix.
+var customAPIPathList = [][]string{
+	{"/graph/explore", "explore metrics grafana page"},
+	{"/graph/d/prometheus-advanced/advanced-data-exploration", "PMM grafana dashboard"},
+}
+
 func main() {
 	// Write flags and help message to stdout, since it is easier to grep or pipe.
 	flag.CommandLine.SetOutput(os.Stdout)

@@ -72,7 +80,10 @@ func main() {
 	}
 	logger.Infof("starting VictoriaMetrics at %q...", listenAddrs)
 	startTime := time.Now()
-	storage.SetDedupInterval(*minScrapeInterval)
+	err := storage.SetDownsamplingPeriods(*downsamplingPeriods, *minScrapeInterval)
+	if err != nil {
+		logger.Fatalf("cannot parse -downsampling.period: %s", err)
+	}
 	storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
 	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
 	vmselect.Init()

@@ -130,6 +141,10 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 			{"api/v1/status/active_queries", "active queries"},
 			{"-/reload", "reload configuration"},
 		})
+		for _, p := range customAPIPathList {
+			p, doc := p[0], p[1]
+			fmt.Fprintf(w, "<a href=%q>%s</a> - %s<br/>", p, p, doc)
+		}
 		return true
 	}
 	if vminsert.RequestHandler(w, r) {
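For reference, a minimal sketch of how the two flags wired above interact, using the `storage.SetDownsamplingPeriods` and `storage.GetDedupInterval` functions introduced later in this diff; the flag values are illustrative, and the import path assumes building against this fork:

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func main() {
	// Equivalent of -downsampling.period=30d:10m -dedup.minScrapeInterval=30s:
	// the dedup interval is appended as an extra zero-offset downsampling period.
	if err := storage.SetDownsamplingPeriods([]string{"30d:10m"}, 30*time.Second); err != nil {
		fmt.Println("cannot parse -downsampling.period:", err)
		return
	}
	now := time.Now().UnixMilli()
	day := int64(24 * time.Hour / time.Millisecond)
	// Samples older than 30 days get the 10m interval (600000 ms)...
	fmt.Println(storage.GetDedupInterval(now - 40*day))
	// ...while fresh samples get the zero-offset 30s dedup interval (30000 ms).
	fmt.Println(storage.GetDedupInterval(now))
}
```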
@@ -16,6 +16,7 @@ import (

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
@@ -337,6 +338,12 @@ var (
 type packedTimeseries struct {
 	metricName string
 	brs        []blockRef
+	pd         *promData
 }

+type promData struct {
+	values     []float64
+	timestamps []int64
+}
+
 type unpackWork struct {
@@ -440,9 +447,21 @@ func (pts *packedTimeseries) Unpack(dst *Result, tbf *tmpBlocksFile, tr storage.
 		putSortBlocksHeap(sbh)
 		return err
 	}
-	dedupInterval := storage.GetDedupInterval()
+	if pts.pd != nil {
+		// Add data from Prometheus to dst.
+		// It usually has smaller timestamps than the data from sbs, so put it first.
+		dst.Values = append(dst.Values, pts.pd.values...)
+		dst.Timestamps = append(dst.Timestamps, pts.pd.timestamps...)
+	}
+	dedupInterval := storage.GetDedupInterval(tr.MinTimestamp)
 	mergeSortBlocks(dst, sbh, dedupInterval)
 	putSortBlocksHeap(sbh)
+	if pts.pd != nil {
+		if !sort.IsSorted(dst) {
+			sort.Sort(dst)
+		}
+		pts.pd = nil
+	}
 	return nil
 }
@@ -559,6 +578,27 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
 	return dst, firstErr
 }

+// sort.Interface implementation for Result
+
+// Len implements sort.Interface
+func (r *Result) Len() int {
+	return len(r.Timestamps)
+}
+
+// Less implements sort.Interface
+func (r *Result) Less(i, j int) bool {
+	timestamps := r.Timestamps
+	return timestamps[i] < timestamps[j]
+}
+
+// Swap implements sort.Interface
+func (r *Result) Swap(i, j int) {
+	timestamps := r.Timestamps
+	values := r.Values
+	timestamps[i], timestamps[j] = timestamps[j], timestamps[i]
+	values[i], values[j] = values[j], values[i]
+}
+
 func getSortBlock() *sortBlock {
 	v := sbPool.Get()
 	if v == nil {
@@ -796,6 +836,15 @@ func LabelNames(qt *querytracer.Tracer, sq *storage.SearchQuery, maxLabelNames i
 	if err != nil {
 		return nil, fmt.Errorf("error during labels search on time range: %w", err)
 	}
+
+	// Merge labels obtained from Prometheus storage.
+	promLabels, err := promdb.GetLabelNamesOnTimeRange(tr, deadline)
+	if err != nil {
+		return nil, fmt.Errorf("cannot obtain labels from Prometheus storage: %w", err)
+	}
+	qt.Printf("get %d label names from Prometheus storage", len(promLabels))
+	labels = mergeStrings(labels, promLabels)
+
 	// Sort labels like Prometheus does
 	sort.Strings(labels)
 	qt.Printf("sort %d labels", len(labels))
@@ -867,14 +916,44 @@ func LabelValues(qt *querytracer.Tracer, labelName string, sq *storage.SearchQue
 	}
 	labelValues, err := vmstorage.SearchLabelValuesWithFiltersOnTimeRange(qt, labelName, tfss, tr, maxLabelValues, sq.MaxMetrics, deadline.Deadline())
 	if err != nil {
-		return nil, fmt.Errorf("error during label values search on time range for labelName=%q: %w", labelName, err)
+		return nil, fmt.Errorf("error during label values search on time range: %w", err)
 	}

+	// Merge label values obtained from Prometheus storage.
+	promLabelValues, err := promdb.GetLabelValuesOnTimeRange(labelName, tr, deadline)
+	if err != nil {
+		return nil, fmt.Errorf("cannot obtain label values on time range for %q from Prometheus storage: %w", labelName, err)
+	}
+	qt.Printf("get %d label values from Prometheus storage", len(promLabelValues))
+	labelValues = mergeStrings(labelValues, promLabelValues)
+
 	// Sort labelValues like Prometheus does
 	sort.Strings(labelValues)
 	qt.Printf("sort %d label values", len(labelValues))
 	return labelValues, nil
 }

+func mergeStrings(a, b []string) []string {
+	if len(a) == 0 {
+		return b
+	}
+	if len(b) == 0 {
+		return a
+	}
+	m := make(map[string]struct{}, len(a)+len(b))
+	for _, s := range a {
+		m[s] = struct{}{}
+	}
+	for _, s := range b {
+		m[s] = struct{}{}
+	}
+	result := make([]string, 0, len(m))
+	for s := range m {
+		result = append(result, s)
+	}
+	return result
+}
+
 // GraphiteTagValues returns tag values for the given tagName until the given deadline.
 func GraphiteTagValues(qt *querytracer.Tracer, tagName, filter string, limit int, deadline searchutils.Deadline) ([]string, error) {
 	qt = qt.NewChild("get graphite tag values for tagName=%s, filter=%s, limit=%d", tagName, filter, limit)
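One detail worth noting about the `mergeStrings` helper added above: it returns the union in unspecified order, since Go map iteration is randomized, which is why both call sites sort the merged slice afterwards. A small standalone demonstration, with the helper copied verbatim from the diff:

```go
package main

import (
	"fmt"
	"sort"
)

// mergeStrings mirrors the helper added in this diff: it returns the union
// of a and b, deduplicated, in unspecified order.
func mergeStrings(a, b []string) []string {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	m := make(map[string]struct{}, len(a)+len(b))
	for _, s := range a {
		m[s] = struct{}{}
	}
	for _, s := range b {
		m[s] = struct{}{}
	}
	result := make([]string, 0, len(m))
	for s := range m {
		result = append(result, s)
	}
	return result
}

func main() {
	labels := mergeStrings([]string{"job", "instance"}, []string{"instance", "pod"})
	// Callers in the diff sort the merged result, since map order is random.
	sort.Strings(labels)
	fmt.Println(labels) // [instance job pod]
}
```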
@@ -1280,6 +1359,26 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
 	}
 	qt.Printf("fetch unique series=%d, blocks=%d, samples=%d, bytes=%d", len(m), blocksRead, samples, tbf.Len())

+	// Fetch data from promdb.
+	pm := make(map[string]*promData)
+	err = promdb.VisitSeries(sq, deadline, func(metricName []byte, values []float64, timestamps []int64) {
+		pd := pm[string(metricName)]
+		if pd == nil {
+			if _, ok := m[string(metricName)]; !ok {
+				orderedMetricNames = append(orderedMetricNames, string(metricName))
+			}
+			pd = &promData{}
+			pm[string(metricName)] = pd
+		}
+		pd.values = append(pd.values, values...)
+		pd.timestamps = append(pd.timestamps, timestamps...)
+	})
+	if err != nil {
+		putTmpBlocksFile(tbf)
+		putStorageSearch(sr)
+		return nil, fmt.Errorf("error when searching in Prometheus data: %w", err)
+	}
+
 	var rss Results
 	rss.tr = tr
 	rss.deadline = deadline
@@ -1288,6 +1387,7 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
 		pts[i] = packedTimeseries{
 			metricName: metricName,
 			brs:        brssPool[m[metricName]].brs,
+			pd:         pm[metricName],
 		}
 	}
 	rss.packedTimeseries = pts
@@ -12,6 +12,7 @@ import (

 	"github.com/VictoriaMetrics/metrics"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"

@@ -124,9 +125,11 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
 	// register storage metrics
 	storageMetrics = metrics.NewSet()
 	storageMetrics.RegisterMetricsWriter(func(w io.Writer) {
-		writeStorageMetrics(w, strg)
+		writeStorageMetrics(w, Storage)
 	})
 	metrics.RegisterSet(storageMetrics)
+
+	promdb.Init(retentionPeriod.Milliseconds())
 }

 var storageMetrics *metrics.Set

@@ -247,6 +250,7 @@ func Stop() {
 	logger.Infof("gracefully closing the storage at %s", *DataPath)
 	startTime := time.Now()
 	WG.WaitAndBlock()
+	promdb.MustClose()
 	stopStaleSnapshotsRemover()
 	Storage.MustClose()
 	logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())
app/vmstorage/promdb/promdb.go (new file, 270 lines)
@@ -0,0 +1,270 @@
package promdb

import (
	"context"
	"flag"
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/go-kit/kit/log"
	"github.com/oklog/ulid"
	"github.com/prometheus/prometheus/model/labels"
	promstorage "github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

var prometheusDataPath = flag.String("prometheusDataPath", "", "Optional path to readonly historical Prometheus data")

var prometheusRetentionMsecs int64

// Init must be called after flag.Parse and before using the package.
//
// See also MustClose.
func Init(retentionMsecs int64) {
	if promDB != nil {
		logger.Fatalf("BUG: promdb.Init is called multiple times without promdb.MustClose call")
	}
	prometheusRetentionMsecs = retentionMsecs
	if *prometheusDataPath == "" {
		return
	}
	l := log.LoggerFunc(func(a ...interface{}) error {
		logger.Infof("%v", a)
		return nil
	})
	opts := tsdb.DefaultOptions()
	opts.RetentionDuration = retentionMsecs

	// Set max block duration to 10% of retention period or 31 days
	// according to https://prometheus.io/docs/prometheus/latest/storage/#compaction
	maxBlockDuration := int64((31 * 24 * time.Hour) / time.Millisecond)
	if maxBlockDuration > retentionMsecs/10 {
		maxBlockDuration = retentionMsecs / 10
	}
	if maxBlockDuration < opts.MinBlockDuration {
		maxBlockDuration = opts.MinBlockDuration
	}
	opts.MaxBlockDuration = maxBlockDuration

	// Custom delete function is needed, because Prometheus by default doesn't delete
	// blocks outside the retention if no new blocks are created with samples with the current timestamps.
	// See https://github.com/prometheus/prometheus/blob/997bb7134fcfd7279f250e183e78681e48a56aff/tsdb/db.go#L1116
	opts.BlocksToDelete = func(blocks []*tsdb.Block) map[ulid.ULID]struct{} {
		m := make(map[ulid.ULID]struct{})
		minRetentionTime := time.Now().Unix()*1000 - retentionMsecs
		for _, block := range blocks {
			meta := block.Meta()
			// delete block marked for deletion by compaction code.
			if meta.Compaction.Deletable {
				m[meta.ULID] = struct{}{}
				continue
			}
			if block.MaxTime() < minRetentionTime {
				m[meta.ULID] = struct{}{}
			}
		}
		return m
	}
	pdb, err := tsdb.Open(*prometheusDataPath, l, nil, opts, nil)
	if err != nil {
		logger.Panicf("FATAL: cannot open Prometheus data at -prometheusDataPath=%q: %s", *prometheusDataPath, err)
	}
	promDB = pdb
	logger.Infof("successfully opened historical Prometheus data at -prometheusDataPath=%q with retentionMsecs=%d", *prometheusDataPath, retentionMsecs)
}

// MustClose must be called on graceful shutdown.
//
// Package functionality cannot be used after this call.
func MustClose() {
	if *prometheusDataPath == "" {
		return
	}
	if promDB == nil {
		logger.Panicf("BUG: promdb.MustClose is called without promdb.Init call")
	}
	if err := promDB.Close(); err != nil {
		logger.Panicf("FATAL: cannot close promDB: %s", err)
	}
	promDB = nil
	logger.Infof("successfully closed historical Prometheus data at -prometheusDataPath=%q", *prometheusDataPath)
}

var promDB *tsdb.DB

// GetLabelNamesOnTimeRange returns label names.
func GetLabelNamesOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
	if *prometheusDataPath == "" {
		return nil, nil
	}
	d := time.Unix(int64(deadline.Deadline()), 0)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	q, err := promDB.Querier(tr.MinTimestamp, tr.MaxTimestamp)
	if err != nil {
		return nil, err
	}
	defer mustCloseQuerier(q)

	names, _, err := q.LabelNames(ctx)
	// Make full copy of names, since they cannot be used after q is closed.
	names = copyStringsWithMemory(names)
	return names, err
}

// GetLabelValuesOnTimeRange returns values for the given labelName on the given tr.
func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline searchutils.Deadline) ([]string, error) {
	if *prometheusDataPath == "" {
		return nil, nil
	}
	d := time.Unix(int64(deadline.Deadline()), 0)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	q, err := promDB.Querier(tr.MinTimestamp, tr.MaxTimestamp)
	if err != nil {
		return nil, err
	}
	defer mustCloseQuerier(q)

	values, _, err := q.LabelValues(ctx, labelName)
	// Make full copy of values, since they cannot be used after q is closed.
	values = copyStringsWithMemory(values)
	return values, err
}

func copyStringsWithMemory(a []string) []string {
	result := make([]string, len(a))
	for i, s := range a {
		result[i] = string(append([]byte{}, s...))
	}
	return result
}

// SeriesVisitor is called by VisitSeries for each matching time series.
//
// The caller shouldn't hold references to metricName, values and timestamps after returning.
type SeriesVisitor func(metricName []byte, values []float64, timestamps []int64)

// VisitSeries calls f for each series found in the pdb.
func VisitSeries(sq *storage.SearchQuery, deadline searchutils.Deadline, f SeriesVisitor) error {
	if *prometheusDataPath == "" {
		return nil
	}
	d := time.Unix(int64(deadline.Deadline()), 0)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	minTime, maxTime := getSearchTimeRange(sq)
	q, err := promDB.Querier(minTime, maxTime)
	if err != nil {
		return err
	}
	defer mustCloseQuerier(q)
	var seriesSet []promstorage.SeriesSet
	for _, tf := range sq.TagFilterss {
		ms, err := convertTagFiltersToMatchers(tf)
		if err != nil {
			return fmt.Errorf("cannot convert tag filters to matchers: %w", err)
		}
		s := q.Select(ctx, false, nil, ms...)
		seriesSet = append(seriesSet, s)
	}
	ss := promstorage.NewMergeSeriesSet(seriesSet, promstorage.ChainedSeriesMerge)
	var (
		mn         storage.MetricName
		metricName []byte
		values     []float64
		timestamps []int64
	)
	var it chunkenc.Iterator
	for ss.Next() {
		s := ss.At()
		convertPromLabelsToMetricName(&mn, s.Labels())
		metricName = mn.SortAndMarshal(metricName[:0])
		values = values[:0]
		timestamps = timestamps[:0]
		it = s.Iterator(it)
		for {
			typ := it.Next()
			if typ == chunkenc.ValNone {
				break
			}
			if typ != chunkenc.ValFloat {
				// Skip unsupported values
				continue
			}
			ts, v := it.At()
			values = append(values, v)
			timestamps = append(timestamps, ts)
		}
		if err := it.Err(); err != nil {
			return fmt.Errorf("error when iterating Prometheus series: %w", err)
		}
		f(metricName, values, timestamps)
	}
	return ss.Err()
}

func getSearchTimeRange(sq *storage.SearchQuery) (int64, int64) {
	maxTime := sq.MaxTimestamp
	minTime := sq.MinTimestamp
	minRetentionTime := time.Now().Unix()*1000 - prometheusRetentionMsecs
	if maxTime < minRetentionTime {
		maxTime = minRetentionTime
	}
	if minTime < minRetentionTime {
		minTime = minRetentionTime
	}
	return minTime, maxTime
}

func convertPromLabelsToMetricName(dst *storage.MetricName, labels []labels.Label) {
	dst.Reset()
	for _, label := range labels {
		if label.Name == "__name__" {
			dst.MetricGroup = append(dst.MetricGroup[:0], label.Value...)
		} else {
			dst.AddTag(label.Name, label.Value)
		}
	}
}

func convertTagFiltersToMatchers(tfs []storage.TagFilter) ([]*labels.Matcher, error) {
	ms := make([]*labels.Matcher, 0, len(tfs))
	for _, tf := range tfs {
		var mt labels.MatchType
		if tf.IsNegative {
			if tf.IsRegexp {
				mt = labels.MatchNotRegexp
			} else {
				mt = labels.MatchNotEqual
			}
		} else {
			if tf.IsRegexp {
				mt = labels.MatchRegexp
			} else {
				mt = labels.MatchEqual
			}
		}
		key := string(tf.Key)
		if key == "" {
			key = "__name__"
		}
		value := string(tf.Value)
		m, err := labels.NewMatcher(mt, key, value)
		if err != nil {
			return nil, err
		}
		ms = append(ms, m)
	}
	return ms, nil
}

func mustCloseQuerier(q promstorage.Querier) {
	if err := q.Close(); err != nil {
		logger.Panicf("FATAL: cannot close querier: %s", err)
	}
}
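A hedged usage sketch of the package above, showing the intended call order: Init once at startup (done by vmstorage.Init with the configured retention), queries while serving, MustClose on shutdown. The function `dumpHistoricalSeries` is hypothetical; only the promdb calls themselves come from the file above:

```go
package example

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/promdb"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

// dumpHistoricalSeries is a hypothetical caller. In this diff, vmstorage.Init
// calls promdb.Init(retentionMsecs) once, and vmstorage.Stop calls promdb.MustClose().
func dumpHistoricalSeries(sq *storage.SearchQuery, deadline searchutils.Deadline) error {
	tr := storage.TimeRange{MinTimestamp: sq.MinTimestamp, MaxTimestamp: sq.MaxTimestamp}
	names, err := promdb.GetLabelNamesOnTimeRange(tr, deadline)
	if err != nil {
		return err
	}
	fmt.Printf("label names in historical Prometheus data: %v\n", names)

	// VisitSeries streams float samples per series; the callback must copy
	// metricName, values and timestamps if it needs them after returning.
	return promdb.VisitSeries(sq, deadline, func(metricName []byte, values []float64, timestamps []int64) {
		fmt.Printf("%s: %d samples\n", metricName, len(values))
	})
}
```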
@@ -81,7 +81,6 @@ publish-via-docker:
 		--build-arg root_image=$(ROOT_IMAGE) \
 		--build-arg APP_NAME=$(APP_NAME) \
 		--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
 		--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(LATEST_TAG)$(RACE) \
 		-o type=image \
-		--provenance=false \
 		-f app/$(APP_NAME)/multiarch/Dockerfile \
go.mod (10 lines changed)
@@ -26,7 +26,8 @@ require (
 	github.com/influxdata/influxdb v1.11.6
 	github.com/klauspost/compress v1.17.10
 	github.com/mattn/go-isatty v0.0.20
-	github.com/prometheus/prometheus v0.54.1
+	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/prometheus v0.53.1
 	github.com/urfave/cli/v2 v2.27.4
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0

@@ -41,6 +42,11 @@ require (
 	gopkg.in/yaml.v2 v2.4.0
 )

+require (
+	github.com/go-kit/kit v0.12.0
+	github.com/oklog/ulid v1.3.1
+)
+
 require (
 	cloud.google.com/go v0.115.1 // indirect
 	cloud.google.com/go/auth v0.9.5 // indirect

@@ -94,12 +100,10 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
-	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.20.4 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.59.1 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
go.sum (58 lines changed)
@@ -160,12 +160,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
-github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
+github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
-github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=

@@ -199,6 +199,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
+github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=

@@ -304,40 +306,40 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
 github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
-github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
-github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
-github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
+github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
+github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
 github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
 github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
+github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
 github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
-github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=

@@ -380,8 +382,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
-github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
+github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
+github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=

@@ -392,8 +394,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
-github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
+github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
+github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=

@@ -418,8 +420,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
-github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
+github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
+github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

@@ -447,8 +449,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
-github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=

@@ -457,8 +459,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ=
-github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
+github.com/prometheus/prometheus v0.53.1 h1:B0xu4VuVTKYrIuBMn/4YSUoIPYxs956qsOfcS4rqCuA=
+github.com/prometheus/prometheus v0.53.1/go.mod h1:RZDkzs+ShMBDkAPQkLEaLBXpjmDcjhNxU2drUVPgKUU=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=

@@ -467,8 +469,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -163,7 +163,8 @@ func (b *Block) deduplicateSamplesDuringMerge() {
 		// Nothing to dedup.
 		return
 	}
-	dedupInterval := GetDedupInterval()
+	maxTimestamp := srcTimestamps[len(srcTimestamps)-1]
+	dedupInterval := GetDedupInterval(maxTimestamp)
 	if dedupInterval <= 0 {
 		// Deduplication is disabled.
 		return
@@ -1,27 +1,7 @@
 package storage

-import (
-	"time"
-)
-
-// SetDedupInterval sets the deduplication interval, which is applied to raw samples during data ingestion and querying.
-//
-// De-duplication is disabled if dedupInterval is 0.
-//
-// This function must be called before initializing the storage.
-func SetDedupInterval(dedupInterval time.Duration) {
-	globalDedupInterval = dedupInterval.Milliseconds()
-}
-
-// GetDedupInterval returns the dedup interval in milliseconds, which has been set via SetDedupInterval.
-func GetDedupInterval() int64 {
-	return globalDedupInterval
-}
-
-var globalDedupInterval int64
-
 func isDedupEnabled() bool {
-	return globalDedupInterval > 0
+	return len(downsamplingPeriods) > 0
 }

 // DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in milliseconds.
lib/storage/downsampling.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package storage

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/metricsql"
)

// SetDownsamplingPeriods configures downsampling.
//
// The function must be called before opening or creating any storage.
func SetDownsamplingPeriods(periods []string, dedupInterval time.Duration) error {
	dsps, err := parseDownsamplingPeriods(periods)
	if err != nil {
		return err
	}
	dedupIntervalMs := dedupInterval.Milliseconds()
	if dedupIntervalMs > 0 {
		if len(dsps) > 0 && dsps[len(dsps)-1].Offset == 0 {
			return fmt.Errorf("-dedup.minScrapeInterval=%s cannot be used if -downsampling.period=%s contains zero offset", dedupInterval, periods)
		}
		// Deduplication is a special case of downsampling with zero offset.
		dsps = append(dsps, DownsamplingPeriod{
			Offset:   0,
			Interval: dedupIntervalMs,
		})
	}
	downsamplingPeriods = dsps
	return nil
}

// DownsamplingPeriod describes a downsampling period.
type DownsamplingPeriod struct {
	// Offset in milliseconds from the current time when the downsampling with the given interval must be applied
	Offset int64
	// Interval for downsampling - only a single sample is left per each interval
	Interval int64
}

// String implements fmt.Stringer.
func (dsp DownsamplingPeriod) String() string {
	offset := time.Duration(dsp.Offset) * time.Millisecond
	interval := time.Duration(dsp.Interval) * time.Millisecond
	return fmt.Sprintf("%s:%s", offset, interval)
}

func (dsp *DownsamplingPeriod) parse(s string) error {
	idx := strings.Index(s, ":")
	if idx <= 0 {
		return fmt.Errorf("incorrect format for downsampling period: %s, want `offset:interval` format", s)
	}
	offsetStr, intervalStr := s[:idx], s[idx+1:]
	interval, err := metricsql.DurationValue(intervalStr, 0)
	if err != nil {
		return fmt.Errorf("incorrect interval: %s format for downsampling interval: %s err: %w", intervalStr, s, err)
	}
	offset, err := metricsql.DurationValue(offsetStr, 0)
	if err != nil {
		return fmt.Errorf("incorrect duration: %s format for downsampling offset: %s err: %w", offsetStr, s, err)
	}
	dsp.Interval = interval
	dsp.Offset = offset
	// sanity check
	if offset > 0 && interval > offset {
		return fmt.Errorf("downsampling interval=%d cannot exceed offset=%d", dsp.Interval, dsp.Offset)
	}
	return nil
}

var downsamplingPeriods []DownsamplingPeriod

// GetDedupInterval returns dedup interval, which must be applied to samples with the given timestamp.
func GetDedupInterval(timestamp int64) int64 {
	dsp := getDownsamplingPeriod(timestamp)
	return dsp.Interval
}

// getDownsamplingPeriod returns the downsampling period, which must be used for the given timestamp
func getDownsamplingPeriod(timestamp int64) DownsamplingPeriod {
	offset := int64(fasttime.UnixTimestamp())*1000 - timestamp
	for _, dsp := range downsamplingPeriods {
		if offset >= dsp.Offset {
			return dsp
		}
	}
	return DownsamplingPeriod{}
}

func parseDownsamplingPeriods(periods []string) ([]DownsamplingPeriod, error) {
	if len(periods) == 0 {
		return nil, nil
	}
	var dsps []DownsamplingPeriod
	for _, period := range periods {
		var dsp DownsamplingPeriod
		if err := dsp.parse(period); err != nil {
			return nil, fmt.Errorf("cannot parse downsampling period %q: %w", period, err)
		}
		dsps = append(dsps, dsp)
	}
	sort.Slice(dsps, func(i, j int) bool {
		return dsps[i].Offset > dsps[j].Offset
	})
	dspPrev := dsps[0]
	// sanity checks.
	for _, dsp := range dsps[1:] {
		if dspPrev.Interval <= dsp.Interval {
			return nil, fmt.Errorf("prev downsampling interval %d must be bigger than the next interval %d", dspPrev.Interval, dsp.Interval)
		}
		if dspPrev.Offset == dsp.Offset {
			return nil, fmt.Errorf("duplicate downsampling offset: %d", dsp.Offset)
		}
		if dspPrev.Interval%dsp.Interval != 0 {
			return nil, fmt.Errorf("downsampling intervals must be multiples; prev: %d, current: %d", dspPrev.Interval, dsp.Interval)
		}
		dspPrev = dsp
	}
	return dsps, nil
}
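A short sketch of the validation rules encoded above: after sorting by descending offset, intervals must strictly decrease toward newer data, offsets must be unique, and older intervals must be exact multiples of newer ones. The values here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func main() {
	// Valid: 15m for data older than 60d, 1m for data older than 30d (15m % 1m == 0).
	fmt.Println(storage.SetDownsamplingPeriods([]string{"30d:1m", "60d:15m"}, 0)) // <nil>

	// Invalid: the interval for older data (1h) is not bigger than the interval
	// for newer data (15h), so parsing fails with
	// "prev downsampling interval ... must be bigger than the next interval ...".
	fmt.Println(storage.SetDownsamplingPeriods([]string{"30d:15h", "60d:1h"}, 0))
}
```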
lib/storage/downsampling_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package storage

import (
	"strings"
	"testing"
)

func TestParseDownsamplingPeriodsFailure(t *testing.T) {
	f := func(name string, src []string) {
		t.Helper()
		t.Run(name, func(t *testing.T) {
			if _, err := parseDownsamplingPeriods(src); err == nil {
				t.Fatalf("want fail for input: %s", strings.Join(src, ","))
			}
		})
	}
	f("empty duration", []string{"15d"})
	f("empty interval", []string{":1m"})
	f("incorrect duration decrease", []string{"30d:15h", "60d:1h"})
	f("duplicate offset", []string{"30d:15h", "30d:1h"})
	f("duplicate interval", []string{"60d:1h", "30d:1h"})
	f("not multiple intervals", []string{"90d:12h", "60:9h", "30d:7h"})
}

func TestParseDownsamplingPeriodsSuccess(t *testing.T) {
	f := func(name string, src []string, expected []DownsamplingPeriod) {
		t.Helper()
		t.Run(name, func(t *testing.T) {
			dsps, err := parseDownsamplingPeriods(src)
			if err != nil {
				t.Fatalf("cannot parse downsampling configuration for: %s, err: %s", strings.Join(src, ","), err)
			}
			assertDownsamplingPeriods(t, expected, dsps)
		})
	}
	f("one period", []string{"30d:1m"}, []DownsamplingPeriod{
		{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
	})
	f("three periods", []string{"15d:30s", "30d:1m", "60d:15m"}, []DownsamplingPeriod{
		{Offset: 60 * 24 * 3600 * 1000, Interval: 15 * 60 * 1000},
		{Offset: 30 * 24 * 3600 * 1000, Interval: 60 * 1000},
		{Offset: 15 * 24 * 3600 * 1000, Interval: 30 * 1000},
	})
	f("with the same divider periods", []string{"15d:1m", "30d:7m", "60d:14m", "90d:28m"}, []DownsamplingPeriod{
		{Offset: 90 * 24 * 3600 * 1000, Interval: 28 * 60 * 1000},
		{Offset: 60 * 24 * 3600 * 1000, Interval: 14 * 60 * 1000},
		{Offset: 30 * 24 * 3600 * 1000, Interval: 7 * 60 * 1000},
		{Offset: 15 * 24 * 3600 * 1000, Interval: 60 * 1000},
	})
}

func assertDownsamplingPeriods(t *testing.T, want, got []DownsamplingPeriod) {
	t.Helper()
	if len(want) != len(got) {
		t.Fatalf("len mismatch, want: %d, got: %d", len(want), len(got))
	}
	for i := 0; i < len(want); i++ {
		if want[i] != got[i] {
			t.Fatalf("want period: %s, got period: %s, idx: %d", want[i], got[i], i)
		}
	}
}
@@ -408,6 +408,12 @@ func (mn *MetricName) String() string {
 	return fmt.Sprintf("%s{%s}", mnCopy.MetricGroup, tagsStr)
 }

+// SortAndMarshal sorts mn tags and then marshals them to dst.
+func (mn *MetricName) SortAndMarshal(dst []byte) []byte {
+	mn.sortTags()
+	return mn.Marshal(dst)
+}
+
 // Marshal appends marshaled mn to dst and returns the result.
 //
 // mn.sortTags must be called before calling this function
@@ -1344,7 +1344,7 @@ func (pt *partition) runFinalDedup(stopCh <-chan struct{}) error {
 }

 func (pt *partition) isFinalDedupNeeded() bool {
-	dedupInterval := GetDedupInterval()
+	dedupInterval := GetDedupInterval(pt.tr.MaxTimestamp)

 	pws := pt.GetParts(nil, false)
 	minDedupInterval := getMinDedupInterval(pws)

@@ -1577,7 +1577,7 @@ func (pt *partition) mergePartsInternal(dstPartPath string, bsw *blockStreamWrit
 		return nil, fmt.Errorf("cannot merge %d parts to %s: %w", len(bsrs), dstPartPath, err)
 	}
 	if dstPartPath != "" {
-		ph.MinDedupInterval = GetDedupInterval()
+		ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
 		ph.MustWriteMetadata(dstPartPath)
 	}
 	return &ph, nil
vendor/github.com/go-kit/kit/LICENSE (new file, generated, vendored, 22 lines)
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Peter Bourgon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
160
vendor/github.com/go-kit/kit/log/README.md
generated
vendored
Normal file
160
vendor/github.com/go-kit/kit/log/README.md
generated
vendored
Normal file
@@ -0,0 +1,160 @@
# package log

**Deprecation notice:** The core Go kit log packages (log, log/level, log/term, and
log/syslog) have been moved to their own repository at github.com/go-kit/log.
The corresponding packages in this directory remain for backwards compatibility.
Their types alias the types and their functions call the functions provided by
the new repository. Using either import path should be equivalent. Prefer the
new import path when practical.

______

`package log` provides a minimal interface for structured logging in services.
It may be wrapped to encode conventions, enforce type-safety, provide leveled
logging, and so on. It can be used for both typical application log events,
and log-structured data streams.

## Structured logging

Structured logging is, basically, conceding to the reality that logs are
_data_, and warrant some level of schematic rigor. Using a stricter,
key/value-oriented message format for our logs, containing contextual and
semantic information, makes it much easier to get insight into the
operational activity of the systems we build. Consequently, `package log` is
of the strong belief that "[the benefits of structured logging outweigh the
minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".

Migrating from unstructured to structured logging is probably a lot easier
than you'd expect.

```go
// Unstructured
log.Printf("HTTP server listening on %s", addr)

// Structured
logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
```

## Usage

### Typical application logging

```go
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)
logger.Log("question", "what is the meaning of life?", "answer", 42)

// Output:
// question="what is the meaning of life?" answer=42
```

### Contextual Loggers

```go
func main() {
    var logger log.Logger
    logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
    logger = log.With(logger, "instance_id", 123)

    logger.Log("msg", "starting")
    NewWorker(log.With(logger, "component", "worker")).Run()
    NewSlacker(log.With(logger, "component", "slacker")).Run()
}

// Output:
// instance_id=123 msg=starting
// instance_id=123 component=worker msg=running
// instance_id=123 component=slacker msg=running
```

### Interact with stdlib logger

Redirect stdlib logger to Go kit logger.

```go
import (
    "os"
    stdlog "log"
    kitlog "github.com/go-kit/kit/log"
)

func main() {
    logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
    stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
    stdlog.Print("I sure like pie")
}

// Output:
// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
```

Or, if, for legacy reasons, you need to pipe all of your logging through the
stdlib log package, you can redirect Go kit logger to the stdlib logger.

```go
logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
logger.Log("legacy", true, "msg", "at least it's something")

// Output:
// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
```

### Timestamps and callers

```go
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

logger.Log("msg", "hello")

// Output:
// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
```

## Levels

Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level).
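
For example, a brief sketch of typical leveled logging (filtering plus leveled emission; see the level package docs for the full API):

```go
package main

import (
    "errors"
    "os"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
)

func main() {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
    // Only allow info-level events and above to pass through the filter.
    logger = level.NewFilter(logger, level.AllowInfo())

    level.Debug(logger).Log("msg", "this event is filtered out")
    level.Info(logger).Log("msg", "this event is emitted")
    level.Error(logger).Log("err", errors.New("something failed"))
}
```
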
## Supported output formats

- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
- JSON

## Enhancements

`package log` is centered on the one-method Logger interface.

```go
type Logger interface {
    Log(keyvals ...interface{}) error
}
```

This interface, and its supporting code, is the product of much iteration
and evaluation. For more details on the evolution of the Logger interface,
see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
a talk by [Chris Hines](https://github.com/ChrisHines).
Also, please see
[#63](https://github.com/go-kit/kit/issues/63),
[#76](https://github.com/go-kit/kit/pull/76),
[#131](https://github.com/go-kit/kit/issues/131),
[#157](https://github.com/go-kit/kit/pull/157),
[#164](https://github.com/go-kit/kit/issues/164), and
[#252](https://github.com/go-kit/kit/pull/252)
to review historical conversations about package log and the Logger interface.

Value-add packages and suggestions,
like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
are of course welcome. Good proposals should

- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
- Be friendly to packages that accept only an unadorned log.Logger.

## Benchmarks & comparisons

There are a few Go logging benchmarks and comparisons that include Go kit's package log.

- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
118 vendor/github.com/go-kit/kit/log/doc.go generated vendored Normal file
@@ -0,0 +1,118 @@
// Package log provides a structured logger.
//
// Deprecated: Use github.com/go-kit/log instead.
//
// Structured logging produces logs easily consumed later by humans or
// machines. Humans might be interested in debugging errors, or tracing
// specific requests. Machines might be interested in counting interesting
// events, or aggregating information for off-line processing. In both cases,
// it is important that the log messages are structured and actionable.
// Package log is designed to encourage both of these best practices.
//
// Basic Usage
//
// The fundamental interface is Logger. Loggers create log events from
// key/value data. The Logger interface has a single method, Log, which
// accepts a sequence of alternating key/value pairs, which this package names
// keyvals.
//
//     type Logger interface {
//         Log(keyvals ...interface{}) error
//     }
//
// Here is an example of a function using a Logger to create log events.
//
//     func RunTask(task Task, logger log.Logger) string {
//         logger.Log("taskID", task.ID, "event", "starting task")
//         ...
//         logger.Log("taskID", task.ID, "event", "task complete")
//     }
//
// The keys in the above example are "taskID" and "event". The values are
// task.ID, "starting task", and "task complete". Every key is followed
// immediately by its value.
//
// Keys are usually plain strings. Values may be any type that has a sensible
// encoding in the chosen log format. With structured logging it is a good
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Contextual Loggers
//
// A contextual logger stores keyvals that it includes in all log events.
// Building appropriate contextual loggers reduces repetition and aids
// consistency in the resulting log output. With, WithPrefix, and WithSuffix
// add context to a logger. We can use With to improve the RunTask example.
//
//     func RunTask(task Task, logger log.Logger) string {
//         logger = log.With(logger, "taskID", task.ID)
//         logger.Log("event", "starting task")
//         ...
//         taskHelper(task.Cmd, logger)
//         ...
//         logger.Log("event", "task complete")
//     }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. Passing the contextual logger to taskHelper
// enables each log event created by taskHelper to include the task.ID even
// though taskHelper does not have access to that value. Using contextual
// loggers this way simplifies producing log output that enables tracing the
// life cycle of individual tasks. (See the Contextual example for the full
// code of the above snippet.)
//
// Dynamic Contextual Values
//
// A Valuer function stored in a contextual logger generates a new value each
// time an event is logged. The Valuer example demonstrates how this feature
// works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
// DefaultCaller. A common logger initialization sequence that ensures all log
// entries contain a timestamp and source location looks like this:
//
//     logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
//     logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
// Applications with multiple goroutines want each log event written to the
// same logger to remain separate from other log events. Package log provides
// two simple solutions for concurrent safe logging.
//
// NewSyncWriter wraps an io.Writer and serializes each call to its Write
// method. Using a SyncWriter has the benefit that the smallest practical
// portion of the logging logic is performed within a mutex, but it requires
// the formatting Logger to make only one call to Write per log event.
//
// NewSyncLogger wraps any Logger and serializes each call to its Log method.
// Using a SyncLogger has the benefit that it guarantees each log event is
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
//     fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
//     logger := log.LoggerFunc(func(keyvals ...interface{}) error {
//         if err := fmtlogger.Log(keyvals...); err != nil {
//             panic(err)
//         }
//         return nil
//     })
package log
15 vendor/github.com/go-kit/kit/log/json_logger.go generated vendored Normal file
@@ -0,0 +1,15 @@
package log

import (
    "io"

    "github.com/go-kit/log"
)

// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
// single JSON object. Each log event produces no more than one call to
// w.Write. The passed Writer must be safe for concurrent use by multiple
// goroutines if the returned Logger will be used concurrently.
func NewJSONLogger(w io.Writer) Logger {
    return log.NewJSONLogger(w)
}
51 vendor/github.com/go-kit/kit/log/log.go generated vendored Normal file
@@ -0,0 +1,51 @@
package log

import (
    "github.com/go-kit/log"
)

// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies or retains any of its elements must make a copy first.
type Logger = log.Logger

// ErrMissingValue is appended to keyvals slices with odd length to substitute
// the missing value.
var ErrMissingValue = log.ErrMissingValue

// With returns a new contextual logger with keyvals prepended to those passed
// to calls to Log. If logger is also a contextual logger created by With,
// WithPrefix, or WithSuffix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
    return log.With(logger, keyvals...)
}

// WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
    return log.WithPrefix(logger, keyvals...)
}

// WithSuffix returns a new contextual logger with keyvals appended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithSuffix(logger Logger, keyvals ...interface{}) Logger {
    return log.WithSuffix(logger, keyvals...)
}

// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc = log.LoggerFunc
15 vendor/github.com/go-kit/kit/log/logfmt_logger.go generated vendored Normal file
@@ -0,0 +1,15 @@
package log

import (
    "io"

    "github.com/go-kit/log"
)

// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
// logfmt format. Each log event produces no more than one call to w.Write.
// The passed Writer must be safe for concurrent use by multiple goroutines if
// the returned Logger will be used concurrently.
func NewLogfmtLogger(w io.Writer) Logger {
    return log.NewLogfmtLogger(w)
}
8 vendor/github.com/go-kit/kit/log/nop_logger.go generated vendored Normal file
@@ -0,0 +1,8 @@
package log

import "github.com/go-kit/log"

// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger {
    return log.NewNopLogger()
}
54 vendor/github.com/go-kit/kit/log/stdlib.go generated vendored Normal file
@@ -0,0 +1,54 @@
package log

import (
    "io"

    "github.com/go-kit/log"
)

// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
// designed to be passed to a Go kit logger as the writer, for cases where
// it's necessary to redirect all Go kit log output to the stdlib logger.
//
// If you have any choice in the matter, you shouldn't use this. Prefer to
// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
type StdlibWriter = log.StdlibWriter

// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It will extract date/timestamps, filenames, and
// messages, and place them under relevant keys.
type StdlibAdapter = log.StdlibAdapter

// StdlibAdapterOption sets a parameter for the StdlibAdapter.
type StdlibAdapterOption = log.StdlibAdapterOption

// TimestampKey sets the key for the timestamp field. By default, it's "ts".
func TimestampKey(key string) StdlibAdapterOption {
    return log.TimestampKey(key)
}

// FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption {
    return log.FileKey(key)
}

// MessageKey sets the key for the actual log message. By default, it's "msg".
func MessageKey(key string) StdlibAdapterOption {
    return log.MessageKey(key)
}

// Prefix configures the adapter to parse a prefix from stdlib log events. If
// you provide a non-empty prefix to the stdlib logger, then you should provide
// that same prefix to the adapter via this option.
//
// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to
// true if you want to include the parsed prefix in the msg.
func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption {
    return log.Prefix(prefix, joinPrefixToMsg)
}

// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
// logger. It's designed to be passed to log.SetOutput.
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
    return log.NewStdlibAdapter(logger, options...)
}
37 vendor/github.com/go-kit/kit/log/sync.go generated vendored Normal file
@@ -0,0 +1,37 @@
package log

import (
    "io"

    "github.com/go-kit/log"
)

// SwapLogger wraps another logger that may be safely replaced while other
// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
// will discard all log events without error.
//
// SwapLogger serves well as a package global logger that can be changed by
// importers.
type SwapLogger = log.SwapLogger

// NewSyncWriter returns a new writer that is safe for concurrent use by
// multiple goroutines. Writes to the returned writer are passed on to w. If
// another write is already in progress, the calling goroutine blocks until
// the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
//     interface {
//         Fd() uintptr
//     }
func NewSyncWriter(w io.Writer) io.Writer {
    return log.NewSyncWriter(w)
}

// NewSyncLogger returns a logger that synchronizes concurrent use of the
// wrapped logger. When multiple goroutines use the SyncLogger concurrently
// only one goroutine will be allowed to log to the wrapped logger at a time.
// The other goroutines will block until the logger is available.
func NewSyncLogger(logger Logger) Logger {
    return log.NewSyncLogger(logger)
}
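
A small usage sketch of the two concurrency helpers above; NewSyncWriter suits loggers that emit one Write per event, while NewSyncLogger covers loggers that may write more than once:

```go
package main

import (
    "os"

    "github.com/go-kit/kit/log"
)

func main() {
    // NewSyncWriter: serialize Writes; cheapest when the logger performs
    // exactly one Write per event (true for the logfmt and JSON loggers).
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // NewSyncLogger: serialize whole Log calls; use when the wrapped
    // logger may perform multiple writes per log event.
    logger = log.NewSyncLogger(logger)

    logger.Log("msg", "safe for concurrent use")
}
```
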
52 vendor/github.com/go-kit/kit/log/value.go generated vendored Normal file
@@ -0,0 +1,52 @@
package log

import (
    "time"

    "github.com/go-kit/log"
)

// A Valuer generates a log value. When passed to With, WithPrefix, or
// WithSuffix in a value element (odd indexes), it represents a dynamic
// value which is re-evaluated with each log event.
type Valuer = log.Valuer

// Timestamp returns a timestamp Valuer. It invokes the t function to get the
// time; unless you are doing something tricky, pass time.Now.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer {
    return log.Timestamp(t)
}

// TimestampFormat returns a timestamp Valuer with a custom time format. It
// invokes the t function to get the time to format; unless you are doing
// something tricky, pass time.Now. The layout string is passed to
// Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
    return log.TimestampFormat(t, layout)
}

// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
func Caller(depth int) Valuer {
    return log.Caller(depth)
}

var (
    // DefaultTimestamp is a Valuer that returns the current wallclock time,
    // respecting time zones, when bound.
    DefaultTimestamp = log.DefaultTimestamp

    // DefaultTimestampUTC is a Valuer that returns the current time in UTC
    // when bound.
    DefaultTimestampUTC = log.DefaultTimestampUTC

    // DefaultCaller is a Valuer that returns the file and line where the Log
    // method was invoked. It can only be used with log.With.
    DefaultCaller = log.DefaultCaller
)
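
Beyond the built-in timestamp and caller valuers above, any function of the right shape works. A tiny sketch of a custom Valuer (the counter is illustrative):

```go
package main

import (
    "os"

    "github.com/go-kit/kit/log"
)

func main() {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    // A custom Valuer: re-evaluated on every Log call.
    count := 0
    counter := log.Valuer(func() interface{} {
        count++
        return count
    })

    logger = log.With(logger, "count", counter)
    logger.Log("msg", "first")  // count=1 msg=first
    logger.Log("msg", "second") // count=2 msg=second
}
```
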
63 vendor/github.com/prometheus/common/config/headers.go generated vendored
@@ -52,31 +52,7 @@ var reservedHeaders = map[string]struct{}{
 // Headers represents the configuration for HTTP headers.
 type Headers struct {
 	Headers map[string]Header `yaml:",inline"`
-}
-
-func (h Headers) MarshalJSON() ([]byte, error) {
-	// Inline the Headers map when serializing JSON because json encoder doesn't support "inline" directive.
-	return json.Marshal(h.Headers)
-}
-
-// SetDirectory make headers file relative to the configuration file.
-func (h *Headers) SetDirectory(dir string) {
-	if h == nil {
-		return
-	}
-	for _, h := range h.Headers {
-		h.SetDirectory(dir)
-	}
-}
-
-// Validate validates the Headers config.
-func (h *Headers) Validate() error {
-	for n := range h.Headers {
-		if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok {
-			return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n))
-		}
-	}
-	return nil
+	dir string
 }
 
 // Header represents the configuration for a single HTTP header.
@@ -86,11 +62,35 @@ type Header struct {
 	Files []string `yaml:"files,omitempty" json:"files,omitempty"`
 }
 
-// SetDirectory makes headers file relative to the configuration file.
-func (h *Header) SetDirectory(dir string) {
-	for i := range h.Files {
-		h.Files[i] = JoinDir(dir, h.Files[i])
+func (h Headers) MarshalJSON() ([]byte, error) {
+	// Inline the Headers map when serializing JSON because json encoder doesn't support "inline" directive.
+	return json.Marshal(h.Headers)
+}
+
+// SetDirectory records the directory to make headers file relative to the
+// configuration file.
+func (h *Headers) SetDirectory(dir string) {
+	if h == nil {
+		return
+	}
+	h.dir = dir
+}
+
+// Validate validates the Headers config.
+func (h *Headers) Validate() error {
+	for n, header := range h.Headers {
+		if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok {
+			return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n))
+		}
+		for _, v := range header.Files {
+			f := JoinDir(h.dir, v)
+			_, err := os.ReadFile(f)
+			if err != nil {
+				return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err)
+			}
+		}
 	}
+	return nil
 }
 
 // NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on
@@ -121,9 +121,10 @@ func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, err
 			req.Header.Add(n, string(v))
 		}
 		for _, v := range h.Files {
-			b, err := os.ReadFile(v)
+			f := JoinDir(rt.config.dir, v)
+			b, err := os.ReadFile(f)
 			if err != nil {
-				return nil, fmt.Errorf("unable to read headers file %s: %w", v, err)
+				return nil, fmt.Errorf("unable to read headers file %s: %w", f, err)
 			}
 			req.Header.Add(n, strings.TrimSpace(string(b)))
 		}
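
After this change, SetDirectory only records the base directory; header files are resolved against it at Validate and RoundTrip time. A hedged usage sketch (the header name, directory, and file are made up for illustration):

```go
package main

import (
    "log"

    "github.com/prometheus/common/config"
)

func main() {
    h := &config.Headers{
        Headers: map[string]config.Header{
            "X-Scope-OrgID": {Files: []string{"orgid.txt"}},
        },
    }
    // Record the config file's directory; files are joined to it lazily.
    h.SetDirectory("/etc/prometheus")
    // Validate now also checks that each referenced file is readable.
    if err := h.Validate(); err != nil {
        log.Fatal(err)
    }
}
```
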
4 vendor/github.com/prometheus/common/config/http_config.go generated vendored
@@ -828,7 +828,7 @@ type basicAuthRoundTripper struct {
 
 // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has
 // already been set.
-func NewBasicAuthRoundTripper(username, password SecretReader, rt http.RoundTripper) http.RoundTripper {
+func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper {
 	return &basicAuthRoundTripper{username, password, rt}
 }
 
@@ -964,7 +964,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
 	}
 
 	rt.mtx.Lock()
-	rt.lastSecret = newSecret
+	rt.lastSecret = secret
 	rt.lastRT.Source = source
 	if rt.client != nil {
 		rt.client.CloseIdleConnections()
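
The constructor change above is cosmetic (the expanded parameter list is equivalent Go). For reference, a hedged sketch of a call site; `config.NewInlineSecret` is assumed to exist in the vendored version as a SecretReader implementation:

```go
package main

import (
    "net/http"

    "github.com/prometheus/common/config"
)

func main() {
    // Both username and password are SecretReader values, read lazily
    // on each request rather than captured as plain strings.
    rt := config.NewBasicAuthRoundTripper(
        config.NewInlineSecret("scraper"),
        config.NewInlineSecret("s3cr3t"),
        http.DefaultTransport,
    )
    client := &http.Client{Transport: rt}
    _ = client
}
```
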
14 vendor/github.com/prometheus/common/expfmt/decode.go generated vendored
@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
 
 	mediatype, params, err := mime.ParseMediaType(ct)
 	if err != nil {
-		return FmtUnknown
+		return fmtUnknown
 	}
 
 	const textType = "text/plain"
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
 	switch mediatype {
 	case ProtoType:
 		if p, ok := params["proto"]; ok && p != ProtoProtocol {
-			return FmtUnknown
+			return fmtUnknown
 		}
 		if e, ok := params["encoding"]; ok && e != "delimited" {
-			return FmtUnknown
+			return fmtUnknown
 		}
-		return FmtProtoDelim
+		return fmtProtoDelim
 
 	case textType:
 		if v, ok := params["version"]; ok && v != TextVersion {
-			return FmtUnknown
+			return fmtUnknown
 		}
-		return FmtText
+		return fmtText
 	}
 
-	return FmtUnknown
+	return fmtUnknown
 }
 
 // NewDecoder returns a new decoder based on the given input format.
24 vendor/github.com/prometheus/common/expfmt/encode.go generated vendored
@@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return FmtProtoDelim + escapingScheme
+				return fmtProtoDelim + escapingScheme
 			case "text":
-				return FmtProtoText + escapingScheme
+				return fmtProtoText + escapingScheme
 			case "compact-text":
-				return FmtProtoCompact + escapingScheme
+				return fmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return FmtText + escapingScheme
+			return fmtText + escapingScheme
 		}
 	}
-	return FmtText + escapingScheme
+	return fmtText + escapingScheme
 }
 
 // NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return FmtProtoDelim + escapingScheme
+				return fmtProtoDelim + escapingScheme
 			case "text":
-				return FmtProtoText + escapingScheme
+				return fmtProtoText + escapingScheme
 			case "compact-text":
-				return FmtProtoCompact + escapingScheme
+				return fmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return FmtText + escapingScheme
+			return fmtText + escapingScheme
 		}
 		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
 			switch ver {
 			case OpenMetricsVersion_1_0_0:
-				return FmtOpenMetrics_1_0_0 + escapingScheme
+				return fmtOpenMetrics_1_0_0 + escapingScheme
 			default:
-				return FmtOpenMetrics_0_0_1 + escapingScheme
+				return fmtOpenMetrics_0_0_1 + escapingScheme
 			}
 		}
 	}
-	return FmtText + escapingScheme
+	return fmtText + escapingScheme
 }
 
 // NewEncoder returns a new encoder based on content type negotiation. All
76 vendor/github.com/prometheus/common/expfmt/expfmt.go generated vendored
@@ -32,31 +32,24 @@ type Format string
 // it on the wire, new content-type strings will have to be agreed upon and
 // added here.
 const (
-	TextVersion   = "0.0.4"
-	ProtoType     = `application/vnd.google.protobuf`
-	ProtoProtocol = `io.prometheus.client.MetricFamily`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
-	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+	TextVersion              = "0.0.4"
+	ProtoType                = `application/vnd.google.protobuf`
+	ProtoProtocol            = `io.prometheus.client.MetricFamily`
+	protoFmt                 = ProtoType + "; proto=" + ProtoProtocol + ";"
 	OpenMetricsType          = `application/openmetrics-text`
 	OpenMetricsVersion_0_0_1 = "0.0.1"
 	OpenMetricsVersion_1_0_0 = "1.0.0"
 
-	// The Content-Type values for the different wire protocols. Do not do direct
-	// comparisons to these constants, instead use the comparison functions.
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
-	FmtUnknown Format = `<unknown>`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
-	FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
-	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
-	FmtProtoText Format = ProtoFmt + ` encoding=text`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
-	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
-	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
-	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
-	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+	// The Content-Type values for the different wire protocols. Note that these
+	// values are now unexported. If code was relying on comparisons to these
+	// constants, instead use FormatType().
+	fmtUnknown           Format = `<unknown>`
+	fmtText              Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	fmtProtoDelim        Format = protoFmt + ` encoding=delimited`
+	fmtProtoText         Format = protoFmt + ` encoding=text`
+	fmtProtoCompact      Format = protoFmt + ` encoding=compact-text`
+	fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
 )
 
 const (
@@ -86,17 +79,17 @@ const (
 func NewFormat(t FormatType) Format {
 	switch t {
 	case TypeProtoCompact:
-		return FmtProtoCompact
+		return fmtProtoCompact
 	case TypeProtoDelim:
-		return FmtProtoDelim
+		return fmtProtoDelim
 	case TypeProtoText:
-		return FmtProtoText
+		return fmtProtoText
 	case TypeTextPlain:
-		return FmtText
+		return fmtText
 	case TypeOpenMetrics:
-		return FmtOpenMetrics_1_0_0
+		return fmtOpenMetrics_1_0_0
 	default:
-		return FmtUnknown
+		return fmtUnknown
 	}
 }
 
@@ -104,35 +97,12 @@ func NewFormat(t FormatType) Format {
 // specified version number.
 func NewOpenMetricsFormat(version string) (Format, error) {
 	if version == OpenMetricsVersion_0_0_1 {
-		return FmtOpenMetrics_0_0_1, nil
+		return fmtOpenMetrics_0_0_1, nil
 	}
 	if version == OpenMetricsVersion_1_0_0 {
-		return FmtOpenMetrics_1_0_0, nil
+		return fmtOpenMetrics_1_0_0, nil
 	}
-	return FmtUnknown, fmt.Errorf("unknown open metrics version string")
-}
-
-// WithEscapingScheme returns a copy of Format with the specified escaping
-// scheme appended to the end. If an escaping scheme already exists it is
-// removed.
-func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
-	var terms []string
-	for _, p := range strings.Split(string(f), ";") {
-		toks := strings.Split(p, "=")
-		if len(toks) != 2 {
-			trimmed := strings.TrimSpace(p)
-			if len(trimmed) > 0 {
-				terms = append(terms, trimmed)
-			}
-			continue
-		}
-		key := strings.TrimSpace(toks[0])
-		if key != model.EscapingKey {
-			terms = append(terms, strings.TrimSpace(p))
-		}
-	}
-	terms = append(terms, model.EscapingKey+"="+s.String())
-	return Format(strings.Join(terms, "; "))
+	return fmtUnknown, fmt.Errorf("unknown open metrics version string")
 }
 
 // FormatType deduces an overall FormatType for the given format.
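
Since the exported Fmt* constants become unexported in this change, code comparing formats directly should branch on FormatType() instead, as the new comment suggests. A brief sketch, assuming the vendored expfmt exposes Negotiate, FormatType, and NewFormat as shown in this diff:

```go
package main

import (
    "fmt"
    "net/http"

    "github.com/prometheus/common/expfmt"
)

func main() {
    h := http.Header{}
    h.Set("Accept", "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited")

    // Branch on FormatType instead of comparing against the now-unexported
    // fmt* constants.
    f := expfmt.Negotiate(h)
    if f.FormatType() == expfmt.TypeProtoDelim {
        fmt.Println("client negotiated delimited protobuf")
    }

    // Build a Content-Type value without referencing the constants.
    fmt.Println(string(expfmt.NewFormat(expfmt.TypeTextPlain)))
}
```
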
2 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go generated vendored
@@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
 	if name != "" {
 		// If the name does not pass the legacy validity check, we must put the
 		// metric name inside the braces, quoted.
-		if !model.IsValidLegacyMetricName(name) {
+		if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
 			metricInsideBraces = true
 			err := w.WriteByte(separator)
 			written++
4 vendor/github.com/prometheus/common/expfmt/text_create.go generated vendored
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
 	if name != "" {
 		// If the name does not pass the legacy validity check, we must put the
 		// metric name inside the braces.
-		if !model.IsValidLegacyMetricName(name) {
+		if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
 			metricInsideBraces = true
 			err := w.WriteByte(separator)
 			written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
 // writeName writes a string as-is if it complies with the legacy naming
 // scheme, or escapes it in double quotes if not.
 func writeName(w enhancedWriter, name string) (int, error) {
-	if model.IsValidLegacyMetricName(name) {
+	if model.IsValidLegacyMetricName(model.LabelValue(name)) {
 		return w.WriteString(name)
 	}
 	var written int
162 vendor/github.com/prometheus/common/expfmt/text_parse.go generated vendored
@@ -22,9 +22,9 @@ import (
 	"math"
 	"strconv"
 	"strings"
-	"unicode/utf8"
 
 	dto "github.com/prometheus/client_model/go"
+
 	"google.golang.org/protobuf/proto"
 
 	"github.com/prometheus/common/model"
@@ -60,7 +60,6 @@ type TextParser struct {
 	currentMF         *dto.MetricFamily
 	currentMetric     *dto.Metric
 	currentLabelPair  *dto.LabelPair
-	currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
 
 	// The remaining member variables are only used for summaries/histograms.
 	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -75,9 +74,6 @@ type TextParser struct {
 	// count and sum of that summary/histogram.
 	currentIsSummaryCount, currentIsSummarySum     bool
 	currentIsHistogramCount, currentIsHistogramSum bool
-	// These indicate if the metric name from the current line being parsed is inside
-	// braces and if that metric name was found respectively.
-	currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
 }
 
 // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -141,15 +137,12 @@ func (p *TextParser) reset(in io.Reader) {
 	}
 	p.currentQuantile = math.NaN()
 	p.currentBucket = math.NaN()
-	p.currentMF = nil
 }
 
 // startOfLine represents the state where the next byte read from p.buf is the
 // start of a line (or whitespace leading up to it).
 func (p *TextParser) startOfLine() stateFn {
 	p.lineCount++
-	p.currentMetricIsInsideBraces = false
-	p.currentMetricInsideBracesIsPresent = false
 	if p.skipBlankTab(); p.err != nil {
 		// This is the only place that we expect to see io.EOF,
 		// which is not an error but the signal that we are done.
@@ -165,9 +158,6 @@ func (p *TextParser) startOfLine() stateFn {
 		return p.startComment
 	case '\n':
 		return p.startOfLine // Empty line, start the next one.
-	case '{':
-		p.currentMetricIsInsideBraces = true
-		return p.readingLabels
 	}
 	return p.readingMetricName
 }
@@ -285,8 +275,6 @@ func (p *TextParser) startLabelName() stateFn {
 		return nil // Unexpected end of input.
 	}
 	if p.currentByte == '}' {
-		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
-		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
@@ -299,45 +287,6 @@ func (p *TextParser) startLabelName() stateFn {
 		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
 		return nil
 	}
-	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	if p.currentByte != '=' {
-		if p.currentMetricIsInsideBraces {
-			if p.currentMetricInsideBracesIsPresent {
-				p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
-				return nil
-			}
-			switch p.currentByte {
-			case ',':
-				p.setOrCreateCurrentMF()
-				if p.currentMF.Type == nil {
-					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
-				}
-				p.currentMetric = &dto.Metric{}
-				p.currentMetricInsideBracesIsPresent = true
-				return p.startLabelName
-			case '}':
-				p.setOrCreateCurrentMF()
-				if p.currentMF.Type == nil {
-					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
-				}
-				p.currentMetric = &dto.Metric{}
-				p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
-				p.currentLabelPairs = nil
-				if p.skipBlankTab(); p.err != nil {
-					return nil // Unexpected end of input.
-				}
-				return p.readingValue
-			default:
-				p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
-				return nil
-			}
-		}
-		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
-		p.currentLabelPairs = nil
-		return nil
-	}
 	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
 	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
 		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -347,17 +296,23 @@ func (p *TextParser) startLabelName() stateFn {
 	// labels to 'real' labels.
 	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
 		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
-		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
 	}
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '=' {
+		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+		return nil
+	}
 	// Check for duplicate label names.
 	labels := make(map[string]struct{})
-	for _, l := range p.currentLabelPairs {
+	for _, l := range p.currentMetric.Label {
 		lName := l.GetName()
 		if _, exists := labels[lName]; !exists {
 			labels[lName] = struct{}{}
 		} else {
 			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
-			p.currentLabelPairs = nil
 			return nil
 		}
 	}
@@ -390,7 +345,6 @@ func (p *TextParser) startLabelValue() stateFn {
 		if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 			// Create a more helpful error message.
 			p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
-			p.currentLabelPairs = nil
 			return nil
 		}
 	} else {
@@ -417,19 +371,12 @@ func (p *TextParser) startLabelValue() stateFn {
 		return p.startLabelName
 
 	case '}':
-		if p.currentMF == nil {
-			p.parseError("invalid metric name")
-			return nil
-		}
-		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
-		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
 		return p.readingValue
 	default:
 		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
-		p.currentLabelPairs = nil
 		return nil
 	}
 }
@@ -638,8 +585,6 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 			p.currentToken.WriteByte(p.currentByte)
 		case 'n':
 			p.currentToken.WriteByte('\n')
-		case '"':
-			p.currentToken.WriteByte('"')
 		default:
 			p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
 			return
@@ -665,45 +610,13 @@
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsMetricName() {
 	p.currentToken.Reset()
-	// A UTF-8 metric name must be quoted and may have escaped characters.
-	quoted := false
-	escaped := false
 	if !isValidMetricNameStart(p.currentByte) {
 		return
 	}
-	for p.err == nil {
-		if escaped {
-			switch p.currentByte {
-			case '\\':
-				p.currentToken.WriteByte(p.currentByte)
-			case 'n':
-				p.currentToken.WriteByte('\n')
-			case '"':
-				p.currentToken.WriteByte('"')
-			default:
-				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
-				return
-			}
-			escaped = false
-		} else {
-			switch p.currentByte {
-			case '"':
-				quoted = !quoted
-				if !quoted {
-					p.currentByte, p.err = p.buf.ReadByte()
-					return
-				}
-			case '\n':
-				p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
-				return
-			case '\\':
-				escaped = true
-			default:
-				p.currentToken.WriteByte(p.currentByte)
-			}
-		}
+	for {
 		p.currentToken.WriteByte(p.currentByte)
 		p.currentByte, p.err = p.buf.ReadByte()
-		if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
+		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
 			return
 		}
 	}
@@ -715,45 +628,13 @@
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsLabelName() {
 	p.currentToken.Reset()
-	// A UTF-8 label name must be quoted and may have escaped characters.
-	quoted := false
-	escaped := false
 	if !isValidLabelNameStart(p.currentByte) {
 		return
 	}
-	for p.err == nil {
-		if escaped {
-			switch p.currentByte {
-			case '\\':
-				p.currentToken.WriteByte(p.currentByte)
-			case 'n':
-				p.currentToken.WriteByte('\n')
-			case '"':
-				p.currentToken.WriteByte('"')
-			default:
-				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
-				return
-			}
-			escaped = false
-		} else {
-			switch p.currentByte {
-			case '"':
-				quoted = !quoted
-				if !quoted {
-					p.currentByte, p.err = p.buf.ReadByte()
-					return
-				}
-			case '\n':
-				p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
-				return
-			case '\\':
-				escaped = true
-			default:
-				p.currentToken.WriteByte(p.currentByte)
-			}
-		}
+	for {
 		p.currentToken.WriteByte(p.currentByte)
 		p.currentByte, p.err = p.buf.ReadByte()
-		if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
+		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
 			return
 		}
 	}
@@ -779,7 +660,6 @@ func (p *TextParser) readTokenAsLabelValue() {
 			p.currentToken.WriteByte('\n')
 		default:
 			p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
-			p.currentLabelPairs = nil
 			return
 		}
 		escaped = false
@@ -838,19 +718,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
 }
 
 func isValidLabelNameStart(b byte) bool {
-	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
}
 
-func isValidLabelNameContinuation(b byte, quoted bool) bool {
-	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
+func isValidLabelNameContinuation(b byte) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
 }
 
 func isValidMetricNameStart(b byte) bool {
 	return isValidLabelNameStart(b) || b == ':'
 }
 
-func isValidMetricNameContinuation(b byte, quoted bool) bool {
-	return isValidLabelNameContinuation(b, quoted) || b == ':'
+func isValidMetricNameContinuation(b byte) bool {
+	return isValidLabelNameContinuation(b) || b == ':'
 }
 
 func isBlankOrTab(b byte) bool {
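
After this change the parser accepts only classic, unquoted names; quoted UTF-8 names such as `{"my.metric"}` are no longer recognized. A small sketch of feeding the classic text format through the parser, using the TextToMetricFamilies entry point shown above:

```go
package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/common/expfmt"
)

func main() {
    in := `# TYPE http_requests_total counter
http_requests_total{method="post"} 1027
`
    var p expfmt.TextParser
    mfs, err := p.TextToMetricFamilies(strings.NewReader(in))
    if err != nil {
        panic(err)
    }
    fmt.Println(len(mfs)) // 1 metric family parsed
}
```
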
27 vendor/github.com/prometheus/common/model/labels.go generated vendored
@@ -97,35 +97,26 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 // therewith.
 type LabelName string
 
-// IsValid returns true iff the name matches the pattern of LabelNameRE when
-// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
-// NameValidationScheme is set to UTF8Validation.
+// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
+// names, and iff it's valid UTF-8 if NameValidationScheme is set to
+// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
+// check but a much faster hardcoded implementation.
 func (ln LabelName) IsValid() bool {
 	if len(ln) == 0 {
 		return false
 	}
 	switch NameValidationScheme {
 	case LegacyValidation:
-		return ln.IsValidLegacy()
+		for i, b := range ln {
+			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+				return false
+			}
+		}
 	case UTF8Validation:
 		return utf8.ValidString(string(ln))
 	default:
 		panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
 	}
-}
-
-// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
-// legacy names. It does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
-func (ln LabelName) IsValidLegacy() bool {
-	if len(ln) == 0 {
-		return false
-	}
-	for i, b := range ln {
-		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-			return false
-		}
-	}
 	return true
 }
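
A short sketch of how the validation-scheme switch above behaves from caller code; the names follow this diff's vendored prometheus/common API:

```go
package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    name := model.LabelName("label.with.dots")

    // The legacy scheme rejects dots; UTF-8 accepts any valid UTF-8 string.
    model.NameValidationScheme = model.LegacyValidation
    fmt.Println(name.IsValid()) // false

    model.NameValidationScheme = model.UTF8Validation
    fmt.Println(name.IsValid()) // true
}
```
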
31 vendor/github.com/prometheus/common/model/metric.go generated vendored
@@ -34,13 +34,10 @@ var (
 	// goroutines are started.
 	NameValidationScheme = LegacyValidation
 
-	// NameEscapingScheme defines the default way that names will be escaped when
-	// presented to systems that do not support UTF-8 names. If the Content-Type
-	// "escaping" term is specified, that will override this value.
-	// NameEscapingScheme should not be set to the NoEscaping value. That string
-	// is used in content negotiation to indicate that a system supports UTF-8 and
-	// has that feature enabled.
-	NameEscapingScheme = UnderscoreEscaping
+	// NameEscapingScheme defines the default way that names will be
+	// escaped when presented to systems that do not support UTF-8 names. If the
+	// Content-Type "escaping" term is specified, that will override this value.
+	NameEscapingScheme = ValueEncodingEscaping
 )
 
 // ValidationScheme is a Go enum for determining how metric and label names will
@@ -164,7 +161,7 @@ func (m Metric) FastFingerprint() Fingerprint {
 func IsValidMetricName(n LabelValue) bool {
 	switch NameValidationScheme {
 	case LegacyValidation:
-		return IsValidLegacyMetricName(string(n))
+		return IsValidLegacyMetricName(n)
 	case UTF8Validation:
 		if len(n) == 0 {
 			return false
@@ -179,7 +176,7 @@ func IsValidMetricName(n LabelValue) bool {
 // legacy validation scheme regardless of the value of NameValidationScheme.
 // This function, however, does not use MetricNameRE for the check but a much
 // faster hardcoded implementation.
-func IsValidLegacyMetricName(n string) bool {
+func IsValidLegacyMetricName(n LabelValue) bool {
 	if len(n) == 0 {
 		return false
 	}
@@ -211,7 +208,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 	}
 
 	// If the name is nil, copy as-is, don't try to escape.
-	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
+	if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
 		out.Name = v.Name
 	} else {
 		out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -233,7 +230,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 
 	for _, l := range m.Label {
 		if l.GetName() == MetricNameLabel {
-			if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
+			if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
 				escaped.Label = append(escaped.Label, l)
 				continue
 			}
@@ -243,7 +240,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 			})
 			continue
 		}
-		if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
+		if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
 			escaped.Label = append(escaped.Label, l)
 			continue
 		}
@@ -259,10 +256,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 
 func metricNeedsEscaping(m *dto.Metric) bool {
 	for _, l := range m.Label {
-		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
 			return true
 		}
-		if !IsValidLegacyMetricName(l.GetName()) {
+		if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
 			return true
 		}
 	}
@@ -286,7 +283,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 	case NoEscaping:
 		return name
 	case UnderscoreEscaping:
-		if IsValidLegacyMetricName(name) {
+		if IsValidLegacyMetricName(LabelValue(name)) {
 			return name
 		}
 		for i, b := range name {
@@ -312,7 +309,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 		}
 		return escaped.String()
 	case ValueEncodingEscaping:
-		if IsValidLegacyMetricName(name) {
+		if IsValidLegacyMetricName(LabelValue(name)) {
 			return name
 		}
 		escaped.WriteString("U__")
@@ -455,6 +452,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
 	case EscapeValues:
 		return ValueEncodingEscaping, nil
 	default:
-		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+		return NoEscaping, fmt.Errorf("unknown format scheme " + s)
 	}
 }
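
The signature change above means callers now pass a model.LabelValue rather than a plain string, so call sites must convert. A minimal sketch:

```go
package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    // After this change the argument is model.LabelValue, so plain strings
    // are converted at the call site.
    fmt.Println(model.IsValidLegacyMetricName(model.LabelValue("http_requests_total"))) // true
    fmt.Println(model.IsValidLegacyMetricName(model.LabelValue("http.requests.total"))) // false: dots need escaping
}
```
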
54 vendor/github.com/prometheus/prometheus/config/config.go generated vendored
@@ -180,7 +180,6 @@ var (
 	// DefaultRemoteWriteConfig is the default remote write configuration.
 	DefaultRemoteWriteConfig = RemoteWriteConfig{
 		RemoteTimeout:    model.Duration(30 * time.Second),
-		ProtobufMessage:  RemoteWriteProtoMsgV1,
 		QueueConfig:      DefaultQueueConfig,
 		MetadataConfig:   DefaultMetadataConfig,
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
@@ -280,7 +279,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 
 	jobNames := map[string]string{}
 	for i, scfg := range c.ScrapeConfigs {
-		// We do these checks for library users that would not call validate in
+		// We do these checks for library users that would not call Validate in
 		// Unmarshal.
 		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return nil, err
@@ -1056,50 +1055,6 @@ func CheckTargetAddress(address model.LabelValue) error {
 	return nil
 }
 
-// RemoteWriteProtoMsg represents the known protobuf message for the remote write
-// 1.0 and 2.0 specs.
-type RemoteWriteProtoMsg string
-
-// Validate returns error if the given reference for the protobuf message is not supported.
-func (s RemoteWriteProtoMsg) Validate() error {
-	switch s {
-	case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
-		return nil
-	default:
-		return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
-	}
-}
-
-type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
-
-func (m RemoteWriteProtoMsgs) Strings() []string {
-	ret := make([]string, 0, len(m))
-	for _, typ := range m {
-		ret = append(ret, string(typ))
-	}
-	return ret
-}
-
-func (m RemoteWriteProtoMsgs) String() string {
-	return strings.Join(m.Strings(), ", ")
-}
-
-var (
-	// RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
-	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
-	// which will eventually be deprecated.
-	//
-	// NOTE: This string is used for both HTTP header values and config value, so don't change
-	// this reference.
-	RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
-	// RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
-	// message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
-	//
-	// NOTE: This string is used for both HTTP header values and config value, so don't change
-	// this reference.
-	RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
-)
-
 // RemoteWriteConfig is the configuration for writing to remote storage.
 type RemoteWriteConfig struct {
 	URL *config.URL `yaml:"url"`
@@ -1109,9 +1064,6 @@ type RemoteWriteConfig struct {
 	Name                 string `yaml:"name,omitempty"`
 	SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
 	SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
-	// ProtobufMessage specifies the protobuf message to use against the remote
-	// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
-	ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -1146,10 +1098,6 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		return err
 	}
 
-	if err := c.ProtobufMessage.Validate(); err != nil {
-		return fmt.Errorf("invalid protobuf_message value: %w", err)
-	}
-
 	// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
 	// We cannot make it a pointer as the parser panics for inlined pointer structs.
 	// Thus we just do its validation here.
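The 44 removed lines drop remote-write 2.0 message selection. The pattern they used is a plain typed-string enum validated at unmarshal time; a self-contained sketch of that pattern, with the names taken from the removed lines and the rest assumed:

package main

import "fmt"

type RemoteWriteProtoMsg string

const (
	RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
	RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
)

// Validate rejects anything except the two known messages, mirroring the
// removed method (simplified error text).
func (s RemoteWriteProtoMsg) Validate() error {
	switch s {
	case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
		return nil
	default:
		return fmt.Errorf("unknown remote write protobuf message %v", s)
	}
}

func main() {
	fmt.Println(RemoteWriteProtoMsg("prometheus.WriteRequest").Validate()) // <nil>
	fmt.Println(RemoteWriteProtoMsg("bogus").Validate())                   // error
}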
10  vendor/github.com/prometheus/prometheus/discovery/manager.go  (generated, vendored)
@@ -120,16 +120,6 @@ func Name(n string) func(*Manager) {
 	}
 }
 
-// Updatert sets the updatert of the manager.
-// Used to speed up tests.
-func Updatert(u time.Duration) func(*Manager) {
-	return func(m *Manager) {
-		m.mtx.Lock()
-		defer m.mtx.Unlock()
-		m.updatert = u
-	}
-}
-
 // HTTPClientOptions sets the list of HTTP client options to expose to
 // Discoverers. It is up to Discoverers to choose to use the options provided.
 func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
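Name, the removed Updatert, and HTTPClientOptions all follow Go's functional-option pattern: each returns a func(*Manager) that the constructor applies in order. A minimal sketch with a stand-in Manager (field names and the default value are assumptions, not the vendored type):

package main

import (
	"fmt"
	"time"
)

type Manager struct {
	name     string
	updatert time.Duration // refresh interval; shortened in tests
}

func Name(n string) func(*Manager) {
	return func(m *Manager) { m.name = n }
}

func Updatert(u time.Duration) func(*Manager) {
	return func(m *Manager) { m.updatert = u }
}

// NewManager applies the options in order over assumed defaults.
func NewManager(opts ...func(*Manager)) *Manager {
	m := &Manager{updatert: 5 * time.Second}
	for _, o := range opts {
		o(m)
	}
	return m
}

func main() {
	m := NewManager(Name("scrape"), Updatert(100*time.Millisecond))
	fmt.Println(m.name, m.updatert) // scrape 100ms
}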
322  vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go  (generated, vendored)
@@ -30,12 +30,11 @@ import (
 type FloatHistogram struct {
 	// Counter reset information.
 	CounterResetHint CounterResetHint
-	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
-	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
-	// each case, and then each power of two is divided into 2^n logarithmic buckets.
-	// Or in other words, each bucket boundary is the previous boundary times
-	// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
-	// the CustomValues field.
+	// Currently valid schema numbers are -4 <= n <= 8. They are all for
+	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets. Or
+	// in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n).
 	Schema int32
 	// Width of the zero bucket.
 	ZeroThreshold float64
@@ -50,16 +49,6 @@ type FloatHistogram struct {
 	// Observation counts in buckets. Each represents an absolute count and
 	// must be zero or positive.
 	PositiveBuckets, NegativeBuckets []float64
-	// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
-	// This slice is interned, to be treated as immutable and copied by reference.
-	// These numbers should be strictly increasing. This field is only used when the
-	// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
-	// and NegativeBuckets fields are not used in that case.
-	CustomValues []float64
 }
 
-func (h *FloatHistogram) UsesCustomBuckets() bool {
-	return IsCustomBucketsSchema(h.Schema)
-}
-
 // Copy returns a deep copy of the Histogram.
@@ -67,37 +56,28 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
 	c := FloatHistogram{
 		CounterResetHint: h.CounterResetHint,
 		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        h.ZeroCount,
 		Count:            h.Count,
 		Sum:              h.Sum,
 	}
 
-	if h.UsesCustomBuckets() {
-		if len(h.CustomValues) != 0 {
-			c.CustomValues = make([]float64, len(h.CustomValues))
-			copy(c.CustomValues, h.CustomValues)
-		}
-	} else {
-		c.ZeroThreshold = h.ZeroThreshold
-		c.ZeroCount = h.ZeroCount
-
-		if len(h.NegativeSpans) != 0 {
-			c.NegativeSpans = make([]Span, len(h.NegativeSpans))
-			copy(c.NegativeSpans, h.NegativeSpans)
-		}
-		if len(h.NegativeBuckets) != 0 {
-			c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
-			copy(c.NegativeBuckets, h.NegativeBuckets)
-		}
-	}
-
 	if len(h.PositiveSpans) != 0 {
 		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
 		copy(c.PositiveSpans, h.PositiveSpans)
 	}
+	if len(h.NegativeSpans) != 0 {
+		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
+		copy(c.NegativeSpans, h.NegativeSpans)
+	}
 	if len(h.PositiveBuckets) != 0 {
 		c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
 		copy(c.PositiveBuckets, h.PositiveBuckets)
 	}
+	if len(h.NegativeBuckets) != 0 {
+		c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
+		copy(c.NegativeBuckets, h.NegativeBuckets)
+	}
 
 	return &c
 }
@@ -107,53 +87,32 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
 func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
 	to.CounterResetHint = h.CounterResetHint
 	to.Schema = h.Schema
+	to.ZeroThreshold = h.ZeroThreshold
+	to.ZeroCount = h.ZeroCount
 	to.Count = h.Count
 	to.Sum = h.Sum
 
-	if h.UsesCustomBuckets() {
-		to.ZeroThreshold = 0
-		to.ZeroCount = 0
-
-		to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
-		to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
-
-		to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
-		copy(to.CustomValues, h.CustomValues)
-	} else {
-		to.ZeroThreshold = h.ZeroThreshold
-		to.ZeroCount = h.ZeroCount
-
-		to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
-		copy(to.NegativeSpans, h.NegativeSpans)
-
-		to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
-		copy(to.NegativeBuckets, h.NegativeBuckets)
-
-		to.CustomValues = clearIfNotNil(to.CustomValues)
-	}
-
 	to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
 	copy(to.PositiveSpans, h.PositiveSpans)
 
+	to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
+	copy(to.NegativeSpans, h.NegativeSpans)
+
 	to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
 	copy(to.PositiveBuckets, h.PositiveBuckets)
+
+	to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
+	copy(to.NegativeBuckets, h.NegativeBuckets)
 }
 
 // CopyToSchema works like Copy, but the returned deep copy has the provided
 // target schema, which must be ≤ the original schema (i.e. it must have a lower
-// resolution). This method panics if a custom buckets schema is used in the
-// receiving FloatHistogram or as the provided targetSchema.
+// resolution).
 func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
 	if targetSchema == h.Schema {
 		// Fast path.
 		return h.Copy()
 	}
-	if h.UsesCustomBuckets() {
-		panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
-	}
-	if IsCustomBucketsSchema(targetSchema) {
-		panic("cannot reduce resolution to custom buckets schema")
-	}
 	if targetSchema > h.Schema {
 		panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
 	}
@@ -226,9 +185,6 @@ func (h *FloatHistogram) TestExpression() string {
 	if m.ZeroThreshold != 0 {
 		res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
 	}
-	if m.UsesCustomBuckets() {
-		res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues))
-	}
 
 	addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
 		if len(spans) > 1 {
@@ -254,18 +210,14 @@ func (h *FloatHistogram) TestExpression() string {
 	return "{{" + strings.Join(res, " ") + "}}"
 }
 
-// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
+// ZeroBucket returns the zero bucket.
 func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
-	if h.UsesCustomBuckets() {
-		panic("histograms with custom buckets have no zero bucket")
-	}
 	return Bucket[float64]{
 		Lower:          -h.ZeroThreshold,
 		Upper:          h.ZeroThreshold,
 		LowerInclusive: true,
 		UpperInclusive: true,
 		Count:          h.ZeroCount,
 		// Index is irrelevant for the zero bucket.
 	}
 }
@@ -311,18 +263,9 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
 //
 // The method reconciles differences in the zero threshold and in the schema, and
 // changes them if needed. The other histogram will not be modified in any case.
-// Adding is currently only supported between 2 exponential histograms, or between
-// 2 custom buckets histograms with the exact same custom bounds.
 //
 // This method returns a pointer to the receiving histogram for convenience.
-func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
-	if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
-		return nil, ErrHistogramsIncompatibleSchema
-	}
-	if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
-		return nil, ErrHistogramsIncompatibleBounds
-	}
-
+func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
 	switch {
 	case other.CounterResetHint == h.CounterResetHint:
 		// Adding apples to apples, all good. No need to change anything.
@@ -347,28 +290,19 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
 		// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
 	}
 
-	if !h.UsesCustomBuckets() {
-		otherZeroCount := h.reconcileZeroBuckets(other)
-		h.ZeroCount += otherZeroCount
-	}
+	otherZeroCount := h.reconcileZeroBuckets(other)
+	h.ZeroCount += otherZeroCount
 	h.Count += other.Count
 	h.Sum += other.Sum
 
 	var (
-		hPositiveSpans   = h.PositiveSpans
-		hPositiveBuckets = h.PositiveBuckets
+		hPositiveSpans       = h.PositiveSpans
+		hPositiveBuckets     = h.PositiveBuckets
+		hNegativeSpans       = h.NegativeSpans
+		hNegativeBuckets     = h.NegativeBuckets
 
 		otherPositiveSpans   = other.PositiveSpans
 		otherPositiveBuckets = other.PositiveBuckets
-	)
-
-	if h.UsesCustomBuckets() {
-		h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
-		return h, nil
-	}
-
-	var (
-		hNegativeSpans   = h.NegativeSpans
-		hNegativeBuckets = h.NegativeBuckets
 		otherNegativeSpans   = other.NegativeSpans
 		otherNegativeBuckets = other.NegativeBuckets
 	)
@@ -387,40 +321,24 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
 	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
 	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
 
-	return h, nil
+	return h
 }
 
 // Sub works like Add but subtracts the other histogram.
-func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
-	if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
-		return nil, ErrHistogramsIncompatibleSchema
-	}
-	if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
-		return nil, ErrHistogramsIncompatibleBounds
-	}
-
-	if !h.UsesCustomBuckets() {
-		otherZeroCount := h.reconcileZeroBuckets(other)
-		h.ZeroCount -= otherZeroCount
-	}
+func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
+	otherZeroCount := h.reconcileZeroBuckets(other)
+	h.ZeroCount -= otherZeroCount
 	h.Count -= other.Count
 	h.Sum -= other.Sum
 
 	var (
-		hPositiveSpans   = h.PositiveSpans
-		hPositiveBuckets = h.PositiveBuckets
+		hPositiveSpans       = h.PositiveSpans
+		hPositiveBuckets     = h.PositiveBuckets
+		hNegativeSpans       = h.NegativeSpans
+		hNegativeBuckets     = h.NegativeBuckets
 
 		otherPositiveSpans   = other.PositiveSpans
 		otherPositiveBuckets = other.PositiveBuckets
-	)
-
-	if h.UsesCustomBuckets() {
-		h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
-		return h, nil
-	}
-
-	var (
-		hNegativeSpans   = h.NegativeSpans
-		hNegativeBuckets = h.NegativeBuckets
 		otherNegativeSpans   = other.NegativeSpans
 		otherNegativeBuckets = other.NegativeBuckets
 	)
@@ -438,7 +356,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
 	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
 	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
 
-	return h, nil
+	return h
 }
 
 // Equals returns true if the given float histogram matches exactly.
@@ -447,42 +365,29 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
 // but they must represent the same bucket layout to match.
 // Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
 // because this method is about data equality rather than mathematical equality.
-// We ignore fields that are not used based on the exponential / custom buckets schema,
-// but check fields where differences may cause unintended behaviour even if they are not
-// supposed to be used according to the schema.
 func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
 	if h2 == nil {
 		return false
 	}
 
-	if h.Schema != h2.Schema ||
+	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
+		math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
 		math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
 		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
 		return false
 	}
 
-	if h.UsesCustomBuckets() {
-		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
-			return false
-		}
-	}
-
-	if h.ZeroThreshold != h2.ZeroThreshold ||
-		math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
-		return false
-	}
-
-	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
-		return false
-	}
-	if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
-		return false
-	}
-
 	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
 		return false
 	}
-	if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
-		return false
-	}
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+
+	if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
+		return false
+	}
+	if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
 
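Both sides of the Equals hunk compare Sum, Count and ZeroCount via math.Float64bits rather than ==, because data equality must treat a NaN as equal to an identical NaN, which plain float comparison never does. A standalone illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	a, b := math.NaN(), math.NaN()
	fmt.Println(a == b)                                      // false: NaN != NaN under ==
	fmt.Println(math.Float64bits(a) == math.Float64bits(b)) // true: identical bit patterns
}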
@@ -498,7 +403,6 @@ func (h *FloatHistogram) Size() int {
 	negSpanSize := len(h.NegativeSpans) * 8     // 8 bytes (int32 + uint32).
 	posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
 	negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
-	customBoundSize := len(h.CustomValues) * 8  // 8 bytes (float64).
 
 	// Total size of the struct.
@@ -513,10 +417,9 @@ func (h *FloatHistogram) Size() int {
 	// fh.NegativeSpans is 24 bytes.
 	// fh.PositiveBuckets is 24 bytes.
 	// fh.NegativeBuckets is 24 bytes.
-	// fh.CustomValues is 24 bytes.
-	structSize := 168
+	structSize := 144
 
-	return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
+	return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize
 }
 
 // Compact eliminates empty buckets at the beginning and end of each span, then
@@ -601,12 +504,6 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
 	if h.Count < previous.Count {
 		return true
 	}
-	if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
-		// Mark that something has changed or that the application has been restarted. However, this does
-		// not matter so much since the change in schema will be handled directly in the chunks and PromQL
-		// functions.
-		return true
-	}
 	if h.Schema > previous.Schema {
 		return true
 	}
@@ -712,7 +609,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
 // positive buckets in descending order (starting at the highest bucket and
 // going down towards the zero bucket).
 func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
-	it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
+	it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
 	return &it
 }
 
@@ -720,7 +617,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64]
 // negative buckets in ascending order (starting at the lowest bucket and going
 // up towards the zero bucket).
 func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
-	it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
+	it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
 	return &it
 }
 
@@ -732,7 +629,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
 func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
 	return &allFloatBucketIterator{
 		h:         h,
-		leftIter:  newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
+		leftIter:  newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false),
 		rightIter: h.floatBucketIterator(true, 0, h.Schema),
 		state:     -1,
 	}
@@ -746,52 +643,30 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
 func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
 	return &allFloatBucketIterator{
 		h:         h,
-		leftIter:  newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues),
+		leftIter:  newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true),
 		rightIter: h.floatBucketIterator(false, 0, h.Schema),
 		state:     -1,
 	}
 }
 
 // Validate validates consistency between span and bucket slices. Also, buckets are checked
-// against negative values. We check to make sure there are no unexpected fields or field values
-// based on the exponential / custom buckets schema.
+// against negative values.
 // We do not check for h.Count being at least as large as the sum of the
 // counts in the buckets because floating point precision issues can
 // create false positives here.
 func (h *FloatHistogram) Validate() error {
-	var nCount, pCount float64
-	if h.UsesCustomBuckets() {
-		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
-			return fmt.Errorf("custom buckets: %w", err)
-		}
-		if h.ZeroCount != 0 {
-			return fmt.Errorf("custom buckets: must have zero count of 0")
-		}
-		if h.ZeroThreshold != 0 {
-			return fmt.Errorf("custom buckets: must have zero threshold of 0")
-		}
-		if len(h.NegativeSpans) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative spans")
-		}
-		if len(h.NegativeBuckets) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative buckets")
-		}
-	} else {
-		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
-			return fmt.Errorf("positive side: %w", err)
-		}
-		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
-			return fmt.Errorf("negative side: %w", err)
-		}
-		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
-		if err != nil {
-			return fmt.Errorf("negative side: %w", err)
-		}
-		if h.CustomValues != nil {
-			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
-		}
-	}
-	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
+	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+		return fmt.Errorf("negative side: %w", err)
+	}
+	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+		return fmt.Errorf("positive side: %w", err)
+	}
+	var nCount, pCount float64
+	err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
+	if err != nil {
+		return fmt.Errorf("negative side: %w", err)
+	}
+	err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
 	if err != nil {
 		return fmt.Errorf("positive side: %w", err)
 	}
@@ -915,25 +790,17 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
 // If positive is true, the returned iterator iterates through the positive
 // buckets, otherwise through the negative buckets.
 //
-// Only for exponential schemas, if absoluteStartValue is < the lowest absolute
-// value of any upper bucket boundary, the iterator starts with the first bucket.
-// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
-// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
-// no buckets are skipped.
+// If absoluteStartValue is < the lowest absolute value of any upper bucket
+// boundary, the iterator starts with the first bucket. Otherwise, it will skip
+// all buckets with an absolute value of their upper boundary ≤
+// absoluteStartValue.
 //
 // targetSchema must be ≤ the schema of FloatHistogram (and of course within the
 // legal values for schemas in general). The buckets are merged to match the
-// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
-// schemas cannot be merged with other schemas.
+// targetSchema prior to iterating (without mutating FloatHistogram).
 func (h *FloatHistogram) floatBucketIterator(
 	positive bool, absoluteStartValue float64, targetSchema int32,
 ) floatBucketIterator {
-	if h.UsesCustomBuckets() && targetSchema != h.Schema {
-		panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
-	}
-	if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
-		panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
-	}
 	if targetSchema > h.Schema {
 		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
 	}
@@ -949,7 +816,6 @@ func (h *FloatHistogram) floatBucketIterator(
 	if positive {
 		i.spans = h.PositiveSpans
 		i.buckets = h.PositiveBuckets
-		i.customValues = h.CustomValues
 	} else {
 		i.spans = h.NegativeSpans
 		i.buckets = h.NegativeBuckets
@@ -959,15 +825,14 @@ func (h *FloatHistogram) floatBucketIterator(
 
 // reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
 func newReverseFloatBucketIterator(
-	spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
+	spans []Span, buckets []float64, schema int32, positive bool,
 ) reverseFloatBucketIterator {
 	r := reverseFloatBucketIterator{
 		baseBucketIterator: baseBucketIterator[float64, float64]{
-			schema:       schema,
-			spans:        spans,
-			buckets:      buckets,
-			positive:     positive,
-			customValues: customValues,
+			schema:   schema,
+			spans:    spans,
+			buckets:  buckets,
+			positive: positive,
 		},
 	}
 
@@ -1081,9 +946,9 @@ func (i *floatBucketIterator) Next() bool {
 		}
 	}
 
-	// Skip buckets before absoluteStartValue for exponential schemas.
+	// Skip buckets before absoluteStartValue.
 	// TODO(beorn7): Maybe do something more efficient than this recursive call.
-	if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
+	if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
 		return i.Next()
 	}
 	i.boundReachedStartValue = true
@@ -1145,7 +1010,14 @@ func (i *allFloatBucketIterator) Next() bool {
 	case 0:
 		i.state = 1
 		if i.h.ZeroCount > 0 {
-			i.currBucket = i.h.ZeroBucket()
+			i.currBucket = Bucket[float64]{
+				Lower:          -i.h.ZeroThreshold,
+				Upper:          i.h.ZeroThreshold,
+				LowerInclusive: true,
+				UpperInclusive: true,
+				Count:          i.h.ZeroCount,
+				// Index is irrelevant for the zero bucket.
+			}
 			return true
 		}
 		return i.Next()
@@ -1204,7 +1076,7 @@ func addBuckets(
 	for _, spanB := range spansB {
 		indexB += spanB.Offset
 		for j := 0; j < int(spanB.Length); j++ {
-			if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
+			if lowerThanThreshold && getBound(indexB, schema) <= threshold {
 				goto nextLoop
 			}
 			lowerThanThreshold = false
@@ -1305,7 +1177,7 @@ func addBuckets(
 	return spansA, bucketsA
 }
 
-func FloatBucketsMatch(b1, b2 []float64) bool {
+func floatBucketsMatch(b1, b2 []float64) bool {
 	if len(b1) != len(b2) {
 		return false
 	}
@@ -1319,15 +1191,7 @@ func FloatBucketsMatch(b1, b2 []float64) bool {
 
 // ReduceResolution reduces the float histogram's spans, buckets into target schema.
 // The target schema must be smaller than the current float histogram's schema.
-// This will panic if the histogram has custom buckets or if the target schema is
-// a custom buckets schema.
 func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
-	if h.UsesCustomBuckets() {
-		panic("cannot reduce resolution when there are custom buckets")
-	}
-	if IsCustomBucketsSchema(targetSchema) {
-		panic("cannot reduce resolution to custom buckets schema")
-	}
 	if targetSchema >= h.Schema {
 		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
 	}
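Usage-wise, the largest change in this file is the Add/Sub signature: the left side returns (*FloatHistogram, error) and refuses to mix exponential and custom-bucket schemas, while the right side returns only the receiver. A hedged sketch of calling the error-returning variant; the sentinel names come from this diff, everything else is assumed:

package main

import (
	"errors"
	"log"

	"github.com/prometheus/prometheus/model/histogram"
)

// addChecked demonstrates the left-side API: Add mutates a in place and
// returns it, or fails on incompatible operands.
func addChecked(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
	res, err := a.Add(b)
	if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
		log.Fatal("one operand uses custom buckets, the other exponential buckets")
	}
	if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
		log.Fatal("both operands use custom buckets, but with different bounds")
	}
	return res
}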
105  vendor/github.com/prometheus/prometheus/model/histogram/generic.go  (generated, vendored)
@@ -20,33 +20,14 @@ import (
 	"strings"
 )
 
-const (
-	ExponentialSchemaMax int32 = 8
-	ExponentialSchemaMin int32 = -4
-	CustomBucketsSchema  int32 = -53
-)
-
 var (
-	ErrHistogramCountNotBigEnough     = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
-	ErrHistogramCountMismatch         = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
-	ErrHistogramNegativeBucketCount   = errors.New("histogram has a bucket whose observation count is negative")
-	ErrHistogramSpanNegativeOffset    = errors.New("histogram has a span whose offset is negative")
-	ErrHistogramSpansBucketsMismatch  = errors.New("histogram spans specify different number of buckets than provided")
-	ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
-	ErrHistogramCustomBucketsInvalid  = errors.New("histogram custom bounds must be in strictly increasing order")
-	ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
-	ErrHistogramsIncompatibleSchema   = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
-	ErrHistogramsIncompatibleBounds   = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
+	ErrHistogramCountNotBigEnough    = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
+	ErrHistogramCountMismatch        = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
+	ErrHistogramNegativeBucketCount  = errors.New("histogram has a bucket whose observation count is negative")
+	ErrHistogramSpanNegativeOffset   = errors.New("histogram has a span whose offset is negative")
+	ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
 )
 
-func IsCustomBucketsSchema(s int32) bool {
-	return s == CustomBucketsSchema
-}
-
-func IsExponentialSchema(s int32) bool {
-	return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
-}
-
 // BucketCount is a type constraint for the count in a bucket, which can be
 // float64 (for type FloatHistogram) or uint64 (for type Histogram).
 type BucketCount interface {
@@ -134,8 +115,6 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
 
 	currCount IBC   // Count in the current bucket.
 	currIdx   int32 // The actual bucket index.
-
-	customValues []float64 // Bounds (usually upper) for histograms with custom buckets.
 }
 
 func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
@@ -149,19 +128,14 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
 		Index: b.currIdx,
 	}
 	if b.positive {
-		bucket.Upper = getBound(b.currIdx, schema, b.customValues)
-		bucket.Lower = getBound(b.currIdx-1, schema, b.customValues)
+		bucket.Upper = getBound(b.currIdx, schema)
+		bucket.Lower = getBound(b.currIdx-1, schema)
 	} else {
-		bucket.Lower = -getBound(b.currIdx, schema, b.customValues)
-		bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues)
-	}
-	if IsCustomBucketsSchema(schema) {
-		bucket.LowerInclusive = b.currIdx == 0
-		bucket.UpperInclusive = true
-	} else {
-		bucket.LowerInclusive = bucket.Lower < 0
-		bucket.UpperInclusive = bucket.Upper > 0
+		bucket.Lower = -getBound(b.currIdx, schema)
+		bucket.Upper = -getBound(b.currIdx-1, schema)
 	}
+	bucket.LowerInclusive = bucket.Lower < 0
+	bucket.UpperInclusive = bucket.Upper > 0
 	return bucket
 }
 
@@ -419,55 +393,7 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
 	return nil
 }
 
-func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
-	prev := math.Inf(-1)
-	for _, curr := range bounds {
-		if curr <= prev {
-			return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
-		}
-		prev = curr
-	}
-	if prev == math.Inf(1) {
-		return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite)
-	}
-
-	var spanBuckets int
-	var totalSpanLength int
-	for n, span := range spans {
-		if span.Offset < 0 {
-			return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
-		}
-		spanBuckets += int(span.Length)
-		totalSpanLength += int(span.Length) + int(span.Offset)
-	}
-	if spanBuckets != numBuckets {
-		return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
-	}
-	if (len(bounds) + 1) < totalSpanLength {
-		return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
-	}
-
-	return nil
-}
-
-func getBound(idx, schema int32, customValues []float64) float64 {
-	if IsCustomBucketsSchema(schema) {
-		length := int32(len(customValues))
-		switch {
-		case idx > length || idx < -1:
-			panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
-		case idx == length:
-			return math.Inf(1)
-		case idx == -1:
-			return math.Inf(-1)
-		default:
-			return customValues[idx]
-		}
-	}
-	return getBoundExponential(idx, schema)
-}
-
-func getBoundExponential(idx, schema int32) float64 {
+func getBound(idx, schema int32) float64 {
 	// Here a bit of context about the behavior for the last bucket counting
 	// regular numbers (called simply "last bucket" below) and the bucket
 	// counting observations of ±Inf (called "inf bucket" below, with an idx
@@ -777,10 +703,3 @@ func reduceResolution[IBC InternalBucketCount](
 
 	return targetSpans, targetBuckets
 }
-
-func clearIfNotNil[T any](items []T) []T {
-	if items == nil {
-		return nil
-	}
-	return items[:0]
-}
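The getBound split above is the only structural change in this hunk: the custom-bounds lookup disappears and the exponential computation keeps the old name. The exponential formula is simple to state: with schema n, each power of two is split into 2^n buckets, so bucket idx has upper bound 2^(idx * 2^-n). A back-of-envelope sketch; the vendored code additionally special-cases the ±Inf buckets and float overflow:

package main

import (
	"fmt"
	"math"
)

// boundExponential computes the upper bound of bucket idx under an
// exponential schema. Sketch only, no edge-case handling.
func boundExponential(idx, schema int32) float64 {
	return math.Exp2(float64(idx) * math.Exp2(-float64(schema)))
}

func main() {
	fmt.Println(boundExponential(1, 0)) // 2: schema 0 doubles at every bucket
	fmt.Println(boundExponential(3, 2)) // 2^(3/4) ≈ 1.6818
}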
224  vendor/github.com/prometheus/prometheus/model/histogram/histogram.go  (generated, vendored)
@@ -49,12 +49,11 @@ const (
 type Histogram struct {
 	// Counter reset information.
 	CounterResetHint CounterResetHint
-	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets,
-	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
-	// each case, and then each power of two is divided into 2^n logarithmic buckets.
-	// Or in other words, each bucket boundary is the previous boundary times
-	// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
-	// the CustomValues field.
+	// Currently valid schema numbers are -4 <= n <= 8. They are all for
+	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets. Or
+	// in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n).
 	Schema int32
 	// Width of the zero bucket.
 	ZeroThreshold float64
@@ -70,12 +69,6 @@ type Histogram struct {
 	// count. All following ones are deltas relative to the previous
 	// element.
 	PositiveBuckets, NegativeBuckets []int64
-	// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
-	// This slice is interned, to be treated as immutable and copied by reference.
-	// These numbers should be strictly increasing. This field is only used when the
-	// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
-	// and NegativeBuckets fields are not used in that case.
-	CustomValues []float64
 }
 
 // A Span defines a continuous sequence of buckets.
@@ -87,46 +80,33 @@ type Span struct {
 	Length uint32
 }
 
-func (h *Histogram) UsesCustomBuckets() bool {
-	return IsCustomBucketsSchema(h.Schema)
-}
-
 // Copy returns a deep copy of the Histogram.
 func (h *Histogram) Copy() *Histogram {
 	c := Histogram{
 		CounterResetHint: h.CounterResetHint,
 		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        h.ZeroCount,
 		Count:            h.Count,
 		Sum:              h.Sum,
 	}
 
-	if h.UsesCustomBuckets() {
-		if len(h.CustomValues) != 0 {
-			c.CustomValues = make([]float64, len(h.CustomValues))
-			copy(c.CustomValues, h.CustomValues)
-		}
-	} else {
-		c.ZeroThreshold = h.ZeroThreshold
-		c.ZeroCount = h.ZeroCount
-
-		if len(h.NegativeSpans) != 0 {
-			c.NegativeSpans = make([]Span, len(h.NegativeSpans))
-			copy(c.NegativeSpans, h.NegativeSpans)
-		}
-		if len(h.NegativeBuckets) != 0 {
-			c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
-			copy(c.NegativeBuckets, h.NegativeBuckets)
-		}
-	}
-
 	if len(h.PositiveSpans) != 0 {
 		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
 		copy(c.PositiveSpans, h.PositiveSpans)
 	}
+	if len(h.NegativeSpans) != 0 {
+		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
+		copy(c.NegativeSpans, h.NegativeSpans)
+	}
 	if len(h.PositiveBuckets) != 0 {
 		c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
 		copy(c.PositiveBuckets, h.PositiveBuckets)
 	}
+	if len(h.NegativeBuckets) != 0 {
+		c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
+		copy(c.NegativeBuckets, h.NegativeBuckets)
+	}
 
 	return &c
 }
@@ -136,36 +116,22 @@ func (h *Histogram) Copy() *Histogram {
 func (h *Histogram) CopyTo(to *Histogram) {
 	to.CounterResetHint = h.CounterResetHint
 	to.Schema = h.Schema
+	to.ZeroThreshold = h.ZeroThreshold
+	to.ZeroCount = h.ZeroCount
 	to.Count = h.Count
 	to.Sum = h.Sum
 
-	if h.UsesCustomBuckets() {
-		to.ZeroThreshold = 0
-		to.ZeroCount = 0
-
-		to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
-		to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
-
-		to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
-		copy(to.CustomValues, h.CustomValues)
-	} else {
-		to.ZeroThreshold = h.ZeroThreshold
-		to.ZeroCount = h.ZeroCount
-
-		to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
-		copy(to.NegativeSpans, h.NegativeSpans)
-
-		to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
-		copy(to.NegativeBuckets, h.NegativeBuckets)
-
-		to.CustomValues = clearIfNotNil(to.CustomValues)
-	}
-
 	to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
 	copy(to.PositiveSpans, h.PositiveSpans)
 
+	to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
+	copy(to.NegativeSpans, h.NegativeSpans)
+
 	to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
 	copy(to.PositiveBuckets, h.PositiveBuckets)
+
+	to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
+	copy(to.NegativeBuckets, h.NegativeBuckets)
 }
 
 // String returns a string representation of the Histogram.
@@ -199,11 +165,8 @@ func (h *Histogram) String() string {
 	return sb.String()
 }
 
-// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
+// ZeroBucket returns the zero bucket.
 func (h *Histogram) ZeroBucket() Bucket[uint64] {
-	if h.UsesCustomBuckets() {
-		panic("histograms with custom buckets have no zero bucket")
-	}
 	return Bucket[uint64]{
 		Lower: -h.ZeroThreshold,
 		Upper: h.ZeroThreshold,
@@ -216,14 +179,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] {
 // PositiveBucketIterator returns a BucketIterator to iterate over all positive
 // buckets in ascending order (starting next to the zero bucket and going up).
 func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
-	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
+	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
 	return &it
 }
 
 // NegativeBucketIterator returns a BucketIterator to iterate over all negative
 // buckets in descending order (starting next to the zero bucket and going down).
 func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
-	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
+	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
 	return &it
 }
 
@@ -244,42 +207,30 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
 // but they must represent the same bucket layout to match.
 // Sum is compared based on its bit pattern because this method
 // is about data equality rather than mathematical equality.
-// We ignore fields that are not used based on the exponential / custom buckets schema,
-// but check fields where differences may cause unintended behaviour even if they are not
-// supposed to be used according to the schema.
 func (h *Histogram) Equals(h2 *Histogram) bool {
 	if h2 == nil {
 		return false
 	}
 
-	if h.Schema != h2.Schema || h.Count != h2.Count ||
+	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
+		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
 		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
 		return false
 	}
 
-	if h.UsesCustomBuckets() {
-		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
-			return false
-		}
-	}
-
-	if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
-		return false
-	}
-
-	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
-		return false
-	}
-	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
-		return false
-	}
-
 	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
 		return false
 	}
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+
 	if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
 		return false
 	}
+	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
 
 	return true
 }
@@ -370,36 +321,17 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
 	}
 	fh.CounterResetHint = h.CounterResetHint
 	fh.Schema = h.Schema
+	fh.ZeroThreshold = h.ZeroThreshold
+	fh.ZeroCount = float64(h.ZeroCount)
 	fh.Count = float64(h.Count)
 	fh.Sum = h.Sum
 
-	if h.UsesCustomBuckets() {
-		fh.ZeroThreshold = 0
-		fh.ZeroCount = 0
-		fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
-		fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)
-
-		fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues))
-		copy(fh.CustomValues, h.CustomValues)
-	} else {
-		fh.ZeroThreshold = h.ZeroThreshold
-		fh.ZeroCount = float64(h.ZeroCount)
-
-		fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
-		copy(fh.NegativeSpans, h.NegativeSpans)
-
-		fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
-		var currentNegative float64
-		for i, b := range h.NegativeBuckets {
-			currentNegative += float64(b)
-			fh.NegativeBuckets[i] = currentNegative
-		}
-		fh.CustomValues = clearIfNotNil(fh.CustomValues)
-	}
-
 	fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
 	copy(fh.PositiveSpans, h.PositiveSpans)
 
+	fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
+	copy(fh.NegativeSpans, h.NegativeSpans)
+
 	fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
 	var currentPositive float64
 	for i, b := range h.PositiveBuckets {
@@ -407,6 +339,13 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
 		fh.PositiveBuckets[i] = currentPositive
 	}
 
+	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
+	var currentNegative float64
+	for i, b := range h.NegativeBuckets {
+		currentNegative += float64(b)
+		fh.NegativeBuckets[i] = currentNegative
+	}
+
 	return fh
 }
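The ToFloat loops above are delta decoding: the integer Histogram stores each bucket count as a delta relative to the previous bucket, while FloatHistogram stores absolute counts, so conversion is a running sum. In isolation:

package main

import "fmt"

func main() {
	deltas := []int64{5, -2, 3} // integer histogram encoding of counts 5, 3, 6
	abs := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d) // running sum turns deltas into absolute counts
		abs[i] = cur
	}
	fmt.Println(abs) // [5 3 6]
}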
@@ -418,47 +357,25 @@ func resize[T any](items []T, n int) []T {
 }
 
 // Validate validates consistency between span and bucket slices. Also, buckets are checked
-// against negative values. We check to make sure there are no unexpected fields or field values
-// based on the exponential / custom buckets schema.
+// against negative values.
 // For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
 // strict h.Count = nCount + pCount + h.ZeroCount check is performed.
 // Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
 // because NaN observations do not increment the values of buckets (but they do increment
 // the total h.Count).
 func (h *Histogram) Validate() error {
-	var nCount, pCount uint64
-	if h.UsesCustomBuckets() {
-		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
-			return fmt.Errorf("custom buckets: %w", err)
-		}
-		if h.ZeroCount != 0 {
-			return fmt.Errorf("custom buckets: must have zero count of 0")
-		}
-		if h.ZeroThreshold != 0 {
-			return fmt.Errorf("custom buckets: must have zero threshold of 0")
-		}
-		if len(h.NegativeSpans) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative spans")
-		}
-		if len(h.NegativeBuckets) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative buckets")
-		}
-	} else {
-		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
-			return fmt.Errorf("positive side: %w", err)
-		}
-		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
-			return fmt.Errorf("negative side: %w", err)
-		}
-		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
-		if err != nil {
-			return fmt.Errorf("negative side: %w", err)
-		}
-		if h.CustomValues != nil {
-			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
-		}
-	}
-	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
+	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+		return fmt.Errorf("negative side: %w", err)
+	}
+	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+		return fmt.Errorf("positive side: %w", err)
+	}
+	var nCount, pCount uint64
+	err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
+	if err != nil {
+		return fmt.Errorf("negative side: %w", err)
+	}
+	err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
 	if err != nil {
 		return fmt.Errorf("positive side: %w", err)
 	}
@@ -481,13 +398,12 @@ type regularBucketIterator struct {
 	baseBucketIterator[uint64, int64]
 }
 
-func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator {
+func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator {
 	i := baseBucketIterator[uint64, int64]{
-		schema:       schema,
-		spans:        spans,
-		buckets:      buckets,
-		positive:     positive,
-		customValues: customValues,
+		schema:   schema,
+		spans:    spans,
+		buckets:  buckets,
+		positive: positive,
 	}
 	return regularBucketIterator{i}
 }
@@ -561,7 +477,7 @@ func (c *cumulativeBucketIterator) Next() bool {
 
 	if c.emptyBucketCount > 0 {
 		// We are traversing through empty buckets at the moment.
-		c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+		c.currUpper = getBound(c.currIdx, c.h.Schema)
 		c.currIdx++
 		c.emptyBucketCount--
 		return true
@@ -578,7 +494,7 @@ func (c *cumulativeBucketIterator) Next() bool {
 
 	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
 	c.currCumulativeCount += uint64(c.currCount)
-	c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+	c.currUpper = getBound(c.currIdx, c.h.Schema)
 
 	c.posBucketsIdx++
 	c.idxInSpan++
@@ -608,15 +524,7 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {
 
 // ReduceResolution reduces the histogram's spans, buckets into target schema.
 // The target schema must be smaller than the current histogram's schema.
-// This will panic if the histogram has custom buckets or if the target schema is
-// a custom buckets schema.
 func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
-	if h.UsesCustomBuckets() {
-		panic("cannot reduce resolution when there are custom buckets")
-	}
-	if IsCustomBucketsSchema(targetSchema) {
-		panic("cannot reduce resolution to custom buckets schema")
-	}
 	if targetSchema >= h.Schema {
 		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
 	}
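Both versions of Validate lean on the same invariant: spans describe where buckets sit on the index axis, and the sum of the span Lengths must equal the number of bucket counts supplied. In miniature, with the Span fields as defined in this file:

package main

import "fmt"

type Span struct {
	Offset int32  // gap before this run of buckets
	Length uint32 // number of consecutive buckets in the run
}

// spansMatchBuckets checks the core consistency rule enforced by
// checkHistogramSpans; sketch only.
func spansMatchBuckets(spans []Span, numBuckets int) bool {
	var n int
	for _, s := range spans {
		n += int(s.Length)
	}
	return n == numBuckets
}

func main() {
	spans := []Span{{Offset: 0, Length: 2}, {Offset: 3, Length: 1}}
	fmt.Println(spansMatchBuckets(spans, 3)) // true: 2+1 buckets described
}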
5  vendor/github.com/prometheus/prometheus/model/labels/labels_common.go  (generated, vendored)
@@ -18,7 +18,6 @@ import (
 	"encoding/json"
 	"slices"
 	"strconv"
-	"unsafe"
 
 	"github.com/prometheus/common/model"
 )
@@ -216,7 +215,3 @@ func contains(s []Label, n string) bool {
 	}
 	return false
 }
-
-func yoloString(b []byte) string {
-	return *((*string)(unsafe.Pointer(&b)))
-}
94  vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go  (generated, vendored)
@@ -20,6 +20,7 @@ import (
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
@@ -104,39 +105,30 @@ func (t *nameTable) ToName(num int) string {
|
||||
return t.byNum[num]
|
||||
}
|
||||
|
||||
// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
|
||||
// because we expect most Prometheus to have more than 127 unique strings.
|
||||
// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings.
|
||||
func decodeVarint(data string, index int) (int, int) {
|
||||
b := int(data[index]) + int(data[index+1])<<8
|
||||
index += 2
|
||||
if b < 0x8000 {
|
||||
return b, index
|
||||
}
|
||||
return decodeVarintRest(b, data, index)
|
||||
}
|
||||
|
||||
func decodeVarintRest(b int, data string, index int) (int, int) {
|
||||
value := int(b & 0x7FFF)
|
||||
b = int(data[index])
|
||||
// Fast-path for common case of a single byte, value 0..127.
|
||||
b := data[index]
|
||||
index++
|
||||
if b < 0x80 {
|
||||
return value | (b << 15), index
|
||||
return int(b), index
|
||||
}
|
||||
|
||||
value |= (b & 0x7f) << 15
|
||||
b = int(data[index])
|
||||
index++
|
||||
return value | (b << 22), index
|
||||
value := int(b & 0x7F)
|
||||
for shift := uint(7); ; shift += 7 {
|
||||
// Just panic if we go of the end of data, since all Labels strings are constructed internally and
|
||||
// malformed data indicates a bug, or memory corruption.
|
||||
b := data[index]
|
||||
index++
|
||||
value |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value, index
|
||||
}
|
||||
|
||||
func decodeString(t *nameTable, data string, index int) (string, int) {
|
||||
// Copy decodeVarint here, because the Go compiler says it's too big to inline.
|
||||
num := int(data[index]) + int(data[index+1])<<8
|
||||
index += 2
|
||||
if num >= 0x8000 {
|
||||
num, index = decodeVarintRest(num, data, index)
|
||||
}
|
||||
var num int
|
||||
num, index = decodeVarint(data, index)
|
||||
return t.ToName(num), index
|
||||
}
|
||||
|
||||
@@ -330,12 +322,7 @@ func (ls Labels) Get(name string) string {
|
||||
} else if lName[0] > name[0] { // Stop looking if we've gone past.
|
||||
break
|
||||
}
|
||||
// Copy decodeVarint here, because the Go compiler says it's too big to inline.
|
||||
num := int(ls.data[i]) + int(ls.data[i+1])<<8
|
||||
i += 2
|
||||
if num >= 0x8000 {
|
||||
_, i = decodeVarintRest(num, ls.data, i)
|
||||
}
|
||||
_, i = decodeVarint(ls.data, i)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -353,12 +340,7 @@ func (ls Labels) Has(name string) bool {
|
||||
} else if lName[0] > name[0] { // Stop looking if we've gone past.
|
||||
break
|
||||
}
|
||||
// Copy decodeVarint here, because the Go compiler says it's too big to inline.
|
||||
num := int(ls.data[i]) + int(ls.data[i+1])<<8
|
||||
i += 2
|
||||
if num >= 0x8000 {
|
||||
_, i = decodeVarintRest(num, ls.data, i)
|
||||
}
|
||||
_, i = decodeVarint(ls.data, i)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -444,6 +426,10 @@ func EmptyLabels() Labels {
|
||||
return Labels{}
|
||||
}
|
||||
|
||||
func yoloString(b []byte) string {
|
||||
return *((*string)(unsafe.Pointer(&b)))
|
||||
}
|
||||
|
||||
// New returns a sorted Labels from the given labels.
|
||||
// The caller has to guarantee that all label names are unique.
|
||||
// Note this function is not efficient; should not be used in performance-critical places.
|
||||
@@ -660,24 +646,29 @@ func marshalNumbersToSizedBuffer(nums []int, data []byte) int {
|
||||
|
||||
func sizeVarint(x uint64) (n int) {
|
||||
// Most common case first
|
||||
if x < 1<<15 {
|
||||
return 2
|
||||
if x < 1<<7 {
|
||||
return 1
|
||||
}
|
||||
if x < 1<<22 {
|
||||
return 3
|
||||
if x >= 1<<56 {
|
||||
return 9
|
||||
}
|
||||
if x >= 1<<29 {
|
||||
panic("Number too large to represent")
|
||||
if x >= 1<<28 {
|
||||
x >>= 28
|
||||
n = 4
|
||||
}
|
||||
return 4
|
||||
if x >= 1<<14 {
|
||||
x >>= 14
|
||||
n += 2
|
||||
}
|
||||
if x >= 1<<7 {
|
||||
n++
|
||||
}
|
||||
return n + 1
|
||||
}
|
||||
|
||||
func encodeVarintSlow(data []byte, offset int, v uint64) int {
|
||||
offset -= sizeVarint(v)
|
||||
base := offset
|
||||
data[offset] = uint8(v)
|
||||
v >>= 8
|
||||
offset++
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
@@ -687,12 +678,11 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int {
|
||||
return base
|
||||
}
|
||||
|
||||
// Special code for the common case that a value is less than 32768
|
||||
// Special code for the common case that a value is less than 128
|
||||
func encodeVarint(data []byte, offset, v int) int {
|
||||
if v < 1<<15 {
|
||||
offset -= 2
|
||||
if v < 1<<7 {
|
||||
offset--
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
return offset
|
||||
}
|
||||
return encodeVarintSlow(data, offset, uint64(v))
|
||||
|
||||
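To make the before/after in decodeVarint/decodeVarintRest easier to follow, here is a self-contained sketch (our names, not the vendored code) contrasting the two layouts this file switches between: the standard 7-bits-per-byte varint, and the non-standard scheme described in the comment above, which spends two bytes on every value below 32768 and, per the sizeVarint panic, supports at most 29-bit values:

package main

import "fmt"

// putStd appends x as a standard varint: 7 payload bits per byte,
// high bit set on all bytes except the last.
func putStd(dst []byte, x uint32) []byte {
	for x >= 1<<7 {
		dst = append(dst, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(dst, byte(x))
}

// put2B appends x (x < 1<<29) in the 2-bytes-first layout: bits 0..14 go
// little-endian into the first pair, bit 15 of that pair is the continuation
// flag, then ordinary 7-bit groups carry bits 15..28.
func put2B(dst []byte, x uint32) []byte {
	if x < 1<<15 {
		return append(dst, byte(x), byte(x>>8))
	}
	dst = append(dst, byte(x), byte(x>>8)&0x7f|0x80)
	x >>= 15
	if x < 1<<7 {
		return append(dst, byte(x))
	}
	return append(dst, byte(x&0x7f|0x80), byte(x>>7))
}

func main() {
	for _, v := range []uint32{5, 300, 40000, 1 << 23} {
		fmt.Printf("%8d  std=% x  2B=% x\n", v, putStd(nil, v), put2B(nil, v))
	}
}

Small values (below 128) cost one byte in the standard scheme but always two in the 2-bytes-first scheme; the trade goes the other way for values between 128 and 32767, which is the range the comment expects to dominate.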
vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go (generated, vendored): 5 changed lines

@@ -299,6 +299,11 @@ func Equal(ls, o Labels) bool {
func EmptyLabels() Labels {
	return Labels{}
}

func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}

func yoloBytes(s string) (b []byte) {
	*(*string)(unsafe.Pointer(&b)) = s
	(*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s)
vendor/github.com/prometheus/prometheus/model/labels/matcher.go (generated, vendored): 2 changed lines

@@ -101,7 +101,7 @@ func (m *Matcher) shouldQuoteName() bool {
		}
		return true
	}
	return len(m.Name) == 0
	return false
}

// Matches returns whether the matcher matches the given string value.
vendor/github.com/prometheus/prometheus/model/labels/regexp.go (generated, vendored): 248 changed lines

@@ -16,19 +16,17 @@ package labels
import (
	"slices"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/grafana/regexp"
	"github.com/grafana/regexp/syntax"
	"golang.org/x/text/unicode/norm"
)

const (
	maxSetMatches = 256

	// The minimum number of alternate values a regex should have to trigger
	// the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map
	// the optimization done by optimizeEqualStringMatchers() and so use a map
	// to match values instead of iterating over a list. This value has
	// been computed running BenchmarkOptimizeEqualStringMatchers.
	minEqualMultiStringMatcherMapThreshold = 16
@@ -44,7 +42,7 @@ type FastRegexMatcher struct {
	stringMatcher StringMatcher
	prefix        string
	suffix        string
	contains      []string
	contains      string

	// matchString is the "compiled" function to run by MatchString().
	matchString func(string) bool
@@ -89,7 +87,7 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
// compileMatchStringFunction returns the function to run by MatchString().
func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
	// If the only optimization available is the string matcher, then we can just run it.
	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil {
	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil {
		return m.stringMatcher.Matches
	}
@@ -108,7 +106,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
	if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
		return false
	}
	if len(m.contains) > 0 && !containsInOrder(s, m.contains) {
	if m.contains != "" && !strings.Contains(s, m.contains) {
		return false
	}
	if m.stringMatcher != nil {
@@ -121,7 +119,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
// IsOptimized returns true if any fast-path optimization is applied to the
// regex matcher.
func (m *FastRegexMatcher) IsOptimized() bool {
	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0
	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != ""
}

// findSetMatches extract equality matches from a regexp.
@@ -337,7 +335,7 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
		return nil, nil
	}

	multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0)
	multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)

	for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
		// Split the string into the next literal and the remainder
@@ -363,9 +361,8 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {

// optimizeConcatRegex returns literal prefix/suffix text that can be safely
// checked against the label value before running the regexp matcher.
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) {
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
	sub := r.Sub
	clearCapture(sub...)

	// We can safely remove begin and end text matchers respectively
	// at the beginning and end of the regexp.
@@ -390,11 +387,13 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []st
		suffix = string(sub[last].Rune)
	}

	// If contains any literal which is not a prefix/suffix, we keep track of
	// all the ones which are case-sensitive.
	// If contains any literal which is not a prefix/suffix, we keep the
	// 1st one. We do not keep the whole list of literals to simplify the
	// fast path.
	for i := 1; i < len(sub)-1; i++ {
		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
			contains = append(contains, string(sub[i].Rune))
			contains = string(sub[i].Rune)
			break
		}
	}
@@ -412,7 +411,7 @@ func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
	clearBeginEndText(re)

	m := stringMatcherFromRegexpInternal(re)
	m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
	m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)

	return m
}
@@ -549,7 +548,11 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {

	// Right matcher with 1 fixed set match.
	case left == nil && len(matches) == 1:
		return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right)
		return &literalPrefixStringMatcher{
			prefix:              matches[0],
			prefixCaseSensitive: matchesCaseSensitive,
			right:               right,
		}

	// Left matcher with 1 fixed set match.
	case right == nil && len(matches) == 1:
@@ -627,47 +630,21 @@ func (m *containsStringMatcher) Matches(s string) bool {
	return false
}

func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher {
	if prefixCaseSensitive {
		return &literalPrefixSensitiveStringMatcher{
			prefix: prefix,
			right:  right,
		}
	}

	return &literalPrefixInsensitiveStringMatcher{
		prefix: prefix,
		right:  right,
	}
}

// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher.
type literalPrefixSensitiveStringMatcher struct {
	prefix string
// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
type literalPrefixStringMatcher struct {
	prefix              string
	prefixCaseSensitive bool

	// The matcher that must match the right side. Can be nil.
	right StringMatcher
}

func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool {
	if !strings.HasPrefix(s, m.prefix) {
func (m *literalPrefixStringMatcher) Matches(s string) bool {
	// Ensure the prefix matches.
	if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
		return false
	}

	// Ensure the right side matches.
	return m.right.Matches(s[len(m.prefix):])
}

// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher.
type literalPrefixInsensitiveStringMatcher struct {
	prefix string

	// The matcher that must match the right side. Can be nil.
	right StringMatcher
}

func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool {
	if !hasPrefixCaseInsensitive(s, m.prefix) {
	if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
		return false
	}
@@ -732,20 +709,17 @@ func (m *equalStringMatcher) Matches(s string) bool {
type multiStringMatcherBuilder interface {
	StringMatcher
	add(s string)
	addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
	setMatches() []string
}

func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
	// If the estimated size is low enough, it's faster to use a slice instead of a map.
	if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
	if estimatedSize < minEqualMultiStringMatcherMapThreshold {
		return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
	}

	return &equalMultiStringMapMatcher{
		values:        make(map[string]struct{}, estimatedSize),
		prefixes:      make(map[string][]StringMatcher, estimatedPrefixes),
		minPrefixLen:  minPrefixLength,
		caseSensitive: caseSensitive,
	}
}
@@ -761,10 +735,6 @@ func (m *equalMultiStringSliceMatcher) add(s string) {
	m.values = append(m.values, s)
}

func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
	panic("not implemented")
}

func (m *equalMultiStringSliceMatcher) setMatches() []string {
	return m.values
}
@@ -786,49 +756,25 @@ func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
	return false
}

// equalMultiStringMapMatcher matches a string exactly against a map of valid values
// or against a set of prefix matchers.
// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
type equalMultiStringMapMatcher struct {
	// values contains values to match a string against. If the matching is case insensitive,
	// the values here must be lowercase.
	values map[string]struct{}
	// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string.
	// If the matching is case insensitive, prefixes are all lowercase.
	prefixes map[string][]StringMatcher
	// minPrefixLen can be zero, meaning there are no prefix matchers.
	minPrefixLen int

	caseSensitive bool
}

func (m *equalMultiStringMapMatcher) add(s string) {
	if !m.caseSensitive {
		s = toNormalisedLower(s)
		s = strings.ToLower(s)
	}

	m.values[s] = struct{}{}
}

func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
	if m.minPrefixLen == 0 {
		panic("addPrefix called when no prefix length defined")
	}
	if len(prefix) < m.minPrefixLen {
		panic("addPrefix called with a too short prefix")
	}
	if m.caseSensitive != prefixCaseSensitive {
		panic("addPrefix called with a prefix whose case sensitivity is different than the expected one")
	}

	s := prefix[:m.minPrefixLen]
	if !m.caseSensitive {
		s = strings.ToLower(s)
	}

	m.prefixes[s] = append(m.prefixes[s], matcher)
}

func (m *equalMultiStringMapMatcher) setMatches() []string {
	if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
	if len(m.values) >= maxSetMatches {
		return nil
	}
@@ -841,42 +787,11 @@ func (m *equalMultiStringMapMatcher) setMatches() []string {

func (m *equalMultiStringMapMatcher) Matches(s string) bool {
	if !m.caseSensitive {
		s = toNormalisedLower(s)
		s = strings.ToLower(s)
	}

	if _, ok := m.values[s]; ok {
		return true
	}
	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
		for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
			if matcher.Matches(s) {
				return true
			}
		}
	}
	return false
}

// toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert
// it to lower case.
func toNormalisedLower(s string) string {
	var buf []byte
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
		}
		if 'A' <= c && c <= 'Z' {
			if buf == nil {
				buf = []byte(s)
			}
			buf[i] = c + 'a' - 'A'
		}
	}
	if buf == nil {
		return s
	}
	return yoloString(buf)
	_, ok := m.values[s]
	return ok
}

// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
@@ -937,24 +852,20 @@ func (m trueMatcher) Matches(_ string) bool {
	return true
}

// optimizeEqualOrPrefixStringMatchers optimize a specific case where all matchers are made by an
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
//
// In this specific case, when we have many strings to match against we can use a map instead
// optimizeEqualStringMatchers optimize a specific case where all matchers are made by an
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In
// this specific case, when we have many strings to match against we can use a map instead
// of iterating over the list of strings.
func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
	var (
		caseSensitive    bool
		caseSensitiveSet bool
		numValues        int
		numPrefixes      int
		minPrefixLength  int
	)

	// Analyse the input StringMatcher to count the number of occurrences
	// and ensure all of them have the same case sensitivity.
	analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
	analyseCallback := func(matcher *equalStringMatcher) bool {
		// Ensure we don't have mixed case sensitivity.
		if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
			return false
@@ -967,55 +878,34 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str
		return true
	}

	analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
		// Ensure we don't have mixed case sensitivity.
		if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
			return false
		} else if !caseSensitiveSet {
			caseSensitive = prefixCaseSensitive
			caseSensitiveSet = true
		}
		if numPrefixes == 0 || len(prefix) < minPrefixLength {
			minPrefixLength = len(prefix)
		}

		numPrefixes++
		return true
	}

	if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
	if !findEqualStringMatchers(input, analyseCallback) {
		return input
	}

	// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
	if (numValues + numPrefixes) < threshold {
	// If the number of values found is less than the threshold, then we should skip the optimization.
	if numValues < threshold {
		return input
	}

	// Parse again the input StringMatcher to extract all values and storing them.
	// We can skip the case sensitivity check because we've already checked it and
	// if the code reach this point then it means all matchers have the same case sensitivity.
	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)

	// Ignore the return value because we already iterated over the input StringMatcher
	// and it was all good.
	findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
	findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
		multiMatcher.add(matcher.s)
		return true
	}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
		multiMatcher.addPrefix(prefix, caseSensitive, matcher)
		return true
	})

	return multiMatcher
}

// findEqualOrPrefixStringMatchers analyze the input StringMatcher and calls the equalMatcherCallback for each
// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
//
// Returns true if and only if the input StringMatcher is *only* composed by an alternation of equalStringMatcher and/or
// literal prefix matcher. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
// findEqualStringMatchers analyze the input StringMatcher and calls the callback for each
// equalStringMatcher found. Returns true if and only if the input StringMatcher is *only*
// composed by an alternation of equalStringMatcher.
func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
	orInput, ok := input.(orStringMatcher)
	if !ok {
		return false
@@ -1024,27 +914,17 @@ func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback f
	for _, m := range orInput {
		switch casted := m.(type) {
		case orStringMatcher:
			if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
			if !findEqualStringMatchers(m, callback) {
				return false
			}

		case *equalStringMatcher:
			if !equalMatcherCallback(casted) {
				return false
			}

		case *literalPrefixSensitiveStringMatcher:
			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
				return false
			}

		case *literalPrefixInsensitiveStringMatcher:
			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
			if !callback(casted) {
				return false
			}

		default:
			// It's not an equal or prefix string matcher, so we have to stop searching
			// It's not an equal string matcher, so we have to stop searching
			// cause this optimization can't be applied.
			return false
		}
@@ -1060,27 +940,3 @@ func hasPrefixCaseInsensitive(s, prefix string) bool {
func hasSuffixCaseInsensitive(s, suffix string) bool {
	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
}

func containsInOrder(s string, contains []string) bool {
	// Optimization for the case we only have to look for 1 substring.
	if len(contains) == 1 {
		return strings.Contains(s, contains[0])
	}

	return containsInOrderMulti(s, contains)
}

func containsInOrderMulti(s string, contains []string) bool {
	offset := 0

	for _, substr := range contains {
		at := strings.Index(s[offset:], substr)
		if at == -1 {
			return false
		}

		offset += at + len(substr)
	}

	return true
}
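One behavioral difference in this file is easy to miss: containsInOrder requires every literal to occur in order, each match starting after the previous one, which is stricter than running strings.Contains per literal. A self-contained re-implementation of the helper from this hunk, with an example where the two checks disagree:

package main

import (
	"fmt"
	"strings"
)

// containsInOrder reports whether every substring occurs in s in the given
// order, each match starting after the end of the previous one.
func containsInOrder(s string, contains []string) bool {
	offset := 0
	for _, substr := range contains {
		at := strings.Index(s[offset:], substr)
		if at == -1 {
			return false
		}
		offset += at + len(substr)
	}
	return true
}

func main() {
	// For a pattern like `xyz-.*-foo-.*-bar`, the literals "-foo-" and "-bar"
	// must appear in that order; a per-literal strings.Contains check would
	// also accept the second string below, which the regex rejects.
	fmt.Println(containsInOrder("xyz-123-foo-456-bar", []string{"-foo-", "-bar"}))  // true
	fmt.Println(containsInOrder("xyz-123-bar-456-foo-", []string{"-foo-", "-bar"})) // false
}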
vendor/github.com/prometheus/prometheus/model/relabel/relabel.go (generated, vendored): 5 changed lines

@@ -206,11 +206,6 @@ func (re Regexp) MarshalYAML() (interface{}, error) {
	return nil, nil
}

// IsZero implements the yaml.IsZeroer interface.
func (re Regexp) IsZero() bool {
	return re.Regexp == DefaultRelabelConfig.Regex.Regexp
}

// String returns the original string used to compile the regular expression.
func (re Regexp) String() string {
	str := re.Regexp.String()
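For background (ours): the removed IsZero exists so that `omitempty` can treat a Regexp still at its default as empty; gopkg.in/yaml.v2 (2.2+) and yaml.v3 consult a yaml.IsZeroer interface when deciding whether to omit a field. A minimal sketch of that mechanism with a made-up type, not the Prometheus config:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Level struct{ Name string }

var defaultLevel = Level{Name: "info"}

// IsZero reports "empty" when the value equals the package default,
// mirroring how the removed Regexp.IsZero compares against
// DefaultRelabelConfig.Regex; omitempty then drops fields still at default.
func (l Level) IsZero() bool { return l == defaultLevel }

type Config struct {
	Name  string `yaml:"name"`
	Level Level  `yaml:"level,omitempty"`
}

func main() {
	out, _ := yaml.Marshal(Config{Name: "job", Level: defaultLevel})
	fmt.Print(string(out)) // prints "name: job" only; level is omitted
}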
vendor/github.com/prometheus/prometheus/prompb/codec.go (generated, vendored): 201 changed lines (file deleted)

@@ -1,201 +0,0 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prompb

import (
	"strings"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
)

// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.

// ToLabels return model labels.Labels from timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
	return labelProtosToLabels(b, m.GetLabels())
}

// ToLabels return model labels.Labels from timeseries' remote labels.
func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
	return labelProtosToLabels(b, m.GetLabels())
}

func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
	b.Reset()
	for _, l := range labelPairs {
		b.Add(l.Name, l.Value)
	}
	b.Sort()
	return b.Labels()
}

// FromLabels transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func FromLabels(lbls labels.Labels, buf []Label) []Label {
	result := buf[:0]
	lbls.Range(func(l labels.Label) {
		result = append(result, Label{
			Name:  l.Name,
			Value: l.Value,
		})
	})
	return result
}

// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
	mt := strings.ToUpper(string(t))
	v, ok := MetricMetadata_MetricType_value[mt]
	if !ok {
		return MetricMetadata_UNKNOWN
	}
	return MetricMetadata_MetricType(v)
}

// IsFloatHistogram returns true if the histogram is float.
func (h Histogram) IsFloatHistogram() bool {
	_, ok := h.GetCount().(*Histogram_CountFloat)
	return ok
}

// ToIntHistogram returns integer Prometheus histogram from the remote implementation
// of integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
	if h.IsFloatHistogram() {
		return nil
	}
	return &histogram.Histogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        h.GetZeroCountInt(),
		Count:            h.GetCountInt(),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  h.GetPositiveDeltas(),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  h.GetNegativeDeltas(),
	}
}

// ToFloatHistogram returns float Prometheus histogram from the remote implementation
// of float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
	if h.IsFloatHistogram() {
		return &histogram.FloatHistogram{
			CounterResetHint: histogram.CounterResetHint(h.ResetHint),
			Schema:           h.Schema,
			ZeroThreshold:    h.ZeroThreshold,
			ZeroCount:        h.GetZeroCountFloat(),
			Count:            h.GetCountFloat(),
			Sum:              h.Sum,
			PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
			PositiveBuckets:  h.GetPositiveCounts(),
			NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
			NegativeBuckets:  h.GetNegativeCounts(),
		}
	}
	// Conversion from integer histogram.
	return &histogram.FloatHistogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        float64(h.GetZeroCountInt()),
		Count:            float64(h.GetCountInt()),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  deltasToCounts(h.GetPositiveDeltas()),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  deltasToCounts(h.GetNegativeDeltas()),
	}
}

func spansProtoToSpans(s []BucketSpan) []histogram.Span {
	spans := make([]histogram.Span, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

// FromIntHistogram returns remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountInt{CountInt: h.Count},
		Sum:            h.Sum,
		Schema:         h.Schema,
		ZeroThreshold:  h.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
		NegativeDeltas: h.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
		PositiveDeltas: h.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
		Timestamp:      timestamp,
	}
}

// FromFloatHistogram returns remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
		Sum:            fh.Sum,
		Schema:         fh.Schema,
		ZeroThreshold:  fh.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
		NegativeCounts: fh.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
		PositiveCounts: fh.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
		Timestamp:      timestamp,
	}
}

func spansToSpansProto(s []histogram.Span) []BucketSpan {
	spans := make([]BucketSpan, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

// ToExemplar converts remote exemplar to model exemplar.
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar {
	timestamp := m.Timestamp

	return exemplar.Exemplar{
		Labels: labelProtosToLabels(b, m.GetLabels()),
		Value:  m.Value,
		Ts:     timestamp,
		HasTs:  timestamp != 0,
	}
}
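A detail of the deleted codec worth keeping in mind when reading remote-write histograms: integer histograms carry per-bucket deltas (each bucket relative to the previous one), while float histograms carry absolute counts, so the int-to-float path above runs a prefix sum. A tiny standalone illustration of the helper, together with its inverse (the inverse is ours, for illustration only):

package main

import "fmt"

// deltasToCounts mirrors the helper in the deleted file: prefix-sum the
// per-bucket deltas into absolute counts.
func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

// countsToDeltas reverses the transformation for integer-valued counts.
func countsToDeltas(counts []float64) []int64 {
	deltas := make([]int64, len(counts))
	prev := 0.0
	for i, c := range counts {
		deltas[i] = int64(c - prev)
		prev = c
	}
	return deltas
}

func main() {
	d := []int64{2, 1, -1, 0}
	c := deltasToCounts(d)
	fmt.Println(c, countsToDeltas(c)) // [2 3 2 2] [2 1 -1 0]
}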
vendor/github.com/prometheus/prometheus/prompb/custom.go (generated, vendored): 8 changed lines

@@ -17,6 +17,14 @@ import (
	"sync"
)

func (m Sample) T() int64   { return m.Timestamp }
func (m Sample) V() float64 { return m.Value }

func (h Histogram) IsFloatHistogram() bool {
	_, ok := h.GetCount().(*Histogram_CountFloat)
	return ok
}

func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
	size := r.Size()
	data, ok := p.Get().(*[]byte)
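PooledMarshal, whose opening lines are shown above, amortizes the marshal buffer through a sync.Pool. A minimal sketch of that pattern (our code, with a stand-in payload instead of the real protobuf Marshal):

package main

import (
	"fmt"
	"sync"
)

// Pooling a *[]byte rather than a []byte avoids an allocation on Put.
var bufPool = sync.Pool{
	New: func() any { b := make([]byte, 0, 1024); return &b },
}

// marshalPooled serializes payload into a pooled buffer. The caller must
// return the buffer to the pool once the result is no longer referenced.
func marshalPooled(payload string) (*[]byte, []byte) {
	bp := bufPool.Get().(*[]byte)
	out := append((*bp)[:0], payload...) // stand-in for the real Marshal
	return bp, out
}

func main() {
	bp, out := marshalPooled("chunked read response")
	fmt.Println(string(out))
	bufPool.Put(bp) // recycle the backing buffer
}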
vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go (generated, vendored): 216 changed lines (file deleted)

@@ -1,216 +0,0 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
)

// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.

// ToLabels return model labels.Labels from timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
	return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
}

// ToMetadata return model metadata from timeseries' remote metadata.
func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata {
	typ := model.MetricTypeUnknown
	switch m.Metadata.Type {
	case Metadata_METRIC_TYPE_COUNTER:
		typ = model.MetricTypeCounter
	case Metadata_METRIC_TYPE_GAUGE:
		typ = model.MetricTypeGauge
	case Metadata_METRIC_TYPE_HISTOGRAM:
		typ = model.MetricTypeHistogram
	case Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
		typ = model.MetricTypeGaugeHistogram
	case Metadata_METRIC_TYPE_SUMMARY:
		typ = model.MetricTypeSummary
	case Metadata_METRIC_TYPE_INFO:
		typ = model.MetricTypeInfo
	case Metadata_METRIC_TYPE_STATESET:
		typ = model.MetricTypeStateset
	}
	return metadata.Metadata{
		Type: typ,
		Unit: symbols[m.Metadata.UnitRef],
		Help: symbols[m.Metadata.HelpRef],
	}
}

// FromMetadataType transforms a Prometheus metricType into writev2 metricType.
// Since the former is a string we need to transform it to an enum.
func FromMetadataType(t model.MetricType) Metadata_MetricType {
	switch t {
	case model.MetricTypeCounter:
		return Metadata_METRIC_TYPE_COUNTER
	case model.MetricTypeGauge:
		return Metadata_METRIC_TYPE_GAUGE
	case model.MetricTypeHistogram:
		return Metadata_METRIC_TYPE_HISTOGRAM
	case model.MetricTypeGaugeHistogram:
		return Metadata_METRIC_TYPE_GAUGEHISTOGRAM
	case model.MetricTypeSummary:
		return Metadata_METRIC_TYPE_SUMMARY
	case model.MetricTypeInfo:
		return Metadata_METRIC_TYPE_INFO
	case model.MetricTypeStateset:
		return Metadata_METRIC_TYPE_STATESET
	default:
		return Metadata_METRIC_TYPE_UNSPECIFIED
	}
}

// IsFloatHistogram returns true if the histogram is float.
func (h Histogram) IsFloatHistogram() bool {
	_, ok := h.GetCount().(*Histogram_CountFloat)
	return ok
}

// ToIntHistogram returns integer Prometheus histogram from the remote implementation
// of integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
	if h.IsFloatHistogram() {
		return nil
	}
	return &histogram.Histogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        h.GetZeroCountInt(),
		Count:            h.GetCountInt(),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  h.GetPositiveDeltas(),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  h.GetNegativeDeltas(),
		CustomValues:     h.GetCustomValues(),
	}
}

// ToFloatHistogram returns float Prometheus histogram from the remote implementation
// of float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
	if h.IsFloatHistogram() {
		return &histogram.FloatHistogram{
			CounterResetHint: histogram.CounterResetHint(h.ResetHint),
			Schema:           h.Schema,
			ZeroThreshold:    h.ZeroThreshold,
			ZeroCount:        h.GetZeroCountFloat(),
			Count:            h.GetCountFloat(),
			Sum:              h.Sum,
			PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
			PositiveBuckets:  h.GetPositiveCounts(),
			NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
			NegativeBuckets:  h.GetNegativeCounts(),
			CustomValues:     h.GetCustomValues(),
		}
	}
	// Conversion from integer histogram.
	return &histogram.FloatHistogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        float64(h.GetZeroCountInt()),
		Count:            float64(h.GetCountInt()),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  deltasToCounts(h.GetPositiveDeltas()),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  deltasToCounts(h.GetNegativeDeltas()),
		CustomValues:     h.GetCustomValues(),
	}
}

func spansProtoToSpans(s []BucketSpan) []histogram.Span {
	spans := make([]histogram.Span, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

// FromIntHistogram returns remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountInt{CountInt: h.Count},
		Sum:            h.Sum,
		Schema:         h.Schema,
		ZeroThreshold:  h.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
		NegativeDeltas: h.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
		PositiveDeltas: h.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
		CustomValues:   h.CustomValues,
		Timestamp:      timestamp,
	}
}

// FromFloatHistogram returns remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
		Sum:            fh.Sum,
		Schema:         fh.Schema,
		ZeroThreshold:  fh.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
		NegativeCounts: fh.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
		PositiveCounts: fh.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
		CustomValues:   fh.CustomValues,
		Timestamp:      timestamp,
	}
}

func spansToSpansProto(s []histogram.Span) []BucketSpan {
	spans := make([]BucketSpan, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar {
	timestamp := m.Timestamp

	return exemplar.Exemplar{
		Labels: desymbolizeLabels(b, m.LabelsRefs, symbols),
		Value:  m.Value,
		Ts:     timestamp,
		HasTs:  timestamp != 0,
	}
}
vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/custom.go (generated, vendored): 165 changed lines (file deleted)

@@ -1,165 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
	"slices"
)

func (m Sample) T() int64   { return m.Timestamp }
func (m Sample) V() float64 { return m.Value }

func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) {
	siz := m.Size()
	if cap(dst) < siz {
		dst = make([]byte, siz)
	}
	n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz])
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}

// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but calls OptimizedMarshalToSizedBuffer on the timeseries.
func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Timeseries) > 0 {
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x2a
		}
	}
	if len(m.Symbols) > 0 {
		for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Symbols[iNdEx])
			copy(dAtA[i:], m.Symbols[iNdEx])
			i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
			i--
			dAtA[i] = 0x22
		}
	}
	return len(dAtA) - i, nil
}

// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but marshals m.LabelsRefs in place without extra allocations.
func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.CreatedTimestamp != 0 {
		i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
		i--
		dAtA[i] = 0x30
	}
	{
		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintTypes(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x2a
	if len(m.Histograms) > 0 {
		for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a
		}
	}
	if len(m.Exemplars) > 0 {
		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x22
		}
	}
	if len(m.Samples) > 0 {
		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}

	if len(m.LabelsRefs) > 0 {
		// This is the trick: encode the varints in reverse order to make it easier
		// to do it in place. Then reverse the whole thing.
		var j10 int
		start := i
		for _, num := range m.LabelsRefs {
			for num >= 1<<7 {
				dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				i--
				j10++
			}
			dAtA[i-1] = uint8(num)
			i--
			j10++
		}
		slices.Reverse(dAtA[i:start])
		// --- end of trick

		i = encodeVarintTypes(dAtA, i, uint64(j10))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
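The "This is the trick" block in the deleted TimeSeries marshaller deserves a closer look: protobuf sized buffers are filled back-to-front, but varints must be read front-to-back, so the code emits each varint's bytes in reverse and then reverses the whole packed region once. A standalone sketch of the same idea (our simplification, not the generated code):

package main

import (
	"fmt"
	"slices"
)

// packReversed writes nums as varints going backwards from position end in
// dst, byte-reversed, then flips the written region so it reads as a normal
// front-to-back varint stream. Returns the start index of the region.
func packReversed(dst []byte, end int, nums []uint64) int {
	i := end
	for _, num := range nums {
		for num >= 1<<7 {
			i--
			dst[i] = byte(num&0x7f | 0x80)
			num >>= 7
		}
		i--
		dst[i] = byte(num)
	}
	slices.Reverse(dst[i:end])
	return i
}

func main() {
	buf := make([]byte, 16)
	start := packReversed(buf, len(buf), []uint64{1, 300, 7})
	fmt.Printf("% x\n", buf[start:]) // 01 ac 02 07: the values 1, 300, 7 in order
}

After the single Reverse, multi-byte varints come out with their continuation byte first, exactly as a standard decoder expects, and no scratch buffer was needed.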
vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/symbols.go (generated, vendored): 83 changed lines (file deleted)

@@ -1,83 +0,0 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import "github.com/prometheus/prometheus/model/labels"

// SymbolsTable implements table for easy symbol use.
type SymbolsTable struct {
	strings    []string
	symbolsMap map[string]uint32
}

// NewSymbolTable returns a symbol table.
func NewSymbolTable() SymbolsTable {
	return SymbolsTable{
		// Empty string is required as a first element.
		symbolsMap: map[string]uint32{"": 0},
		strings:    []string{""},
	}
}

// Symbolize adds (if not added before) a string to the symbols table,
// while returning its reference number.
func (t *SymbolsTable) Symbolize(str string) uint32 {
	if ref, ok := t.symbolsMap[str]; ok {
		return ref
	}
	ref := uint32(len(t.strings))
	t.strings = append(t.strings, str)
	t.symbolsMap[str] = ref
	return ref
}

// SymbolizeLabels symbolize Prometheus labels.
func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
	result := buf[:0]
	lbls.Range(func(l labels.Label) {
		off := t.Symbolize(l.Name)
		result = append(result, off)
		off = t.Symbolize(l.Value)
		result = append(result, off)
	})
	return result
}

// Symbols returns computes symbols table to put in e.g. Request.Symbols.
// As per spec, order does not matter.
func (t *SymbolsTable) Symbols() []string {
	return t.strings
}

// Reset clears symbols table.
func (t *SymbolsTable) Reset() {
	// NOTE: Make sure to keep empty symbol.
	t.strings = t.strings[:1]
	for k := range t.symbolsMap {
		if k == "" {
			continue
		}
		delete(t.symbolsMap, k)
	}
}

// desymbolizeLabels decodes label references, with given symbols to labels.
func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
	b.Reset()
	for i := 0; i < len(labelRefs); i += 2 {
		b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
	}
	b.Sort()
	return b.Labels()
}
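The deleted SymbolsTable is a plain string-interning scheme: every label name and value in a write request is stored once in a symbols array and referenced by index, with index 0 reserved for the empty string. A self-contained sketch of the round trip (our types; the real code builds labels.Labels through a ScratchBuilder):

package main

import "fmt"

type symbols struct {
	strings []string
	index   map[string]uint32
}

func newSymbols() *symbols {
	// Index 0 is reserved for "" so optional refs can default to "empty".
	return &symbols{strings: []string{""}, index: map[string]uint32{"": 0}}
}

// ref interns s and returns its index into the symbols array.
func (t *symbols) ref(s string) uint32 {
	if r, ok := t.index[s]; ok {
		return r
	}
	r := uint32(len(t.strings))
	t.strings = append(t.strings, s)
	t.index[s] = r
	return r
}

func main() {
	t := newSymbols()
	// Symbolize name/value pairs as a flat ref list, like labels_refs.
	refs := []uint32{t.ref("__name__"), t.ref("up"), t.ref("job"), t.ref("node")}

	// Desymbolize: consecutive (name, value) index pairs.
	for i := 0; i < len(refs); i += 2 {
		fmt.Printf("%s=%q\n", t.strings[refs[i]], t.strings[refs[i+1]])
	}
}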
vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go (generated, vendored): 3241 changed lines. File diff suppressed because it is too large.
260
vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.proto
generated
vendored
260
vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.proto
generated
vendored
@@ -1,260 +0,0 @@
|
||||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2
|
||||
|
||||
syntax = "proto3";
|
||||
package io.prometheus.write.v2;
|
||||
|
||||
option go_package = "writev2";
|
||||
|
||||
import "gogoproto/gogo.proto";
|
||||
|
||||
// Request represents a request to write the given timeseries to a remote destination.
|
||||
// This message was introduced in the Remote Write 2.0 specification:
|
||||
// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
|
||||
//
|
||||
// The canonical Content-Type request header value for this message is
|
||||
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
|
||||
//
|
||||
// NOTE: gogoproto options might change in future for this file, they
|
||||
// are not part of the spec proto (they only modify the generated Go code, not
|
||||
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
|
||||
message Request {
|
||||
// Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message
|
||||
// for the deterministic interop between those two, see types_test.go for details.
|
||||
// Generally it's not needed, because Receivers must use the Content-Type header, but we want to
|
||||
// be sympathetic to adopters with mistaken implementations and have deterministic error (empty
|
||||
// message if you use the wrong proto schema).
|
||||
reserved 1 to 3;
|
||||
|
||||
// symbols contains a de-duplicated array of string elements used for various
|
||||
// items in a Request message, like labels and metadata items. For the sender's convenience
|
||||
// around empty values for optional fields like unit_ref, symbols array MUST start with
|
||||
// empty string.
|
||||
//
|
||||
// To decode each of the symbolized strings, referenced, by "ref(s)" suffix, you
|
||||
// need to lookup the actual string by index from symbols array. The order of
|
||||
// strings is up to the sender. The receiver should not assume any particular encoding.
|
||||
repeated string symbols = 4;
|
||||
// timeseries represents an array of distinct series with 0 or more samples.
|
||||
repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// TimeSeries represents a single series.
|
||||
message TimeSeries {
|
||||
// labels_refs is a list of label name-value pair references, encoded
|
||||
// as indices to the Request.symbols array. This list's length is always
|
||||
// a multiple of two, and the underlying labels should be sorted lexicographically.
|
||||
//
|
||||
// Note that there might be multiple TimeSeries objects in the same
|
||||
// Requests with the same labels e.g. for different exemplars, metadata
|
||||
// or created timestamp.
|
||||
repeated uint32 labels_refs = 1;
|
||||
|
||||
// Timeseries messages can either specify samples or (native) histogram samples
|
||||
// (histogram field), but not both. For a typical sender (real-time metric
|
||||
// streaming), in healthy cases, there will be only one sample or histogram.
|
||||
//
|
||||
// Samples and histograms are sorted by timestamp (older first).
|
||||
repeated Sample samples = 2 [(gogoproto.nullable) = false];
|
||||
repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
|
||||
|
||||
// exemplars represents an optional set of exemplars attached to this series' samples.
|
||||
repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
|
||||
|
||||
// metadata represents the metadata associated with the given series' samples.
|
||||
Metadata metadata = 5 [(gogoproto.nullable) = false];
|
||||
|
||||
// created_timestamp represents an optional created timestamp associated with
|
||||
// this series' samples in ms format, typically for counter or histogram type
|
||||
// metrics. Created timestamp represents the time when the counter started
|
||||
// counting (sometimes referred to as start timestamp), which can increase
|
||||
// the accuracy of query results.
|
||||
//
|
||||
// Note that some receivers might require this and in return fail to
|
||||
// ingest such samples within the Request.
|
||||
//
|
||||
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
|
||||
// for conversion from/to time.Time to Prometheus timestamp.
|
||||
//
|
||||
// Note that the "optional" keyword is omitted due to
|
||||
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
|
||||
// Zero value means value not set. If you need to use exactly zero value for
|
||||
// the timestamp, use 1 millisecond before or after.
|
||||
int64 created_timestamp = 6;
|
||||
}
|
||||
|
||||
// Exemplar is an additional information attached to some series' samples.
|
||||
// It is typically used to attach an example trace or request ID associated with
|
||||
// the metric changes.
|
||||
message Exemplar {
|
||||
// labels_refs is an optional list of label name-value pair references, encoded
|
||||
// as indices to the Request.symbols array. This list's len is always
|
||||
// a multiple of 2, and the underlying labels should be sorted lexicographically.
|
||||
// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
|
||||
repeated uint32 labels_refs = 1;
|
||||
// value represents an exact example value. This can be useful when the exemplar
|
||||
// is attached to a histogram, which only gives an estimated value through buckets.
|
||||
double value = 2;
|
||||
// timestamp represents an optional timestamp of the sample in ms.
|
||||
//
|
||||
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
|
||||
// for conversion from/to time.Time to Prometheus timestamp.
|
||||
//
|
||||
// Note that the "optional" keyword is omitted due to
|
||||
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
|
||||
// Zero value means value not set. If you need to use exactly zero value for
|
||||
// the timestamp, use 1 millisecond before or after.
|
||||
int64 timestamp = 3;
|
||||
}

// Sample represents series sample.
message Sample {
  // value of the sample.
  double value = 1;
  // timestamp represents timestamp of the sample in ms.
  //
  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
  // for conversion from/to time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

// Metadata represents the metadata associated with the given series' samples.
message Metadata {
  enum MetricType {
    METRIC_TYPE_UNSPECIFIED    = 0;
    METRIC_TYPE_COUNTER        = 1;
    METRIC_TYPE_GAUGE          = 2;
    METRIC_TYPE_HISTOGRAM      = 3;
    METRIC_TYPE_GAUGEHISTOGRAM = 4;
    METRIC_TYPE_SUMMARY        = 5;
    METRIC_TYPE_INFO           = 6;
    METRIC_TYPE_STATESET       = 7;
  }
  MetricType type = 1;
  // help_ref is a reference to the Request.symbols array representing help
  // text for the metric. Help is optional, reference should point to an empty string in
  // such a case.
  uint32 help_ref = 3;
  // unit_ref is a reference to the Request.symbols array representing a unit
  // for the metric. Unit is optional, reference should point to an empty string in
  // such a case.
  uint32 unit_ref = 4;
}

// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both, the usual
// integer histogram as well as a float histogram.
message Histogram {
  enum ResetHint {
    RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
    RESET_HINT_YES         = 1; // This is the 1st histogram after a counter reset.
    RESET_HINT_NO          = 2; // There was no counter reset between this and the previous Histogram.
    RESET_HINT_GAUGE       = 3; // This is a gauge histogram where counter resets don't happen.
  }

  oneof count { // Count of observations in the histogram.
    uint64 count_int   = 1;
    double count_float = 2;
  }
  double sum = 3; // Sum of observations in the histogram.

  // The schema defines the bucket schema. Currently, valid numbers
  // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
  // added in future for new bucketing layouts.
  //
  // The schema equal to -53 means custom buckets. See
  // custom_values field description for more details.
  //
  // Values between -4 and 8 represent base-2 bucket schema, where 1
  // is a bucket boundary in each case, and then each power of two is
  // divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
  // each bucket boundary is the previous boundary times 2^(2^-n).
  sint32 schema = 4;
  double zero_threshold = 5; // Breadth of the zero bucket.
  oneof zero_count { // Count in zero bucket.
    uint64 zero_count_int   = 6;
    double zero_count_float = 7;
  }

  // Negative Buckets.
  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
  // Use either "negative_deltas" or "negative_counts", the former for
  // regular histograms with integer counts, the latter for
  // float histograms.
  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_counts = 10; // Absolute count of each bucket.

  // Positive Buckets.
  //
  // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
  // * The span offset+length points to the index of the custom_values array
  //   or +Inf if pointing to the len of the array.
  // * The counts and deltas have the same meaning as for exponential histograms.
  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
  // Use either "positive_deltas" or "positive_counts", the former for
  // regular histograms with integer counts, the latter for
  // float histograms.
  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_counts = 13; // Absolute count of each bucket.

  ResetHint reset_hint = 14;
  // timestamp represents timestamp of the sample in ms.
  //
  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
  // for conversion from/to time.Time to Prometheus timestamp.
  int64 timestamp = 15;

  // custom_values is an additional field used by non-exponential bucketing layouts.
  //
  // For custom buckets (-53 schema value) custom_values specify monotonically
  // increasing upper inclusive boundaries for the bucket counts with arbitrary
  // widths for this histogram. In other words, custom_values represents custom,
  // explicit bucketing that could have been converted from the classic histograms.
  //
  // Those bounds are then referenced by spans in positive_spans with corresponding positive
  // counts of deltas (refer to positive_spans for more details). This way we can
  // encode sparse histograms with custom bucketing (many buckets are often
  // not used).
  //
  // Note that for custom bounds, even negative observations are placed in the positive
  // counts to simplify the implementation and avoid ambiguity of where to place
  // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
  // the zero bucket are unused, if the schema indicates custom bucketing.
  //
  // For each upper boundary the previous boundary represents the lower exclusive
  // boundary for that bucket. The first element is the upper inclusive boundary
  // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
  // This is similar to "le" label semantics on classic histograms. You may add a
  // bucket with an upper bound of 0 to make sure that you really have no negative
  // observations, but in practice, native histogram rendering will show both with
  // or without first upper boundary 0 and no negative counts as the same case.
  //
  // The last element is not only the upper inclusive bound of the last regular
  // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
  repeated double custom_values = 16;
}
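A small sketch (not from the diff) of the exponential bucket layout described on the schema field above: 1 is always a boundary, and each boundary is the previous one times 2^(2^-schema):

package main

import (
    "fmt"
    "math"
)

func main() {
    schema := 2 // exponential schemas are -4..8 per the comment above
    growth := math.Pow(2, math.Pow(2, -float64(schema))) // 2^(2^-schema), about 1.19 for schema 2
    bound := 1.0
    for i := 0; i < 4; i++ {
        next := bound * growth
        fmt.Printf("bucket %d: (%g, %g]\n", i, bound, next)
        bound = next
    }
}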

// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}
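A sketch of how spans plus deltas decode into absolute bucket counts (made-up data, using the offset/length semantics above):

package main

import "fmt"

type span struct { // mirrors BucketSpan
    offset int32  // gap to the previous span (start index for the first span)
    length uint32 // number of consecutive buckets
}

func main() {
    spans := []span{{offset: 0, length: 2}, {offset: 3, length: 1}}
    deltas := []int64{2, 1, -2} // one delta per bucket, across all spans
    idx, count, d := int32(0), int64(0), 0
    for _, s := range spans {
        idx += s.offset
        for i := uint32(0); i < s.length; i++ {
            count += deltas[d]
            fmt.Printf("bucket index %d: count %d\n", idx, count)
            idx++
            d++
        }
    }
    // Prints buckets at indices 0, 1 and 5 with counts 2, 3 and 1.
}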
8  vendor/github.com/prometheus/prometheus/scrape/manager.go  generated  vendored
@@ -73,11 +73,9 @@ type Options struct {
    // Option used by downstream scraper users like OpenTelemetry Collector
    // to help lookup metric metadata. Should be false for Prometheus.
    PassMetadataInContext bool
    // Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders
    // can decide what to do with metadata, but for practical purposes this flag exists so that metadata
    // can be written to the WAL and thus read for remote write.
    // TODO: implement some form of metadata storage
    AppendMetadata bool
    // Option to enable the experimental in-memory metadata storage and append
    // metadata to the WAL.
    EnableMetadataStorage bool
    // Option to increase the interval used by scrape manager to throttle target groups updates.
    DiscoveryReloadInterval model.Duration
    // Option to enable the ingestion of the created timestamp as a synthetic zero sample.
10  vendor/github.com/prometheus/prometheus/scrape/metrics.go  generated  vendored
@@ -34,7 +34,6 @@ type scrapeMetrics struct {
    targetScrapePoolExceededTargetLimit prometheus.Counter
    targetScrapePoolTargetLimit         *prometheus.GaugeVec
    targetScrapePoolTargetsAdded        *prometheus.GaugeVec
    targetScrapePoolSymbolTableItems    *prometheus.GaugeVec
    targetSyncIntervalLength            *prometheus.SummaryVec
    targetSyncFailed                    *prometheus.CounterVec

@@ -130,13 +129,6 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
        },
        []string{"scrape_job"},
    )
    sm.targetScrapePoolSymbolTableItems = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "prometheus_target_scrape_pool_symboltable_items",
            Help: "Current number of symbols in table for this scrape pool.",
        },
        []string{"scrape_job"},
    )
    sm.targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pool_sync_total",
@@ -242,7 +234,6 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
        sm.targetScrapePoolExceededTargetLimit,
        sm.targetScrapePoolTargetLimit,
        sm.targetScrapePoolTargetsAdded,
        sm.targetScrapePoolSymbolTableItems,
        sm.targetSyncFailed,
        // Used by targetScraper.
        sm.targetScrapeExceededBodySizeLimit,
@@ -283,7 +274,6 @@ func (sm *scrapeMetrics) Unregister() {
    sm.reg.Unregister(sm.targetScrapePoolExceededTargetLimit)
    sm.reg.Unregister(sm.targetScrapePoolTargetLimit)
    sm.reg.Unregister(sm.targetScrapePoolTargetsAdded)
    sm.reg.Unregister(sm.targetScrapePoolSymbolTableItems)
    sm.reg.Unregister(sm.targetSyncFailed)
    sm.reg.Unregister(sm.targetScrapeExceededBodySizeLimit)
    sm.reg.Unregister(sm.targetScrapeCacheFlushForced)
38  vendor/github.com/prometheus/prometheus/scrape/scrape.go  generated  vendored
@@ -181,7 +181,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
        options.EnableNativeHistogramsIngestion,
        options.EnableCreatedTimestampZeroIngestion,
        options.ExtraMetrics,
        options.AppendMetadata,
        options.EnableMetadataStorage,
        opts.target,
        options.PassMetadataInContext,
        metrics,
@@ -246,7 +246,6 @@ func (sp *scrapePool) stop() {
        sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetScrapePoolSymbolTableItems.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
    }
@@ -274,15 +273,6 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {

    sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))

    sp.restartLoops(reuseCache)
    oldClient.CloseIdleConnections()
    sp.metrics.targetReloadIntervalLength.WithLabelValues(time.Duration(sp.config.ScrapeInterval).String()).Observe(
        time.Since(start).Seconds(),
    )
    return nil
}

func (sp *scrapePool) restartLoops(reuseCache bool) {
    var (
        wg       sync.WaitGroup
        interval = time.Duration(sp.config.ScrapeInterval)
@@ -323,7 +313,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
        client:               sp.client,
        timeout:              timeout,
        bodySizeLimit:        bodySizeLimit,
        acceptHeader:         acceptHeader(sp.config.ScrapeProtocols),
        acceptHeader:         acceptHeader(cfg.ScrapeProtocols),
        acceptEncodingHeader: acceptEncodingHeader(enableCompression),
    }
    newLoop = sp.newLoop(scrapeLoopOptions{
@@ -362,10 +352,11 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
    sp.targetMtx.Unlock()

    wg.Wait()
}
    oldClient.CloseIdleConnections()
    sp.metrics.targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
        time.Since(start).Seconds(),
    )

// Must be called with sp.mtx held.
func (sp *scrapePool) checkSymbolTable() {
    // Here we take steps to clear out the symbol table if it has grown a lot.
    // After waiting some time for things to settle, we take the size of the symbol-table.
    // If, after some more time, the table has grown to twice that size, we start a new one.
@@ -376,10 +367,11 @@ func (sp *scrapePool) checkSymbolTable() {
    } else if sp.symbolTable.Len() > 2*sp.initialSymbolTableLen {
        sp.symbolTable = labels.NewSymbolTable()
        sp.initialSymbolTableLen = 0
        sp.restartLoops(false) // To drop all caches.
    }
    sp.lastSymbolTableCheck = time.Now()
}

    return nil
}

// Sync converts target groups into actual scrape targets and synchronizes
@@ -416,10 +408,8 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
            }
        }
    }
    sp.metrics.targetScrapePoolSymbolTableItems.WithLabelValues(sp.config.JobName).Set(float64(sp.symbolTable.Len()))
    sp.targetMtx.Unlock()
    sp.sync(all)
    sp.checkSymbolTable()

    sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
        time.Since(start).Seconds(),
@@ -673,7 +663,7 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int3
        }
    }

    if maxSchema < histogram.ExponentialSchemaMax {
    if maxSchema < nativeHistogramMaxSchema {
        app = &maxSchemaAppender{
            Appender:  app,
            maxSchema: maxSchema,
@@ -1631,7 +1621,7 @@ loop:
            updateMetadata(lset, true)
        }

        if seriesAlreadyScraped && parsedTimestamp == nil {
        if seriesAlreadyScraped {
            err = storage.ErrDuplicateSampleForTimestamp
        } else {
            if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil {
@@ -1988,10 +1978,10 @@ func pickSchema(bucketFactor float64) int32 {
    }
    floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
    switch {
    case floor >= float64(histogram.ExponentialSchemaMax):
        return histogram.ExponentialSchemaMax
    case floor <= float64(histogram.ExponentialSchemaMin):
        return histogram.ExponentialSchemaMin
    case floor >= float64(nativeHistogramMaxSchema):
        return nativeHistogramMaxSchema
    case floor <= float64(nativeHistogramMinSchema):
        return nativeHistogramMinSchema
    default:
        return int32(floor)
    }
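As a worked example of the arithmetic above (illustrative only): for bucketFactor = 1.1, log2(1.1) is roughly 0.1375, so -log2(log2(1.1)) is roughly 2.86; the floor is 2 and pickSchema returns schema 2, i.e. buckets growing by a factor of 2^(2^-2), roughly 1.19, per step, clamped to the min/max schema constants in the surrounding switch.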
23  vendor/github.com/prometheus/prometheus/scrape/target.go  generated  vendored
@@ -365,26 +365,16 @@ type bucketLimitAppender struct {

func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
    if h != nil {
        // Return with an early error if the histogram has too many buckets and the
        // schema is not exponential, in which case we can't reduce the resolution.
        if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) {
            return 0, errBucketLimit
        }
        for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
            if h.Schema <= histogram.ExponentialSchemaMin {
            if h.Schema == -4 {
                return 0, errBucketLimit
            }
            h = h.ReduceResolution(h.Schema - 1)
        }
    }
    if fh != nil {
        // Return with an early error if the histogram has too many buckets and the
        // schema is not exponential, in which case we can't reduce the resolution.
        if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) {
            return 0, errBucketLimit
        }
        for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
            if fh.Schema <= histogram.ExponentialSchemaMin {
            if fh.Schema == -4 {
                return 0, errBucketLimit
            }
            fh = fh.ReduceResolution(fh.Schema - 1)
@@ -397,6 +387,11 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe
    return ref, nil
}
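In other words, the loop above repeatedly reduces the bucket resolution (each step to h.Schema - 1 doubles the width of every bucket) until the total bucket count fits under app.limit, and only gives up with errBucketLimit when the schema cannot be reduced any further or the layout is not exponential to begin with.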

const (
    nativeHistogramMaxSchema int32 = 8
    nativeHistogramMinSchema int32 = -4
)

type maxSchemaAppender struct {
    storage.Appender

@@ -405,12 +400,12 @@ type maxSchemaAppender struct {

func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
    if h != nil {
        if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
        if h.Schema > app.maxSchema {
            h = h.ReduceResolution(app.maxSchema)
        }
    }
    if fh != nil {
        if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
        if fh.Schema > app.maxSchema {
            fh = fh.ReduceResolution(app.maxSchema)
        }
    }
18  vendor/github.com/prometheus/prometheus/storage/interface.go  generated  vendored
@@ -122,11 +122,11 @@ type MockQuerier struct {
    SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}

func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

@@ -161,12 +161,12 @@ type LabelQuerier interface {
    // It is not safe to use the strings beyond the lifetime of the querier.
    // If matchers are specified the returned result set is reduced
    // to label values of metrics matching the matchers.
    LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
    LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)

    // LabelNames returns all the unique label names present in the block in sorted order.
    // If matchers are specified the returned result set is reduced
    // to label names of metrics matching the matchers.
    LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
    LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)

    // Close releases the resources of the Querier.
    Close() error
@@ -190,9 +190,6 @@ type SelectHints struct {
    Start int64 // Start time in milliseconds for this select.
    End   int64 // End time in milliseconds for this select.

    // Maximum number of results returned. Use a value of 0 to disable.
    Limit int

    Step int64  // Query step size in milliseconds.
    Func string // String representation of surrounding function or aggregation.

@@ -220,13 +217,6 @@ type SelectHints struct {
    DisableTrimming bool
}

// LabelHints specifies hints passed for label reads.
// This is used only as an option for implementation to use.
type LabelHints struct {
    // Maximum number of results returned. Use a value of 0 to disable.
    Limit int
}
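A sketch of the hints-based signature introduced above (a hypothetical helper, assuming q implements this diff's LabelQuerier):

package main

import (
    "context"

    "github.com/prometheus/prometheus/storage"
)

// firstHundredInstances is hypothetical; it caps the result set via LabelHints.
func firstHundredInstances(ctx context.Context, q storage.LabelQuerier) ([]string, error) {
    vals, _, err := q.LabelValues(ctx, "instance", &storage.LabelHints{Limit: 100})
    return vals, err
}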

// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.
5  vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go  generated  vendored
@@ -136,11 +136,6 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist
    return b.it.AtFloatHistogram(nil)
}

// AtT returns the timestamp of the current element of the iterator.
func (b *MemoizedSeriesIterator) AtT() int64 {
    return b.it.AtT()
}

// Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error {
    return b.it.Err()
86  vendor/github.com/prometheus/prometheus/storage/merge.go  generated  vendored
@@ -45,24 +45,19 @@ type mergeGenericQuerier struct {
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
    primaries = filterQueriers(primaries)
    secondaries = filterQueriers(secondaries)

    switch {
    case len(primaries) == 0 && len(secondaries) == 0:
        return noopQuerier{}
    case len(primaries) == 1 && len(secondaries) == 0:
        return primaries[0]
    case len(primaries) == 0 && len(secondaries) == 1:
        return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])}
    if len(primaries)+len(secondaries) == 0 {
        return NoopQuerier()
    }

    queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
    for _, q := range primaries {
        queriers = append(queriers, newGenericQuerierFrom(q))
        if _, ok := q.(noopQuerier); !ok && q != nil {
            queriers = append(queriers, newGenericQuerierFrom(q))
        }
    }
    for _, q := range secondaries {
        queriers = append(queriers, newSecondaryQuerierFrom(q))
        if _, ok := q.(noopQuerier); !ok && q != nil {
            queriers = append(queriers, newSecondaryQuerierFrom(q))
        }
    }

    concurrentSelect := false
@@ -76,40 +71,22 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
    }}
}

func filterQueriers(qs []Querier) []Querier {
    ret := make([]Querier, 0, len(qs))
    for _, q := range qs {
        if _, ok := q.(noopQuerier); !ok && q != nil {
            ret = append(ret, q)
        }
    }
    return ret
}
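Callers therefore no longer need to pre-filter nil or noop queriers themselves; a minimal usage sketch (assuming storage.ChainedSeriesMerge as the merge function):

package main

import "github.com/prometheus/prometheus/storage"

// mergedQuerier is a sketch: nil and noop queriers are filtered out by
// NewMergeQuerier, and a single remaining primary is returned unwrapped.
func mergedQuerier(local, remoteQ storage.Querier) storage.Querier {
    return storage.NewMergeQuerier(
        []storage.Querier{local},
        []storage.Querier{remoteQ},
        storage.ChainedSeriesMerge,
    )
}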

// NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers.
// See NewFanout commentary to learn more about primary vs secondary differences.
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
    primaries = filterChunkQueriers(primaries)
    secondaries = filterChunkQueriers(secondaries)

    switch {
    case len(primaries) == 0 && len(secondaries) == 0:
        return noopChunkQuerier{}
    case len(primaries) == 1 && len(secondaries) == 0:
        return primaries[0]
    case len(primaries) == 0 && len(secondaries) == 1:
        return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])}
    }

    queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
    for _, q := range primaries {
        queriers = append(queriers, newGenericQuerierFromChunk(q))
        if _, ok := q.(noopChunkQuerier); !ok && q != nil {
            queriers = append(queriers, newGenericQuerierFromChunk(q))
        }
    }
    for _, q := range secondaries {
        queriers = append(queriers, newSecondaryQuerierFromChunk(q))
    for _, querier := range secondaries {
        if _, ok := querier.(noopChunkQuerier); !ok && querier != nil {
            queriers = append(queriers, newSecondaryQuerierFromChunk(querier))
        }
    }

    concurrentSelect := false
@@ -123,18 +100,15 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
    }}
}

func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier {
    ret := make([]ChunkQuerier, 0, len(qs))
    for _, q := range qs {
        if _, ok := q.(noopChunkQuerier); !ok && q != nil {
            ret = append(ret, q)
        }
    }
    return ret
}

// Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
    if len(q.queriers) == 0 {
        return noopGenericSeriesSet{}
    }
    if len(q.queriers) == 1 {
        return q.queriers[0].Select(ctx, sortSeries, hints, matchers...)
    }

    seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
    if !q.concurrentSelect {
        for _, querier := range q.queriers {
@@ -187,8 +161,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...)
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    res, ws, err := q.lvals(ctx, q.queriers, name, matchers...)
    if err != nil {
        return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
    }
@@ -196,22 +170,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hint
}

// lvals performs merge sort for LabelValues from multiple queriers.
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    if lq.Len() == 0 {
        return nil, nil, nil
    }
    if lq.Len() == 1 {
        return lq.Get(0).LabelValues(ctx, n, hints, matchers...)
        return lq.Get(0).LabelValues(ctx, n, matchers...)
    }
    a, b := lq.SplitByHalf()

    var ws annotations.Annotations
    s1, w, err := q.lvals(ctx, a, n, hints, matchers...)
    s1, w, err := q.lvals(ctx, a, n, matchers...)
    ws.Merge(w)
    if err != nil {
        return nil, ws, err
    }
    s2, ws, err := q.lvals(ctx, b, n, hints, matchers...)
    s2, ws, err := q.lvals(ctx, b, n, matchers...)
    ws.Merge(w)
    if err != nil {
        return nil, ws, err
@@ -247,13 +221,13 @@ func mergeStrings(a, b []string) []string {
}

// LabelNames returns all the unique label names present in all queriers in sorted order.
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    var (
        labelNamesMap = make(map[string]struct{})
        warnings      annotations.Annotations
    )
    for _, querier := range q.queriers {
        names, wrn, err := querier.LabelNames(ctx, hints, matchers...)
        names, wrn, err := querier.LabelNames(ctx, matchers...)
        if wrn != nil {
            // TODO(bwplotka): We could potentially wrap warnings.
            warnings.Merge(wrn)
8  vendor/github.com/prometheus/prometheus/storage/noop.go  generated  vendored
@@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche
    return NoopSeriesSet()
}

func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

@@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M
    return NoopChunkedSeriesSet()
}

func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}
91  vendor/github.com/prometheus/prometheus/storage/remote/client.go  generated  vendored
@@ -14,6 +14,7 @@
package remote

import (
    "bufio"
    "bytes"
    "context"
    "fmt"
@@ -34,40 +35,13 @@ import (
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/trace"

    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/prompb"
    "github.com/prometheus/prometheus/storage/remote/azuread"
)

const maxErrMsgLen = 1024

const (
    RemoteWriteVersionHeader        = "X-Prometheus-Remote-Write-Version"
    RemoteWriteVersion1HeaderValue  = "0.1.0"
    RemoteWriteVersion20HeaderValue = "2.0.0"
    appProtoContentType             = "application/x-protobuf"
)

// Compression represents the encoding. Currently remote storage supports only
// one, but we experiment with more, thus leaving the compression scaffolding
// for now.
// NOTE(bwplotka): Keeping it public, as a non-stable help for importers to use.
type Compression string

const (
    // SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt
    SnappyBlockCompression Compression = "snappy"
)

var (
    // UserAgent represents Prometheus version to use for user agent header.
    UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

    remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
        config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
        config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
    }
)
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

var (
    remoteReadQueriesTotal = prometheus.NewCounterVec(
@@ -119,9 +93,6 @@ type Client struct {
    readQueries         prometheus.Gauge
    readQueriesTotal    *prometheus.CounterVec
    readQueriesDuration prometheus.Observer

    writeProtoMsg    config.RemoteWriteProtoMsg
    writeCompression Compression // Not exposed by ClientConfig for now.
}

// ClientConfig configures a client.
@@ -133,7 +104,6 @@ type ClientConfig struct {
    AzureADConfig    *azuread.AzureADConfig
    Headers          map[string]string
    RetryOnRateLimit bool
    WriteProtoMsg    config.RemoteWriteProtoMsg
}

// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@@ -192,20 +162,14 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
        }
    }

    writeProtoMsg := config.RemoteWriteProtoMsgV1
    if conf.WriteProtoMsg != "" {
        writeProtoMsg = conf.WriteProtoMsg
    }

    httpClient.Transport = otelhttp.NewTransport(t)

    return &Client{
        remoteName:       name,
        urlString:        conf.URL.String(),
        Client:           httpClient,
        retryOnRateLimit: conf.RetryOnRateLimit,
        timeout:          time.Duration(conf.Timeout),
        writeProtoMsg:    writeProtoMsg,
        writeCompression: SnappyBlockCompression,
    }, nil
}

@@ -234,24 +198,18 @@ type RecoverableError struct {

// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
// and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) {
func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
    httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req))
    if err != nil {
        // Errors from NewRequest are from unparsable URLs, so are not
        // recoverable.
        return WriteResponseStats{}, err
        return err
    }

    httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
    httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg])
    httpReq.Header.Add("Content-Encoding", "snappy")
    httpReq.Header.Set("Content-Type", "application/x-protobuf")
    httpReq.Header.Set("User-Agent", UserAgent)
    if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 {
        // Compatibility mode for 1.0.
        httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
    } else {
        httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
    }

    httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
    if attempt > 0 {
        httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
    }
@@ -266,34 +224,26 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
    if err != nil {
        // Errors from Client.Do are from (for example) network errors, so are
        // recoverable.
        return WriteResponseStats{}, RecoverableError{err, defaultBackoff}
        return RecoverableError{err, defaultBackoff}
    }
    defer func() {
        io.Copy(io.Discard, httpResp.Body)
        httpResp.Body.Close()
    }()

    // TODO(bwplotka): Pass logger and emit debug on error?
    // Parsing error means there were some response header values we can't parse,
    // we can continue handling.
    rs, _ := ParseWriteResponseStats(httpResp)

    //nolint:usestdlibvars
    if httpResp.StatusCode/100 == 2 {
        return rs, nil
    if httpResp.StatusCode/100 != 2 {
        scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
        line := ""
        if scanner.Scan() {
            line = scanner.Text()
        }
        err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
    }

    // Handling errors e.g. read potential error in the body.
    // TODO(bwplotka): Pass logger and emit debug on error?
    body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
    err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)

    //nolint:usestdlibvars
    if httpResp.StatusCode/100 == 5 ||
        (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
        return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
        return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
    }
    return rs, err
    return err
}
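A sketch (not from the diff) of honoring Store's error contract, using the two-result signature on the new side of the diff; RecoverableError marks network failures, 5xx and, when enabled, 429 responses:

package main

import (
    "context"
    "errors"
    "time"

    "github.com/prometheus/prometheus/storage/remote"
)

func storeWithRetry(ctx context.Context, c remote.WriteClient, payload []byte) error {
    var rec remote.RecoverableError
    for attempt := 0; attempt < 3; attempt++ {
        _, err := c.Store(ctx, payload, attempt)
        if err == nil || !errors.As(err, &rec) {
            return err // success, or a non-recoverable failure
        }
        time.Sleep(time.Second << attempt) // simplistic backoff for the sketch
    }
    return errors.New("remote write: retries exhausted")
}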

// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it
@@ -313,12 +263,12 @@ func retryAfterDuration(t string) model.Duration {
}

// Name uniquely identifies the client.
func (c *Client) Name() string {
func (c Client) Name() string {
    return c.remoteName
}

// Endpoint is the remote read or write endpoint.
func (c *Client) Endpoint() string {
func (c Client) Endpoint() string {
    return c.urlString
}

@@ -373,7 +323,6 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
        return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
    }

    //nolint:usestdlibvars
    if httpResp.StatusCode/100 != 2 {
        return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
    }
224  vendor/github.com/prometheus/prometheus/storage/remote/codec.go  generated  vendored
@@ -22,6 +22,7 @@ import (
    "net/http"
    "slices"
    "sort"
    "strings"
    "sync"

    "github.com/gogo/protobuf/proto"
@@ -29,10 +30,10 @@ import (
    "github.com/prometheus/common/model"
    "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"

    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/prompb"
    writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
    "github.com/prometheus/prometheus/tsdb/chunks"
@@ -94,7 +95,7 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error

// ToQuery builds a Query proto.
func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) {
    ms, err := ToLabelMatchers(matchers)
    ms, err := toLabelMatchers(matchers)
    if err != nil {
        return nil, err
    }
@@ -152,10 +153,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
            })
        case chunkenc.ValHistogram:
            ts, h := iter.AtHistogram(nil)
            histograms = append(histograms, prompb.FromIntHistogram(ts, h))
            histograms = append(histograms, HistogramToHistogramProto(ts, h))
        case chunkenc.ValFloatHistogram:
            ts, fh := iter.AtFloatHistogram(nil)
            histograms = append(histograms, prompb.FromFloatHistogram(ts, fh))
            histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
        default:
            return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
        }
@@ -165,7 +166,7 @@
        }

        resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
            Labels:     prompb.FromLabels(series.Labels(), nil),
            Labels:     labelsToLabelsProto(series.Labels(), nil),
            Samples:    samples,
            Histograms: histograms,
        })
@@ -181,7 +182,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
        if err := validateLabelsAndMetricName(ts.Labels); err != nil {
            return errSeriesSet{err: err}
        }
        lbls := ts.ToLabels(&b, nil)
        lbls := labelProtosToLabels(&b, ts.Labels)
        series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
    }

@@ -234,7 +235,7 @@ func StreamChunkedReadResponses(
    for ss.Next() {
        series := ss.At()
        iter = series.Iterator(iter)
        lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels)
        lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)

        maxDataLength := maxBytesInFrame
        for _, lbl := range lbls {
@@ -480,16 +481,21 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
        panic("iterator is not on an integer histogram sample")
    }
    h := c.series.histograms[c.histogramsCur]
    return h.Timestamp, h.ToIntHistogram()
    return h.Timestamp, HistogramProtoToHistogram(h)
}

// AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
    if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
    switch c.curValType {
    case chunkenc.ValHistogram:
        fh := c.series.histograms[c.histogramsCur]
        return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
        return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
    case chunkenc.ValFloatHistogram:
        fh := c.series.histograms[c.histogramsCur]
        return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
    default:
        panic("iterator is not on a histogram sample")
    }
    panic("iterator is not on a histogram sample")
}

// AtT implements chunkenc.Iterator.
@@ -560,8 +566,7 @@ func validateLabelsAndMetricName(ls []prompb.Label) error {
    return nil
}

// ToLabelMatchers converts Prometheus label matchers to protobuf label matchers.
func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
    pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers))
    for _, m := range matchers {
        var mType prompb.LabelMatcher_Type
@@ -586,7 +591,7 @@ func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error)
    return pbMatchers, nil
}

// FromLabelMatchers converts protobuf label matchers to Prometheus label matchers.
// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers.
func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) {
    result := make([]*labels.Matcher, 0, len(matchers))
    for _, matcher := range matchers {
@@ -612,6 +617,141 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
    return result, nil
}

func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemplar.Exemplar {
    timestamp := ep.Timestamp

    return exemplar.Exemplar{
        Labels: labelProtosToLabels(b, ep.Labels),
        Value:  ep.Value,
        Ts:     timestamp,
        HasTs:  timestamp != 0,
    }
}

// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
// provided proto message. The caller has to make sure that the proto message
// represents an integer histogram and not a float histogram, or it panics.
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
    if hp.IsFloatHistogram() {
        panic("HistogramProtoToHistogram called with a float histogram")
    }
    return &histogram.Histogram{
        CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
        Schema:           hp.Schema,
        ZeroThreshold:    hp.ZeroThreshold,
        ZeroCount:        hp.GetZeroCountInt(),
        Count:            hp.GetCountInt(),
        Sum:              hp.Sum,
        PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
        PositiveBuckets:  hp.GetPositiveDeltas(),
        NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
        NegativeBuckets:  hp.GetNegativeDeltas(),
    }
}

// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
// provided proto message to a Float Histogram. The caller has to make sure that
// the proto message represents a float histogram and not an integer histogram,
// or it panics.
func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
    if !hp.IsFloatHistogram() {
        panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
    }
    return &histogram.FloatHistogram{
        CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
        Schema:           hp.Schema,
        ZeroThreshold:    hp.ZeroThreshold,
        ZeroCount:        hp.GetZeroCountFloat(),
        Count:            hp.GetCountFloat(),
        Sum:              hp.Sum,
        PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
        PositiveBuckets:  hp.GetPositiveCounts(),
        NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
        NegativeBuckets:  hp.GetNegativeCounts(),
    }
}

// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message
// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a
// float histogram, or it panics.
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
    if hp.IsFloatHistogram() {
        panic("HistogramProtoToFloatHistogram called with a float histogram")
    }
    return &histogram.FloatHistogram{
        CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
        Schema:           hp.Schema,
        ZeroThreshold:    hp.ZeroThreshold,
        ZeroCount:        float64(hp.GetZeroCountInt()),
        Count:            float64(hp.GetCountInt()),
        Sum:              hp.Sum,
        PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
        PositiveBuckets:  deltasToCounts(hp.GetPositiveDeltas()),
        NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
        NegativeBuckets:  deltasToCounts(hp.GetNegativeDeltas()),
    }
}

func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
    spans := make([]histogram.Span, len(s))
    for i := 0; i < len(s); i++ {
        spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
    }

    return spans
}

func deltasToCounts(deltas []int64) []float64 {
    counts := make([]float64, len(deltas))
    var cur float64
    for i, d := range deltas {
        cur += float64(d)
        counts[i] = cur
    }
    return counts
}
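For instance (illustrative), deltas [3, -1, 2] decode to absolute counts [3, 2, 4].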

func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
    return prompb.Histogram{
        Count:          &prompb.Histogram_CountInt{CountInt: h.Count},
        Sum:            h.Sum,
        Schema:         h.Schema,
        ZeroThreshold:  h.ZeroThreshold,
        ZeroCount:      &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
        NegativeSpans:  spansToSpansProto(h.NegativeSpans),
        NegativeDeltas: h.NegativeBuckets,
        PositiveSpans:  spansToSpansProto(h.PositiveSpans),
        PositiveDeltas: h.PositiveBuckets,
        ResetHint:      prompb.Histogram_ResetHint(h.CounterResetHint),
        Timestamp:      timestamp,
    }
}

func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
    return prompb.Histogram{
        Count:          &prompb.Histogram_CountFloat{CountFloat: fh.Count},
        Sum:            fh.Sum,
        Schema:         fh.Schema,
        ZeroThreshold:  fh.ZeroThreshold,
        ZeroCount:      &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
        NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
        NegativeCounts: fh.NegativeBuckets,
        PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
        PositiveCounts: fh.PositiveBuckets,
        ResetHint:      prompb.Histogram_ResetHint(fh.CounterResetHint),
        Timestamp:      timestamp,
    }
}

func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
    spans := make([]prompb.BucketSpan, len(s))
    for i := 0; i < len(s); i++ {
        spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
    }

    return spans
}

// LabelProtosToMetric unpacks a []*prompb.Label to a model.Metric.
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
    metric := make(model.Metric, len(labelPairs))
@@ -621,9 +761,41 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
    return metric
}

func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels {
    b.Reset()
    for _, l := range labelPairs {
        b.Add(l.Name, l.Value)
    }
    b.Sort()
    return b.Labels()
}

// labelsToLabelsProto transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
    result := buf[:0]
    lbls.Range(func(l labels.Label) {
        result = append(result, prompb.Label{
            Name:  l.Name,
            Value: l.Value,
        })
    })
    return result
}
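A round-trip sketch of the two helpers above, written as if inside this package (illustrative only):

func exampleLabelsRoundTrip() {
    lbls := labels.FromStrings("__name__", "up", "job", "node")
    pb := labelsToLabelsProto(lbls, nil) // a nil buffer is fine; the slice grows as needed
    b := labels.NewScratchBuilder(len(pb))
    back := labelProtosToLabels(&b, pb)
    fmt.Println(back.String()) // {__name__="up", job="node"}
}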

// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType {
    mt := strings.ToUpper(string(t))
    v, ok := prompb.MetricMetadata_MetricType_value[mt]
    if !ok {
        return prompb.MetricMetadata_UNKNOWN
    }

    return prompb.MetricMetadata_MetricType(v)
}

// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
// snappy decompression.
// Used also by documentation/examples/remote_storage.
func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
    compressed, err := io.ReadAll(r)
    if err != nil {
@@ -643,28 +815,6 @@ func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
    return &req, nil
}

// DecodeWriteV2Request from an io.Reader into a writev2.Request, handling
// snappy decompression.
// Used also by documentation/examples/remote_storage.
func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) {
    compressed, err := io.ReadAll(r)
    if err != nil {
        return nil, err
    }

    reqBuf, err := snappy.Decode(nil, compressed)
    if err != nil {
        return nil, err
    }

    var req writev2.Request
    if err := proto.Unmarshal(reqBuf, &req); err != nil {
        return nil, err
    }

    return &req, nil
}
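A sketch (not from the diff) of a receiver endpoint wiring the helper above into an HTTP handler:

package main

import (
    "net/http"

    "github.com/prometheus/prometheus/storage/remote"
)

func v2WriteHandler(w http.ResponseWriter, r *http.Request) {
    req, err := remote.DecodeWriteV2Request(r.Body)
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    _ = req.Timeseries // resolve symbol refs and append samples here
    w.WriteHeader(http.StatusNoContent)
}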

func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) {
    contentType := r.Header.Get("Content-Type")
    var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error)
6  vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go  generated  vendored
@@ -39,3 +39,9 @@ func (m *maxTimestamp) Get() float64 {
    defer m.mtx.Unlock()
    return m.value
}

func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
    if m.Get() > 0 {
        m.Gauge.Collect(c)
    }
}
4  vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go  generated  vendored
@@ -27,7 +27,7 @@ import (

// MetadataAppender is an interface used by the Metadata Watcher to send metadata. It is read from the scrape manager, on to somewhere else.
type MetadataAppender interface {
    AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
    AppendMetadata(context.Context, []scrape.MetricMetadata)
}

// Watchable represents from where we fetch active targets for metadata.
@@ -146,7 +146,7 @@ func (mw *MetadataWatcher) collect() {
    }

    // Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired.
    mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
    mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata)
}

func (mw *MetadataWatcher) ready() bool {
@@ -29,6 +29,7 @@ import (
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
var unitMap = map[string]string{

    // Time
    "d":   "days",
    "h":   "hours",
@@ -110,6 +111,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix

// Build a normalized name for the specified metric
func normalizeName(metric pmetric.Metric, namespace string) string {

    // Split metric name in "tokens" (remove all non-alphanumeric)
    nameTokens := strings.FieldsFunc(
        metric.Name(),

@@ -19,6 +19,7 @@ package prometheus
import "strings"

var wordToUCUM = map[string]string{

    // Time
    "days":  "d",
    "hours": "h",

@@ -182,13 +182,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
        if i+1 >= len(extras) {
            break
        }

        name := extras[i]
        _, found := l[name]
        _, found := l[extras[i]]
        if found && logOnOverwrite {
            log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
            log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
        }
        // internal labels should be maintained
        name := extras[i]
        if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
            name = prometheustranslator.NormalizeLabel(name)
        }
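The len(name) > 4 guard above keeps Prometheus-internal labels (wrapped in double underscores, e.g. __name__) untouched; everything else goes through prometheustranslator.NormalizeLabel, which rewrites characters that are invalid in Prometheus label names (so a hypothetical OTel attribute like host-name would come out as host_name).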
|
||||

@@ -220,13 +219,6 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
return false
}
// addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series
// as classical histogram samples.
//
// Note that we can't convert to native histograms, since these have exponential buckets and don't line up
// with the user defined bucket boundaries of non-exponential OTel histograms.
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice,
resource pcommon.Resource, settings Settings, baseName string) {
for x := 0; x < dataPoints.Len(); x++ {

@@ -30,18 +30,10 @@ import (
const defaultZeroThreshold = 1e-128
// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
// as native histogram samples.
func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
resource pcommon.Resource, settings Settings, promName string) error {
resource pcommon.Resource, settings Settings, baseName string) error {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
histogram, err := exponentialToNativeHistogram(pt)
if err != nil {
return err
}
lbls := createAttributes(
resource,
pt.Attributes(),

@@ -49,9 +41,14 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
nil,
true,
model.MetricNameLabel,
promName,
baseName,
)
ts, _ := c.getOrCreateTimeSeries(lbls)
histogram, err := exponentialToNativeHistogram(pt)
if err != nil {
return err
}
ts.Histograms = append(ts.Histograms, histogram)
exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)

@@ -61,7 +58,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
return nil
}
// exponentialToNativeHistogram translates OTel Exponential Histogram data point
// to Prometheus Native Histogram.
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) {
scale := p.Scale()
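A hedged sketch of the per-point flow shown above: convert one OTel exponential data point with exponentialToNativeHistogram and append the result to its series. appendOnePoint is a made-up wrapper, and the getOrCreateTimeSeries signature is assumed from the hunk:

    func appendOnePoint(c *PrometheusConverter, pt pmetric.ExponentialHistogramDataPoint, lbls []prompb.Label) error {
        // Conversion can fail (e.g. an unsupported scale), so check before appending.
        h, err := exponentialToNativeHistogram(pt)
        if err != nil {
            return err
        }
        ts, _ := c.getOrCreateTimeSeries(lbls)
        ts.Histograms = append(ts.Histograms, h)
        return nil
    }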

vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go (582 changes, generated, vendored)
@@ -16,7 +16,6 @@ package remote
import (
"context"
"errors"
"fmt"
"math"
"strconv"
"sync"

@@ -36,11 +35,9 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"

@@ -232,7 +229,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_highest_sent_timestamp_seconds",
Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.",
ConstLabels: constLabels,
}),
}

@@ -391,7 +388,7 @@ func (m *queueManagerMetrics) unregister() {
// external timeseries database.
type WriteClient interface {
// Store stores the given samples in the remote storage.
Store(ctx context.Context, req []byte, retryAttempt int) (WriteResponseStats, error)
Store(context.Context, []byte, int) error
// Name uniquely identifies the remote storage.
Name() string
// Endpoint is the remote read or write endpoint for the storage client.
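The WriteClient hunk swaps Store's signature between the two revisions (one returns WriteResponseStats, the other a bare error). A hedged sketch of a minimal implementer against the stats-returning variant; the type and its fields are made up, and bytes, context, and net/http imports are assumed:

    // sketchClient is a hypothetical WriteClient. It POSTs the
    // already-compressed request body and parses the PRW 2.0
    // response headers into WriteResponseStats (see stats.go below).
    type sketchClient struct {
        url    string
        client *http.Client
    }

    func (c *sketchClient) Store(ctx context.Context, req []byte, _ int) (WriteResponseStats, error) {
        httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, bytes.NewReader(req))
        if err != nil {
            return WriteResponseStats{}, err
        }
        resp, err := c.client.Do(httpReq)
        if err != nil {
            return WriteResponseStats{}, err
        }
        defer resp.Body.Close()
        // PRW 2.0 receivers report written counts via response headers.
        return ParseWriteResponseStats(resp)
    }

    func (c *sketchClient) Name() string     { return "sketch" }
    func (c *sketchClient) Endpoint() string { return c.url }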

@@ -420,14 +417,11 @@ type QueueManager struct {
clientMtx sync.RWMutex
storeClient WriteClient
protoMsg config.RemoteWriteProtoMsg
enc Compression
seriesMtx sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder.
seriesLabels map[chunks.HeadSeriesRef]labels.Labels
seriesMetadata map[chunks.HeadSeriesRef]*metadata.Metadata
droppedSeries map[chunks.HeadSeriesRef]struct{}
builder *labels.Builder
seriesMtx sync.Mutex // Covers seriesLabels, droppedSeries and builder.
seriesLabels map[chunks.HeadSeriesRef]labels.Labels
droppedSeries map[chunks.HeadSeriesRef]struct{}
builder *labels.Builder
seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first.
seriesSegmentIndexes map[chunks.HeadSeriesRef]int

@@ -468,7 +462,6 @@ func NewQueueManager(
sm ReadyScrapeManager,
enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool,
protoMsg config.RemoteWriteProtoMsg,
) *QueueManager {
if logger == nil {
logger = log.NewNopLogger()

@@ -493,7 +486,6 @@
sendNativeHistograms: enableNativeHistogramRemoteWrite,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesMetadata: make(map[chunks.HeadSeriesRef]*metadata.Metadata),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
droppedSeries: make(map[chunks.HeadSeriesRef]struct{}),
builder: labels.NewBuilder(labels.EmptyLabels()),

@@ -510,26 +502,9 @@
metrics: metrics,
interner: interner,
highestRecvTimestamp: highestRecvTimestamp,
protoMsg: protoMsg,
enc: SnappyBlockCompression, // Hardcoded for now, but scaffolding exists for likely future use.
}
walMetadata := false
if t.protoMsg != config.RemoteWriteProtoMsgV1 {
walMetadata = true
}
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
// The current MetadataWatcher implementation is mutually exclusive
// with the new approach, which stores metadata as WAL records and
// ships them alongside series. If both mechanisms are set, the new one
// takes precedence by implicitly disabling the older one.
if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 {
level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request")
t.mcfg.Send = false
}
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite)
if t.mcfg.Send {
t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
}

@@ -538,21 +513,14 @@
return t
}
// AppendWatcherMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
// This is only used for the metadata_config.send setting and 1.x Remote Write.
func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
// no op for any newer proto format, which will cache metadata sent to it from the WAL watcher.
if t.protoMsg != config.RemoteWriteProtoMsgV1 {
return
}
// 1.X will still get metadata in batches.
// AppendMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
mm := make([]prompb.MetricMetadata, 0, len(metadata))
for _, entry := range metadata {
mm = append(mm, prompb.MetricMetadata{
MetricFamilyName: entry.Metric,
Help: entry.Help,
Type: prompb.FromMetadataType(entry.Type),
Type: metricTypeToMetricTypeProto(entry.Type),
Unit: entry.Unit,
})
}

@@ -573,8 +541,8 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr
}
func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error {
// Build the WriteRequest with no samples (v1 flow).
req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.enc)
// Build the WriteRequest with no samples.
req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil)
if err != nil {
return err
}

@@ -597,15 +565,14 @@
}
begin := time.Now()
// Ignoring WriteResponseStats, because there is nothing for metadata, since it's
// embedded in v2 calls now, and we do v1 here.
_, err := t.storeClient.Store(ctx, req, try)
err := t.storeClient.Store(ctx, req, try)
t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
if err != nil {
span.RecordError(err)
return err
}
return nil
}

@@ -661,36 +628,6 @@ func isTimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sam
}
}
func isV2TimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts writev2.TimeSeries) bool {
return func(ts writev2.TimeSeries) bool {
if sampleAgeLimit == 0 {
// If sampleAgeLimit is unset, then we never skip samples due to their age.
return false
}
switch {
// Only the first element should be set in the series, therefore we only check the first element.
case len(ts.Samples) > 0:
if isSampleOld(baseTime, sampleAgeLimit, ts.Samples[0].Timestamp) {
metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc()
return true
}
case len(ts.Histograms) > 0:
if isSampleOld(baseTime, sampleAgeLimit, ts.Histograms[0].Timestamp) {
metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc()
return true
}
case len(ts.Exemplars) > 0:
if isSampleOld(baseTime, sampleAgeLimit, ts.Exemplars[0].Timestamp) {
metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc()
return true
}
default:
return false
}
return false
}
}
// Append queues a sample to be sent to the remote storage. Blocks until all samples are
// enqueued on their shards or a shutdown signal is received.
func (t *QueueManager) Append(samples []record.RefSample) bool {
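isV2TimeSeriesOldFilter delegates the actual age comparison to isSampleOld, which this hunk does not show. A hedged sketch of the comparison it performs (millisecond sample timestamps against baseTime minus the limit); the real helper elsewhere in queue_manager.go may differ in detail:

    func isSampleOldSketch(baseTime time.Time, sampleAgeLimit time.Duration, tsMillis int64) bool {
        if sampleAgeLimit == 0 {
            return false // a zero limit means samples never age out
        }
        return time.UnixMilli(tsMillis).Before(baseTime.Add(-sampleAgeLimit))
    }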

@@ -714,9 +651,6 @@
t.seriesMtx.Unlock()
continue
}
// TODO(cstyan): Handle or at least log an error if no metadata is found.
// See https://github.com/prometheus/prometheus/issues/14405
meta := t.seriesMetadata[s.Ref]
t.seriesMtx.Unlock()
// Start with a very small backoff. This should not be t.cfg.MinBackoff
// as it can happen without errors, and we want to pickup work after

@@ -731,7 +665,6 @@
}
if t.shards.enqueue(s.Ref, timeSeries{
seriesLabels: lbls,
metadata: meta,
timestamp: s.T,
value: s.V,
sType: tSample,

@@ -777,7 +710,6 @@
t.seriesMtx.Unlock()
continue
}
meta := t.seriesMetadata[e.Ref]
t.seriesMtx.Unlock()
// This will only loop if the queues are being resharded.
backoff := t.cfg.MinBackoff

@@ -789,7 +721,6 @@
}
if t.shards.enqueue(e.Ref, timeSeries{
seriesLabels: lbls,
metadata: meta,
timestamp: e.T,
value: e.V,
exemplarLabels: e.Labels,

@@ -833,7 +764,6 @@
t.seriesMtx.Unlock()
continue
}
meta := t.seriesMetadata[h.Ref]
t.seriesMtx.Unlock()
backoff := model.Duration(5 * time.Millisecond)

@@ -845,7 +775,6 @@
}
if t.shards.enqueue(h.Ref, timeSeries{
seriesLabels: lbls,
metadata: meta,
timestamp: h.T,
histogram: h.H,
sType: tHistogram,

@@ -888,7 +817,6 @@
t.seriesMtx.Unlock()
continue
}
meta := t.seriesMetadata[h.Ref]
t.seriesMtx.Unlock()
backoff := model.Duration(5 * time.Millisecond)

@@ -900,7 +828,6 @@
}
if t.shards.enqueue(h.Ref, timeSeries{
seriesLabels: lbls,
metadata: meta,
timestamp: h.T,
floatHistogram: h.FH,
sType: tFloatHistogram,

@@ -997,23 +924,6 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
}
}
// StoreMetadata keeps track of known series' metadata for lookups when sending samples to remote.
func (t *QueueManager) StoreMetadata(meta []record.RefMetadata) {
if t.protoMsg == config.RemoteWriteProtoMsgV1 {
return
}
t.seriesMtx.Lock()
defer t.seriesMtx.Unlock()
for _, m := range meta {
t.seriesMetadata[m.Ref] = &metadata.Metadata{
Type: record.ToMetricType(m.Type),
Unit: m.Unit,
Help: m.Help,
}
}
}
// UpdateSeriesSegment updates the segment number held against the series,
// so we can trim older ones in SeriesReset.
func (t *QueueManager) UpdateSeriesSegment(series []record.RefSeries, index int) {

@@ -1039,7 +949,6 @@ func (t *QueueManager) SeriesReset(index int) {
delete(t.seriesSegmentIndexes, k)
t.releaseLabels(t.seriesLabels[k])
delete(t.seriesLabels, k)
delete(t.seriesMetadata, k)
delete(t.droppedSeries, k)
}
}

@@ -1255,7 +1164,6 @@ type shards struct {
samplesDroppedOnHardShutdown atomic.Uint32
exemplarsDroppedOnHardShutdown atomic.Uint32
histogramsDroppedOnHardShutdown atomic.Uint32
metadataDroppedOnHardShutdown atomic.Uint32
}
// start the shards; must be called before any call to enqueue.

@@ -1284,7 +1192,6 @@ func (s *shards) start(n int) {
s.samplesDroppedOnHardShutdown.Store(0)
s.exemplarsDroppedOnHardShutdown.Store(0)
s.histogramsDroppedOnHardShutdown.Store(0)
s.metadataDroppedOnHardShutdown.Store(0)
for i := 0; i < n; i++ {
go s.runShard(hardShutdownCtx, i, newQueues[i])
}

@@ -1317,16 +1224,12 @@ func (s *shards) stop() {
// Force an unclean shutdown.
s.hardShutdown()
<-s.done
// Log error for any dropped samples, exemplars, or histograms.
logDroppedError := func(t string, counter atomic.Uint32) {
if dropped := counter.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped)
}
if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped)
}
if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped)
}
logDroppedError("samples", s.samplesDroppedOnHardShutdown)
logDroppedError("exemplars", s.exemplarsDroppedOnHardShutdown)
logDroppedError("histograms", s.histogramsDroppedOnHardShutdown)
}
// enqueue data (sample or exemplar). If the shard is full, shutting down, or

@@ -1337,6 +1240,7 @@ func (s *shards) stop() {
func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
s.mtx.RLock()
defer s.mtx.RUnlock()
shard := uint64(ref) % uint64(len(s.queues))
select {
case <-s.softShutdown:

@@ -1379,7 +1283,6 @@ type timeSeries struct {
value float64
histogram *histogram.Histogram
floatHistogram *histogram.FloatHistogram
metadata *metadata.Metadata
timestamp int64
exemplarLabels labels.Labels
// The type of series: sample, exemplar, or histogram.

@@ -1393,7 +1296,6 @@ const (
tExemplar
tHistogram
tFloatHistogram
tMetadata
)
func newQueue(batchSize, capacity int) *queue {

@@ -1417,10 +1319,6 @@ func newQueue(batchSize, capacity int) *queue {
func (q *queue) Append(datum timeSeries) bool {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
// TODO(cstyan): Check if metadata now means we've reduced the total # of samples
// we can batch together here, and if so find a way to not include metadata
// in the batch size calculation.
// See https://github.com/prometheus/prometheus/issues/14405
q.batch = append(q.batch, datum)
if len(q.batch) == cap(q.batch) {
select {

@@ -1444,6 +1342,7 @@ func (q *queue) Chan() <-chan []timeSeries {
func (q *queue) Batch() []timeSeries {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
select {
case batch := <-q.batchQueue:
return batch

@@ -1469,8 +1368,6 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) {
for q.tryEnqueueingBatch(done) {
time.Sleep(time.Second)
}
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
q.batch = nil
close(q.batchQueue)
}
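enqueue picks a shard with plain modulo on the series ref (shard := uint64(ref) % uint64(len(s.queues))), so a given series always lands on the same shard and stays ordered within it. The same idea as a standalone sketch:

    // shardFor maps a series ref onto one of n shards; stable per series.
    func shardFor(ref chunks.HeadSeriesRef, n int) int {
        return int(uint64(ref) % uint64(n))
    }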

@@ -1517,23 +1414,19 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}()
shardNum := strconv.Itoa(shardID)
symbolTable := writev2.NewSymbolTable()
// Send batches of at most MaxSamplesPerSend samples to the remote storage.
// If we have fewer samples than that, flush them out after a deadline anyways.
var (
max = s.qm.cfg.MaxSamplesPerSend
pBuf = proto.NewBuffer(nil)
pBufRaw []byte
buf []byte
pBuf = proto.NewBuffer(nil)
buf []byte
)
// TODO(@tpaschalis) Should we also raise the max if we have WAL metadata?
if s.qm.sendExemplars {
max += int(float64(max) * 0.1)
}
// TODO: Dry all of this, we should make an interface/generic for the timeseries type.
batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData {

@@ -1542,10 +1435,6 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
pendingData[i].Exemplars = []prompb.Exemplar{{}}
}
}
pendingDataV2 := make([]writev2.TimeSeries, max)
for i := range pendingDataV2 {
pendingDataV2[i].Samples = []writev2.Sample{{}}
}
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() {

@@ -1558,24 +1447,6 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}
defer stop()
sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, enc Compression, timer bool) {
switch protoMsg {
case config.RemoteWriteProtoMsgV1:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
if timer {
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
}
_ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc)
case config.RemoteWriteProtoMsgV2:
nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
_ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf, enc)
symbolTable.Reset()
}
}
for {
select {
case <-ctx.Done():

@@ -1599,11 +1470,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok {
return
}
sendBatch(batch, s.qm.protoMsg, s.qm.enc, false)
// TODO(bwplotka): Previously the return was between populate and send.
// Consider this when DRY-ing https://github.com/prometheus/prometheus/issues/14409
nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
queue.ReturnForReuse(batch)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
stop()
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))

@@ -1611,7 +1481,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
case <-timer.C:
batch := queue.Batch()
if len(batch) > 0 {
sendBatch(batch, s.qm.protoMsg, s.qm.enc, true)
nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
}
queue.ReturnForReuse(batch)
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))

@@ -1619,22 +1493,21 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}
}
func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int) {
func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) {
var nPendingSamples, nPendingExemplars, nPendingHistograms int
for nPending, d := range batch {
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
if sendExemplars {
if s.qm.sendExemplars {
pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
}
if sendNativeHistograms {
if s.qm.sendNativeHistograms {
pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
}
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
// stop reading from the queue. This makes it safe to reference pendingSamples by index.
pendingData[nPending].Labels = prompb.FromLabels(d.seriesLabels, pendingData[nPending].Labels)
pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
switch d.sType {
case tSample:
pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{

@@ -1644,64 +1517,37 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen
nPendingSamples++
case tExemplar:
pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
Labels: prompb.FromLabels(d.exemplarLabels, nil),
Labels: labelsToLabelsProto(d.exemplarLabels, nil),
Value: d.value,
Timestamp: d.timestamp,
})
nPendingExemplars++
case tHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromIntHistogram(d.timestamp, d.histogram))
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
nPendingHistograms++
case tFloatHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromFloatHistogram(d.timestamp, d.floatHistogram))
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
nPendingHistograms++
}
}
return nPendingSamples, nPendingExemplars, nPendingHistograms
}
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
begin := time.Now()
rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, rs, time.Since(begin))
return err
}
// TODO(bwplotka): DRY this (have one logic for both v1 and v2).
// See https://github.com/prometheus/prometheus/issues/14409
func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
begin := time.Now()
rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, rs, time.Since(begin))
return err
}
func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, rs WriteResponseStats, duration time.Duration) {
// Partial errors may happen -- account for that.
sampleDiff := sampleCount - rs.Samples
if sampleDiff > 0 {
s.qm.metrics.failedSamplesTotal.Add(float64(sampleDiff))
}
histogramDiff := histogramCount - rs.Histograms
if histogramDiff > 0 {
s.qm.metrics.failedHistogramsTotal.Add(float64(histogramDiff))
}
exemplarDiff := exemplarCount - rs.Exemplars
if exemplarDiff > 0 {
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff))
}
err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err)
} else if sampleDiff+exemplarDiff+histogramDiff > 0 {
level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff)
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
}
// These counters are used to calculate the dynamic sharding, and as such
// should be maintained irrespective of success or failure.
s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount))
s.qm.dataOutDuration.incr(int64(duration))
s.qm.dataOut.incr(int64(len(samples)))
s.qm.dataOutDuration.incr(int64(time.Since(begin)))
s.qm.lastSendTimestamp.Store(time.Now().Unix())
// Pending samples/exemplars/histograms also should be subtracted, as an error means
// they will not be retried.
s.qm.metrics.pendingSamples.Sub(float64(sampleCount))

@@ -1713,29 +1559,19 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
}
}
// sendSamples to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) {
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error {
// Build the WriteRequest with no metadata.
req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, *buf, nil)
s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil {
// Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails.
return WriteResponseStats{}, err
return err
}
reqSize := len(req)
*buf = req
// Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need
// to track the total amount of accepted data across the various attempts.
accumulatedStats := WriteResponseStats{}
var accumulatedStatsMu sync.Mutex
addStats := func(rs WriteResponseStats) {
accumulatedStatsMu.Lock()
accumulatedStats = accumulatedStats.Add(rs)
accumulatedStatsMu.Unlock()
}
// An anonymous function allows us to defer the completion of our per-try spans
// without causing a memory leak, and it has the nice effect of not propagating any
// parameters for sendSamplesWithBackoff/3.

@@ -1749,9 +1585,8 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
samples,
nil,
pBuf,
buf,
*buf,
isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
enc,
)
s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil {

@@ -1782,20 +1617,15 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
s.qm.metrics.samplesTotal.Add(float64(sampleCount))
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
s.qm.metrics.metadataTotal.Add(float64(metadataCount))
// Technically for v1, we will likely have empty response stats, but for
// newer Receivers this might not be, so use it on a best-effort basis.
rs, err := s.qm.client().Store(ctx, *buf, try)
err := s.qm.client().Store(ctx, *buf, try)
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
// TODO(bwplotka): Revisit this once we have Receivers doing retriable partial error
// so far we don't have those, so it's ok to potentially skew statistics.
addStats(rs)
if err == nil {
return nil
if err != nil {
span.RecordError(err)
return err
}
span.RecordError(err)
return err
return nil
}
onRetry := func() {

@@ -1808,186 +1638,13 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
if errors.Is(err, context.Canceled) {
// When there is resharding, we cancel the context for this queue, which means the data is not sent.
// So we exit early to not update the metrics.
return accumulatedStats, err
}
s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
if err == nil && !accumulatedStats.Confirmed {
// No 2.0 response headers, and we sent v1 message, so likely it's 1.0 Receiver.
// Assume success, don't rely on headers.
return WriteResponseStats{
Samples: sampleCount,
Histograms: histogramCount,
Exemplars: exemplarCount,
}, nil
}
return accumulatedStats, err
}
// sendV2Samples to the remote storage with backoff for recoverable errors.
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) {
// Build the WriteRequest with no metadata.
req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)
s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil {
// Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails.
return WriteResponseStats{}, err
}
reqSize := len(req)
*buf = req
// Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need
// to track the total amount of accepted data across the various attempts.
accumulatedStats := WriteResponseStats{}
var accumulatedStatsMu sync.Mutex
addStats := func(rs WriteResponseStats) {
accumulatedStatsMu.Lock()
accumulatedStats = accumulatedStats.Add(rs)
accumulatedStatsMu.Unlock()
}
// An anonymous function allows us to defer the completion of our per-try spans
// without causing a memory leak, and it has the nice effect of not propagating any
// parameters for sendSamplesWithBackoff/3.
attemptStore := func(try int) error {
currentTime := time.Now()
lowest := s.qm.buildRequestLimitTimestamp.Load()
if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) {
// This will filter out old samples during retries.
req, _, lowest, err := buildV2WriteRequest(
s.qm.logger,
samples,
labels,
pBuf,
buf,
isV2TimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
enc,
)
s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil {
return err
}
*buf = req
}
ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
defer span.End()
span.SetAttributes(
attribute.Int("request_size", reqSize),
attribute.Int("samples", sampleCount),
attribute.Int("try", try),
attribute.String("remote_name", s.qm.storeClient.Name()),
attribute.String("remote_url", s.qm.storeClient.Endpoint()),
)
if exemplarCount > 0 {
span.SetAttributes(attribute.Int("exemplars", exemplarCount))
}
if histogramCount > 0 {
span.SetAttributes(attribute.Int("histograms", histogramCount))
}
begin := time.Now()
s.qm.metrics.samplesTotal.Add(float64(sampleCount))
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
s.qm.metrics.metadataTotal.Add(float64(metadataCount))
rs, err := s.qm.client().Store(ctx, *buf, try)
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
// TODO(bwplotka): Revisit this once we have Receivers doing retriable partial error
// so far we don't have those, so it's ok to potentially skew statistics.
addStats(rs)
if err == nil {
// Check the case mentioned in PRW 2.0
// https://prometheus.io/docs/specs/remote_write_spec_2_0/#required-written-response-headers.
if sampleCount+histogramCount+exemplarCount > 0 && rs.NoDataWritten() {
err = fmt.Errorf("sent v2 request with %v samples, %v histograms and %v exemplars; got 2xx, but PRW 2.0 response header statistics indicate %v samples, %v histograms and %v exemplars were accepted;"+
" assuming failure e.g. the target only supports PRW 1.0 prometheus.WriteRequest, but does not check the Content-Type header correctly",
sampleCount, histogramCount, exemplarCount,
rs.Samples, rs.Histograms, rs.Exemplars,
)
span.RecordError(err)
return err
}
return nil
}
span.RecordError(err)
return err
}
onRetry := func() {
s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount))
}
err = s.qm.sendWriteRequestWithBackoff(ctx, attemptStore, onRetry)
if errors.Is(err, context.Canceled) {
// When there is resharding, we cancel the context for this queue, which means the data is not sent.
// So we exit early to not update the metrics.
return accumulatedStats, err
}
s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
return accumulatedStats, err
}
func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {
var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int
for nPending, d := range batch {
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
// todo: should we also safeguard against empty metadata here?
if d.metadata != nil {
pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type)
pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit)
nPendingMetadata++
}
if sendExemplars {
pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
}
if sendNativeHistograms {
pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
}
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
// stop reading from the queue. This makes it safe to reference pendingSamples by index.
pendingData[nPending].LabelsRefs = symbolTable.SymbolizeLabels(d.seriesLabels, pendingData[nPending].LabelsRefs)
switch d.sType {
case tSample:
pendingData[nPending].Samples = append(pendingData[nPending].Samples, writev2.Sample{
Value: d.value,
Timestamp: d.timestamp,
})
nPendingSamples++
case tExemplar:
pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, writev2.Exemplar{
LabelsRefs: symbolTable.SymbolizeLabels(d.exemplarLabels, nil), // TODO: optimize, reuse slice
Value: d.value,
Timestamp: d.timestamp,
})
nPendingExemplars++
case tHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromIntHistogram(d.timestamp, d.histogram))
nPendingHistograms++
case tFloatHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromFloatHistogram(d.timestamp, d.floatHistogram))
nPendingHistograms++
case tMetadata:
// TODO: log or return an error?
// we shouldn't receive metadata type data here, it should already be inserted into the timeSeries
}
}
return nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata
return err
}
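populateV2TimeSeries interns every label, help, and unit string into a shared writev2 symbol table and ships only integer refs; the flat Symbols slice travels once per request. A hedged sketch of that round trip, using only calls that appear in the hunks above; exampleSymbolize itself is illustrative:

    func exampleSymbolize() *writev2.Request {
        st := writev2.NewSymbolTable()
        var ts writev2.TimeSeries
        // Strings become integer refs into the per-batch table...
        ts.Metadata.HelpRef = st.Symbolize("Total number of HTTP requests.")
        ts.Metadata.UnitRef = st.Symbolize("requests")
        // ...and the table itself travels once, as the flat Symbols slice.
        // The caller resets the table only after the send, as runShard does.
        return &writev2.Request{Symbols: st.Symbols(), Timeseries: []writev2.TimeSeries{ts}}
    }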
func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt func(int) error, onRetry func()) error {

@@ -2121,11 +1778,9 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
lowest = ts.Histograms[0].Timestamp
}
if i != keepIdx {
// We have to swap the kept timeseries with the one which should be dropped.
// Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries).
timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
}
// Move the current element to the write position and increment the write pointer
timeSeries[keepIdx] = timeSeries[i]
keepIdx++
}

@@ -2133,21 +1788,7 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
}
func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed []byte, _ error) {
switch enc {
case SnappyBlockCompression:
compressed = snappy.Encode(*tmpbuf, inp)
if n := snappy.MaxEncodedLen(len(inp)); n > len(*tmpbuf) {
// grow the buffer for the next time
*tmpbuf = make([]byte, n)
}
return compressed, nil
default:
return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc)
}
}
func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte, filter func(prompb.TimeSeries) bool) ([]byte, int64, int64, error) {
highest, lowest, timeSeries,
droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter)

@@ -2173,105 +1814,8 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada
// snappy uses len() to see if it needs to allocate a new slice. Make the
// buffer as long as possible.
if buf != nil {
*buf = (*buf)[0:cap(*buf)]
} else {
buf = &[]byte{}
}
compressed, err = compressPayload(buf, pBuf.Bytes(), enc)
if err != nil {
return nil, highest, lowest, err
buf = buf[0:cap(buf)]
}
compressed := snappy.Encode(buf, pBuf.Bytes())
return compressed, highest, lowest, nil
}
func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter)
if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 {
level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms)
}
req := &writev2.Request{
Symbols: labels,
Timeseries: timeSeries,
}
if pBuf == nil {
pBuf = &[]byte{} // For convenience in tests. Not efficient.
}
data, err := req.OptimizedMarshal(*pBuf)
if err != nil {
return nil, highest, lowest, err
}
*pBuf = data
// snappy uses len() to see if it needs to allocate a new slice. Make the
// buffer as long as possible.
if buf != nil {
*buf = (*buf)[0:cap(*buf)]
} else {
buf = &[]byte{}
}
compressed, err = compressPayload(buf, data, enc)
if err != nil {
return nil, highest, lowest, err
}
return compressed, highest, lowest, nil
}
func buildV2TimeSeries(timeSeries []writev2.TimeSeries, filter func(writev2.TimeSeries) bool) (int64, int64, []writev2.TimeSeries, int, int, int) {
var highest int64
var lowest int64
var droppedSamples, droppedExemplars, droppedHistograms int
keepIdx := 0
lowest = math.MaxInt64
for i, ts := range timeSeries {
if filter != nil && filter(ts) {
if len(ts.Samples) > 0 {
droppedSamples++
}
if len(ts.Exemplars) > 0 {
droppedExemplars++
}
if len(ts.Histograms) > 0 {
droppedHistograms++
}
continue
}
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
highest = ts.Samples[0].Timestamp
}
if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
highest = ts.Exemplars[0].Timestamp
}
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
highest = ts.Histograms[0].Timestamp
}
// Get the lowest timestamp.
if len(ts.Samples) > 0 && ts.Samples[0].Timestamp < lowest {
lowest = ts.Samples[0].Timestamp
}
if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp < lowest {
lowest = ts.Exemplars[0].Timestamp
}
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
lowest = ts.Histograms[0].Timestamp
}
if i != keepIdx {
// We have to swap the kept timeseries with the one which should be dropped.
// Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries).
timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
}
keepIdx++
}
timeSeries = timeSeries[:keepIdx]
return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
}
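buildWriteRequest marshals into pBuf and then hands the bytes to compressPayload, which reuses (and grows) a scratch buffer across batches. The snappy block-format step in isolation, as a hedged sketch mirroring the function above:

    func compressSketch(tmpbuf *[]byte, inp []byte) []byte {
        // snappy.Encode writes into *tmpbuf when it is large enough;
        // otherwise it allocates, so grow the scratch for the next call.
        compressed := snappy.Encode(*tmpbuf, inp)
        if n := snappy.MaxEncodedLen(len(inp)); n > len(*tmpbuf) {
            *tmpbuf = make([]byte, n)
        }
        return compressed
    }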

vendor/github.com/prometheus/prometheus/storage/remote/read.go (4 changes, generated, vendored)
@@ -210,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
}
// LabelValues implements storage.Querier and is a noop.
func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}
// LabelNames implements storage.Querier and is a noop.
func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}

vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go (53 changes, generated, vendored)
@@ -202,34 +202,16 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
return err
}
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers)
if err := chunks.Err(); err != nil {
return err
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
ws, err := StreamChunkedReadResponses(
NewChunkedWriter(w, f),
int64(i),
// The streaming API has to provide the series sorted.
querier.Select(ctx, true, hints, filteredMatchers...),
chunks,
sortedExternalLabels,
h.remoteReadMaxBytesInFrame,
h.marshalPool,

@@ -254,6 +236,35 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
}
}
// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet,
// encapsulating the operation in its own function to ensure timely release of
// the querier resources.
func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet {
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
return querier.Select(ctx, true, hints, filteredMatchers...)
}
// filterExtLabelsFromMatchers change equality matchers which match external labels
// to a matcher that looks for an empty label,
// as that label should not be present in the storage.

vendor/github.com/prometheus/prometheus/storage/remote/stats.go (107 changes, generated, vendored)
@@ -1,107 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"errors"
"net/http"
"strconv"
)
const (
rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written"
rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written"
rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written"
)
// WriteResponseStats represents the response write statistics specified in https://github.com/prometheus/docs/pull/2486
type WriteResponseStats struct {
// Samples represents X-Prometheus-Remote-Write-Written-Samples
Samples int
// Histograms represents X-Prometheus-Remote-Write-Written-Histograms
Histograms int
// Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
Exemplars int
// Confirmed means we can trust those statistics from the point of view
// of the PRW 2.0 spec. When parsed from headers, it means we got at least one
// response header from the Receiver to confirm those numbers, meaning it must
// be at least a 2.0 Receiver. See ParseWriteResponseStats for details.
Confirmed bool
}
// NoDataWritten returns true if statistics indicate no data was written.
func (s WriteResponseStats) NoDataWritten() bool {
return (s.Samples + s.Histograms + s.Exemplars) == 0
}
// AllSamples returns both float and histogram sample numbers.
func (s WriteResponseStats) AllSamples() int {
return s.Samples + s.Histograms
}
// Add returns the sum of this WriteResponseStats plus the given WriteResponseStats.
func (s WriteResponseStats) Add(rs WriteResponseStats) WriteResponseStats {
s.Confirmed = rs.Confirmed
s.Samples += rs.Samples
s.Histograms += rs.Histograms
s.Exemplars += rs.Exemplars
return s
}
// SetHeaders sets response headers in a given response writer.
// Make sure to use it before http.ResponseWriter.WriteHeader and .Write.
func (s WriteResponseStats) SetHeaders(w http.ResponseWriter) {
h := w.Header()
h.Set(rw20WrittenSamplesHeader, strconv.Itoa(s.Samples))
h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.Histograms))
h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.Exemplars))
}
// ParseWriteResponseStats returns WriteResponseStats parsed from the response headers.
//
// As per 2.0 spec, missing header means 0. However, abrupt HTTP errors, 1.0 Receivers
// or buggy 2.0 Receivers might result in no response headers specified and that
// might NOT necessarily mean nothing was written. To represent that we set
// s.Confirmed = true only when we see at least one response header.
//
// Error is returned when any of the headers fails to parse as int64.
func ParseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) {
var (
errs []error
h = r.Header
)
if v := h.Get(rw20WrittenSamplesHeader); v != "" { // Empty means zero.
s.Confirmed = true
if s.Samples, err = strconv.Atoi(v); err != nil {
s.Samples = 0
errs = append(errs, err)
}
}
if v := h.Get(rw20WrittenHistogramsHeader); v != "" { // Empty means zero.
s.Confirmed = true
if s.Histograms, err = strconv.Atoi(v); err != nil {
s.Histograms = 0
errs = append(errs, err)
}
}
if v := h.Get(rw20WrittenExemplarsHeader); v != "" { // Empty means zero.
s.Confirmed = true
if s.Exemplars, err = strconv.Atoi(v); err != nil {
s.Exemplars = 0
errs = append(errs, err)
}
}
return s, errors.Join(errs...)
}
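stats.go (present on only one side of this compare) is the PRW 2.0 response-statistics helper. A hedged usage sketch of the sender side within package remote; the receiver URL is hypothetical:

    func checkWrite(client *http.Client, compressed []byte) error {
        resp, err := client.Post("http://receiver.example/api/v1/write", "application/x-protobuf", bytes.NewReader(compressed))
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        stats, err := ParseWriteResponseStats(resp)
        if err != nil {
            return err
        }
        // A confirmed 2.0 Receiver answering 2xx with all-zero written counts
        // is how senders detect "accepted but not written".
        if stats.Confirmed && stats.NoDataWritten() {
            return errors.New("2xx response but receiver reports nothing written")
        }
        return nil
    }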

vendor/github.com/prometheus/prometheus/storage/remote/storage.go (4 changes, generated, vendored)
@@ -62,7 +62,7 @@ type Storage struct {
}

// NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage {
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {
	if l == nil {
		l = log.NewNopLogger()
	}
@@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
		logger:                 logger,
		localStartTimeCallback: stCallback,
	}
	s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL)
	s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)
	return s
}

vendor/github.com/prometheus/prometheus/storage/remote/write.go (generated, vendored)
@@ -15,7 +15,6 @@ package remote

import (
	"context"
	"errors"
	"fmt"
	"math"
	"sync"
@@ -66,7 +65,6 @@ type WriteStorage struct {
	externalLabels labels.Labels
	dir            string
	queues         map[string]*QueueManager
	metadataInWAL  bool
	samplesIn      *ewmaRate
	flushDeadline  time.Duration
	interner       *pool
@@ -78,7 +76,7 @@ type WriteStorage struct {
}

// NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage {
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
	if logger == nil {
		logger = log.NewNopLogger()
	}
@@ -94,13 +92,12 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
		interner:      newPool(),
		scraper:       sm,
		quit:          make(chan struct{}),
		metadataInWAL: metadataInWal,
		highestTimestamp: &maxTimestamp{
			Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "highest_timestamp_in_seconds",
				Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
				Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
			}),
		},
	}
@@ -148,9 +145,6 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
	newQueues := make(map[string]*QueueManager)
	newHashes := []string{}
	for _, rwConf := range conf.RemoteWriteConfigs {
		if rwConf.ProtobufMessage == config.RemoteWriteProtoMsgV2 && !rws.metadataInWAL {
			return errors.New("invalid remote write configuration, if you are using remote write version 2.0 the `--enable-feature=metadata-wal-records` feature flag must be enabled")
		}
		hash, err := toHash(rwConf)
		if err != nil {
			return err
@@ -171,7 +165,6 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {

		c, err := NewWriteClient(name, &ClientConfig{
			URL:              rwConf.URL,
			WriteProtoMsg:    rwConf.ProtobufMessage,
			Timeout:          rwConf.RemoteTimeout,
			HTTPClientConfig: rwConf.HTTPClientConfig,
			SigV4Config:      rwConf.SigV4Config,
@@ -214,7 +207,6 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
			rws.scraper,
			rwConf.SendExemplars,
			rwConf.SendNativeHistograms,
			rwConf.ProtobufMessage,
		)
		// Keep track of which queues are new so we know which to start.
		newHashes = append(newHashes, hash)

vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go (generated, vendored)
@@ -17,25 +17,16 @@ import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/prompb"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
	"github.com/prometheus/prometheus/storage"
	otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
@@ -44,229 +35,140 @@ type writeHandler struct {
	logger     log.Logger
	appendable storage.Appendable

	samplesWithInvalidLabelsTotal  prometheus.Counter
	samplesAppendedWithoutMetadata prometheus.Counter

	acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
	samplesWithInvalidLabelsTotal prometheus.Counter
}

const maxAheadTime = 10 * time.Minute

// NewWriteHandler creates a http.Handler that accepts remote write requests with
// the given message in acceptedProtoMsgs and writes them to the provided appendable.
//
// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
	protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
	for _, acc := range acceptedProtoMsgs {
		protoMsgs[acc] = struct{}{}
	}
// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
	h := &writeHandler{
		logger:            logger,
		appendable:        appendable,
		acceptedProtoMsgs: protoMsgs,
		samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
		logger:     logger,
		appendable: appendable,

		samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "prometheus",
			Subsystem: "api",
			Name:      "remote_write_invalid_labels_samples_total",
			Help:      "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.",
		}),
		samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Namespace: "prometheus",
			Subsystem: "api",
			Name:      "remote_write_without_metadata_appended_samples_total",
			Help:      "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
			Help:      "The total number of remote write samples which contains invalid labels.",
		}),
	}
	if reg != nil {
		reg.MustRegister(h.samplesWithInvalidLabelsTotal)
	}
	return h
}

func (h *writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
	contentType = strings.TrimSpace(contentType)

	parts := strings.Split(contentType, ";")
	if parts[0] != appProtoContentType {
		return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
	}
	// Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter
	for _, p := range parts[1:] {
		pair := strings.Split(p, "=")
		if len(pair) != 2 {
			return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
		}
		if pair[0] == "proto" {
			ret := config.RemoteWriteProtoMsg(pair[1])
			if err := ret.Validate(); err != nil {
				return "", fmt.Errorf("got %v content type; %w", contentType, err)
			}
			return ret, nil
		}
	}
	// No "proto=" parameter, assuming v1.
	return config.RemoteWriteProtoMsgV1, nil
}
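The content-type negotiation above boils down to: the media type must be application/x-protobuf, and an optional proto= parameter selects the message version. A stripped-down, self-contained sketch of the same logic follows; the media type and the two proto identifiers mirror what the PRW 2.0 spec defines, but treat the exact strings as assumptions rather than the vendored constants.

package main

import (
	"fmt"
	"strings"
)

const mediaType = "application/x-protobuf" // Assumed value of appProtoContentType.

// parseProto is a simplified stand-in for parseProtoMsg above: no proto=
// parameter means the 1.0 message, otherwise the parameter value is returned.
func parseProto(contentType string) (string, error) {
	parts := strings.Split(strings.TrimSpace(contentType), ";")
	if parts[0] != mediaType {
		return "", fmt.Errorf("unexpected media type %q", parts[0])
	}
	for _, p := range parts[1:] {
		if k, v, ok := strings.Cut(strings.TrimSpace(p), "="); ok && k == "proto" {
			return v, nil
		}
	}
	return "prometheus.WriteRequest", nil // Default: v1 message.
}

func main() {
	for _, ct := range []string{
		"application/x-protobuf",
		"application/x-protobuf;proto=prometheus.WriteRequest",
		"application/x-protobuf;proto=io.prometheus.write.v2.Request",
	} {
		msg, err := parseProto(ct)
		fmt.Println(ct, "->", msg, err)
	}
}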

func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	contentType := r.Header.Get("Content-Type")
	if contentType == "" {
		// Don't break yolo 1.0 clients if not needed. This is similar to what we did
		// before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
		// We could give http.StatusUnsupportedMediaType, but let's assume 1.0 message by default.
		contentType = appProtoContentType
	}

	msgType, err := h.parseProtoMsg(contentType)
	if err != nil {
		level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
		http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
		return
	}

	if _, ok := h.acceptedProtoMsgs[msgType]; !ok {
		err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) {
			for k := range h.acceptedProtoMsgs {
				ret = append(ret, string(k))
			}
			return ret
		}())
		level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
		http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
	}

	enc := r.Header.Get("Content-Encoding")
	if enc == "" {
		// Don't break yolo 1.0 clients if not needed. This is similar to what we did
		// before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
		// We could give http.StatusUnsupportedMediaType, but let's assume snappy by default.
	} else if enc != string(SnappyBlockCompression) {
		err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression)
		level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
		http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
	}

	// Read the request body.
	body, err := io.ReadAll(r.Body)
	req, err := DecodeWriteRequest(r.Body)
	if err != nil {
		level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	decompressed, err := snappy.Decode(nil, body)
	if err != nil {
		// TODO(bwplotka): Add more context to responded error?
		level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error())
	err = h.write(r.Context(), req)
	switch {
	case err == nil:
	case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
		// Indicated an out of order sample is a bad request to prevent retries.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Now we have a decompressed buffer we can unmarshal it.

	if msgType == config.RemoteWriteProtoMsgV1 {
		// PRW 1.0 flow has different proto message and no partial write handling.
		var req prompb.WriteRequest
		if err := proto.Unmarshal(decompressed, &req); err != nil {
			// TODO(bwplotka): Add more context to responded error?
			level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error())
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		if err = h.write(r.Context(), &req); err != nil {
			switch {
			case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
				// Indicated an out-of-order sample is a bad request to prevent retries.
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			default:
				level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error())
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		w.WriteHeader(http.StatusNoContent)
	default:
		level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Remote Write 2.x proto message handling.
	var req writev2.Request
	if err := proto.Unmarshal(decompressed, &req); err != nil {
		// TODO(bwplotka): Add more context to responded error?
		level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error())
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)

	// Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
	respStats.SetHeaders(w)

	if err != nil {
		if errHTTPCode/5 == 100 { // 5xx
			level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error())
		}
		http.Error(w, err.Error(), errHTTPCode)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause.
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
	unwrappedErr := errors.Unwrap(err)
	if unwrappedErr == nil {
		unwrappedErr = err
	}
	switch {
	case errors.Is(unwrappedErr, storage.ErrNotFound):
		return storage.ErrNotFound
	case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar):
		*outOfOrderErrs++
		level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
		return nil
	default:
		return err
	}
}

func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
	outOfOrderExemplarErrs := 0
	samplesWithInvalidLabels := 0
	samplesAppended := 0

	app := &timeLimitAppender{
		Appender: h.appendable.Appender(ctx),
		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
	}

	app := h.appendable.Appender(ctx)
	defer func() {
		if err != nil {
			_ = app.Rollback()
			return
		}
		err = app.Commit()
		if err != nil {
			h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended))
		}
	}()

	b := labels.NewScratchBuilder(0)
	var exemplarErr error
	for _, ts := range req.Timeseries {
		ls := ts.ToLabels(&b, nil)
		if !ls.Has(labels.MetricName) || !ls.IsValid() {
			level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
		labels := labelProtosToLabels(&b, ts.Labels)
		if !labels.IsValid() {
			level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
			samplesWithInvalidLabels++
			// TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are
			// potentially written. Perhaps unify with fixed writeV2 implementation a bit.
			continue
		}

		if err := h.appendV1Samples(app, ts.Samples, ls); err != nil {
			return err
		}
		samplesAppended += len(ts.Samples)

		for _, ep := range ts.Exemplars {
			e := ep.ToExemplar(&b, nil)
			if _, err := app.AppendExemplar(0, ls, e); err != nil {
				switch {
				case errors.Is(err, storage.ErrOutOfOrderExemplar):
					outOfOrderExemplarErrs++
					level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
				default:
					// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
					level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
		var ref storage.SeriesRef
		for _, s := range ts.Samples {
			ref, err = app.Append(ref, labels, s.Timestamp, s.Value)
			if err != nil {
				unwrappedErr := errors.Unwrap(err)
				if unwrappedErr == nil {
					unwrappedErr = err
				}
				if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
					level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
				}
				return err
			}
		}

		if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil {
			return err
		for _, ep := range ts.Exemplars {
			e := exemplarProtoToExemplar(&b, ep)

			_, exemplarErr = app.AppendExemplar(0, labels, e)
			exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
			if exemplarErr != nil {
				// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
				level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
			}
		}

		for _, hp := range ts.Histograms {
			if hp.IsFloatHistogram() {
				fhs := FloatHistogramProtoToFloatHistogram(hp)
				_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
			} else {
				hs := HistogramProtoToHistogram(hp)
				_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
			}
			if err != nil {
				unwrappedErr := errors.Unwrap(err)
				if unwrappedErr == nil {
					unwrappedErr = err
				}
				// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
				// a note indicating its inclusion in the future.
				if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
					level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
				}
				return err
			}
		}
		samplesAppended += len(ts.Histograms)
	}

	if outOfOrderExemplarErrs > 0 {
@@ -275,201 +177,10 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
	if samplesWithInvalidLabels > 0 {
		h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
	}

	return nil
}

func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
	var ref storage.SeriesRef
	var err error
	for _, s := range ss {
		ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
		if err != nil {
			if errors.Is(err, storage.ErrOutOfOrderSample) ||
				errors.Is(err, storage.ErrOutOfBounds) ||
				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
				level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
			}
			return err
		}
	}
	return nil
}

func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
	var err error
	for _, hp := range hh {
		if hp.IsFloatHistogram() {
			_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
		} else {
			_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
		}
		if err != nil {
			// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
			// a note indicating its inclusion in the future.
			if errors.Is(err, storage.ErrOutOfOrderSample) ||
				errors.Is(err, storage.ErrOutOfBounds) ||
				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
				level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
			}
			return err
		}
	}
	return nil
}

// writeV2 is similar to write, but it works with v2 proto message,
// allows partial 4xx writes and gathers statistics.
//
// writeV2 returns the statistics.
// In error cases, writeV2 also returns statistics, but also the error that
// should be propagated to the remote write sender and httpCode to use for status.
//
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we have 5xx type of error, we immediately stop and rollback all appends.
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) {
	app := &timeLimitAppender{
		Appender: h.appendable.Appender(ctx),
		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
	}

	s := WriteResponseStats{}
	samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s)
	if err != nil {
		if errHTTPCode/5 == 100 {
			// On 5xx, we always rollback, because we expect
			// sender to retry and TSDB is not idempotent.
			if rerr := app.Rollback(); rerr != nil {
				level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr)
			}
			return WriteResponseStats{}, errHTTPCode, err
		}

		// Non-retriable (e.g. bad request error case). Can be partially written.
		commitErr := app.Commit()
		if commitErr != nil {
			// Bad requests do not matter as we have internal error (retryable).
			return WriteResponseStats{}, http.StatusInternalServerError, commitErr
		}
		// Bad request error happened, but rest of data (if any) was written.
		h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
		return s, errHTTPCode, err
	}

	// All good just commit.
	if err := app.Commit(); err != nil {
		return WriteResponseStats{}, http.StatusInternalServerError, err
	}
	h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
	return s, 0, nil
}
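The errHTTPCode/5 == 100 guard above is a terse integer-division test for the 5xx range: only codes 500 through 504 divide by 5 to exactly 100 (505 and above do not, which is harmless here since the handler only ever emits the common 5xx codes). A quick standalone check of the arithmetic:

package main

import "fmt"

func main() {
	// Integer division: 500-504 yield exactly 100, so the guard treats them
	// as retry-able server errors; 4xx codes and 505+ do not match.
	for _, code := range []int{400, 499, 500, 502, 504, 505} {
		fmt.Println(code, code/5 == 100)
	}
	// Prints: 400 false, 499 false, 500 true, 502 true, 504 true, 505 false.
}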

func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
	var (
		badRequestErrs                                   []error
		outOfOrderExemplarErrs, samplesWithInvalidLabels int

		b = labels.NewScratchBuilder(0)
	)
	for _, ts := range req.Timeseries {
		ls := ts.ToLabels(&b, req.Symbols)
		// Validate series labels early.
		// NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose
		// specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
		if !ls.Has(labels.MetricName) || !ls.IsValid() {
			badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String()))
			samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
			continue
		}

		allSamplesSoFar := rs.AllSamples()
		var ref storage.SeriesRef

		// Samples.
		for _, s := range ts.Samples {
			ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
			if err == nil {
				rs.Samples++
				continue
			}
			// Handle append error.
			if errors.Is(err, storage.ErrOutOfOrderSample) ||
				errors.Is(err, storage.ErrOutOfBounds) ||
				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
				errors.Is(err, storage.ErrTooOldSample) {
				// TODO(bwplotka): Not too spammy log?
				level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp)
				badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
				continue
			}
			return 0, http.StatusInternalServerError, err
		}

		// Native Histograms.
		for _, hp := range ts.Histograms {
			if hp.IsFloatHistogram() {
				ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
			} else {
				ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
			}
			if err == nil {
				rs.Histograms++
				continue
			}
			// Handle append error.
			// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
			// a note indicating its inclusion in the future.
			if errors.Is(err, storage.ErrOutOfOrderSample) ||
				errors.Is(err, storage.ErrOutOfBounds) ||
				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
				// TODO(bwplotka): Not too spammy log?
				level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
				badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
				continue
			}
			return 0, http.StatusInternalServerError, err
		}

		// Exemplars.
		for _, ep := range ts.Exemplars {
			e := ep.ToExemplar(&b, req.Symbols)
			ref, err = app.AppendExemplar(ref, ls, e)
			if err == nil {
				rs.Exemplars++
				continue
			}
			// Handle append error.
			if errors.Is(err, storage.ErrOutOfOrderExemplar) {
				outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here.
				level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
				badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
				continue
			}
			// TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed.
			// For now we keep the previously released flow (just error not debug level) of dropping them without rollback and 5xx.
			level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
		}

		m := ts.ToMetadata(req.Symbols)
		if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
			level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
			// Metadata is attached to each series, so since Prometheus does not reject sample without metadata information,
			// we don't report remote write error either. We increment metric instead.
			samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
		}
	}

	if outOfOrderExemplarErrs > 0 {
		level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))

	if len(badRequestErrs) == 0 {
		return samplesWithoutMetadata, 0, nil
	}
	// TODO(bwplotka): Better concat formatting? Perhaps add size limit?
	return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
}
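appendV2 collects every per-series rejection and returns them as one joined error; errors.Is still matches the individual sentinel errors through the join, which is what lets callers classify the combined failure as a bad request. A small stdlib-only illustration (the sentinel below is a stand-in for storage.ErrOutOfOrderSample):

package main

import (
	"errors"
	"fmt"
)

var errOutOfOrder = errors.New("out of order sample") // Stand-in sentinel.

func main() {
	var badRequestErrs []error
	badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series {job=\"a\"}", errOutOfOrder))
	badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series {job=\"b\"}", errOutOfOrder))

	joined := errors.Join(badRequestErrs...)
	fmt.Println(errors.Is(joined, errOutOfOrder)) // true: sentinels survive the join.
	fmt.Println(joined)                           // Both messages, newline-separated.

	// Joining an empty slice yields nil, which is why the success path above
	// can return errors.Join(badRequestErrs...) unconditionally.
	fmt.Println(errors.Join() == nil) // true
}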

// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
@@ -522,45 +233,3 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {

	w.WriteHeader(http.StatusOK)
}

type timeLimitAppender struct {
	storage.Appender

	maxTime int64
}

func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if t > app.maxTime {
		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
	}

	ref, err := app.Appender.Append(ref, lset, t, v)
	if err != nil {
		return 0, err
	}
	return ref, nil
}

func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if t > app.maxTime {
		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
	}

	ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh)
	if err != nil {
		return 0, err
	}
	return ref, nil
}

func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	if e.Ts > app.maxTime {
		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
	}

	ref, err := app.Appender.AppendExemplar(ref, l, e)
	if err != nil {
		return 0, err
	}
	return ref, nil
}
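timeLimitAppender is a plain embedding-based decorator: it guards only the methods it cares about and forwards everything else to the wrapped Appender. The same pattern, reduced to a standalone sketch with a toy single-method interface (the names below are illustrative, not part of the Prometheus API):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errOutOfBounds = errors.New("out of bounds") // Stand-in for storage.ErrOutOfBounds.

// Appender is a toy version of storage.Appender with a single method.
type Appender interface {
	Append(t int64, v float64) error
}

type memAppender struct{ n int }

func (a *memAppender) Append(t int64, v float64) error { a.n++; return nil }

// timeLimit embeds Appender and rejects far-future timestamps before
// delegating, mirroring the wrapper above.
type timeLimit struct {
	Appender
	maxTime int64
}

func (a *timeLimit) Append(t int64, v float64) error {
	if t > a.maxTime {
		return fmt.Errorf("%w: timestamp is too far in the future", errOutOfBounds)
	}
	return a.Appender.Append(t, v)
}

func main() {
	now := time.Now().UnixMilli()
	app := &timeLimit{Appender: &memAppender{}, maxTime: now + (10 * time.Minute).Milliseconds()}
	fmt.Println(app.Append(now, 1.0))                           // <nil>
	fmt.Println(app.Append(now+time.Hour.Milliseconds(), 1.0))  // wrapped out-of-bounds error
}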

vendor/github.com/prometheus/prometheus/storage/secondary.go (generated, vendored)
@@ -49,16 +49,16 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
	return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
}

func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...)
func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	vals, w, err := s.genericQuerier.LabelValues(ctx, name, matchers...)
	if err != nil {
		return nil, w.Add(err), nil
	}
	return vals, w, nil
}

func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...)
func (s *secondaryQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	names, w, err := s.genericQuerier.LabelNames(ctx, matchers...)
	if err != nil {
		return nil, w.Add(err), nil
	}

vendor/github.com/prometheus/prometheus/tsdb/block.go (generated, vendored)
@@ -103,9 +103,9 @@ type IndexReader interface {
	// storage.ErrNotFound is returned as error.
	LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error)

	// LabelNamesFor returns all the label names for the series referred to by the postings.
	// LabelNamesFor returns all the label names for the series referred to by IDs.
	// The names returned are sorted.
	LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error)
	LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error)

	// Close releases the underlying resources of the reader.
	Close() error
@@ -551,10 +551,10 @@ func (r blockIndexReader) LabelValueFor(ctx context.Context, id storage.SeriesRe
	return r.ir.LabelValueFor(ctx, id, label)
}

// LabelNamesFor returns all the label names for the series referred to by the postings.
// LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted.
func (r blockIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
	return r.ir.LabelNamesFor(ctx, postings)
func (r blockIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
	return r.ir.LabelNamesFor(ctx, ids...)
}

type blockTombstoneReader struct {
@@ -646,10 +646,10 @@ Outer:
}

// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
// If there was a rewrite, then it returns the ULID of new blocks written, else nil.
// If a resultant block is empty (tombstones covered the whole block), then it returns an empty slice.
// If there was a rewrite, then it returns the ULID of the new block written, else nil.
// If the resultant block is empty (tombstones covered the whole block), then it deletes the new block and returns a nil UID.
// It returns a boolean indicating if the parent block can be deleted safely or not.
func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) {
func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, error) {
	numStones := 0

	if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error {
@@ -664,12 +664,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, e
	}

	meta := pb.Meta()
	uids, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
	uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
	if err != nil {
		return nil, false, err
	}

	return uids, true, nil
	return &uid, true, nil
}

// Snapshot creates snapshot of the block into dir.

vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go (generated, vendored)
@@ -105,17 +105,12 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
	if err != nil {
		return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err)
	}
	ids, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
	id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
	if err != nil {
		return ulid.ULID{}, fmt.Errorf("compactor write: %w", err)
	}

	// No block was produced. Caller is responsible to check empty
	// ulid.ULID based on its use case.
	if len(ids) == 0 {
		return ulid.ULID{}, nil
	}
	return ids[0], nil
	return id, nil
}

func (w *BlockWriter) Close() error {

vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go (generated, vendored)
@@ -76,7 +76,6 @@ func (c *FloatHistogramChunk) NumSamples() int {
func (c *FloatHistogramChunk) Layout() (
	schema int32, zeroThreshold float64,
	negativeSpans, positiveSpans []histogram.Span,
	customValues []float64,
	err error,
) {
	if c.NumSamples() == 0 {
@@ -134,18 +133,17 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
	a := &FloatHistogramAppender{
		b: &c.b,

		schema:       it.schema,
		zThreshold:   it.zThreshold,
		pSpans:       it.pSpans,
		nSpans:       it.nSpans,
		customValues: it.customValues,
		t:            it.t,
		tDelta:       it.tDelta,
		cnt:          it.cnt,
		zCnt:         it.zCnt,
		pBuckets:     pBuckets,
		nBuckets:     nBuckets,
		sum:          it.sum,
		schema:     it.schema,
		zThreshold: it.zThreshold,
		pSpans:     it.pSpans,
		nSpans:     it.nSpans,
		t:          it.t,
		tDelta:     it.tDelta,
		cnt:        it.cnt,
		zCnt:       it.zCnt,
		pBuckets:   pBuckets,
		nBuckets:   nBuckets,
		sum:        it.sum,
	}
	if it.numTotal == 0 {
		a.sum.leading = 0xff
@@ -193,7 +191,6 @@ type FloatHistogramAppender struct {
	schema int32
	zThreshold float64
	pSpans, nSpans []histogram.Span
	customValues []float64

	t, tDelta int64
	sum, cnt, zCnt xorValue
@@ -225,7 +222,6 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
@@ -267,11 +263,6 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
		return
	}

	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
		counterReset = true
		return
	}

	if h.ZeroCount < a.zCnt.value {
		// There has been a counter reset since ZeroThreshold didn't change.
		counterReset = true
@@ -312,7 +303,6 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
@@ -339,10 +329,6 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
		return
	}

	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
		return
	}

	positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
	negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
	okToAppend = true
@@ -436,7 +422,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
	if num == 0 {
		// The first append gets the privilege to dictate the layout
		// but it's also responsible for encoding it into the chunk!
		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
		a.schema = h.Schema
		a.zThreshold = h.ZeroThreshold

@@ -452,12 +438,6 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
		} else {
			a.nSpans = nil
		}
		if len(h.CustomValues) > 0 {
			a.customValues = make([]float64, len(h.CustomValues))
			copy(a.customValues, h.CustomValues)
		} else {
			a.customValues = nil
		}

		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
		if numPBuckets > 0 {
@@ -713,7 +693,6 @@ type floatHistogramIterator struct {
	schema int32
	zThreshold float64
	pSpans, nSpans []histogram.Span
	customValues []float64

	// For the fields that are tracked as deltas and ultimately dod's.
	t int64
@@ -774,7 +753,6 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pBuckets,
		NegativeBuckets: it.nBuckets,
		CustomValues:    it.customValues,
	}
}

@@ -797,9 +775,6 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(it.nBuckets))
	copy(fh.NegativeBuckets, it.nBuckets)

	fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
	copy(fh.CustomValues, it.customValues)

	return it.t, fh
}

@@ -844,7 +819,7 @@ func (it *floatHistogramIterator) Next() ValueType {
		// The first read is responsible for reading the chunk layout
		// and for initializing fields that depend on it. We give
		// counter reset info at chunk level, hence we discard it here.
		schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
		schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
		if err != nil {
			it.err = err
			return ValNone
@@ -852,7 +827,6 @@ func (it *floatHistogramIterator) Next() ValueType {
		it.schema = schema
		it.zThreshold = zeroThreshold
		it.pSpans, it.nSpans = posSpans, negSpans
		it.customValues = customValues
		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
		// Allocate bucket slices as needed, recycling existing slices
		// in case this iterator was reset and already has slices of a

vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go (generated, vendored)
@@ -69,7 +69,6 @@ func (c *HistogramChunk) NumSamples() int {
func (c *HistogramChunk) Layout() (
	schema int32, zeroThreshold float64,
	negativeSpans, positiveSpans []histogram.Span,
	customValues []float64,
	err error,
) {
	if c.NumSamples() == 0 {
@@ -132,7 +131,6 @@ func (c *HistogramChunk) Appender() (Appender, error) {
		zThreshold:   it.zThreshold,
		pSpans:       it.pSpans,
		nSpans:       it.nSpans,
		customValues: it.customValues,
		t:            it.t,
		cnt:          it.cnt,
		zCnt:         it.zCnt,
@@ -200,7 +198,6 @@ type HistogramAppender struct {
	schema int32
	zThreshold float64
	pSpans, nSpans []histogram.Span
	customValues []float64

	// Although we intend to start new chunks on counter resets, we still
	// have to handle negative deltas for gauge histograms. Therefore, even
@@ -244,7 +241,6 @@ func (a *HistogramAppender) Append(int64, float64) {
// The chunk is not appendable in the following cases:
//
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - Any buckets have disappeared.
// - There was a counter reset in the count of observations or in any bucket,
@@ -287,11 +283,6 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
		return
	}

	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
		counterReset = true
		return
	}

	if h.ZeroCount < a.zCnt {
		// There has been a counter reset since ZeroThreshold didn't change.
		counterReset = true
@@ -332,7 +323,6 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
//
// The chunk is not appendable in the following cases:
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
// - The last sample in the chunk was stale while the current sample is not stale.
func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
@@ -359,10 +349,6 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
		return
	}

	if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
		return
	}

	positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
	negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
	okToAppend = true
@@ -456,7 +442,7 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
	if num == 0 {
		// The first append gets the privilege to dictate the layout
		// but it's also responsible for encoding it into the chunk!
		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans, h.CustomValues)
		writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
		a.schema = h.Schema
		a.zThreshold = h.ZeroThreshold

@@ -472,12 +458,6 @@ func (a *HistogramAppender) appendHistogram(t int64, h *histogram.Histogram) {
		} else {
			a.nSpans = nil
		}
		if len(h.CustomValues) > 0 {
			a.customValues = make([]float64, len(h.CustomValues))
			copy(a.customValues, h.CustomValues)
		} else {
			a.customValues = nil
		}

		numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
		if numPBuckets > 0 {
@@ -761,7 +741,6 @@ type histogramIterator struct {
	schema int32
	zThreshold float64
	pSpans, nSpans []histogram.Span
	customValues []float64

	// For the fields that are tracked as deltas and ultimately dod's.
	t int64
@@ -818,7 +797,6 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pBuckets,
		NegativeBuckets: it.nBuckets,
		CustomValues:    it.customValues,
	}
}

@@ -841,9 +819,6 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
	h.NegativeBuckets = resize(h.NegativeBuckets, len(it.nBuckets))
	copy(h.NegativeBuckets, it.nBuckets)

	h.CustomValues = resize(h.CustomValues, len(it.customValues))
	copy(h.CustomValues, it.customValues)

	return it.t, h
}

@@ -864,7 +839,6 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pFloatBuckets,
		NegativeBuckets: it.nFloatBuckets,
		CustomValues:    it.customValues,
	}
}

@@ -895,9 +869,6 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
		fh.NegativeBuckets[i] = currentNegative
	}

	fh.CustomValues = resize(fh.CustomValues, len(it.customValues))
	copy(fh.CustomValues, it.customValues)

	return it.t, fh
}

@@ -956,7 +927,7 @@ func (it *histogramIterator) Next() ValueType {
		// The first read is responsible for reading the chunk layout
		// and for initializing fields that depend on it. We give
		// counter reset info at chunk level, hence we discard it here.
		schema, zeroThreshold, posSpans, negSpans, customValues, err := readHistogramChunkLayout(&it.br)
		schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
		if err != nil {
			it.err = err
			return ValNone
@@ -964,7 +935,6 @@ func (it *histogramIterator) Next() ValueType {
		it.schema = schema
		it.zThreshold = zeroThreshold
		it.pSpans, it.nSpans = posSpans, negSpans
		it.customValues = customValues
		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
		// The code below recycles existing slices in case this iterator
		// was reset and already has slices of a sufficient capacity.

vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go (generated, vendored)
@@ -21,21 +21,17 @@ import (

func writeHistogramChunkLayout(
	b *bstream, schema int32, zeroThreshold float64,
	positiveSpans, negativeSpans []histogram.Span, customValues []float64,
	positiveSpans, negativeSpans []histogram.Span,
) {
	putZeroThreshold(b, zeroThreshold)
	putVarbitInt(b, int64(schema))
	putHistogramChunkLayoutSpans(b, positiveSpans)
	putHistogramChunkLayoutSpans(b, negativeSpans)
	if histogram.IsCustomBucketsSchema(schema) {
		putHistogramChunkLayoutCustomBounds(b, customValues)
	}
}

func readHistogramChunkLayout(b *bstreamReader) (
	schema int32, zeroThreshold float64,
	positiveSpans, negativeSpans []histogram.Span,
	customValues []float64,
	err error,
) {
	zeroThreshold, err = readZeroThreshold(b)
@@ -59,13 +55,6 @@ func readHistogramChunkLayout(b *bstreamReader) (
		return
	}

	if histogram.IsCustomBucketsSchema(schema) {
		customValues, err = readHistogramChunkLayoutCustomBounds(b)
		if err != nil {
			return
		}
	}

	return
}

@@ -102,30 +91,6 @@ func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
	return spans, nil
}

func putHistogramChunkLayoutCustomBounds(b *bstream, customValues []float64) {
	putVarbitUint(b, uint64(len(customValues)))
	for _, bound := range customValues {
		putCustomBound(b, bound)
	}
}

func readHistogramChunkLayoutCustomBounds(b *bstreamReader) ([]float64, error) {
	var customValues []float64
	num, err := readVarbitUint(b)
	if err != nil {
		return nil, err
	}
	for i := 0; i < int(num); i++ {
		bound, err := readCustomBound(b)
		if err != nil {
			return nil, err
		}

		customValues = append(customValues, bound)
	}
	return customValues, nil
}

// putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail:
// - If the threshold is 0, store a single zero byte.
@@ -174,59 +139,6 @@ func readZeroThreshold(br *bstreamReader) (float64, error) {
	}
}

// isWholeWhenMultiplied checks to see if the number when multiplied by 1000 can
// be converted into an integer without losing precision.
func isWholeWhenMultiplied(in float64) bool {
	i := uint(math.Round(in * 1000))
	out := float64(i) / 1000
	return in == out
}

// putCustomBound writes a custom bound to the bstream. It stores values from
// 0 to 33554.430 (inclusive) that are multiples of 0.001 in unsigned varbit
// encoding of up to 4 bytes, but needs 1 bit + 8 bytes for other values like
// negative numbers, numbers greater than 33554.430, or numbers that are not
// a multiple of 0.001, on the assumption that they are less common. In detail:
// - Multiply the bound by 1000, without rounding.
// - If the multiplied bound is >= 0, <= 33554430 and a whole number,
//   add 1 and store it in unsigned varbit encoding. All these numbers are
//   greater than 0, so the leading bit of the varbit is always 1!
// - Otherwise, store a 0 bit, followed by the 8 bytes of the original
//   bound as a float64.
//
// When reading the values, we can first decode a value as unsigned varbit,
// if it's 0, then we read the next 8 bytes as a float64, otherwise
// we can convert the value to a float64 by subtracting 1 and dividing by 1000.
func putCustomBound(b *bstream, f float64) {
	tf := f * 1000
	// 33554431-1 comes from the maximum that can be stored in a varbit in 4
	// bytes, other values are stored in 8 bytes anyway.
	if tf < 0 || tf > 33554430 || !isWholeWhenMultiplied(f) {
		b.writeBit(zero)
		b.writeBits(math.Float64bits(f), 64)
		return
	}
	putVarbitUint(b, uint64(math.Round(tf))+1)
}

// readCustomBound reads the custom bound written with putCustomBound.
func readCustomBound(br *bstreamReader) (float64, error) {
	b, err := readVarbitUint(br)
	if err != nil {
		return 0, err
	}
	switch b {
	case 0:
		v, err := br.readBits(64)
		if err != nil {
			return 0, err
		}
		return math.Float64frombits(v), nil
	default:
		return float64(b-1) / 1000, nil
	}
}
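The encode/decode pair above round-trips through a simple value transform: scale by 1000, add 1, and reverse on read, with 0 reserved as an escape for raw float64 bits. Stripped of the bitstream plumbing, the arithmetic looks like this (a sketch under those assumptions, not the vendored API):

package main

import "fmt"

// encode mirrors putCustomBound's value transform: common bounds become a
// small positive integer, everything else signals the raw-float escape (0).
func encode(f float64) uint64 {
	tf := f * 1000
	whole := float64(uint64(tf+0.5))/1000 == f // Rough isWholeWhenMultiplied.
	if tf < 0 || tf > 33554430 || !whole {
		return 0 // Escape: the real encoder then writes 8 raw float64 bytes.
	}
	return uint64(tf+0.5) + 1
}

// decode mirrors readCustomBound's non-escape branch (never called with 0).
func decode(b uint64) float64 { return float64(b-1) / 1000 }

func main() {
	for _, f := range []float64{0, 0.25, 10, 33554.430} {
		fmt.Println(f, "->", encode(f), "->", decode(encode(f)))
	}
	fmt.Println(encode(-1), encode(0.0001)) // 0 0: both take the 8-byte escape.
}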

type bucketIterator struct {
	spans []histogram.Span
	span  int // Span position of last yielded bucket.

vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go (generated, vendored)
@@ -60,7 +60,7 @@ type XORChunk struct {
	b bstream
}

// NewXORChunk returns a new chunk with XOR encoding.
// NewXORChunk returns a new chunk with XOR encoding of the given size.
func NewXORChunk() *XORChunk {
	b := make([]byte, 2, 128)
	return &XORChunk{b: bstream{stream: b, count: 0}}

vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go (generated, vendored)
@@ -133,6 +133,15 @@ type Meta struct {
	// Time range the data covers.
	// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
	MinTime, MaxTime int64

	// OOOLastRef, OOOLastMinTime and OOOLastMaxTime are kept as markers for
	// overlapping chunks.
	// These fields point to the last created out of order Chunk (the head) that existed
	// when Series() was called and was overlapping.
	// Series() and Chunk() method responses should be consistent for the same
	// query even if new data is added in between the calls.
	OOOLastRef                     ChunkRef
	OOOLastMinTime, OOOLastMaxTime int64
}

// ChunkFromSamples requires all samples to have the same type.

vendor/github.com/prometheus/prometheus/tsdb/compact.go (generated, vendored)
@@ -58,23 +58,19 @@ type Compactor interface {
	// Results returned when compactions are in progress are undefined.
	Plan(dir string) ([]string, error)

	// Write persists one or more Blocks into a directory.
	// No Block is written when the resulting Block has 0 samples and returns an empty slice.
	// Prometheus always returns one or no block. The interface allows returning more than one
	// block for downstream users to experiment with compactor.
	Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error)
	// Write persists a Block into a directory.
	// No Block is written when the resulting Block has 0 samples, and returns an empty ulid.ULID{}.
	Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error)

	// Compact runs compaction against the provided directories. Must
	// only be called concurrently with results of Plan().
	// Can optionally pass a list of already open blocks,
	// to avoid having to reopen them.
	// Prometheus always returns one or no block. The interface allows returning more than one
	// block for downstream users to experiment with compactor.
	// When one resulting Block has 0 samples
	// When the resulting Block has 0 samples
	// * No block is written.
	// * The source dirs are marked Deletable.
	// * Block is not included in the result.
	Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error)
	// * Returns empty ulid.ULID{}.
	Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error)
}
|
||||
|
||||
// LeveledCompactor implements the Compactor interface.
|
||||
@@ -445,11 +441,11 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
|
||||
|
||||
// Compact creates a new block in the compactor's directory from the blocks in the
|
||||
// provided directories.
|
||||
func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) {
|
||||
func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) {
|
||||
return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{})
|
||||
}
|
||||
|
||||
func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) ([]ulid.ULID, error) {
|
||||
func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) {
|
||||
var (
|
||||
blocks []BlockReader
|
||||
bs []*Block
|
||||
@@ -461,7 +457,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
|
||||
for _, d := range dirs {
|
||||
meta, _, err := readMetaFile(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return uid, err
|
||||
}
|
||||
|
||||
var b *Block
|
||||
@@ -479,7 +475,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
|
||||
var err error
|
||||
b, err = OpenBlock(c.logger, d, c.chunkPool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return uid, err
|
||||
}
|
||||
defer b.Close()
|
||||
}
|
||||
@@ -490,10 +486,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
|
||||
uids = append(uids, meta.ULID.String())
|
||||
}
|
||||
|
||||
uid := ulid.MustNew(ulid.Now(), rand.Reader)
|
||||
uid = ulid.MustNew(ulid.Now(), rand.Reader)
|
||||
|
||||
meta := CompactBlockMetas(uid, metas...)
|
||||
err := c.write(dest, meta, blockPopulator, blocks...)
|
||||
err = c.write(dest, meta, blockPopulator, blocks...)
|
||||
if err == nil {
|
||||
if meta.Stats.NumSamples == 0 {
|
||||
for _, b := range bs {
|
||||
@@ -507,25 +503,25 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
|
||||
}
|
||||
b.numBytesMeta = n
|
||||
}
|
||||
uid = ulid.ULID{}
|
||||
level.Info(c.logger).Log(
|
||||
"msg", "compact blocks resulted in empty block",
|
||||
"count", len(blocks),
|
||||
"sources", fmt.Sprintf("%v", uids),
|
||||
"duration", time.Since(start),
|
||||
)
|
||||
return nil, nil
|
||||
} else {
|
||||
level.Info(c.logger).Log(
|
||||
"msg", "compact blocks",
|
||||
"count", len(blocks),
|
||||
"mint", meta.MinTime,
|
||||
"maxt", meta.MaxTime,
|
||||
"ulid", meta.ULID,
|
||||
"sources", fmt.Sprintf("%v", uids),
|
||||
"duration", time.Since(start),
|
||||
)
|
||||
}
|
||||
|
||||
level.Info(c.logger).Log(
|
||||
"msg", "compact blocks",
|
||||
"count", len(blocks),
|
||||
"mint", meta.MinTime,
|
||||
"maxt", meta.MaxTime,
|
||||
"ulid", meta.ULID,
|
||||
"sources", fmt.Sprintf("%v", uids),
|
||||
"duration", time.Since(start),
|
||||
)
|
||||
return []ulid.ULID{uid}, nil
|
||||
return uid, nil
|
||||
}
|
||||
|
||||
errs := tsdb_errors.NewMulti(err)
|
||||
@@ -537,10 +533,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errs.Err()
|
||||
return uid, errs.Err()
|
||||
}
|
||||
|
||||
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) {
|
||||
func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) {
|
||||
start := time.Now()
|
||||
|
||||
uid := ulid.MustNew(ulid.Now(), rand.Reader)
|
||||
@@ -564,7 +560,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
|
||||
|
||||
err := c.write(dest, meta, DefaultBlockPopulator{}, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return uid, err
|
||||
}
|
||||
|
||||
if meta.Stats.NumSamples == 0 {
|
||||
@@ -574,7 +570,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
|
||||
"maxt", meta.MaxTime,
|
||||
"duration", time.Since(start),
|
||||
)
|
||||
return nil, nil
|
||||
return ulid.ULID{}, nil
|
||||
}
|
||||
|
||||
level.Info(c.logger).Log(
|
||||
@@ -585,7 +581,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
|
||||
"duration", time.Since(start),
|
||||
"ooo", meta.Compaction.FromOutOfOrder(),
|
||||
)
|
||||
return []ulid.ULID{uid}, nil
|
||||
return uid, nil
|
||||
}
|
||||
|
||||
// instrumentedChunkWriter is used for level 1 compactions to record statistics
|
||||
@@ -656,7 +652,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
|
||||
}
|
||||
closers = append(closers, indexw)
|
||||
|
||||
if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw, AllSortedPostings); err != nil {
|
||||
if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil {
|
||||
return fmt.Errorf("populate block: %w", err)
|
||||
}
|
||||
|
||||
@@ -722,20 +718,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl
|
||||
}
|
||||
|
||||
type BlockPopulator interface {
|
||||
PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error
|
||||
}
|
||||
|
||||
// IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader.
|
||||
type IndexReaderPostingsFunc func(ctx context.Context, reader IndexReader) index.Postings
|
||||
|
||||
// AllSortedPostings returns a sorted all posting iterator from the input index reader.
|
||||
func AllSortedPostings(ctx context.Context, reader IndexReader) index.Postings {
|
||||
k, v := index.AllPostingsKey()
|
||||
all, err := reader.Postings(ctx, k, v)
|
||||
if err != nil {
|
||||
return index.ErrPostings(err)
|
||||
}
|
||||
return reader.SortedPostings(all)
|
||||
PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error
|
||||
}
|
||||
|
||||
type DefaultBlockPopulator struct{}
|
||||
@@ -743,7 +726,7 @@ type DefaultBlockPopulator struct{}
|
||||
// PopulateBlock fills the index and chunk writers with new data gathered as the union
|
||||
// of the provided blocks. It returns meta information for the new block.
|
||||
// It expects sorted blocks input by mint.
|
||||
func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) {
|
||||
func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
|
||||
if len(blocks) == 0 {
|
||||
return errors.New("cannot populate block from no readers")
|
||||
}
|
||||
@@ -801,9 +784,14 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa
|
||||
}
|
||||
closers = append(closers, tombsr)
|
||||
|
||||
postings := postingsFunc(ctx, indexr)
|
||||
k, v := index.AllPostingsKey()
|
||||
all, err := indexr.Postings(ctx, k, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
all = indexr.SortedPostings(all)
|
||||
// Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp.
|
||||
sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, postings, meta.MinTime, meta.MaxTime-1, false))
|
||||
sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false))
|
||||
syms := indexr.Symbols()
|
||||
if i == 0 {
|
||||
symbols = syms
|
||||
|
||||
118 changes: vendor/github.com/prometheus/prometheus/tsdb/db.go (generated, vendored)
@@ -192,22 +192,12 @@ type Options struct {

    // NewCompactorFunc is a function that returns a TSDB compactor.
    NewCompactorFunc NewCompactorFunc

    // BlockQuerierFunc is a function to return storage.Querier from a BlockReader.
    BlockQuerierFunc BlockQuerierFunc

    // BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader.
    BlockChunkQuerierFunc BlockChunkQuerierFunc
}

type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)

type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}

type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, error)

type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)

// DB handles reads and writes of time series falling into
// a hashed partition of a seriedb.
type DB struct {
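The NewCompactorFunc hook retained in Options above is the seam for plugging in a custom Compactor. A hedged sketch of wiring it up — here it simply builds the stock LeveledCompactor, but any type satisfying the single-ULID Compactor interface from compact.go could be returned instead; the nil logger/registerer/stats arguments rely on upstream's nil handling:

```go
package main

import (
    "context"

    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/prometheus/tsdb"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
)

func openWithCustomCompactor(dir string) (*tsdb.DB, error) {
    opts := tsdb.DefaultOptions()
    opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, o *tsdb.Options) (tsdb.Compactor, error) {
        // A real implementation would wrap or replace this compactor;
        // passing a nil merge func keeps the upstream default.
        return tsdb.NewLeveledCompactor(ctx, r, l, ranges, pool, nil)
    }
    return tsdb.Open(dir, nil, nil, opts, nil)
}
```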
@@ -254,10 +244,6 @@ type DB struct {
    writeNotified wlog.WriteNotified

    registerer prometheus.Registerer

    blockQuerierFunc BlockQuerierFunc

    blockChunkQuerierFunc BlockChunkQuerierFunc
}

type dbMetrics struct {
@@ -573,12 +559,10 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue

    db.closers = append(db.closers, head)
    return &DB{
        dir:                   db.dir,
        logger:                db.logger,
        blocks:                blocks,
        head:                  head,
        blockQuerierFunc:      NewBlockQuerier,
        blockChunkQuerierFunc: NewBlockChunkQuerier,
        dir:    db.dir,
        logger: db.logger,
        blocks: blocks,
        head:   head,
    }, nil
}

@@ -886,18 +870,6 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
    }
    db.compactCancel = cancel

    if opts.BlockQuerierFunc == nil {
        db.blockQuerierFunc = NewBlockQuerier
    } else {
        db.blockQuerierFunc = opts.BlockQuerierFunc
    }

    if opts.BlockChunkQuerierFunc == nil {
        db.blockChunkQuerierFunc = NewBlockChunkQuerier
    } else {
        db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
    }

    var wal, wbl *wlog.WL
    segmentSize := wlog.DefaultSegmentSize
    // Wal is enabled.
@@ -1295,9 +1267,6 @@ func (db *DB) CompactOOOHead(ctx context.Context) error {
    return db.compactOOOHead(ctx)
}

// Callback for testing.
var compactOOOHeadTestingCallback func()

func (db *DB) compactOOOHead(ctx context.Context) error {
    if !db.oooWasEnabled.Load() {
        return nil
@@ -1307,11 +1276,6 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
        return fmt.Errorf("get ooo compaction head: %w", err)
    }

    if compactOOOHeadTestingCallback != nil {
        compactOOOHeadTestingCallback()
        compactOOOHeadTestingCallback = nil
    }

    ulids, err := db.compactOOO(db.dir, oooHead)
    if err != nil {
        return fmt.Errorf("compact ooo head: %w", err)
@@ -1372,11 +1336,13 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
    for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
        mint, maxt := t, t+blockSize
        // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes.
        uids, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta)
        uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta)
        if err != nil {
            return nil, err
        }
        ulids = append(ulids, uids...)
        if uid.Compare(ulid.ULID{}) != 0 {
            ulids = append(ulids, uid)
        }
    }

    if len(ulids) == 0 {
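With the slice return gone, the all-zero ulid.ULID{} becomes the "no block was written" sentinel, which is why the loop above only collects uids that compare non-zero. A small helper illustrating the convention (hypothetical, using the vendored github.com/oklog/ulid package):

```go
// appendIfWritten keeps only real block IDs. ulid.ULID is a [16]byte value
// type, so its zero value can safely mark "compaction produced no block".
func appendIfWritten(ulids []ulid.ULID, uid ulid.ULID) []ulid.ULID {
    if uid.Compare(ulid.ULID{}) != 0 { // equivalently: uid != (ulid.ULID{})
        ulids = append(ulids, uid)
    }
    return ulids
}
```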
@@ -1398,26 +1364,23 @@
// compactHead compacts the given RangeHead.
// The compaction mutex should be held before calling this method.
func (db *DB) compactHead(head *RangeHead) error {
    uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
    uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
    if err != nil {
        return fmt.Errorf("persist head block: %w", err)
    }

    if err := db.reloadBlocks(); err != nil {
        multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
        for _, uid := range uids {
            if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
                multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
            }
        if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
            return tsdb_errors.NewMulti(
                fmt.Errorf("reloadBlocks blocks: %w", err),
                fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll),
            ).Err()
        }
        return multiErr.Err()
        return fmt.Errorf("reloadBlocks blocks: %w", err)
    }
    if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
        return fmt.Errorf("head memory truncate: %w", err)
    }

    db.head.RebuildSymbolTable(db.logger)

    return nil
}

@@ -1448,19 +1411,16 @@ func (db *DB) compactBlocks() (err error) {
    default:
    }

    uids, err := db.compactor.Compact(db.dir, plan, db.blocks)
    uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
    if err != nil {
        return fmt.Errorf("compact %s: %w", plan, err)
    }

    if err := db.reloadBlocks(); err != nil {
        errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
        for _, uid := range uids {
            if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
                errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
            }
        if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
            return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err)
        }
        return errs.Err()
        return fmt.Errorf("reloadBlocks blocks: %w", err)
    }
}

@@ -1581,15 +1541,12 @@ func (db *DB) reloadBlocks() (err error) {
    oldBlocks := db.blocks
    db.blocks = toLoad

    // Only check overlapping blocks when overlapping compaction is enabled.
    if db.opts.EnableOverlappingCompaction {
        blockMetas := make([]BlockMeta, 0, len(toLoad))
        for _, b := range toLoad {
            blockMetas = append(blockMetas, b.Meta())
        }
        if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
            level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String())
        }
    blockMetas := make([]BlockMeta, 0, len(toLoad))
    for _, b := range toLoad {
        blockMetas = append(blockMetas, b.Meta())
    }
    if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
        level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String())
    }

    // Append blocks to old, deletable blocks, so we can close them.
@@ -2003,7 +1960,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
    if maxt >= db.head.MinTime() {
        rh := NewRangeHead(db.head, mint, maxt)
        var err error
        inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
        inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt)
        if err != nil {
            return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
        }
@@ -2020,7 +1977,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
        }
        if getNew {
            rh := NewRangeHead(db.head, newMint, maxt)
            inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
            inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt)
            if err != nil {
                return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
            }
@@ -2034,9 +1991,9 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
    if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
        rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
        var err error
        outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
        outOfOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt)
        if err != nil {
            // If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead.
            // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
            rh.isoState.Close()

            return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err)
@@ -2046,7 +2003,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
    }

    for _, b := range blocks {
        q, err := db.blockQuerierFunc(b, mint, maxt)
        q, err := NewBlockQuerier(b, mint, maxt)
        if err != nil {
            return nil, fmt.Errorf("open querier for block %s: %w", b, err)
        }
@@ -2084,7 +2041,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer

    if maxt >= db.head.MinTime() {
        rh := NewRangeHead(db.head, mint, maxt)
        inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
        inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt)
        if err != nil {
            return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
        }
@@ -2101,7 +2058,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
        }
        if getNew {
            rh := NewRangeHead(db.head, newMint, maxt)
            inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
            inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt)
            if err != nil {
                return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
            }
@@ -2114,11 +2071,8 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer

    if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) {
        rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef)
        outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt)
        outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt)
        if err != nil {
            // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead.
            rh.isoState.Close()

            return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err)
        }

@@ -2126,7 +2080,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer
    }

    for _, b := range blocks {
        q, err := db.blockChunkQuerierFunc(b, mint, maxt)
        q, err := NewBlockChunkQuerier(b, mint, maxt)
        if err != nil {
            return nil, fmt.Errorf("open querier for block %s: %w", b, err)
        }
@@ -2195,7 +2149,7 @@ func (db *DB) CleanTombstones() (err error) {
    cleanUpCompleted = true

    for _, pb := range db.Blocks() {
        uids, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor)
        uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor)
        if cleanErr != nil {
            return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr)
        }
@@ -2219,7 +2173,7 @@ func (db *DB) CleanTombstones() (err error) {
    }

    // Delete new block if it was created.
    for _, uid := range uids {
        if uid != nil && *uid != (ulid.ULID{}) {
            dir := filepath.Join(db.Dir(), uid.String())
            if err := os.RemoveAll(dir); err != nil {
                level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
34 changes: vendor/github.com/prometheus/prometheus/tsdb/head.go (generated, vendored)
@@ -1552,7 +1552,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {

    // Drop old chunks and remember series IDs and hashes if they can be
    // deleted entirely.
    deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef)
    deleted, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef)
    seriesRemoved := len(deleted)

    h.metrics.seriesRemoved.Add(float64(seriesRemoved))
@@ -1561,7 +1561,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
    h.numSeries.Sub(uint64(seriesRemoved))

    // Remove deleted series IDs from the postings lists.
    h.postings.Delete(deleted, affected)
    h.postings.Delete(deleted)

    // Remove tombstones referring to the deleted series.
    h.tombstones.DeleteTombstones(deleted)
@@ -1759,12 +1759,12 @@ type seriesHashmap struct {

func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries {
    if s, found := m.unique[hash]; found {
        if labels.Equal(s.labels(), lset) {
        if labels.Equal(s.lset, lset) {
            return s
        }
    }
    for _, s := range m.conflicts[hash] {
        if labels.Equal(s.labels(), lset) {
        if labels.Equal(s.lset, lset) {
            return s
        }
    }
@@ -1772,7 +1772,7 @@ func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries {
}

func (m *seriesHashmap) set(hash uint64, s *memSeries) {
    if existing, found := m.unique[hash]; !found || labels.Equal(existing.labels(), s.labels()) {
    if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) {
        m.unique[hash] = s
        return
    }
@@ -1781,7 +1781,7 @@ func (m *seriesHashmap) set(hash uint64, s *memSeries) {
    }
    l := m.conflicts[hash]
    for i, prev := range l {
        if labels.Equal(prev.labels(), s.labels()) {
        if labels.Equal(prev.lset, s.lset) {
            l[i] = s
            return
        }
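get and set above implement a two-level hash map: the common case lives in unique (one series per hash) and only genuinely distinct label sets that collide on the same hash spill into conflicts. The same pattern reduced to strings, as a standalone sketch:

```go
// collisionMap mirrors seriesHashmap's unique/conflicts split: the first
// value per hash sits in a flat map, and later colliding keys go into an
// overflow slice that is only allocated when a real collision happens.
type collisionMap struct {
    unique    map[uint64]string
    conflicts map[uint64][]string
}

func (m *collisionMap) set(hash uint64, key string) {
    if existing, found := m.unique[hash]; !found || existing == key {
        m.unique[hash] = key
        return
    }
    if m.conflicts == nil {
        m.conflicts = map[uint64][]string{}
    }
    for i, prev := range m.conflicts[hash] {
        if prev == key {
            m.conflicts[hash][i] = key
            return
        }
    }
    m.conflicts[hash] = append(m.conflicts[hash], key)
}
```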
@@ -1869,10 +1869,9 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st
// but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct
// and there's no easy way to cast maps.
// minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series.
func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) {
func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) {
    var (
        deleted    = map[storage.SeriesRef]struct{}{}
        affected   = map[labels.Label]struct{}{}
        rmChunks   = 0
        actualMint int64 = math.MaxInt64
        minOOOTime int64 = math.MaxInt64
@@ -1928,10 +1927,9 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
        }

        deleted[storage.SeriesRef(series.ref)] = struct{}{}
        series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} })
        s.hashes[hashShard].del(hash, series.ref)
        delete(s.series[refShard], series.ref)
        deletedForCallback[series.ref] = series.lset // OK to access lset; series is locked at the top of this function.
        deletedForCallback[series.ref] = series.lset
    }

    s.iterForDeletion(check)
@@ -1940,7 +1938,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
        actualMint = mint
    }

    return deleted, affected, rmChunks, actualMint, minOOOTime, minMmapFile
    return deleted, rmChunks, actualMint, minOOOTime, minMmapFile
}

// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each.
@@ -2023,7 +2021,7 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu
    }
    // Setting the series in the s.hashes marks the creation of series
    // as any further calls to this methods would return that series.
    s.seriesLifecycleCallback.PostCreation(series.labels())
    s.seriesLifecycleCallback.PostCreation(series.lset)

    i = uint64(series.ref) & uint64(s.size-1)

@@ -2064,19 +2062,16 @@ func (s sample) Type() chunkenc.ValueType {
// memSeries is the in-memory representation of a series. None of its methods
// are goroutine safe and it is the caller's responsibility to lock it.
type memSeries struct {
    // Members up to the Mutex are not changed after construction, so can be accessed without a lock.
    sync.Mutex

    ref  chunks.HeadSeriesRef
    lset labels.Labels
    meta *metadata.Metadata

    // Series labels hash to use for sharding purposes. The value is always 0 when sharding has not
    // been explicitly enabled in TSDB.
    shardHash uint64

    // Everything after here should only be accessed with the lock held.
    sync.Mutex

    lset labels.Labels // Locking required with -tags dedupelabels, not otherwise.

    // Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps.
    // When compaction runs, chunks get moved into a block and all pointers are shifted like so:
    //
@@ -2099,7 +2094,6 @@ type memSeries struct {

    nextAt                           int64 // Timestamp at which to cut the next chunk.
    histogramChunkHasComputedEndTime bool  // True if nextAt has been predicted for the current histograms chunk; false otherwise.
    pendingCommit                    bool  // Whether there are samples waiting to be committed to this series.

    // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
    lastValue float64
@@ -2115,6 +2109,8 @@ type memSeries struct {

    // txs is nil if isolation is disabled.
    txs *txRing

    pendingCommit bool // Whether there are samples waiting to be committed to this series.
}

// memSeriesOOOFields contains the fields required by memSeries
111 changes: vendor/github.com/prometheus/prometheus/tsdb/head_append.go (generated, vendored)
@@ -554,7 +554,7 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
    // Ensure no empty labels have gotten through.
    e.Labels = e.Labels.WithoutEmpty()

    err := a.head.exemplars.ValidateExemplar(s.labels(), e)
    err := a.head.exemplars.ValidateExemplar(s.lset, e)
    if err != nil {
        if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) {
            // Duplicate, don't return an error but don't accept the exemplar.
@@ -708,7 +708,7 @@ func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRe
        return 0, labels.EmptyLabels()
    }
    // returned labels must be suitable to pass to Append()
    return storage.SeriesRef(s.ref), s.labels()
    return storage.SeriesRef(s.ref), s.lset
}

// log writes all headAppender's data to the WAL.
@@ -816,7 +816,7 @@ func (a *headAppender) Commit() (err error) {
            continue
        }
        // We don't instrument exemplar appends here, all is instrumented by storage.
        if err := a.head.exemplars.AddExemplar(s.labels(), e.exemplar); err != nil {
        if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil {
            if errors.Is(err, storage.ErrOutOfOrderExemplar) {
                continue
            }
@@ -846,17 +846,16 @@ func (a *headAppender) Commit() (err error) {
        // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled)
        floatOOBRejected int

        inOrderMint         int64 = math.MaxInt64
        inOrderMaxt         int64 = math.MinInt64
        oooMinT             int64 = math.MaxInt64
        oooMaxT             int64 = math.MinInt64
        wblSamples          []record.RefSample
        oooMmapMarkers      map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef
        oooMmapMarkersCount int
        oooRecords          [][]byte
        oooCapMax           = a.head.opts.OutOfOrderCapMax.Load()
        series              *memSeries
        appendChunkOpts     = chunkOpts{
        inOrderMint     int64 = math.MaxInt64
        inOrderMaxt     int64 = math.MinInt64
        ooomint         int64 = math.MaxInt64
        ooomaxt         int64 = math.MinInt64
        wblSamples      []record.RefSample
        oooMmapMarkers  map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef
        oooRecords      [][]byte
        oooCapMax       = a.head.opts.OutOfOrderCapMax.Load()
        series          *memSeries
        appendChunkOpts = chunkOpts{
            chunkDiskMapper: a.head.chunkDiskMapper,
            chunkRange:      a.head.chunkRange.Load(),
            samplesPerChunk: a.head.opts.SamplesPerChunk,
@@ -873,7 +872,6 @@ func (a *headAppender) Commit() (err error) {
            // WBL is not enabled. So no need to collect.
            wblSamples = nil
            oooMmapMarkers = nil
            oooMmapMarkersCount = 0
            return
        }
        // The m-map happens before adding a new sample. So we collect
@@ -882,14 +880,12 @@ func (a *headAppender) Commit() (err error) {
        // WBL Before this Commit(): [old samples before this commit for chunk 1]
        // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3]
        if oooMmapMarkers != nil {
            markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount)
            for ref, mmapRefs := range oooMmapMarkers {
                for _, mmapRef := range mmapRefs {
                    markers = append(markers, record.RefMmapMarker{
                        Ref:     ref,
                        MmapRef: mmapRef,
                    })
                }
            markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers))
            for ref, mmapRef := range oooMmapMarkers {
                markers = append(markers, record.RefMmapMarker{
                    Ref:     ref,
                    MmapRef: mmapRef,
                })
            }
            r := enc.MmapMarkers(markers, a.head.getBytesBuffer())
            oooRecords = append(oooRecords, r)
@@ -932,39 +928,32 @@ func (a *headAppender) Commit() (err error) {
        case oooSample:
            // Sample is OOO and OOO handling is enabled
            // and the delta is within the OOO tolerance.
            var mmapRefs []chunks.ChunkDiskMapperRef
            ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
            var mmapRef chunks.ChunkDiskMapperRef
            ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
            if chunkCreated {
                r, ok := oooMmapMarkers[series.ref]
                if !ok || r != nil {
                if !ok || r != 0 {
                    // !ok means there are no markers collected for these samples yet. So we first flush the samples
                    // before setting this m-map marker.

                    // r != nil means we have already m-mapped a chunk for this series in the same Commit().
                    // r != 0 means we have already m-mapped a chunk for this series in the same Commit().
                    // Hence, before we m-map again, we should add the samples and m-map markers
                    // seen till now to the WBL records.
                    collectOOORecords()
                }

                if oooMmapMarkers == nil {
                    oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
                }
                if len(mmapRefs) > 0 {
                    oooMmapMarkers[series.ref] = mmapRefs
                    oooMmapMarkersCount += len(mmapRefs)
                } else {
                    // No chunk was written to disk, so we need to set an initial marker for this series.
                    oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
                    oooMmapMarkersCount++
                    oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef)
                }
                oooMmapMarkers[series.ref] = mmapRef
            }
            if ok {
                wblSamples = append(wblSamples, s)
                if s.T < oooMinT {
                    oooMinT = s.T
                if s.T < ooomint {
                    ooomint = s.T
                }
                if s.T > oooMaxT {
                    oooMaxT = s.T
                if s.T > ooomaxt {
                    ooomaxt = s.T
                }
                floatOOOAccepted++
            } else {
@@ -1064,7 +1053,7 @@ func (a *headAppender) Commit() (err error) {
    a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
    a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
    a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
    a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
    a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt)

    collectOOORecords()
    if a.head.wbl != nil {
@@ -1080,14 +1069,14 @@ func (a *headAppender) Commit() (err error) {
}

// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) {
    if s.ooo == nil {
        s.ooo = &memSeriesOOOFields{}
    }
    c := s.ooo.oooHeadChunk
    if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
        // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
        c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
        c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
        chunkCreated = true
    }

@@ -1100,7 +1089,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
            c.maxTime = t
        }
    }
    return ok, chunkCreated, mmapRefs
    return ok, chunkCreated, mmapRef
}

// chunkOpts are chunk-level options that are passed when appending to a memSeries.
@@ -1442,7 +1431,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange

// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
    ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)

    s.ooo.oooHeadChunk = &oooHeadChunk{
@@ -1454,29 +1443,21 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
    return s.ooo.oooHeadChunk, ref
}

func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef {
    if s.ooo == nil || s.ooo.oooHeadChunk == nil {
        // OOO is not enabled or there is no head chunk, so nothing to m-map here.
        return nil
    }
    chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
    if err != nil {
        handleChunkWriteError(err)
        return nil
    }
    chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
    for _, memchunk := range chks {
        chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
        chunkRefs = append(chunkRefs, chunkRef)
        s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
            ref:        chunkRef,
            numSamples: uint16(memchunk.chunk.NumSamples()),
            minTime:    memchunk.minTime,
            maxTime:    memchunk.maxTime,
        })
        // There is no head chunk, so nothing to m-map here.
        return 0
    }
    xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
    chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
    s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
        ref:        chunkRef,
        numSamples: uint16(xor.NumSamples()),
        minTime:    s.ooo.oooHeadChunk.minTime,
        maxTime:    s.ooo.oooHeadChunk.maxTime,
    })
    s.ooo.oooHeadChunk = nil
    return chunkRefs
    return chunkRef
}

// mmapChunks will m-map all but first chunk on s.headChunks list.
95 changes: vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go (generated, vendored)
@@ -1,95 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build dedupelabels

package tsdb

import (
    "github.com/go-kit/log"
    "github.com/go-kit/log/level"

    "github.com/prometheus/prometheus/model/labels"
)

// Helper method to access labels under lock.
func (s *memSeries) labels() labels.Labels {
    s.Lock()
    defer s.Unlock()
    return s.lset
}

// RebuildSymbolTable goes through all the series in h, build a SymbolTable with all names and values,
// replace each series' Labels with one using that SymbolTable.
func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable {
    level.Info(logger).Log("msg", "RebuildSymbolTable starting")
    st := labels.NewSymbolTable()
    builder := labels.NewScratchBuilderWithSymbolTable(st, 0)
    rebuildLabels := func(lbls labels.Labels) labels.Labels {
        builder.Reset()
        lbls.Range(func(l labels.Label) {
            builder.Add(l.Name, l.Value)
        })
        return builder.Labels()
    }

    for i := 0; i < h.series.size; i++ {
        h.series.locks[i].Lock()

        for _, s := range h.series.hashes[i].unique {
            s.Lock()
            s.lset = rebuildLabels(s.lset)
            s.Unlock()
        }

        for _, all := range h.series.hashes[i].conflicts {
            for _, s := range all {
                s.Lock()
                s.lset = rebuildLabels(s.lset)
                s.Unlock()
            }
        }

        h.series.locks[i].Unlock()
    }
    type withReset interface{ ResetSymbolTable(*labels.SymbolTable) }
    if e, ok := h.exemplars.(withReset); ok {
        e.ResetSymbolTable(st)
    }
    level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", st.Len())
    return st
}

func (ce *CircularExemplarStorage) ResetSymbolTable(st *labels.SymbolTable) {
    builder := labels.NewScratchBuilderWithSymbolTable(st, 0)
    rebuildLabels := func(lbls labels.Labels) labels.Labels {
        builder.Reset()
        lbls.Range(func(l labels.Label) {
            builder.Add(l.Name, l.Value)
        })
        return builder.Labels()
    }

    ce.lock.RLock()
    defer ce.lock.RUnlock()

    for _, v := range ce.index {
        v.seriesLabels = rebuildLabels(v.seriesLabels)
    }
    for i := range ce.exemplars {
        if ce.exemplars[i].ref == nil {
            continue
        }
        ce.exemplars[i].exemplar.Labels = rebuildLabels(ce.exemplars[i].exemplar.Labels)
    }
}
32 changes: vendor/github.com/prometheus/prometheus/tsdb/head_other.go (generated, vendored)
@@ -1,32 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !dedupelabels

package tsdb

import (
    "github.com/go-kit/log"

    "github.com/prometheus/prometheus/model/labels"
)

// Helper method to access labels; trivial when not using dedupelabels.
func (s *memSeries) labels() labels.Labels {
    return s.lset
}

// No-op when not using dedupelabels.
func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable {
    return nil
}
170 changes: vendor/github.com/prometheus/prometheus/tsdb/head_read.go (generated, vendored)
@@ -142,7 +142,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
    }

    slices.SortFunc(series, func(a, b *memSeries) int {
        return labels.Compare(a.labels(), b.labels())
        return labels.Compare(a.lset, b.lset)
    })

    // Convert back to list.
@@ -189,7 +189,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
        h.head.metrics.seriesNotFound.Inc()
        return storage.ErrNotFound
    }
    builder.Assign(s.labels())
    builder.Assign(s.lset)

    if chks == nil {
        return nil
@@ -259,7 +259,7 @@ func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef,
        return "", storage.ErrNotFound
    }

    value := memSeries.labels().Get(label)
    value := memSeries.lset.Get(label)
    if value == "" {
        return "", storage.ErrNotFound
    }
@@ -267,29 +267,22 @@ func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef,
    return value, nil
}

// LabelNamesFor returns all the label names for the series referred to by the postings.
// LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted.
func (h *headIndexReader) LabelNamesFor(ctx context.Context, series index.Postings) ([]string, error) {
func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
    namesMap := make(map[string]struct{})
    i := 0
    for series.Next() {
        i++
        if i%checkContextEveryNIterations == 0 && ctx.Err() != nil {
    for _, id := range ids {
        if ctx.Err() != nil {
            return nil, ctx.Err()
        }
        memSeries := h.head.series.getByID(chunks.HeadSeriesRef(series.At()))
        memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id))
        if memSeries == nil {
            // Series not found, this happens during compaction,
            // when series was garbage collected after the caller got the series IDs.
            continue
            return nil, storage.ErrNotFound
        }
        memSeries.labels().Range(func(lbl labels.Label) {
        memSeries.lset.Range(func(lbl labels.Label) {
            namesMap[lbl.Name] = struct{}{}
        })
    }
    if err := series.Err(); err != nil {
        return nil, err
    }
    names := make([]string, 0, len(namesMap))
    for name := range namesMap {
        names = append(names, name)
@@ -467,7 +460,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
// amongst all the chunks in the OOOHead.
// This function is not thread safe unless the caller holds a lock.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (*mergedOOOChunks, error) {
func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) {
    _, cid := chunks.HeadChunkRef(meta.Ref).Unpack()

    // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are
@@ -487,27 +480,55 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe

    // We create a temporary slice of chunk metas to hold the information of all
    // possible chunks that may overlap with the requested chunk.
    tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)
    tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks))

    oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
    if s.ooo.oooHeadChunk != nil && s.ooo.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
        // We only want to append the head chunk if this chunk existed when
        // Series() was called. This brings consistency in case new data
        // is added in between Series() and Chunk() calls.
        if oooHeadRef == meta.OOOLastRef {
            tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
                meta: chunks.Meta{
                    // Ignoring samples added before and after the last known min and max time for this chunk.
                    MinTime: meta.OOOLastMinTime,
                    MaxTime: meta.OOOLastMaxTime,
                    Ref:     oooHeadRef,
                },
            })
        }
    }

    for i, c := range s.ooo.oooMmappedChunks {
        if maxMmapRef != 0 && c.ref > maxMmapRef {
        chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
        // We can skip chunks that came in later than the last known OOOLastRef.
        if chunkRef > meta.OOOLastRef {
            break
        }
        if c.OverlapsClosedInterval(mint, maxt) {

        switch {
        case chunkRef == meta.OOOLastRef:
            tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
                meta: chunks.Meta{
                    MinTime: meta.OOOLastMinTime,
                    MaxTime: meta.OOOLastMaxTime,
                    Ref:     chunkRef,
                },
                ref:      c.ref,
                origMinT: c.minTime,
                origMaxT: c.maxTime,
            })
        case c.OverlapsClosedInterval(mint, maxt):
            tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
                meta: chunks.Meta{
                    MinTime: c.minTime,
                    MaxTime: c.maxTime,
                    Ref:     chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))),
                    Ref:     chunkRef,
                },
                ref: c.ref,
            })
        }
    }
    // Add in data copied from the head OOO chunk.
    if meta.Chunk != nil {
        tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta})
    }

    // Next we want to sort all the collected chunks by min time so we can find
    // those that overlap and stop when we know the rest don't.
@@ -520,8 +541,22 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
            continue
        }
        var iterable chunkenc.Iterable
        if c.meta.Chunk != nil {
            iterable = c.meta.Chunk
        if c.meta.Ref == oooHeadRef {
            var xor *chunkenc.XORChunk
            var err error
            // If head chunk min and max time match the meta OOO markers
            // that means that the chunk has not expanded so we can append
            // it as it is.
            if s.ooo.oooHeadChunk.minTime == meta.OOOLastMinTime && s.ooo.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
                xor, err = s.ooo.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
            } else {
                // We need to remove samples that are outside of the markers
                xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
            }
            if err != nil {
                return nil, fmt.Errorf("failed to convert ooo head chunk to xor chunk: %w", err)
            }
            iterable = xor
        } else {
            chk, err := cdm.Chunk(c.ref)
            if err != nil {
@@ -531,7 +566,16 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
                }
                return nil, err
            }
            iterable = chk
            if c.meta.Ref == meta.OOOLastRef &&
                (c.origMinT != meta.OOOLastMinTime || c.origMaxT != meta.OOOLastMaxTime) {
                // The head expanded and was memory mapped so now we need to
                // wrap the chunk within a chunk that doesnt allows us to iterate
                // through samples out of the OOOLastMinT and OOOLastMaxT
                // markers.
                iterable = boundedIterable{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime}
            } else {
                iterable = chk
            }
        }
        mc.chunkIterables = append(mc.chunkIterables, iterable)
        if c.meta.MaxTime > absoluteMax {
@@ -542,6 +586,74 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
    return mc, nil
}

var _ chunkenc.Iterable = &boundedIterable{}

// boundedIterable is an implementation of chunkenc.Iterable that uses a
// boundedIterator that only iterates through samples which timestamps are
// >= minT and <= maxT.
type boundedIterable struct {
    chunk chunkenc.Chunk
    minT  int64
    maxT  int64
}

func (b boundedIterable) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
    it := b.chunk.Iterator(iterator)
    if it == nil {
        panic("iterator shouldn't be nil")
    }
    return boundedIterator{it, b.minT, b.maxT}
}

var _ chunkenc.Iterator = &boundedIterator{}

// boundedIterator is an implementation of Iterator that only iterates through
// samples which timestamps are >= minT and <= maxT.
type boundedIterator struct {
    chunkenc.Iterator
    minT int64
    maxT int64
}

// Next the first time its called it will advance as many positions as necessary
// until its able to find a sample within the bounds minT and maxT.
// If there are samples within bounds it will advance one by one amongst them.
// If there are no samples within bounds it will return false.
func (b boundedIterator) Next() chunkenc.ValueType {
    for b.Iterator.Next() == chunkenc.ValFloat {
        t, _ := b.Iterator.At()
        switch {
        case t < b.minT:
            continue
        case t > b.maxT:
            return chunkenc.ValNone
        default:
            return chunkenc.ValFloat
        }
    }
    return chunkenc.ValNone
}

func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
    if t < b.minT {
        // We must seek at least up to b.minT if it is asked for something before that.
        val := b.Iterator.Seek(b.minT)
        if !(val == chunkenc.ValFloat) {
            return chunkenc.ValNone
        }
        t, _ := b.Iterator.At()
        if t <= b.maxT {
            return chunkenc.ValFloat
        }
    }
    if t > b.maxT {
        // We seek anyway so that the subsequent Next() calls will also return false.
        b.Iterator.Seek(t)
        return chunkenc.ValNone
    }
    return b.Iterator.Seek(t)
}

// safeHeadChunk makes sure that the chunk can be accessed without a race condition.
type safeHeadChunk struct {
    chunkenc.Chunk
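The boundedIterable/boundedIterator pair added above clamps an underlying chunk iterator to a closed [minT, maxT] window, so an OOO head chunk that grew after Series() was called still serves the view recorded in the OOOLast* markers. The clamping logic itself, sketched over a plain slice so it runs without the chunkenc types:

```go
type tsample struct {
    T int64
    V float64
}

// clampToWindow mirrors boundedIterator.Next: samples before minT are
// skipped, and iteration stops for good at the first sample past maxT
// (chunk samples are ordered by timestamp).
func clampToWindow(samples []tsample, minT, maxT int64) []tsample {
    var out []tsample
    for _, s := range samples {
        switch {
        case s.T < minT:
            continue
        case s.T > maxT:
            return out
        default:
            out = append(out, s)
        }
    }
    return out
}
```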
8 changes: vendor/github.com/prometheus/prometheus/tsdb/head_wal.go (generated, vendored)
@@ -126,7 +126,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
    }
    // At the moment the only possible error here is out of order exemplars, which we shouldn't see when
    // replaying the WAL, so lets just log the error if it's not that type.
    err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
    err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
    if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) {
        level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err)
    }
@@ -448,7 +448,7 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
    ) {
        level.Debug(h.logger).Log(
            "msg", "M-mapped chunks overlap on a duplicate series record",
            "series", mSeries.labels().String(),
            "series", mSeries.lset.String(),
            "oldref", mSeries.ref,
            "oldmint", mSeries.mmappedChunks[0].minTime,
            "oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
@@ -932,7 +932,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {

    buf.PutByte(chunkSnapshotRecordTypeSeries)
    buf.PutBE64(uint64(s.ref))
    record.EncodeLabels(&buf, s.labels())
    record.EncodeLabels(&buf, s.lset)
    buf.PutBE64int64(0) // Backwards-compatibility; was chunkRange but now unused.

    s.Lock()
@@ -1485,7 +1485,7 @@ Outer:
        continue
    }

    if err := h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{
    if err := h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{
        Labels: e.Labels,
        Value:  e.V,
        Ts:     e.T,
14 changes: vendor/github.com/prometheus/prometheus/tsdb/index/index.go (generated, vendored)
@@ -1551,18 +1551,12 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe
|
||||
|
||||
// LabelNamesFor returns all the label names for the series referred to by IDs.
|
||||
// The names returned are sorted.
|
||||
func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string, error) {
|
||||
func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
|
||||
// Gather offsetsMap the name offsetsMap in the symbol table first
|
||||
offsetsMap := make(map[uint32]struct{})
|
||||
i := 0
|
||||
for postings.Next() {
|
||||
id := postings.At()
|
||||
i++
|
||||
|
||||
if i%checkContextEveryNIterations == 0 {
|
||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||
return nil, ctxErr
|
||||
}
|
||||
for _, id := range ids {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
offset := id
|
||||
|
||||
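The removed side of this index.go hunk consults ctx.Err() only once every checkContextEveryNIterations loop passes rather than on every pass, trading cancellation latency for less per-item overhead. A self-contained sketch of that amortized-check pattern; checkEvery is an assumed stand-in for the real constant:

package main

import (
	"context"
	"fmt"
)

// checkEvery stands in for Prometheus' checkContextEveryNIterations (assumed value).
const checkEvery = 128

// sumUntilCancelled checks for cancellation only once every checkEvery items,
// mirroring the amortized pattern on the removed side of the diff.
func sumUntilCancelled(ctx context.Context, xs []int) (int, error) {
	sum := 0
	for i, x := range xs {
		if (i+1)%checkEvery == 0 {
			if err := ctx.Err(); err != nil {
				return 0, err
			}
		}
		sum += x
	}
	return sum, nil
}

func main() {
	xs := make([]int, 1000)
	for i := range xs {
		xs[i] = i
	}
	s, err := sumUntilCancelled(context.Background(), xs)
	fmt.Println(s, err) // 499500 <nil>
}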
vendor/github.com/prometheus/prometheus/tsdb/index/postings.go (generated, vendored, 150 changed lines)
@@ -288,34 +288,62 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) {
 }

 // Delete removes all ids in the given map from the postings lists.
-// affectedLabels contains all the labels that are affected by the deletion, there's no need to check other labels.
-func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) {
-	p.mtx.Lock()
-	defer p.mtx.Unlock()
-
-	process := func(l labels.Label) {
-		orig := p.m[l.Name][l.Value]
-		repl := make([]storage.SeriesRef, 0, len(orig))
-		for _, id := range orig {
-			if _, ok := deleted[id]; !ok {
-				repl = append(repl, id)
-			}
-		}
-		if len(repl) > 0 {
-			p.m[l.Name][l.Value] = repl
-		} else {
-			delete(p.m[l.Name], l.Value)
-			// Delete the key if we removed all values.
-			if len(p.m[l.Name]) == 0 {
-				delete(p.m, l.Name)
-			}
-		}
-	}
-
-	for l := range affected {
-		process(l)
-	}
-	process(allPostingsKey)
+func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}) {
+	var keys, vals []string
+
+	// Collect all keys relevant for deletion once. New keys added afterwards
+	// can by definition not be affected by any of the given deletes.
+	p.mtx.RLock()
+	for n := range p.m {
+		keys = append(keys, n)
+	}
+	p.mtx.RUnlock()
+
+	for _, n := range keys {
+		p.mtx.RLock()
+		vals = vals[:0]
+		for v := range p.m[n] {
+			vals = append(vals, v)
+		}
+		p.mtx.RUnlock()
+
+		// For each posting we first analyse whether the postings list is affected by the deletes.
+		// If yes, we actually reallocate a new postings list.
+		for _, l := range vals {
+			// Only lock for processing one postings list so we don't block reads for too long.
+			p.mtx.Lock()
+
+			found := false
+			for _, id := range p.m[n][l] {
+				if _, ok := deleted[id]; ok {
+					found = true
+					break
+				}
+			}
+			if !found {
+				p.mtx.Unlock()
+				continue
+			}
+			repl := make([]storage.SeriesRef, 0, len(p.m[n][l]))
+
+			for _, id := range p.m[n][l] {
+				if _, ok := deleted[id]; !ok {
+					repl = append(repl, id)
+				}
+			}
+			if len(repl) > 0 {
+				p.m[n][l] = repl
+			} else {
+				delete(p.m[n], l)
+			}
+			p.mtx.Unlock()
+		}
+		p.mtx.Lock()
+		if len(p.m[n]) == 0 {
+			delete(p.m, n)
+		}
+		p.mtx.Unlock()
+	}
 }

 // Iter calls f for each postings list. It aborts if f returns an error and returns it.
@@ -370,62 +398,16 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) {
 }

 func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings {
 	// We'll copy the values into a slice and then match over that,
 	// this way we don't need to hold the mutex while we're matching,
 	// which can be slow (seconds) if the match function is a huge regex.
 	// Holding this lock prevents new series from being added (slows down the write path)
 	// and blocks the compaction process.
-	vals := p.labelValues(name)
-	for i, count := 0, 1; i < len(vals); count++ {
-		if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
-			return ErrPostings(ctx.Err())
-		}
-
-		if match(vals[i]) {
-			i++
-			continue
-		}
-
-		// Didn't match, bring the last value to this position, make the slice shorter and check again.
-		// The order of the slice doesn't matter as it comes from a map iteration.
-		vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1]
-	}
-
-	// If none matched (or this label had no values), no need to grab the lock again.
-	if len(vals) == 0 {
-		return EmptyPostings()
-	}
-
-	// Now `vals` only contains the values that matched, get their postings.
-	its := make([]Postings, 0, len(vals))
 	p.mtx.RLock()
-	e := p.m[name]
-	for _, v := range vals {
-		if refs, ok := e[v]; ok {
-			// Some of the values may have been garbage-collected in the meantime this is fine, we'll just skip them.
-			// If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere
-			// because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels,
-			// because the series were deleted already.
-			its = append(its, NewListPostings(refs))
-		}
-	}
-	// Let the mutex go before merging.
-	p.mtx.RUnlock()
-
-	return Merge(ctx, its...)
-}
-
-// labelValues returns a slice of label values for the given label name.
-// It will take the read lock.
-func (p *MemPostings) labelValues(name string) []string {
-	p.mtx.RLock()
-	defer p.mtx.RUnlock()
-
 	e := p.m[name]
 	if len(e) == 0 {
-		return nil
+		p.mtx.RUnlock()
+		return EmptyPostings()
 	}

 	// Benchmarking shows that first copying the values into a slice and then matching over that is
 	// faster than matching over the map keys directly, at least on AMD64.
 	vals := make([]string, 0, len(e))
 	for v, srs := range e {
 		if len(srs) > 0 {
@@ -433,7 +415,21 @@ func (p *MemPostings) labelValues(name string) []string {
 		}
 	}

-	return vals
+	var its []Postings
+	count := 1
+	for _, v := range vals {
+		if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
+			p.mtx.RUnlock()
+			return ErrPostings(ctx.Err())
+		}
+		count++
+		if match(v) {
+			its = append(its, NewListPostings(e[v]))
+		}
+	}
+	p.mtx.RUnlock()

+	return Merge(ctx, its...)
 }

 // ExpandPostings returns the postings expanded as a slice.
@@ -755,7 +751,9 @@ func (it *ListPostings) Seek(x storage.SeriesRef) bool {
 	}

 	// Do binary search between current position and end.
-	i, _ := slices.BinarySearch(it.list, x)
+	i := sort.Search(len(it.list), func(i int) bool {
+		return it.list[i] >= x
+	})
 	if i < len(it.list) {
 		it.cur = it.list[i]
 		it.list = it.list[i+1:]
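The last postings.go hunk swaps slices.BinarySearch for an equivalent sort.Search call; both locate the first element greater than or equal to x in a sorted slice. A small standalone sketch demonstrating the equivalence (the slices package requires Go 1.21+):

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	list := []uint64{3, 7, 9, 12}
	x := uint64(8)

	// Newer side of the diff: slices.BinarySearch returns the insertion index.
	i1, _ := slices.BinarySearch(list, x)

	// Older side of the diff: sort.Search over the same ">= x" predicate.
	i2 := sort.Search(len(list), func(i int) bool { return list[i] >= x })

	fmt.Println(i1, i2) // 2 2 -- both point at the first element >= x
}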
vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go (generated, vendored, 109 changed lines)
@@ -17,10 +17,9 @@ import (
 	"fmt"
 	"sort"

-	"github.com/prometheus/prometheus/tsdb/chunkenc"
-
 	"github.com/oklog/ulid"

+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
 )
@@ -75,22 +74,24 @@ func (o *OOOChunk) NumSamples() int {
 	return len(o.samples)
 }

-// ToEncodedChunks returns chunks with the samples in the OOOChunk.
-//
-//nolint:revive // unexported-return.
-func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) {
-	if len(o.samples) == 0 {
-		return nil, nil
-	}
-	// The most common case is that there will be a single chunk, with the same type of samples in it - this is always true for float samples.
-	chks = make([]memChunk, 0, 1)
-	var (
-		cmint int64
-		cmaxt int64
-		chunk chunkenc.Chunk
-		app   chunkenc.Appender
-	)
-	prevEncoding := chunkenc.EncNone // Yes we could call the chunk for this, but this is more efficient.
+func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) {
+	x := chunkenc.NewXORChunk()
+	app, err := x.Appender()
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range o.samples {
+		app.Append(s.t, s.f)
+	}
+	return x, nil
+}
+
+func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, error) {
+	x := chunkenc.NewXORChunk()
+	app, err := x.Appender()
+	if err != nil {
+		return nil, err
+	}
 	for _, s := range o.samples {
 		if s.t < mint {
 			continue
@@ -98,77 +99,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
 		if s.t > maxt {
 			break
 		}
-		encoding := chunkenc.EncXOR
-		if s.h != nil {
-			encoding = chunkenc.EncHistogram
-		} else if s.fh != nil {
-			encoding = chunkenc.EncFloatHistogram
-		}
-
-		// prevApp is the appender for the previous sample.
-		prevApp := app
-
-		if encoding != prevEncoding { // For the first sample, this will always be true as EncNone != EncXOR | EncHistogram | EncFloatHistogram
-			if prevEncoding != chunkenc.EncNone {
-				chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
-			}
-			cmint = s.t
-			switch encoding {
-			case chunkenc.EncXOR:
-				chunk = chunkenc.NewXORChunk()
-			case chunkenc.EncHistogram:
-				chunk = chunkenc.NewHistogramChunk()
-			case chunkenc.EncFloatHistogram:
-				chunk = chunkenc.NewFloatHistogramChunk()
-			default:
-				chunk = chunkenc.NewXORChunk()
-			}
-			app, err = chunk.Appender()
-			if err != nil {
-				return
-			}
-		}
-		switch encoding {
-		case chunkenc.EncXOR:
-			app.Append(s.t, s.f)
-		case chunkenc.EncHistogram:
-			// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
-			prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
-			var (
-				newChunk chunkenc.Chunk
-				recoded  bool
-			)
-			newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
-			if newChunk != nil { // A new chunk was allocated.
-				if !recoded {
-					chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
-				}
-				chunk = newChunk
-				cmint = s.t
-			}
-		case chunkenc.EncFloatHistogram:
-			// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
-			prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender)
-			var (
-				newChunk chunkenc.Chunk
-				recoded  bool
-			)
-			newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
-			if newChunk != nil { // A new chunk was allocated.
-				if !recoded {
-					chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
-				}
-				chunk = newChunk
-				cmint = s.t
-			}
-		}
-		cmaxt = s.t
-		prevEncoding = encoding
+		app.Append(s.t, s.f)
 	}
-	if prevEncoding != chunkenc.EncNone {
-		chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
-	}
-	return chks, nil
+	return x, nil
 }

 var _ BlockReader = &OOORangeHead{}
@@ -201,7 +134,7 @@ func (oh *OOORangeHead) Index() (IndexReader, error) {
 }

 func (oh *OOORangeHead) Chunks() (ChunkReader, error) {
-	return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState, 0), nil
+	return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil
 }

 func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) {
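The ToXOR method introduced above appends every buffered sample into a fresh XOR chunk. A minimal round-trip sketch using the same chunkenc calls that appear in the diff (NewXORChunk, Appender, Append, Iterator); error handling is shortened to panic for brevity:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// Round-trips a few float samples through an XOR chunk, mirroring what
// OOOChunk.ToXOR on the added side of the diff does with o.samples.
func main() {
	x := chunkenc.NewXORChunk()
	app, err := x.Appender()
	if err != nil {
		panic(err)
	}
	for _, s := range []struct {
		t int64
		f float64
	}{{1, 1.5}, {2, 2.5}, {3, 3.5}} {
		app.Append(s.t, s.f)
	}

	// Read the samples back out of the encoded chunk.
	it := x.Iterator(nil)
	for it.Next() == chunkenc.ValFloat {
		t, v := it.At()
		fmt.Println(t, v)
	}
}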
vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go (generated, vendored, 96 changed lines)
@@ -78,7 +78,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
 		oh.head.metrics.seriesNotFound.Inc()
 		return storage.ErrNotFound
 	}
-	builder.Assign(s.labels())
+	builder.Assign(s.lset)

 	if chks == nil {
 		return nil
@@ -94,40 +94,48 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
 	tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))

-	addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
+	// We define these markers to track the last chunk reference while we
+	// fill the chunk meta.
+	// These markers are useful to give consistent responses to repeated queries
+	// even if new chunks that might be overlapping or not are added afterwards.
+	// Also, lastMinT and lastMaxT are initialized to the max int as a sentinel
+	// value to know they are unset.
+	var lastChunkRef chunks.ChunkRef
+	lastMinT, lastMaxT := int64(math.MaxInt64), int64(math.MaxInt64)
+
+	addChunk := func(minT, maxT int64, ref chunks.ChunkRef) {
+		// the first time we get called is for the last included chunk.
+		// set the markers accordingly
+		if lastMinT == int64(math.MaxInt64) {
+			lastChunkRef = ref
+			lastMinT = minT
+			lastMaxT = maxT
+		}
+
 		tmpChks = append(tmpChks, chunks.Meta{
-			MinTime: minT,
-			MaxTime: maxT,
-			Ref:     ref,
-			Chunk:   chunk,
+			MinTime:        minT,
+			MaxTime:        maxT,
+			Ref:            ref,
+			OOOLastRef:     lastChunkRef,
+			OOOLastMinTime: lastMinT,
+			OOOLastMaxTime: lastMaxT,
 		})
 	}

-	// Collect all chunks that overlap the query range.
+	// Collect all chunks that overlap the query range, in order from most recent to most old,
+	// so we can set the correct markers.
 	if s.ooo.oooHeadChunk != nil {
 		c := s.ooo.oooHeadChunk
 		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
 			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
-			if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least.
-				chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
-				if err != nil {
-					handleChunkWriteError(err)
-					return nil
-				}
-				for _, chk := range chks {
-					addChunk(c.minTime, c.maxTime, ref, chk.chunk)
-				}
-			} else {
-				var emptyChunk chunkenc.Chunk
-				addChunk(c.minTime, c.maxTime, ref, emptyChunk)
-			}
+			addChunk(c.minTime, c.maxTime, ref)
 		}
 	}
 	for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
 		c := s.ooo.oooMmappedChunks[i]
 		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
 			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
-			addChunk(c.minTime, c.maxTime, ref, nil)
+			addChunk(c.minTime, c.maxTime, ref)
 		}
 	}
@@ -155,12 +163,6 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
 		case c.MaxTime > maxTime:
 			maxTime = c.MaxTime
 			(*chks)[len(*chks)-1].MaxTime = c.MaxTime
 			fallthrough
 		default:
-			// If the head OOO chunk is part of an output chunk, copy the chunk pointer.
-			if c.Chunk != nil {
-				(*chks)[len(*chks)-1].Chunk = c.Chunk
-			}
 		}
 	}
@@ -183,8 +185,10 @@ func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matc
 }

 type chunkMetaAndChunkDiskMapperRef struct {
-	meta chunks.Meta
-	ref  chunks.ChunkDiskMapperRef
+	meta     chunks.Meta
+	ref      chunks.ChunkDiskMapperRef
+	origMinT int64
+	origMaxT int64
 }

 func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
@@ -243,16 +247,14 @@ type OOOHeadChunkReader struct {
 	head       *Head
 	mint, maxt int64
 	isoState   *oooIsolationState
-	maxMmapRef chunks.ChunkDiskMapperRef
 }

-func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *OOOHeadChunkReader {
+func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader {
 	return &OOOHeadChunkReader{
-		head:       head,
-		mint:       mint,
-		maxt:       maxt,
-		isoState:   isoState,
-		maxMmapRef: maxMmapRef,
+		head:     head,
+		mint:     mint,
+		maxt:     maxt,
+		isoState: isoState,
 	}
 }
@@ -271,7 +273,7 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk,
 		s.Unlock()
 		return nil, nil, storage.ErrNotFound
 	}
-	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt, cr.maxMmapRef)
+	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
 	s.Unlock()
 	if err != nil {
 		return nil, nil, err
@@ -351,20 +353,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
 			continue
 		}

-		var lastMmapRef chunks.ChunkDiskMapperRef
-		mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
-		if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
+		mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
+		if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
 			// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
-			mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
+			mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref
 		}
-		if len(mmapRefs) == 0 {
-			lastMmapRef = 0
-		} else {
-			lastMmapRef = mmapRefs[len(mmapRefs)-1]
-		}
-		seq, off := lastMmapRef.Unpack()
+		seq, off := mmapRef.Unpack()
 		if seq > lastSeq || (seq == lastSeq && off > lastOff) {
-			ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
+			ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off
 		}
 		if len(ms.ooo.oooMmappedChunks) > 0 {
 			ch.postings = append(ch.postings, seriesRef)
@@ -388,7 +384,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) {
 }

 func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
-	return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil, ch.lastMmapRef), nil
+	return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil
 }

 func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
@@ -487,7 +483,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.S
 	return "", errors.New("not implemented")
 }

-func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
+func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
 	return nil, errors.New("not implemented")
 }
vendor/github.com/prometheus/prometheus/tsdb/querier.go (generated, vendored, 15 changed lines)
@@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
 	}, nil
 }

-func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	res, err := q.index.SortedLabelValues(ctx, name, matchers...)
 	return res, nil, err
 }

-func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
 	res, err := q.index.LabelNames(ctx, matchers...)
 	return res, nil, err
 }
@@ -447,7 +447,16 @@ func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*lab
 	if err != nil {
 		return nil, err
 	}
-	return r.LabelNamesFor(ctx, p)
+
+	var postings []storage.SeriesRef
+	for p.Next() {
+		postings = append(postings, p.At())
+	}
+	if err := p.Err(); err != nil {
+		return nil, fmt.Errorf("postings for label names with matchers: %w", err)
+	}
+
+	return r.LabelNamesFor(ctx, postings...)
 }

 // seriesData, used inside other iterators, are updated when we move from one series to another.
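The querier.go change expands the postings iterator into a slice, checks Err() once, and only then calls LabelNamesFor. A self-contained sketch of that drain-then-check pattern; the postings type here is a hypothetical stand-in for tsdb/index.Postings, not the vendored type:

package main

import "fmt"

// postings is a minimal mock iterator, just enough to show the pattern.
type postings struct {
	list []uint64
	cur  uint64
}

func (p *postings) Next() bool {
	if len(p.list) == 0 {
		return false
	}
	p.cur, p.list = p.list[0], p.list[1:]
	return true
}
func (p *postings) At() uint64 { return p.cur }
func (p *postings) Err() error { return nil }

func main() {
	p := &postings{list: []uint64{4, 8, 15}}

	// Expand the iterator into a slice, then check Err() exactly once,
	// mirroring labelNamesWithMatchers after the change.
	var refs []uint64
	for p.Next() {
		refs = append(refs, p.At())
	}
	if err := p.Err(); err != nil {
		panic(err)
	}
	fmt.Println(refs) // [4 8 15]
}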
vendor/github.com/prometheus/prometheus/tsdb/record/record.go (generated, vendored, 2 changed lines)
@@ -543,7 +543,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
 	return histograms, nil
 }

-// DecodeFloatHistogram decodes a Histogram from a byte slice.
+// Decode decodes a Histogram from a byte slice.
 func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
 	fh.CounterResetHint = histogram.CounterResetHint(buf.Byte())
vendor/github.com/prometheus/prometheus/tsdb/testutil.go (generated, vendored, 176 changed lines; file deleted)
@@ -1,176 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdb
-
-import (
-	"testing"
-
-	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/prometheus/prometheus/model/histogram"
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/prometheus/tsdb/chunks"
-)
-
-const (
-	float = "float"
-)
-
-type testValue struct {
-	Ts                 int64
-	V                  int64
-	CounterResetHeader histogram.CounterResetHint
-}
-
-type sampleTypeScenario struct {
-	sampleType string
-	appendFunc func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)
-	sampleFunc func(ts, value int64) sample
-}
-
-// TODO: native histogram sample types will be added as part of out-of-order native histogram support; see #11220.
-var sampleTypeScenarios = map[string]sampleTypeScenario{
-	float: {
-		sampleType: sampleMetricTypeFloat,
-		appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
-			s := sample{t: ts, f: float64(value)}
-			ref, err := appender.Append(0, lbls, ts, s.f)
-			return ref, s, err
-		},
-		sampleFunc: func(ts, value int64) sample {
-			return sample{t: ts, f: float64(value)}
-		},
-	},
-	// intHistogram: {
-	// 	sampleType: sampleMetricTypeHistogram,
-	// 	appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
-	// 		s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
-	// 		ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
-	// 		return ref, s, err
-	// 	},
-	// 	sampleFunc: func(ts, value int64) sample {
-	// 		return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
-	// 	},
-	// },
-	// floatHistogram: {
-	// 	sampleType: sampleMetricTypeHistogram,
-	// 	appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
-	// 		s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
-	// 		ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
-	// 		return ref, s, err
-	// 	},
-	// 	sampleFunc: func(ts, value int64) sample {
-	// 		return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
-	// 	},
-	// },
-	// gaugeIntHistogram: {
-	// 	sampleType: sampleMetricTypeHistogram,
-	// 	appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
-	// 		s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
-	// 		ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
-	// 		return ref, s, err
-	// 	},
-	// 	sampleFunc: func(ts, value int64) sample {
-	// 		return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
-	// 	},
-	// },
-	// gaugeFloatHistogram: {
-	// 	sampleType: sampleMetricTypeHistogram,
-	// 	appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
-	// 		s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
-	// 		ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
-	// 		return ref, s, err
-	// 	},
-	// 	sampleFunc: func(ts, value int64) sample {
-	// 		return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
-	// 	},
-	// },
-}
-
-// requireEqualSeries checks that the actual series are equal to the expected ones. It ignores the counter reset hints for histograms.
-func requireEqualSeries(t *testing.T, expected, actual map[string][]chunks.Sample, ignoreCounterResets bool) {
-	for name, expectedItem := range expected {
-		actualItem, ok := actual[name]
-		require.True(t, ok, "Expected series %s not found", name)
-		requireEqualSamples(t, name, expectedItem, actualItem, ignoreCounterResets)
-	}
-	for name := range actual {
-		_, ok := expected[name]
-		require.True(t, ok, "Unexpected series %s", name)
-	}
-}
-
-func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) {
-	require.Equal(t, float64(expectedSamples),
-		prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat))+
-			prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram)),
-		"number of ooo appended samples mismatch")
-}
-
-func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sample, ignoreCounterResets bool) {
-	require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name)
-	for i, s := range expected {
-		expectedSample := s
-		actualSample := actual[i]
-		require.Equal(t, expectedSample.T(), actualSample.T(), "Different timestamps for %s[%d]", name, i)
-		require.Equal(t, expectedSample.Type().String(), actualSample.Type().String(), "Different types for %s[%d] at ts %d", name, i, expectedSample.T())
-		switch {
-		case s.H() != nil:
-			{
-				expectedHist := expectedSample.H()
-				actualHist := actualSample.H()
-				if ignoreCounterResets && expectedHist.CounterResetHint != histogram.GaugeType {
-					expectedHist.CounterResetHint = histogram.UnknownCounterReset
-					actualHist.CounterResetHint = histogram.UnknownCounterReset
-				} else {
-					require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint))
-				}
-				require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
-			}
-		case s.FH() != nil:
-			{
-				expectedHist := expectedSample.FH()
-				actualHist := actualSample.FH()
-				if ignoreCounterResets {
-					expectedHist.CounterResetHint = histogram.UnknownCounterReset
-					actualHist.CounterResetHint = histogram.UnknownCounterReset
-				} else {
-					require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint))
-				}
-				require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
-			}
-		default:
-			expectedFloat := expectedSample.F()
-			actualFloat := actualSample.F()
-			require.Equal(t, expectedFloat, actualFloat, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
-		}
-	}
-}
-
-func counterResetAsString(h histogram.CounterResetHint) string {
-	switch h {
-	case histogram.UnknownCounterReset:
-		return "UnknownCounterReset"
-	case histogram.CounterReset:
-		return "CounterReset"
-	case histogram.NotCounterReset:
-		return "NotCounterReset"
-	case histogram.GaugeType:
-		return "GaugeType"
-	}
-	panic("Unexpected counter reset type")
-}
vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go (generated, vendored, 38 changed lines)
@@ -30,10 +30,12 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
 	return r
 }

-func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram {
-	h := GenerateTestHistogram(n)
-	h.CounterResetHint = hint
-	return h
+func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram {
+	hs := GenerateTestHistograms(n)
+	for i := range hs {
+		hs[i].CounterResetHint = histogram.UnknownCounterReset
+	}
+	return hs
 }

 // GenerateTestHistogram but it is up to the user to set any known counter reset hint.
@@ -57,20 +59,6 @@ func GenerateTestHistogram(i int) *histogram.Histogram {
 	}
 }

-func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram {
-	return &histogram.Histogram{
-		Count:  5 + uint64(i*4),
-		Sum:    18.4 * float64(i+1),
-		Schema: histogram.CustomBucketsSchema,
-		PositiveSpans: []histogram.Span{
-			{Offset: 0, Length: 2},
-			{Offset: 1, Length: 2},
-		},
-		PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
-		CustomValues:    []float64{0, 1, 2, 3, 4},
-	}
-}
-
 func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
 	for x := 0; x < n; x++ {
 		i := int(math.Sin(float64(x))*100) + 100
@@ -117,20 +105,6 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
 	}
 }

-func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram {
-	return &histogram.FloatHistogram{
-		Count:  5 + float64(i*4),
-		Sum:    18.4 * float64(i+1),
-		Schema: histogram.CustomBucketsSchema,
-		PositiveSpans: []histogram.Span{
-			{Offset: 0, Length: 2},
-			{Offset: 1, Length: 2},
-		},
-		PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)},
-		CustomValues:    []float64{0, 1, 2, 3, 4},
-	}
-}
-
 func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
 	for x := 0; x < n; x++ {
 		i := int(math.Sin(float64(x))*100) + 100
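GenerateTestHistogramsWithUnknownResetHint, added above, simply forces the counter-reset hint of every generated test histogram to UnknownCounterReset. A short usage sketch, assuming the vendored tsdbutil and histogram packages behave as shown in the diff:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	// Every generated histogram has its hint reset to UnknownCounterReset.
	hs := tsdbutil.GenerateTestHistogramsWithUnknownResetHint(3)
	for _, h := range hs {
		fmt.Println(h.Count, h.CounterResetHint == histogram.UnknownCounterReset)
	}
}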
vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go (generated, vendored, 17 changed lines)
@@ -57,7 +57,6 @@ type WriteTo interface {
 	AppendHistograms([]record.RefHistogramSample) bool
 	AppendFloatHistograms([]record.RefFloatHistogramSample) bool
 	StoreSeries([]record.RefSeries, int)
-	StoreMetadata([]record.RefMetadata)

 	// Next two methods are intended for garbage-collection: first we call
 	// UpdateSeriesSegment on all current series
@@ -89,7 +88,6 @@ type Watcher struct {
 	lastCheckpoint string
 	sendExemplars  bool
 	sendHistograms bool
-	sendMetadata   bool
 	metrics        *WatcherMetrics
 	readerMetrics  *LiveReaderMetrics
@@ -172,7 +170,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
 }

 // NewWatcher creates a new WAL watcher for a given WriteTo.
-func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher {
+func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms bool) *Watcher {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -185,7 +183,6 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
 		name:           name,
 		sendExemplars:  sendExemplars,
 		sendHistograms: sendHistograms,
-		sendMetadata:   sendMetadata,

 		readNotify: make(chan struct{}),
 		quit:       make(chan struct{}),
@@ -543,7 +540,6 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 		histogramsToSend      []record.RefHistogramSample
 		floatHistograms       []record.RefFloatHistogramSample
 		floatHistogramsToSend []record.RefFloatHistogramSample
-		metadata              []record.RefMetadata
 	)
 	for r.Next() && !isClosed(w.quit) {
 		rec := r.Record()
@@ -655,17 +651,6 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 				w.writer.AppendFloatHistograms(floatHistogramsToSend)
 				floatHistogramsToSend = floatHistogramsToSend[:0]
 			}
-
-		case record.Metadata:
-			if !w.sendMetadata || !tail {
-				break
-			}
-			meta, err := dec.Metadata(rec, metadata[:0])
-			if err != nil {
-				w.recordDecodeFailsMetric.Inc()
-				return err
-			}
-			w.writer.StoreMetadata(meta)
 		case record.Tombstones:

 		default:
vendor/github.com/prometheus/prometheus/util/annotations/annotations.go (generated, vendored, 99 changed lines)
@@ -71,58 +71,27 @@ func (a Annotations) AsErrors() []error {
 	return arr
 }

-// AsStrings is a convenience function to return the annotations map as 2 slices
-// of strings, separated into warnings and infos. The query string is used to get the
-// line number and character offset positioning info of the elements which trigger an
-// annotation. We limit the number of warnings and infos returned here with maxWarnings
-// and maxInfos respectively (0 for no limit).
-func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warnings, infos []string) {
-	warnings = make([]string, 0, maxWarnings+1)
-	infos = make([]string, 0, maxInfos+1)
-	warnSkipped := 0
-	infoSkipped := 0
+// AsStrings is a convenience function to return the annotations map as a slice
+// of strings. The query string is used to get the line number and character offset
+// positioning info of the elements which trigger an annotation. We limit the number
+// of annotations returned here with maxAnnos (0 for no limit).
+func (a Annotations) AsStrings(query string, maxAnnos int) []string {
+	arr := make([]string, 0, len(a))
 	for _, err := range a {
+		if maxAnnos > 0 && len(arr) >= maxAnnos {
+			break
+		}
 		var anErr annoErr
 		if errors.As(err, &anErr) {
			anErr.Query = query
 			err = anErr
 		}
-		switch {
-		case errors.Is(err, PromQLInfo):
-			if maxInfos == 0 || len(infos) < maxInfos {
-				infos = append(infos, err.Error())
-			} else {
-				infoSkipped++
-			}
-		default:
-			if maxWarnings == 0 || len(warnings) < maxWarnings {
-				warnings = append(warnings, err.Error())
-			} else {
-				warnSkipped++
-			}
-		}
+		arr = append(arr, err.Error())
 	}
-	if warnSkipped > 0 {
-		warnings = append(warnings, fmt.Sprintf("%d more warning annotations omitted", warnSkipped))
+	if maxAnnos > 0 && len(a) > maxAnnos {
+		arr = append(arr, fmt.Sprintf("%d more annotations omitted", len(a)-maxAnnos))
 	}
-	if infoSkipped > 0 {
-		infos = append(infos, fmt.Sprintf("%d more info annotations omitted", infoSkipped))
-	}
-	return
-}
-
-// CountWarningsAndInfo counts and returns the number of warnings and infos in the
-// annotations wrapper.
-func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
-	for _, err := range a {
-		if errors.Is(err, PromQLWarning) {
-			countWarnings++
-		}
-		if errors.Is(err, PromQLInfo) {
-			countInfo++
-		}
-	}
-	return
+	return arr
 }

 //nolint:revive // error-naming.
@@ -134,15 +103,12 @@ var (
 	PromQLInfo    = errors.New("PromQL info")
 	PromQLWarning = errors.New("PromQL warning")

-	InvalidRatioWarning                        = fmt.Errorf("%w: ratio value should be between -1 and 1", PromQLWarning)
-	InvalidQuantileWarning                     = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
-	BadBucketLabelWarning                      = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
-	MixedFloatsHistogramsWarning               = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
-	MixedClassicNativeHistogramsWarning        = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
-	NativeHistogramNotCounterWarning           = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
-	NativeHistogramNotGaugeWarning             = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)
-	MixedExponentialCustomHistogramsWarning    = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning)
-	IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning)
+	InvalidQuantileWarning              = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
+	BadBucketLabelWarning               = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
+	MixedFloatsHistogramsWarning        = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
+	MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
+	NativeHistogramNotCounterWarning    = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
+	NativeHistogramNotGaugeWarning      = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)

 	PossibleNonCounterInfo                  = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo)
 	HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo)
@@ -174,15 +140,6 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
 	}
 }

-// NewInvalidQuantileWarning is used when the user specifies an invalid ratio
-// value, i.e. a float that is outside the range [-1, 1] or NaN.
-func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
-	return annoErr{
-		PositionRange: pos,
-		Err:           fmt.Errorf("%w, got %g, capping to %g", InvalidRatioWarning, q, to),
-	}
-}
-
 // NewBadBucketLabelWarning is used when there is an error parsing the bucket label
 // of a classic histogram.
 func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error {
@@ -238,24 +195,6 @@ func NewNativeHistogramNotGaugeWarning(metricName string, pos posrange.PositionR
 	}
 }

-// NewMixedExponentialCustomHistogramsWarning is used when the queried series includes
-// histograms with both exponential and custom buckets schemas.
-func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.PositionRange) error {
-	return annoErr{
-		PositionRange: pos,
-		Err:           fmt.Errorf("%w %q", MixedExponentialCustomHistogramsWarning, metricName),
-	}
-}
-
-// NewIncompatibleCustomBucketsHistogramsWarning is used when the queried series includes
-// custom buckets histograms with incompatible custom bounds.
-func NewIncompatibleCustomBucketsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
-	return annoErr{
-		PositionRange: pos,
-		Err:           fmt.Errorf("%w %q", IncompatibleCustomBucketsHistogramsWarning, metricName),
-	}
-}
-
 // NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
 // have the suffixes _total, _sum, _count, or _bucket.
 func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {
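The replacement AsStrings collapses all annotations into a single slice and appends an "omitted" note once maxAnnos is exceeded. A usage sketch against the signature shown on the added side of the diff; it assumes annotations.New and Add behave as in the vendored package:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	a := annotations.New()
	a.Add(errors.New("warn one"))
	a.Add(errors.New("warn two"))

	// With maxAnnos = 1, one annotation is returned and the rest are
	// summarized as "1 more annotations omitted".
	for _, s := range a.AsStrings("up == 1", 1) {
		fmt.Println(s)
	}
}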
Some files were not shown because too many files have changed in this diff.