mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
Compare commits
30 Commits
debug/erro
...
fix-panel-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1cc1d45503 | ||
|
|
d064e14933 | ||
|
|
77b0fcfdd9 | ||
|
|
ee7fe11fd2 | ||
|
|
4c26fb6fe5 | ||
|
|
fc135094b3 | ||
|
|
5d42f21abd | ||
|
|
28eeabded1 | ||
|
|
b6910cfff7 | ||
|
|
8938ef398c | ||
|
|
df2b75fa81 | ||
|
|
857734c66c | ||
|
|
bedc0c0f8f | ||
|
|
5a41bdf329 | ||
|
|
bf5d0dd245 | ||
|
|
1cec37b0f5 | ||
|
|
c40c25b03c | ||
|
|
82badc3dd5 | ||
|
|
43ded688f7 | ||
|
|
661420fe85 | ||
|
|
7aab967447 | ||
|
|
afb07034ed | ||
|
|
44d2205136 | ||
|
|
b226318f9e | ||
|
|
30999204c9 | ||
|
|
ffddfa1f94 | ||
|
|
fc336bbf20 | ||
|
|
e0b2c1c4f5 | ||
|
|
5afbee5f6f | ||
|
|
51459196f9 |
16
README.md
16
README.md
@@ -1,12 +1,14 @@
|
||||
# VictoriaMetrics
|
||||
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
||||
[](https://hub.docker.com/r/victoriametrics/victoria-metrics)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
|
||||
[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
<picture>
|
||||
<source srcset="docs/logo_white.webp" media="(prefers-color-scheme: dark)">
|
||||
|
||||
@@ -614,7 +614,7 @@ func (ar *AlertingRule) alertToTimeSeries(a *notifier.Alert, timestamp int64) []
|
||||
}
|
||||
|
||||
func alertToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
|
||||
var labels []prompbmarshal.Label
|
||||
labels := make([]prompbmarshal.Label, 0, len(a.Labels)+2)
|
||||
for k, v := range a.Labels {
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
@@ -634,7 +634,7 @@ func alertToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSer
|
||||
// alertForToTimeSeries returns a time series that represents
|
||||
// state of active alerts, where value is time when alert become active
|
||||
func alertForToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
|
||||
var labels []prompbmarshal.Label
|
||||
labels := make([]prompbmarshal.Label, 0, len(a.Labels)+1)
|
||||
for k, v := range a.Labels {
|
||||
labels = append(labels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
@@ -650,21 +650,24 @@ func alertForToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.Time
|
||||
// for alerts which changed their state from Pending to Inactive or Firing.
|
||||
func pendingAlertStaleTimeSeries(ls map[string]string, timestamp int64, includeAlertForState bool) []prompbmarshal.TimeSeries {
|
||||
var result []prompbmarshal.TimeSeries
|
||||
var baseLabels []prompbmarshal.Label
|
||||
baseLabels := make([]prompbmarshal.Label, 0, len(ls)+1)
|
||||
for k, v := range ls {
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
alertsLabels := make([]prompbmarshal.Label, 0, len(ls)+2)
|
||||
alertsLabels = append(alertsLabels, baseLabels...)
|
||||
// __name__ already been dropped, no need to check duplication
|
||||
alertsLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: alertStateLabel, Value: notifier.StatePending.String()})
|
||||
result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsLabels))
|
||||
|
||||
if includeAlertForState {
|
||||
alertsForStateLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsForStateLabels))
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, baseLabels))
|
||||
}
|
||||
return result
|
||||
}
|
||||
@@ -672,22 +675,25 @@ func pendingAlertStaleTimeSeries(ls map[string]string, timestamp int64, includeA
|
||||
// firingAlertStaleTimeSeries returns stale `ALERTS` and `ALERTS_FOR_STATE` time series
|
||||
// for alerts which changed their state from Firing to Inactive.
|
||||
func firingAlertStaleTimeSeries(ls map[string]string, timestamp int64) []prompbmarshal.TimeSeries {
|
||||
var baseLabels []prompbmarshal.Label
|
||||
baseLabels := make([]prompbmarshal.Label, 0, len(ls)+1)
|
||||
for k, v := range ls {
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
alertsLabels := make([]prompbmarshal.Label, 0, len(ls)+2)
|
||||
alertsLabels = append(alertsLabels, baseLabels...)
|
||||
// __name__ already been dropped, no need to check duplication
|
||||
alertsLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
|
||||
alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: alertStateLabel, Value: notifier.StateFiring.String()})
|
||||
|
||||
alertsForStateLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
baseLabels = append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
|
||||
|
||||
return []prompbmarshal.TimeSeries{
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsLabels),
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsForStateLabels),
|
||||
newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, baseLabels),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -252,10 +252,14 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
},
|
||||
map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "empty_labels"}, {Name: "alertstate", Value: "firing"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "empty_labels"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "empty_labels"}, {Name: "alertstate", Value: "firing"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "empty_labels"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
@@ -273,22 +277,34 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
4: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateInactive}}},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(2 * defaultStep).Unix()), Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "single-firing=>inactive=>firing=>inactive=>inactive"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(2 * defaultStep).Unix()), Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
@@ -344,34 +360,54 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
// stale time series for foo, `firing -> inactive`
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
// new time series for foo1
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(defaultStep).Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(defaultStep).Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
2: {
|
||||
// stale time series for foo1
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo1"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
// new time series for foo2
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo2"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo2"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(2 * defaultStep).Unix()), Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo2"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "multiple-steps-firing"}, {Name: "name", Value: "foo2"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(2 * defaultStep).Unix()), Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
@@ -389,50 +425,72 @@ func TestAlertingRule_Exec(t *testing.T) {
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StateFiring}}},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
// stale time series for `pending -> firing`
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(defaultStep).Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "alertstate", Value: "firing"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-fired"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Add(defaultStep).Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
f(newTestAlertingRule("for-pending=>empty", time.Second), [][]datasource.Metric{
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo")},
|
||||
{metricWithLabels(t, "name", "foo", "a1", "b1", "a2", "b2", "a3", "b3")},
|
||||
{metricWithLabels(t, "name", "foo", "a1", "b1", "a2", "b2", "a3", "b3")},
|
||||
// empty step to delete pending alerts
|
||||
{},
|
||||
}, map[int][]testAlert{
|
||||
0: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
1: {{labels: []string{"name", "foo"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
0: {{labels: []string{"name", "foo", "a1", "b1", "a2", "b2", "a3", "b3"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
1: {{labels: []string{"name", "foo", "a1", "b1", "a2", "b2", "a3", "b3"}, alert: ¬ifier.Alert{State: notifier.StatePending}}},
|
||||
2: {},
|
||||
}, map[int][]prompbmarshal.TimeSeries{
|
||||
0: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
1: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: 1, Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: float64(ts.Unix()), Timestamp: ts.Add(defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
// stale time series for `pending -> inactive`
|
||||
2: {
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}}},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "alertstate", Value: "pending"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
{
|
||||
Labels: []prompbmarshal.Label{{Name: "__name__", Value: alertForStateMetricName}, {Name: "a1", Value: "b1"}, {Name: "a2", Value: "b2"}, {Name: "a3", Value: "b3"}, {Name: "alertname", Value: "for-pending=>empty"}, {Name: "name", Value: "foo"}},
|
||||
Samples: []prompbmarshal.Sample{{Value: decimal.StaleNaN, Timestamp: ts.Add(2*defaultStep).UnixNano() / 1e6}},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
|
||||
@@ -7,11 +7,13 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -348,6 +350,7 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
hostToAddrs := make(map[string][]string)
|
||||
for _, bu := range up.busOriginal {
|
||||
host := bu.Hostname()
|
||||
port := bu.Port()
|
||||
if hostToAddrs[host] != nil {
|
||||
// ips for the given host have been already discovered
|
||||
continue
|
||||
@@ -364,7 +367,11 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
} else {
|
||||
resolvedAddrs = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
resolvedAddrs[i] = fmt.Sprintf("%s:%d", addr.Target, addr.Port)
|
||||
hostPort := port
|
||||
if hostPort == "" && addr.Port > 0 {
|
||||
hostPort = strconv.FormatUint(uint64(addr.Port), 10)
|
||||
}
|
||||
resolvedAddrs[i] = net.JoinHostPort(addr.Target, hostPort)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -375,7 +382,7 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
} else {
|
||||
resolvedAddrs = make([]string, len(addrs))
|
||||
for i, addr := range addrs {
|
||||
resolvedAddrs[i] = addr.String()
|
||||
resolvedAddrs[i] = net.JoinHostPort(addr.String(), port)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -389,17 +396,9 @@ func (up *URLPrefix) discoverBackendAddrsIfNeeded() {
|
||||
var busNew []*backendURL
|
||||
for _, bu := range up.busOriginal {
|
||||
host := bu.Hostname()
|
||||
port := bu.Port()
|
||||
for _, addr := range hostToAddrs[host] {
|
||||
buCopy := *bu
|
||||
buCopy.Host = addr
|
||||
if port != "" {
|
||||
if n := strings.IndexByte(buCopy.Host, ':'); n >= 0 {
|
||||
// Drop the discovered port and substitute it the port specified in bu.
|
||||
buCopy.Host = buCopy.Host[:n]
|
||||
}
|
||||
buCopy.Host += ":" + port
|
||||
}
|
||||
busNew = append(busNew, &backendURL{
|
||||
url: &buCopy,
|
||||
})
|
||||
|
||||
@@ -3,12 +3,14 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
|
||||
)
|
||||
|
||||
func TestParseAuthConfigFailure(t *testing.T) {
|
||||
@@ -799,6 +801,75 @@ func TestBrokenBackend(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiscoverBackendIPsWithIPV6(t *testing.T) {
|
||||
f := func(actualUrl, expectedUrl string) {
|
||||
t.Helper()
|
||||
up := mustParseURL(actualUrl)
|
||||
up.discoverBackendIPs = true
|
||||
up.loadBalancingPolicy = "least_loaded"
|
||||
|
||||
up.discoverBackendAddrsIfNeeded()
|
||||
pbus := up.bus.Load()
|
||||
bus := *pbus
|
||||
|
||||
if len(bus) != 1 {
|
||||
t.Fatalf("expected url list to be of size 1; got %d instead", len(bus))
|
||||
}
|
||||
|
||||
got := bus[0].url.Host
|
||||
if got != expectedUrl {
|
||||
t.Fatalf(`expected url to be %q; got %q instead`, expectedUrl, bus[0].url.Host)
|
||||
}
|
||||
}
|
||||
|
||||
// Discover backendURL with SRV hostnames
|
||||
customResolver := &fakeResolver{
|
||||
Resolver: &net.Resolver{},
|
||||
// SRV records must return hostname
|
||||
// not an IP address
|
||||
lookupSRVResults: map[string][]*net.SRV{
|
||||
"_vmselect._tcp.selectwithport.": {
|
||||
{
|
||||
Target: "vmselect.local",
|
||||
Port: 8481,
|
||||
},
|
||||
},
|
||||
"_vmselect._tcp.selectwoport.": {
|
||||
{
|
||||
Target: "vmselect.local",
|
||||
},
|
||||
},
|
||||
},
|
||||
lookupIPAddrResults: map[string][]net.IPAddr{
|
||||
"vminsert.local": {
|
||||
{
|
||||
IP: net.ParseIP("10.0.10.13"),
|
||||
},
|
||||
},
|
||||
"ipv6.vminsert.local": {
|
||||
{
|
||||
IP: net.ParseIP("2607:f8b0:400a:80b::200e"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
origResolver := netutil.Resolver
|
||||
netutil.Resolver = customResolver
|
||||
defer func() {
|
||||
netutil.Resolver = origResolver
|
||||
}()
|
||||
f("http://srv+_vmselect._tcp.selectwithport.:8080", "vmselect.local:8080")
|
||||
f("http://srv+_vmselect._tcp.selectwithport.:", "vmselect.local:8481")
|
||||
f("http://srv+_vmselect._tcp.selectwoport.:8080", "vmselect.local:8080")
|
||||
f("http://srv+_vmselect._tcp.selectwoport.", "vmselect.local:")
|
||||
|
||||
f("http://vminsert.local:8080", "10.0.10.13:8080")
|
||||
f("http://vminsert.local", "10.0.10.13:")
|
||||
f("http://ipv6.vminsert.local:8080", "[2607:f8b0:400a:80b::200e]:8080")
|
||||
f("http://ipv6.vminsert.local", "[2607:f8b0:400a:80b::200e]:")
|
||||
|
||||
}
|
||||
|
||||
func getRegexs(paths []string) []*Regex {
|
||||
var sps []*Regex
|
||||
for _, path := range paths {
|
||||
|
||||
@@ -180,11 +180,7 @@ func (c *Client) Explore() ([]*Series, error) {
|
||||
log.Printf("skip measurement %q since it has no fields", s.Measurement)
|
||||
continue
|
||||
}
|
||||
tags, ok := measurementTags[s.Measurement]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to find tags of measurement %s", s.Measurement)
|
||||
}
|
||||
emptyTags := getEmptyTags(tags, s.LabelPairs)
|
||||
emptyTags := getEmptyTags(measurementTags[s.Measurement], s.LabelPairs)
|
||||
for _, field := range fields {
|
||||
is := &Series{
|
||||
Measurement: s.Measurement,
|
||||
@@ -201,11 +197,16 @@ func (c *Client) Explore() ([]*Series, error) {
|
||||
// getEmptyTags returns tags of a measurement that are missing in a specific series.
|
||||
// Tags represent all tags of a measurement. LabelPairs represent tags of a specific series.
|
||||
func getEmptyTags(tags map[string]struct{}, LabelPairs []LabelPair) []string {
|
||||
if len(tags) == 0 {
|
||||
// fast path: the measurement does not contain any tag
|
||||
return nil
|
||||
}
|
||||
|
||||
labelMap := make(map[string]struct{})
|
||||
for _, pair := range LabelPairs {
|
||||
labelMap[pair.Name] = struct{}{}
|
||||
}
|
||||
result := make([]string, 0, len(labelMap)-len(LabelPairs))
|
||||
var result []string
|
||||
for tag := range tags {
|
||||
if _, ok := labelMap[tag]; !ok {
|
||||
result = append(result, tag)
|
||||
|
||||
@@ -105,7 +105,7 @@ func (ctx *InsertCtx) TryPrepareLabels(hasRelabeling bool) bool {
|
||||
if timeserieslimits.Enabled() && timeserieslimits.IsExceeding(ctx.Labels) {
|
||||
return false
|
||||
}
|
||||
ctx.sortLabelsIfNeeded()
|
||||
ctx.SortLabelsIfNeeded()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@ var sortLabels = flag.Bool("sortLabels", false, `Whether to sort labels for inco
|
||||
`For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. `+
|
||||
`Enabled sorting for labels can slow down ingestion performance a bit`)
|
||||
|
||||
// sortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set
|
||||
func (ctx *InsertCtx) sortLabelsIfNeeded() {
|
||||
// SortLabelsIfNeeded sorts labels if -sortLabels command-line flag is set
|
||||
func (ctx *InsertCtx) SortLabelsIfNeeded() {
|
||||
if *sortLabels {
|
||||
sort.Sort(&ctx.Labels)
|
||||
}
|
||||
|
||||
@@ -118,8 +118,14 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !ic.TryPrepareLabels(false) {
|
||||
continue
|
||||
// special case for optimisations below
|
||||
// do not call TryPrepareLabels
|
||||
// manually apply sort and limits on demand
|
||||
ic.SortLabelsIfNeeded()
|
||||
if hasLimitsEnabled {
|
||||
if timeserieslimits.IsExceeding(ic.Labels) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
labelsLen := len(ic.Labels)
|
||||
@@ -132,8 +138,6 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
|
||||
ic.Labels = ic.Labels[:labelsLen]
|
||||
ic.AddLabel("", metricGroup)
|
||||
if hasLimitsEnabled {
|
||||
// special case for optimisation above
|
||||
// check only __name__ label value limits
|
||||
if timeserieslimits.IsExceeding(ic.Labels[len(ic.Labels)-1:]) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -46,6 +46,8 @@ var (
|
||||
"so there is no need in spending additional CPU time on its handling. Staleness markers may exist only in data obtained from Prometheus scrape targets")
|
||||
minWindowForInstantRollupOptimization = flag.Duration("search.minWindowForInstantRollupOptimization", time.Hour*3, "Enable cache-based optimization for repeated queries "+
|
||||
"to /api/v1/query (aka instant queries), which contain rollup functions with lookbehind window exceeding the given value")
|
||||
maxBinaryOpPushdownLabelValues = flag.Int("search.maxBinaryOpPushdownLabelValues", 100, "The maximum number of values for a label in the first expression that can be extracted as a common label filter and pushed down to the second expression in a binary operation. "+
|
||||
"A larger value makes the pushed-down filter more complex but fewer time series will be returned. This flag is useful when selective label contains numerous values, for example `instance`, and storage resources are abundant.")
|
||||
)
|
||||
|
||||
// The minimum number of points per timeseries for enabling time rounding.
|
||||
@@ -582,7 +584,7 @@ func getCommonLabelFilters(tss []*timeseries) []metricsql.LabelFilter {
|
||||
}
|
||||
continue
|
||||
}
|
||||
if len(vc.values) > 100 {
|
||||
if len(vc.values) > *maxBinaryOpPushdownLabelValues {
|
||||
// Too many unique values found for the given tag.
|
||||
// Do not make a filter on such values, since it may slow down
|
||||
// search for matching time series.
|
||||
|
||||
@@ -6,8 +6,6 @@ import (
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
@@ -16,7 +14,6 @@ import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
|
||||
@@ -270,7 +267,7 @@ func getReverseCmpOp(op string) string {
|
||||
}
|
||||
|
||||
func parsePromQLWithCache(q string) (metricsql.Expr, error) {
|
||||
pcv := parseCacheV.Get(q)
|
||||
pcv := parseCacheV.get(q)
|
||||
if pcv == nil {
|
||||
e, err := metricsql.Parse(q)
|
||||
if err == nil {
|
||||
@@ -284,7 +281,7 @@ func parsePromQLWithCache(q string) (metricsql.Expr, error) {
|
||||
e: e,
|
||||
err: err,
|
||||
}
|
||||
parseCacheV.Put(q, pcv)
|
||||
parseCacheV.put(q, pcv)
|
||||
}
|
||||
if pcv.err != nil {
|
||||
return nil, pcv.err
|
||||
@@ -328,80 +325,3 @@ func escapeDots(s string) string {
|
||||
}
|
||||
return string(result)
|
||||
}
|
||||
|
||||
var parseCacheV = func() *parseCache {
|
||||
pc := &parseCache{
|
||||
m: make(map[string]*parseCacheValue),
|
||||
}
|
||||
metrics.NewGauge(`vm_cache_requests_total{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.Requests())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_misses_total{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.Misses())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.Len())
|
||||
})
|
||||
return pc
|
||||
}()
|
||||
|
||||
const parseCacheMaxLen = 10e3
|
||||
|
||||
type parseCacheValue struct {
|
||||
e metricsql.Expr
|
||||
err error
|
||||
}
|
||||
|
||||
type parseCache struct {
|
||||
requests atomic.Uint64
|
||||
misses atomic.Uint64
|
||||
|
||||
m map[string]*parseCacheValue
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (pc *parseCache) Requests() uint64 {
|
||||
return pc.requests.Load()
|
||||
}
|
||||
|
||||
func (pc *parseCache) Misses() uint64 {
|
||||
return pc.misses.Load()
|
||||
}
|
||||
|
||||
func (pc *parseCache) Len() uint64 {
|
||||
pc.mu.RLock()
|
||||
n := len(pc.m)
|
||||
pc.mu.RUnlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
||||
func (pc *parseCache) Get(q string) *parseCacheValue {
|
||||
pc.requests.Add(1)
|
||||
|
||||
pc.mu.RLock()
|
||||
pcv := pc.m[q]
|
||||
pc.mu.RUnlock()
|
||||
|
||||
if pcv == nil {
|
||||
pc.misses.Add(1)
|
||||
}
|
||||
return pcv
|
||||
}
|
||||
|
||||
func (pc *parseCache) Put(q string, pcv *parseCacheValue) {
|
||||
pc.mu.Lock()
|
||||
overflow := len(pc.m) - parseCacheMaxLen
|
||||
if overflow > 0 {
|
||||
// Remove 10% of items from the cache.
|
||||
overflow = int(float64(len(pc.m)) * 0.1)
|
||||
for k := range pc.m {
|
||||
delete(pc.m, k)
|
||||
overflow--
|
||||
if overflow <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
pc.m[q] = pcv
|
||||
pc.mu.Unlock()
|
||||
}
|
||||
|
||||
142
app/vmselect/promql/parse_cache.go
Normal file
142
app/vmselect/promql/parse_cache.go
Normal file
@@ -0,0 +1,142 @@
|
||||
// Cache for metricsql expressions
|
||||
// Based on the fastcache idea of locking buckets in order to avoid whole cache locks.
|
||||
// See: https://github.com/VictoriaMetrics/fastcache
|
||||
package promql
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
|
||||
xxhash "github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
var parseCacheV = func() *parseCache {
|
||||
pc := newParseCache()
|
||||
metrics.NewGauge(`vm_cache_requests_total{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.requests())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_misses_total{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.misses())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.len())
|
||||
})
|
||||
return pc
|
||||
}()
|
||||
|
||||
const (
	// parseBucketCount is the number of shard buckets; queries are spread
	// across buckets by hash to reduce lock contention.
	parseBucketCount = 128

	// parseCacheMaxLen is the target upper bound on the total number of
	// cached entries across all buckets.
	parseCacheMaxLen int = 10e3

	// parseBucketMaxLen is the per-bucket entry limit.
	parseBucketMaxLen int = parseCacheMaxLen / parseBucketCount

	// parseBucketFreePercent is the fraction of entries evicted from a
	// bucket when it overflows.
	parseBucketFreePercent float64 = 0.1
)
|
||||
|
||||
// parseCacheValue holds the outcome of parsing a single query string:
// either the parsed expression or the parse error.
type parseCacheValue struct {
	e   metricsql.Expr
	err error
}
|
||||
|
||||
// parseBucket is a single shard of parseCache.
// mu guards m; requests and misses are updated atomically so get() only
// needs the read lock for the map lookup itself.
type parseBucket struct {
	m  map[string]*parseCacheValue
	mu sync.RWMutex

	requests atomic.Uint64
	misses   atomic.Uint64
}
|
||||
|
||||
// parseCache is a sharded cache for parsed MetricsQL expressions.
// It locks individual buckets instead of the whole cache, following the
// fastcache idea. See https://github.com/VictoriaMetrics/fastcache
type parseCache struct {
	buckets [parseBucketCount]parseBucket
}
|
||||
|
||||
func newParseCache() *parseCache {
|
||||
pc := new(parseCache)
|
||||
for i := 0; i < parseBucketCount; i++ {
|
||||
pc.buckets[i] = newParseBucket()
|
||||
}
|
||||
return pc
|
||||
}
|
||||
|
||||
func (pc *parseCache) put(q string, pcv *parseCacheValue) {
|
||||
h := xxhash.Sum64String(q)
|
||||
idx := h % parseBucketCount
|
||||
pc.buckets[idx].put(q, pcv)
|
||||
}
|
||||
|
||||
func (pc *parseCache) get(q string) *parseCacheValue {
|
||||
h := xxhash.Sum64String(q)
|
||||
idx := h % parseBucketCount
|
||||
return pc.buckets[idx].get(q)
|
||||
}
|
||||
|
||||
func (pc *parseCache) requests() uint64 {
|
||||
var n uint64
|
||||
for i := 0; i < parseBucketCount; i++ {
|
||||
n += pc.buckets[i].requests.Load()
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (pc *parseCache) misses() uint64 {
|
||||
var n uint64
|
||||
for i := 0; i < parseBucketCount; i++ {
|
||||
n += pc.buckets[i].misses.Load()
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (pc *parseCache) len() uint64 {
|
||||
var n uint64
|
||||
for i := 0; i < parseBucketCount; i++ {
|
||||
n += pc.buckets[i].len()
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func newParseBucket() parseBucket {
|
||||
return parseBucket{
|
||||
m: make(map[string]*parseCacheValue, parseBucketMaxLen),
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *parseBucket) len() uint64 {
|
||||
pb.mu.RLock()
|
||||
n := len(pb.m)
|
||||
pb.mu.RUnlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
||||
// get returns the cached value for q, or nil on a miss.
// Every call counts as a request; nil results additionally count as misses.
func (pb *parseBucket) get(q string) *parseCacheValue {
	pb.requests.Add(1)

	pb.mu.RLock()
	pcv := pb.m[q]
	pb.mu.RUnlock()

	if pcv == nil {
		pb.misses.Add(1)
	}
	return pcv
}
|
||||
|
||||
// put stores pcv under q. When the bucket is over parseBucketMaxLen,
// roughly parseBucketFreePercent of its entries are evicted first;
// victims are arbitrary since Go map iteration order is random.
func (pb *parseBucket) put(q string, pcv *parseCacheValue) {
	pb.mu.Lock()
	overflow := len(pb.m) - parseBucketMaxLen
	if overflow > 0 {
		// Remove parseBucketFreePercent*100 % of items from the bucket.
		overflow = int(float64(len(pb.m)) * parseBucketFreePercent)
		for k := range pb.m {
			delete(pb.m, k)
			overflow--
			if overflow <= 0 {
				break
			}
		}
	}
	pb.m[q] = pcv
	pb.mu.Unlock()
}
|
||||
129
app/vmselect/promql/parse_cache_test.go
Normal file
129
app/vmselect/promql/parse_cache_test.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/metricsql"
|
||||
)
|
||||
|
||||
// testGetParseCacheValue parses q and wraps the result (expression or
// error) into a parseCacheValue, mirroring what parsePromQLWithCache caches.
func testGetParseCacheValue(q string) *parseCacheValue {
	e, err := metricsql.Parse(q)
	return &parseCacheValue{
		e:   e,
		err: err,
	}
}
|
||||
|
||||
// testGenerateQueries returns items distinct LogsQL-style test queries.
func testGenerateQueries(items int) []string {
	queries := make([]string, 0, items)
	for i := 0; i < items; i++ {
		q := fmt.Sprintf(`node_time_seconds{instance="node%d", job="job%d"}`, i, i)
		queries = append(queries, q)
	}
	return queries
}
|
||||
|
||||
func TestParseCache(t *testing.T) {
|
||||
pc := newParseCache()
|
||||
if pc.len() != 0 || pc.misses() != 0 || pc.requests() != 0 {
|
||||
t.Errorf("unexpected pc.Len()=%d, pc.Misses()=%d, pc.Requests()=%d; expected all to be zero.", pc.len(), pc.misses(), pc.requests())
|
||||
}
|
||||
|
||||
q1 := `foo{bar="baz"}`
|
||||
v1 := testGetParseCacheValue(q1)
|
||||
|
||||
q2 := `foo1{bar1="baz1"}`
|
||||
v2 := testGetParseCacheValue(q2)
|
||||
|
||||
pc.put(q1, v1)
|
||||
if pc.len() != 1 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", pc.len(), 1)
|
||||
}
|
||||
|
||||
if res := pc.get(q2); res != nil {
|
||||
t.Errorf("unexpected non-empty value obtained from cache: %d ", res)
|
||||
}
|
||||
if pc.len() != 1 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", pc.len(), 1)
|
||||
}
|
||||
if miss := pc.misses(); miss != 1 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", miss, 1)
|
||||
}
|
||||
if req := pc.requests(); req != 1 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", req, 1)
|
||||
}
|
||||
|
||||
pc.put(q2, v2)
|
||||
if pc.len() != 2 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", pc.len(), 2)
|
||||
}
|
||||
|
||||
if res := pc.get(q1); res != v1 {
|
||||
t.Errorf("unexpected value obtained; got %v; want %v", res, v1)
|
||||
}
|
||||
|
||||
if res := pc.get(q2); res != v2 {
|
||||
t.Errorf("unexpected value obtained; got %v; want %v", res, v2)
|
||||
}
|
||||
|
||||
pc.put(q2, v2)
|
||||
if pc.len() != 2 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", pc.len(), 2)
|
||||
}
|
||||
if miss := pc.misses(); miss != 1 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", miss, 1)
|
||||
}
|
||||
if req := pc.requests(); req != 3 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", req, 3)
|
||||
}
|
||||
|
||||
if res := pc.get(q2); res != v2 {
|
||||
t.Errorf("unexpected value obtained; got %v; want %v", res, v2)
|
||||
}
|
||||
if pc.len() != 2 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", pc.len(), 2)
|
||||
}
|
||||
if miss := pc.misses(); miss != 1 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", miss, 1)
|
||||
}
|
||||
if req := pc.requests(); req != 4 {
|
||||
t.Errorf("unexpected value obtained; got %d; want %d", req, 4)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCacheBucketOverflow(t *testing.T) {
|
||||
b := newParseBucket()
|
||||
var expectedLen uint64
|
||||
|
||||
// +2 for overflow and clean up
|
||||
queries := testGenerateQueries(parseBucketMaxLen + 2)
|
||||
|
||||
// Same value for all keys
|
||||
v := testGetParseCacheValue(queries[0])
|
||||
|
||||
// Fill bucket
|
||||
for i := 0; i < parseBucketMaxLen; i++ {
|
||||
b.put(queries[i], v)
|
||||
}
|
||||
expectedLen = uint64(parseBucketMaxLen)
|
||||
if b.len() != expectedLen {
|
||||
t.Errorf("unexpected value obtained; got %v; want %v", b.len(), expectedLen)
|
||||
}
|
||||
|
||||
// Overflow bucket
|
||||
expectedLen = uint64(parseBucketMaxLen + 1)
|
||||
b.put(queries[parseBucketMaxLen], v)
|
||||
if b.len() != uint64(expectedLen) {
|
||||
t.Errorf("unexpected value obtained; got %v; want %v", b.len(), expectedLen)
|
||||
}
|
||||
|
||||
// Clean up;
|
||||
oldLen := b.len()
|
||||
overflow := int(float64(oldLen) * parseBucketFreePercent)
|
||||
expectedLen = oldLen - uint64(overflow) + 1 // +1 for new entry
|
||||
|
||||
b.put(queries[parseBucketMaxLen+1], v)
|
||||
if b.len() != expectedLen {
|
||||
t.Errorf("unexpected value obtained; got %v; want %v", b.len(), expectedLen)
|
||||
}
|
||||
}
|
||||
235
app/vmselect/promql/parse_cache_timing_test.go
Normal file
235
app/vmselect/promql/parse_cache_timing_test.go
Normal file
@@ -0,0 +1,235 @@
|
||||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// BenchmarkCachePutNoOverFlow measures concurrent put throughput while the
// cache stays at half capacity, so no eviction is ever triggered.
func BenchmarkCachePutNoOverFlow(b *testing.B) {
	const items int = (parseCacheMaxLen / 2)
	pc := newParseCache()

	queries := testGenerateQueries(items)
	// The same value is reused for all keys; only key handling is measured.
	v := testGetParseCacheValue(queries[0])

	b.ResetTimer()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < items; i++ {
				pc.put(queries[i], v)
			}
		}
	})
	// Repeated puts of the same keys must not grow the cache.
	if pc.len() != uint64(items) {
		b.Errorf("unexpected value obtained; got %d; want %d", pc.len(), items)
	}
}
|
||||
|
||||
// BenchmarkCacheGetNoOverflow measures concurrent get throughput on a
// pre-filled cache at half capacity; every lookup must hit.
func BenchmarkCacheGetNoOverflow(b *testing.B) {
	const items int = parseCacheMaxLen / 2
	pc := newParseCache()

	queries := testGenerateQueries(items)
	v := testGetParseCacheValue(queries[0])

	// Pre-fill the cache outside of the timed region.
	for i := 0; i < len(queries); i++ {
		pc.put(queries[i], v)
	}
	b.ResetTimer()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < items; i++ {
				if v := pc.get(queries[i]); v == nil {
					b.Errorf("unexpected nil value obtained from cache for query: %s ", queries[i])
				}
			}
		}
	})
}
|
||||
|
||||
// BenchmarkCachePutGetNoOverflow measures a mixed put+get workload at half
// capacity, so no eviction is triggered and every get must hit.
func BenchmarkCachePutGetNoOverflow(b *testing.B) {
	const items int = parseCacheMaxLen / 2
	pc := newParseCache()

	queries := testGenerateQueries(items)
	v := testGetParseCacheValue(queries[0])

	b.ResetTimer()
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < items; i++ {
				pc.put(queries[i], v)
				if res := pc.get(queries[i]); res == nil {
					b.Errorf("unexpected nil value obtained from cache for query: %s ", queries[i])
				}
			}
		}
	})
	// Repeated puts of the same keys must not grow the cache.
	if pc.len() != uint64(items) {
		b.Errorf("unexpected value obtained; got %d; want %d", pc.len(), items)
	}
}
|
||||
|
||||
func BenchmarkCachePutOverflow(b *testing.B) {
|
||||
const items int = parseCacheMaxLen + (parseCacheMaxLen / 2)
|
||||
c := newParseCache()
|
||||
|
||||
queries := testGenerateQueries(items)
|
||||
v := testGetParseCacheValue(queries[0])
|
||||
|
||||
for i := 0; i < parseCacheMaxLen; i++ {
|
||||
c.put(queries[i], v)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
for i := parseCacheMaxLen; i < items; i++ {
|
||||
c.put(queries[i], v)
|
||||
}
|
||||
}
|
||||
})
|
||||
maxElemnts := uint64(parseCacheMaxLen + parseBucketCount)
|
||||
if c.len() > maxElemnts {
|
||||
b.Errorf("cache length is more than expected; got %d, expected %d", c.len(), maxElemnts)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCachePutGetOverflow(b *testing.B) {
|
||||
const items int = parseCacheMaxLen + (parseCacheMaxLen / 2)
|
||||
c := newParseCache()
|
||||
|
||||
queries := testGenerateQueries(items)
|
||||
v := testGetParseCacheValue(queries[0])
|
||||
|
||||
for i := 0; i < parseCacheMaxLen; i++ {
|
||||
c.put(queries[i], v)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
for i := parseCacheMaxLen; i < items; i++ {
|
||||
c.put(queries[i], v)
|
||||
c.get(queries[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
maxElemnts := uint64(parseCacheMaxLen + parseBucketCount)
|
||||
if c.len() > maxElemnts {
|
||||
b.Errorf("cache length is more than expected; got %d, expected %d", c.len(), maxElemnts)
|
||||
}
|
||||
}
|
||||
|
||||
// testSimpleQueries is a set of short selector-only queries used by the
// "Simple" parse-cache benchmarks.
var testSimpleQueries = []string{
	`m{a="b"}`,
	`{a="b"}`,
	`m{c="d",a="b"}`,
	`{a="b",c="d"}`,
	`m1{a="foo"}`,
	`m2{a="bar"}`,
	`m1{b="foo"}`,
	`m2{b="bar"}`,
	`m1{a="foo",b="bar"}`,
	`m2{b="bar",c="x"}`,
	`{b="bar"}`,
}
|
||||
|
||||
// BenchmarkParsePromQLWithCacheSimple measures single-goroutine cached
// parsing of short queries; after the first iteration all lookups hit.
func BenchmarkParsePromQLWithCacheSimple(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		for j := 0; j < len(testSimpleQueries); j++ {
			_, err := parsePromQLWithCache(testSimpleQueries[j])
			if err != nil {
				b.Errorf("unexpected error: %s", err)
			}
		}
	}
}
|
||||
|
||||
// BenchmarkParsePromQLWithCacheSimpleParallel is the parallel variant of
// BenchmarkParsePromQLWithCacheSimple, stressing bucket lock contention.
func BenchmarkParsePromQLWithCacheSimpleParallel(b *testing.B) {
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < len(testSimpleQueries); i++ {
				_, err := parsePromQLWithCache(testSimpleQueries[i])
				if err != nil {
					b.Errorf("unexpected error: %s", err)
				}
			}
		}
	})
}
|
||||
|
||||
// testComplexQueries is a set of larger multi-line queries used by the
// "Complex" parse-cache benchmarks.
// NOTE(review): whitespace inside these raw strings was mangled by
// extraction; the exact indentation should be confirmed against the repo.
var testComplexQueries = []string{
	`sort_desc(label_set(2, "foo", "bar") * ignoring(a) (label_set(time(), "foo", "bar") or label_set(10, "foo", "qwert")))`,
	`sum(a.b{c="d.e",x=~"a.b.+[.a]",y!~"aaa.bb|cc.dd"}) + avg_over_time(1,sum({x=~"aa.bb"}))`,
	`sort((label_set(time() offset 100s, "foo", "bar"), label_set(time()+10, "foo", "baz") offset 50s) offset 400s)`,
	`sort(label_map((
		label_set(time(), "label", "v1"),
		label_set(time()+100, "label", "v2"),
		label_set(time()+200, "label", "v3"),
		label_set(time()+300, "x", "y"),
		label_set(time()+400, "label", "v4"),
	), "label", "v1", "foo", "v2", "bar", "", "qwe", "v4", ""))`,
	`sort(labels_equal((
		label_set(10, "instance", "qwe", "host", "rty"),
		label_set(20, "instance", "qwe", "host", "qwe"),
		label_set(30, "aaa", "bbb", "instance", "foo", "host", "foo"),
	), "instance", "host"))`,
	`with (
		x = (
			label_set(time() > 1500, "foo", "123.456", "__name__", "aaa"),
			label_set(-time(), "foo", "bar", "__name__", "bbb"),
			label_set(-time(), "__name__", "bxs"),
			label_set(-time(), "foo", "45", "bar", "xs"),
		)
	)
	sort(x + label_value(x, "foo"))`,
	`label_replace(
		label_replace(
			label_replace(time(), "__name__", "x${1}y", "foo", ".*"),
			"xxx", "foo${1}bar(${1})", "__name__", "(.+)"),
		"xxx", "AA$1", "xxx", "foox(.+)"
	)`,
	`sort_desc(union(
		label_set(time() > 1400, "__name__", "x", "foo", "bar"),
		label_set(time() < 1700, "__name__", "y", "foo", "baz")) default 123)`,
	`sort(histogram_quantile(0.6,
		label_set(90, "foo", "bar", "le", "10")
		or label_set(100, "foo", "bar", "le", "30")
		or label_set(300, "foo", "bar", "le", "+Inf")
		or label_set(200, "tag", "xx", "le", "10")
		or label_set(300, "tag", "xx", "le", "30")
	))`,
}
|
||||
|
||||
// BenchmarkParsePromQLWithCacheComplex measures single-goroutine cached
// parsing of large queries; after the first iteration all lookups hit.
func BenchmarkParsePromQLWithCacheComplex(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		for j := 0; j < len(testComplexQueries); j++ {
			_, err := parsePromQLWithCache(testComplexQueries[j])
			if err != nil {
				b.Errorf("unexpected error: %s", err)
			}
		}
	}
}
|
||||
|
||||
// BenchmarkParsePromQLWithCacheComplexParallel is the parallel variant of
// BenchmarkParsePromQLWithCacheComplex, stressing bucket lock contention.
func BenchmarkParsePromQLWithCacheComplexParallel(b *testing.B) {
	b.ReportAllocs()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < len(testComplexQueries); i++ {
				_, err := parsePromQLWithCache(testComplexQueries[i])
				if err != nil {
					b.Errorf("unexpected error: %s", err)
				}
			}
		}
	})
}
|
||||
@@ -56,3 +56,8 @@ export interface ReportMetaData {
|
||||
comment: string;
|
||||
params: Record<string, string>;
|
||||
}
|
||||
|
||||
export interface LogsFiledValues {
|
||||
value: string;
|
||||
hits: number;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,154 @@
|
||||
import React, { FC, useCallback, useEffect, useMemo, useState } from "preact/compat";
|
||||
import Autocomplete, { AutocompleteOptions } from "../../../Main/Autocomplete/Autocomplete";
|
||||
import { AUTOCOMPLETE_LIMITS } from "../../../../constants/queryAutocomplete";
|
||||
import { QueryEditorAutocompleteProps } from "../QueryEditor";
|
||||
import { getContextData, splitLogicalParts } from "./parser";
|
||||
import { ContextType, LogicalPart, LogicalPartType } from "./types";
|
||||
import { useFetchLogsQLOptions } from "./useFetchLogsQLOptions";
|
||||
import { pipeList } from "./pipes";
|
||||
|
||||
// Autocomplete for the LogsQL query editor: splits the query into logical
// parts, detects the context under the caret and suggests field names,
// field values or pipe names accordingly.
const LogsQueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
  value,
  anchorEl,
  caretPosition,
  hasHelperText,
  onSelect,
  onFoundOptions
}) => {
  // Pixel offset of the popup relative to the input anchor element.
  const [offsetPos, setOffsetPos] = useState({ top: 0, left: 0 });

  // Query text split at the caret; a non-collapsed selection is treated as
  // "everything before the cursor".
  const fullValue = useMemo(() => {
    if (caretPosition[0] !== caretPosition[1]) return { valueBeforeCursor: value, valueAfterCursor: "" };
    const valueBeforeCursor = value.substring(0, caretPosition[0]);
    const valueAfterCursor = value.substring(caretPosition[1]);
    return { valueBeforeCursor, valueAfterCursor };
  }, [value, caretPosition]);

  const logicalParts = useMemo(() => {
    return splitLogicalParts(value);
  }, [value]);

  // The logical part under the caret plus its parsed context; undefined when
  // there is a selection or the caret is outside any part.
  const contextData = useMemo(() => {
    if (caretPosition[0] !== caretPosition[1]) return;
    const part = logicalParts.find(p => caretPosition[0] >= p.position[0] && caretPosition[0] <= p.position[1]);
    if (!part) return;
    const cursorStartPosition = caretPosition[0] - part.position[0];
    return {
      ...part,
      ...getContextData(part, cursorStartPosition)
    };
  }, [logicalParts, caretPosition]);

  const { fieldNames, fieldValues, loading } = useFetchLogsQLOptions(contextData);

  // Dropdown options chosen by the detected context type.
  const options = useMemo(() => {
    switch (contextData?.contextType) {
      case ContextType.FilterName:
      case ContextType.FilterUnknown:
        return fieldNames;
      case ContextType.FilterValue:
        return fieldValues;
      case ContextType.PipeName:
        return pipeList;
      default:
        return [];
    }
  }, [contextData, fieldNames, fieldValues]);

  // Rebuilds the whole query, substituting insertValue for the part with the
  // given id; pipes are re-joined with " | ", everything else with spaces.
  const getUpdatedValue = (insertValue: string, logicalParts: LogicalPart[], id?: number) => {
    return logicalParts.reduce((acc, part) => {
      const value = part.id === id ? insertValue : part.value;
      const separator = part.type === LogicalPartType.Pipe ? " | " : " ";
      return `${acc}${separator}${value}`;
    }, "").trim();
  };

  // Adapts the raw inserted text to the context: appends ":" after a filter
  // name, or prefixes a filter value with its name and quotes it (except for
  // _stream: filters).
  const getModifyInsert = (insert: string, contextType: ContextType, value = "", insertType?: string) => {
    let modifiedInsert = insert;

    if (insertType === ContextType.FilterName) {
      modifiedInsert += ":";
    } else if (contextType === ContextType.FilterValue) {
      const insertWithQuotes = value.startsWith("_stream:") ? modifiedInsert : `"${modifiedInsert}"`;
      modifiedInsert = `${contextData?.filterName || ""}:${insertWithQuotes}`;
    }

    return modifiedInsert;
  };

  // Applies a selected option: rewrites the query and computes the new caret
  // position (pipes get one extra column for the trailing space).
  const handleSelect = useCallback((insert: string, item: AutocompleteOptions) => {
    const {
      id,
      contextType = ContextType.FilterUnknown,
      value = "",
      position = [0, 0]
    } = contextData || {};

    const insertValue = getModifyInsert(insert, contextType, value, item.type);
    const newValue = getUpdatedValue(insertValue, logicalParts, id);
    const updatedPosition = (position[0] || 1) + insertValue.length + (item.type === ContextType.PipeName ? 1 : 0);

    onSelect(newValue, updatedPosition);
  }, [contextData, logicalParts]);


  // Measures the caret's pixel position by mirroring the input's text and
  // styles into an off-screen element with a marker span at the caret, then
  // positions the popup at the marker's offset.
  useEffect(() => {
    if (!anchorEl.current) {
      setOffsetPos({ top: 0, left: 0 });
      return;
    }

    const element = anchorEl.current.querySelector("textarea") || anchorEl.current;
    const style = window.getComputedStyle(element);
    const fontSize = `${style.getPropertyValue("font-size")}`;
    const fontFamily = `${style.getPropertyValue("font-family")}`;
    const lineHeight = parseInt(`${style.getPropertyValue("line-height")}`);

    const span = document.createElement("div");
    span.style.font = `${fontSize} ${fontFamily}`;
    span.style.padding = style.getPropertyValue("padding");
    span.style.lineHeight = `${lineHeight}px`;
    span.style.width = `${element.offsetWidth}px`;
    span.style.maxWidth = `${element.offsetWidth}px`;
    span.style.whiteSpace = style.getPropertyValue("white-space");
    span.style.overflowWrap = style.getPropertyValue("overflow-wrap");

    const marker = document.createElement("span");
    span.appendChild(document.createTextNode(fullValue.valueBeforeCursor || ""));
    span.appendChild(marker);
    span.appendChild(document.createTextNode(fullValue.valueAfterCursor || ""));
    document.body.appendChild(span);

    const spanRect = span.getBoundingClientRect();
    const markerRect = marker.getBoundingClientRect();

    const leftOffset = markerRect.left - spanRect.left;
    // When a helper text line is shown below the input, shift the popup up
    // by one line so it still hugs the caret.
    const topOffset = markerRect.bottom - spanRect.bottom - (hasHelperText ? lineHeight : 0);
    setOffsetPos({ top: topOffset, left: leftOffset });

    span.remove();
    marker.remove();
  }, [anchorEl, caretPosition, hasHelperText, fullValue]);

  return (
    <>
      <Autocomplete
        loading={loading}
        disabledFullScreen
        value={contextData?.valueContext || ""}
        options={options}
        anchor={anchorEl}
        minLength={0}
        offset={offsetPos}
        onSelect={handleSelect}
        onFoundOptions={onFoundOptions}
        maxDisplayResults={{
          limit: AUTOCOMPLETE_LIMITS.displayResults,
          message: "Please, specify the query more precisely."
        }}
      />
    </>
  );
};

export default LogsQueryEditorAutocomplete;
|
||||
@@ -0,0 +1,117 @@
|
||||
import { ContextData, ContextType, LogicalPart, LogicalPartPosition, LogicalPartType } from "./types";
|
||||
import { pipeList } from "./pipes";
|
||||
|
||||
// Logical operators recognized between filters (matched case-insensitively).
const BUILDER_OPERATORS = ["AND", "OR", "NOT"];
// Known pipe names, derived from the shared pipe list.
const PIPE_NAMES = pipeList.map(p => p.value);
|
||||
|
||||
// splitLogicalParts splits a LogsQL expression into top-level logical parts
// (filters, operators and pipes). Separators ("|" and spaces) are ignored
// inside quotes and inside bracket pairs. Each part carries its [start, end]
// character positions within the input.
export const splitLogicalParts = (expr: string) => {
  // Replace spaces around the colon (:) with just the colon, removing the spaces
  const input = expr; //.replace(/\s*:\s*/g, ":");
  const parts: LogicalPart[] = [];
  let currentPart = "";
  // Once a "|" is seen at top level, every following part is a pipe part.
  let isPipePart = false;

  const quotes = ["'", "\"", "`"];
  let insideQuotes = false;
  let expectedQuote = "";

  const openBrackets = ["(", "[", "{"];
  const closeBrackets = [")", "]", "}"];
  const brackets = [...openBrackets, ...closeBrackets];
  // Bracket nesting depth; 0 means top level.
  let insideBrackets = 0;

  let startIndex = 0;

  for (let i = 0; i < input.length; i++) {
    const char = input[i];

    // Check if the current character is a quote
    if (quotes.includes(char)) {
      const isClosedQuote: boolean = insideQuotes && (char === expectedQuote);
      insideQuotes = !isClosedQuote;
      expectedQuote = isClosedQuote ? "" : char;
    }

    // Check if the current character is a bracket
    if (!insideQuotes && brackets.includes(char)) {
      const dir = openBrackets.includes(char) ? 1 : -1;
      insideBrackets += dir;
    }

    // Check if the current character is a pipe
    if ((!insideQuotes && !insideBrackets && char === "|")) {
      isPipePart = true;
      // Trim surrounding spaces out of the recorded part position.
      const countStartSpaces = currentPart.match(/^ */)?.[0].length || 0;
      const countEndSpaces = currentPart.match(/ *$/)?.[0].length || 0;
      pushPart(currentPart, true, [startIndex + countStartSpaces, i - countEndSpaces - 1], parts);
      currentPart = "";
      startIndex = i + 1;
      continue;
    }

    // Check if the current character is a space
    if (!isPipePart && !insideQuotes && !insideBrackets && char === " ") {
      // Spaces adjacent to a ":" belong to a single filter ("name : value"),
      // so only split when neither neighbor is a colon.
      const nextStr = input.slice(i).replace(/^\s*/, "");
      const prevStr = input.slice(0, i).replace(/\s*$/, "");
      if (!nextStr.startsWith(":") && !prevStr.endsWith(":")) {
        pushPart(currentPart, false, [startIndex, i - 1], parts);
        currentPart = "";
        startIndex = i + 1;
        continue;
      }
    }

    currentPart += char;
  }

  // push the last part
  pushPart(currentPart, isPipePart, [startIndex, input.length], parts);

  return parts;
};
|
||||
|
||||
const pushPart = (currentPart: string, isPipePart: boolean, position: LogicalPartPosition, parts: LogicalPart[]) => {
|
||||
const trimmedPart = currentPart.trim();
|
||||
if (!trimmedPart) return;
|
||||
const isOperator = BUILDER_OPERATORS.includes(trimmedPart.toUpperCase());
|
||||
parts.push({
|
||||
id: parts.length,
|
||||
value: trimmedPart,
|
||||
position,
|
||||
type: isPipePart
|
||||
? LogicalPartType.Pipe
|
||||
: isOperator ? LogicalPartType.Operator : LogicalPartType.Filter,
|
||||
});
|
||||
};
|
||||
|
||||
export const getContextData = (part: LogicalPart, cursorPos: number) => {
|
||||
const valueBeforeCursor = part.value.substring(0, cursorPos);
|
||||
const valueAfterCursor = part.value.substring(cursorPos);
|
||||
|
||||
const metaData: ContextData = {
|
||||
valueBeforeCursor,
|
||||
valueAfterCursor,
|
||||
valueContext: part.value,
|
||||
contextType: ContextType.Unknown,
|
||||
};
|
||||
|
||||
if (part.type === LogicalPartType.Filter) {
|
||||
const noColon = !valueBeforeCursor.includes(":") && !valueAfterCursor.includes(":");
|
||||
if (noColon) {
|
||||
metaData.contextType = ContextType.FilterUnknown;
|
||||
} else if (valueBeforeCursor.includes(":")) {
|
||||
const [filterName, filterValue] = valueBeforeCursor.split(":");
|
||||
metaData.contextType = ContextType.FilterValue;
|
||||
metaData.filterName = filterName;
|
||||
metaData.valueContext = filterValue;
|
||||
} else {
|
||||
metaData.contextType = ContextType.FilterName;
|
||||
}
|
||||
} else if (part.type === LogicalPartType.Pipe) {
|
||||
const valueStartWithPipe = PIPE_NAMES.some(p => part.value.startsWith(p));
|
||||
metaData.contextType = valueStartWithPipe ? ContextType.PipeValue : ContextType.PipeName;
|
||||
}
|
||||
|
||||
metaData.valueContext = metaData.valueContext.replace(/^["']|["']$/g, "");
|
||||
return metaData;
|
||||
};
|
||||
@@ -0,0 +1,130 @@
|
||||
import React from "react";
|
||||
import { ContextType } from "./types";
|
||||
import { FunctionIcon } from "../../../Main/Icons";
|
||||
|
||||
const docsUrl = "https://docs.victoriametrics.com/victorialogs/logsql";
|
||||
const classLink = "vm-link vm-link_colored";
|
||||
|
||||
const prepareDescription = (text: string): string => {
|
||||
const replaceClass = `$1 target="_blank" class="${classLink}" $2`;
|
||||
const replaceHref = `$1 $2${docsUrl}#`;
|
||||
return text
|
||||
.replace(/(<a) (href=")#/gm, replaceHref)
|
||||
.replace(/(<a) (href="[^"]+")/gm, replaceClass);
|
||||
};
|
||||
|
||||
export const pipeList = [
|
||||
{
|
||||
"value": "copy",
|
||||
"description": "<a href=\"#copy-pipe\"><code>copy</code></a> copies <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "delete",
|
||||
"description": "<a href=\"#delete-pipe\"><code>delete</code></a> deletes <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "drop_empty_fields",
|
||||
"description": "<a href=\"#drop_empty_fields-pipe\"><code>drop_empty_fields</code></a> drops <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a> with empty values."
|
||||
},
|
||||
{
|
||||
"value": "extract",
|
||||
"description": "<a href=\"#extract-pipe\"><code>extract</code></a> extracts the specified text into the given log fields."
|
||||
},
|
||||
{
|
||||
"value": "extract_regexp",
|
||||
"description": "<a href=\"#extract_regexp-pipe\"><code>extract_regexp</code></a> extracts the specified text into the given log fields via <a href=\"https://github.com/google/re2/wiki/Syntax\" rel=\"external\" target=\"_blank\">RE2 regular expressions</a>."
|
||||
},
|
||||
{
|
||||
"value": "field_names",
|
||||
"description": "<a href=\"#field_names-pipe\"><code>field_names</code></a> returns all the names of <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "field_values",
|
||||
"description": "<a href=\"#field_values-pipe\"><code>field_values</code></a> returns all the values for the given <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log field</a>."
|
||||
},
|
||||
{
|
||||
"value": "fields",
|
||||
"description": "<a href=\"#fields-pipe\"><code>fields</code></a> selects the given set of <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "filter",
|
||||
"description": "<a href=\"#filter-pipe\"><code>filter</code></a> applies additional <a href=\"#filters\">filters</a> to results."
|
||||
},
|
||||
{
|
||||
"value": "format",
|
||||
"description": "<a href=\"#format-pipe\"><code>format</code></a> formats output field from input <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "limit",
|
||||
"description": "<a href=\"#limit-pipe\"><code>limit</code></a> limits the number selected logs."
|
||||
},
|
||||
{
|
||||
"value": "math",
|
||||
"description": "<a href=\"#math-pipe\"><code>math</code></a> performs mathematical calculations over <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "offset",
|
||||
"description": "<a href=\"#offset-pipe\"><code>offset</code></a> skips the given number of selected logs."
|
||||
},
|
||||
{
|
||||
"value": "pack_json",
|
||||
"description": "<a href=\"#pack_json-pipe\"><code>pack_json</code></a> packs <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a> into JSON object."
|
||||
},
|
||||
{
|
||||
"value": "pack_logfmt",
|
||||
"description": "<a href=\"#pack_logfmt-pipe\"><code>pack_logfmt</code></a> packs <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a> into <a href=\"https://brandur.org/logfmt\" rel=\"external\" target=\"_blank\">logfmt</a> message."
|
||||
},
|
||||
{
|
||||
"value": "rename",
|
||||
"description": "<a href=\"#rename-pipe\"><code>rename</code></a> renames <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "replace",
|
||||
"description": "<a href=\"#replace-pipe\"><code>replace</code></a> replaces substrings in the specified <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "replace_regexp",
|
||||
"description": "<a href=\"#replace_regexp-pipe\"><code>replace_regexp</code></a> updates <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a> with regular expressions."
|
||||
},
|
||||
{
|
||||
"value": "sort",
|
||||
"description": "<a href=\"#sort-pipe\"><code>sort</code></a> sorts logs by the given <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "stats",
|
||||
"description": "<a href=\"#stats-pipe\"><code>stats</code></a> calculates various stats over the selected logs."
|
||||
},
|
||||
{
|
||||
"value": "stream_context",
|
||||
"description": "<a href=\"#stream_context-pipe\"><code>stream_context</code></a> allows selecting surrounding logs in front and after the matching logs\nper each <a href=\"/victorialogs/keyconcepts/#stream-fields\">log stream</a>."
|
||||
},
|
||||
{
|
||||
"value": "top",
|
||||
"description": "<a href=\"#top-pipe\"><code>top</code></a> returns top <code>N</code> field sets with the maximum number of matching logs."
|
||||
},
|
||||
{
|
||||
"value": "uniq",
|
||||
"description": "<a href=\"#uniq-pipe\"><code>uniq</code></a> returns unique log entires."
|
||||
},
|
||||
{
|
||||
"value": "unpack_json",
|
||||
"description": "<a href=\"#unpack_json-pipe\"><code>unpack_json</code></a> unpacks JSON messages from <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "unpack_logfmt",
|
||||
"description": "<a href=\"#unpack_logfmt-pipe\"><code>unpack_logfmt</code></a> unpacks <a href=\"https://brandur.org/logfmt\" rel=\"external\" target=\"_blank\">logfmt</a> messages from <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "unpack_syslog",
|
||||
"description": "<a href=\"#unpack_syslog-pipe\"><code>unpack_syslog</code></a> unpacks <a href=\"https://en.wikipedia.org/wiki/Syslog\" rel=\"external\" target=\"_blank\">syslog</a> messages from <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
},
|
||||
{
|
||||
"value": "unroll",
|
||||
"description": "<a href=\"#unroll-pipe\"><code>unroll</code></a> unrolls JSON arrays from <a href=\"https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model\">log fields</a>."
|
||||
}
|
||||
].map(item => ({
|
||||
...item,
|
||||
type: ContextType.PipeName,
|
||||
icon: <FunctionIcon/>,
|
||||
description: prepareDescription(item.description),
|
||||
}));
|
||||
@@ -0,0 +1,31 @@
|
||||
// Kind of a top-level query fragment produced by splitLogicalParts.
export enum LogicalPartType {
  Filter = "Filter",
  Pipe = "Pipe",
  Operator = "Operator",
}

// [start, end] character offsets of a part within the original expression.
export type LogicalPartPosition = [start: number, end: number];

// A single logical fragment of a LogsQL expression.
export interface LogicalPart {
  // Sequential index of the part within the parts array.
  id: number;
  // Trimmed text of the fragment.
  value: string;
  type: LogicalPartType;
  position: LogicalPartPosition;
}

// Autocomplete context derived from the fragment under the cursor
// (see getContextData).
export interface ContextData {
  valueBeforeCursor: string;
  valueAfterCursor: string;
  contextType: ContextType;
  // The value relevant to the current context, with surrounding quotes stripped.
  valueContext: string;
  // Set only when contextType is FilterValue.
  filterName?: string;
}

// What kind of completion should be offered at the cursor position.
// NOTE: PipeName intentionally serializes as "Pipes" (used as a display label).
export enum ContextType {
  FilterName = "FilterName",
  FilterUnknown = "FilterUnknown",
  FilterValue = "FilterValue",
  PipeName = "Pipes",
  PipeValue = "PipeValue",
  Unknown = "Unknown",
}
|
||||
@@ -0,0 +1,137 @@
|
||||
import React, { useEffect, useState, useRef, Dispatch, SetStateAction } from "preact/compat";
|
||||
import dayjs from "dayjs";
|
||||
import { ContextData, ContextType } from "./types";
|
||||
import { FunctionIcon, LabelIcon, MetricIcon, ValueIcon } from "../../../Main/Icons";
|
||||
import { AutocompleteOptions } from "../../../Main/Autocomplete/Autocomplete";
|
||||
import { useAppState } from "../../../../state/common/StateContext";
|
||||
import { useTimeState } from "../../../../state/time/TimeStateContext";
|
||||
import { useCallback } from "react";
|
||||
import { AUTOCOMPLETE_LIMITS } from "../../../../constants/queryAutocomplete";
|
||||
import { LogsFiledValues } from "../../../../api/types";
|
||||
import { useLogsDispatch, useLogsState } from "../../../../state/logsPanel/LogsStateContext";
|
||||
|
||||
// Arguments for the internal fetchData helper.
type FetchDataArgs = {
  urlSuffix: string;
  setter: Dispatch<SetStateAction<AutocompleteOptions[]>>
  type: ContextType;
  params?: URLSearchParams;
}

// Icon displayed next to an autocomplete option, keyed by its context type.
const icons = {
  [ContextType.FilterName]: <MetricIcon/>,
  [ContextType.FilterUnknown]: <MetricIcon/>,
  [ContextType.FilterValue]: <ValueIcon/>,
  [ContextType.PipeName]: <FunctionIcon/>,
  [ContextType.PipeValue]: <LabelIcon/>,
  [ContextType.Unknown]: <ValueIcon/>
};

// Loads LogsQL autocomplete options (field names and field values) from the
// /select/logsql HTTP API, depending on the cursor context in contextData.
// Responses are cached in the logs state; starting a new request aborts the
// previous in-flight one.
export const useFetchLogsQLOptions = (contextData?: ContextData) => {
  const { serverUrl } = useAppState();
  const { period: { start, end } } = useTimeState();
  const { autocompleteCache } = useLogsState();
  const dispatch = useLogsDispatch();

  const [loading, setLoading] = useState(false);

  const [fieldNames, setFieldNames] = useState<AutocompleteOptions[]>([]);
  const [fieldValues, setFieldValues] = useState<AutocompleteOptions[]>([]);

  // Single shared controller: each new fetch aborts the previous request.
  const abortControllerRef = useRef(new AbortController());

  // Builds query params for the autocomplete endpoints. The selected time
  // range is widened to whole days, so the cache key stays stable while the
  // user moves within the same day.
  const getQueryParams = useCallback((params?: Record<string, string>) => {
    const startDay = dayjs(start * 1000).startOf("day").valueOf() / 1000;
    const endDay = dayjs(end * 1000).endOf("day").valueOf() / 1000;

    return new URLSearchParams({
      ...(params || {}),
      limit: `${AUTOCOMPLETE_LIMITS.queryLimit}`,
      start: `${startDay}`,
      end: `${endDay}`
    });
  }, [start, end]);

  // Converts raw API values into autocomplete options with type + icon.
  const processData = (values: LogsFiledValues[], type: ContextType): AutocompleteOptions[] => {
    return values.map(v => ({
      value: v.value,
      type: `${type}`,
      icon: icons[type]
    }));
  };

  // Fetches options for one endpoint, serving from the cache when possible.
  // Aborted requests are ignored silently; other failures cache an empty
  // result so the same failing request is not retried on every keystroke.
  // NOTE(review): a non-2xx response leaves the result setter untouched —
  // confirm this is intended.
  const fetchData = async ({ urlSuffix, setter, type, params }: FetchDataArgs) => {
    abortControllerRef.current.abort();
    abortControllerRef.current = new AbortController();
    const { signal } = abortControllerRef.current;
    const key = `${urlSuffix}?${params?.toString()}`;
    setLoading(true);
    try {
      const cachedData = autocompleteCache.get(key);
      if (cachedData) {
        setter(processData(cachedData, type));
        setLoading(false);
        return;
      }
      const response = await fetch(`${serverUrl}/select/logsql/${urlSuffix}?${params}`, { signal });
      if (response.ok) {
        const data = await response.json();
        const value = (data?.values || []) as LogsFiledValues[];
        setter(value ? processData(value, type) : []);
        dispatch({ type: "SET_AUTOCOMPLETE_CACHE", payload: { key, value } });
      }
      setLoading(false);
    } catch (e) {
      // AbortError is expected when a newer request supersedes this one;
      // only genuine failures are logged and cached as empty.
      if (e instanceof Error && e.name !== "AbortError") {
        dispatch({ type: "SET_AUTOCOMPLETE_CACHE", payload: { key, value: [] } });
        setLoading(false);
        console.error(e);
      }
    }
  };

  // fetch field names — runs when the cursor is in a filter-name context.
  useEffect(() => {
    const validContexts = [ContextType.FilterName, ContextType.FilterUnknown];
    const isInvalidContext = !validContexts.includes(contextData?.contextType || ContextType.Unknown);
    if (!serverUrl || isInvalidContext) {
      return;
    }

    setFieldNames([]);

    fetchData({
      urlSuffix: "field_names",
      setter: setFieldNames,
      type: ContextType.FilterName,
      params: getQueryParams({ query: "*" })
    });

    return () => abortControllerRef.current?.abort();
  }, [serverUrl, contextData]);

  // fetch field values — runs when the cursor is in a filter-value context
  // and the filter name under the cursor is known.
  useEffect(() => {
    const isInvalidContext = contextData?.contextType !== ContextType.FilterValue;
    if (!serverUrl || isInvalidContext || !contextData?.filterName) {
      return;
    }

    setFieldValues([]);

    fetchData({
      urlSuffix: "field_values",
      setter: setFieldValues,
      type: ContextType.FilterValue,
      params: getQueryParams({ query: "*", field: contextData.filterName })
    });

    return () => abortControllerRef.current?.abort();
  }, [serverUrl, contextData]);

  return {
    fieldNames,
    fieldValues,
    loading,
  };
};
|
||||
@@ -2,7 +2,6 @@ import React, { FC, useEffect, useRef, useState } from "preact/compat";
|
||||
import { KeyboardEvent } from "react";
|
||||
import { ErrorTypes } from "../../../types";
|
||||
import TextField from "../../Main/TextField/TextField";
|
||||
import QueryEditorAutocomplete from "./QueryEditorAutocomplete";
|
||||
import "./style.scss";
|
||||
import { QueryStats } from "../../../api/types";
|
||||
import { partialWarning, seriesFetchedWarning } from "./warningText";
|
||||
@@ -11,6 +10,16 @@ import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
||||
import { useQueryState } from "../../../state/query/QueryStateContext";
|
||||
import debounce from "lodash.debounce";
|
||||
|
||||
export interface QueryEditorAutocompleteProps {
|
||||
value: string;
|
||||
anchorEl: React.RefObject<HTMLInputElement>;
|
||||
caretPosition: [number, number]; // [start, end]
|
||||
hasHelperText: boolean;
|
||||
includeFunctions: boolean;
|
||||
onSelect: (val: string, caretPosition: number) => void;
|
||||
onFoundOptions: (val: AutocompleteOptions[]) => void;
|
||||
}
|
||||
|
||||
export interface QueryEditorProps {
|
||||
onChange: (query: string) => void;
|
||||
onEnter: () => void;
|
||||
@@ -19,6 +28,7 @@ export interface QueryEditorProps {
|
||||
value: string;
|
||||
oneLiner?: boolean;
|
||||
autocomplete: boolean;
|
||||
autocompleteEl?: FC<QueryEditorAutocompleteProps>;
|
||||
error?: ErrorTypes | string;
|
||||
stats?: QueryStats;
|
||||
label: string;
|
||||
@@ -33,6 +43,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
onArrowUp,
|
||||
onArrowDown,
|
||||
autocomplete,
|
||||
autocompleteEl: AutocompleteEl,
|
||||
error,
|
||||
stats,
|
||||
label,
|
||||
@@ -47,7 +58,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
const [caretPositionInput, setCaretPositionInput] = useState<[number, number]>([0, 0]);
|
||||
const autocompleteAnchorEl = useRef<HTMLInputElement>(null);
|
||||
|
||||
const [showAutocomplete, setShowAutocomplete] = useState(autocomplete);
|
||||
const [showAutocomplete, setShowAutocomplete] = useState(!!AutocompleteEl);
|
||||
const debouncedSetShowAutocomplete = useRef(debounce(setShowAutocomplete, 500)).current;
|
||||
|
||||
const warning = [
|
||||
@@ -113,7 +124,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setOpenAutocomplete(autocomplete);
|
||||
setOpenAutocomplete(!!AutocompleteEl);
|
||||
}, [autocompleteQuick]);
|
||||
|
||||
useEffect(() => {
|
||||
@@ -140,8 +151,8 @@ const QueryEditor: FC<QueryEditorProps> = ({
|
||||
inputmode={"search"}
|
||||
caretPosition={caretPositionInput}
|
||||
/>
|
||||
{showAutocomplete && autocomplete && (
|
||||
<QueryEditorAutocomplete
|
||||
{showAutocomplete && autocomplete && AutocompleteEl && (
|
||||
<AutocompleteEl
|
||||
value={value}
|
||||
anchorEl={autocompleteAnchorEl}
|
||||
caretPosition={caretPositionAutocomplete}
|
||||
|
||||
@@ -1,20 +1,11 @@
|
||||
import React, { FC, useState, useEffect, useMemo, useCallback } from "preact/compat";
|
||||
import Autocomplete, { AutocompleteOptions } from "../../Main/Autocomplete/Autocomplete";
|
||||
import Autocomplete from "../../Main/Autocomplete/Autocomplete";
|
||||
import { useFetchQueryOptions } from "../../../hooks/useFetchQueryOptions";
|
||||
import { escapeRegexp, hasUnclosedQuotes } from "../../../utils/regexp";
|
||||
import useGetMetricsQL from "../../../hooks/useGetMetricsQL";
|
||||
import { QueryContextType } from "../../../types";
|
||||
import { AUTOCOMPLETE_LIMITS } from "../../../constants/queryAutocomplete";
|
||||
|
||||
interface QueryEditorAutocompleteProps {
|
||||
value: string;
|
||||
anchorEl: React.RefObject<HTMLElement>;
|
||||
caretPosition: [number, number]; // [start, end]
|
||||
hasHelperText: boolean;
|
||||
includeFunctions: boolean;
|
||||
onSelect: (val: string, caretPosition: number) => void;
|
||||
onFoundOptions: (val: AutocompleteOptions[]) => void;
|
||||
}
|
||||
import { QueryEditorAutocompleteProps } from "./QueryEditor";
|
||||
|
||||
const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
|
||||
value,
|
||||
|
||||
@@ -28,7 +28,7 @@ interface AutocompleteProps {
|
||||
offset?: {top: number, left: number}
|
||||
maxDisplayResults?: {limit: number, message?: string}
|
||||
loading?: boolean;
|
||||
onSelect: (val: string) => void
|
||||
onSelect: (val: string, item: AutocompleteOptions) => void
|
||||
onOpenAutocomplete?: (val: boolean) => void
|
||||
onFoundOptions?: (val: AutocompleteOptions[]) => void
|
||||
onChangeWrapperRef?: (elementRef: React.RefObject<HTMLElement>) => void
|
||||
@@ -97,9 +97,9 @@ const Autocomplete: FC<AutocompleteProps> = ({
|
||||
return noOptionsText && !foundOptions.length;
|
||||
}, [noOptionsText,foundOptions]);
|
||||
|
||||
const createHandlerSelect = (item: string) => () => {
|
||||
const createHandlerSelect = (item: AutocompleteOptions) => () => {
|
||||
if (disabled) return;
|
||||
onSelect(item);
|
||||
onSelect(item.value, item);
|
||||
if (!selected) handleCloseAutocomplete();
|
||||
};
|
||||
|
||||
@@ -141,7 +141,7 @@ const Autocomplete: FC<AutocompleteProps> = ({
|
||||
|
||||
if (key === "Enter") {
|
||||
const item = foundOptions[focusOption.index];
|
||||
item && onSelect(item.value);
|
||||
item && onSelect(item.value, item);
|
||||
if (!selected) handleCloseAutocomplete();
|
||||
}
|
||||
|
||||
@@ -206,7 +206,7 @@ const Autocomplete: FC<AutocompleteProps> = ({
|
||||
})}
|
||||
id={`$autocomplete$${option.value}`}
|
||||
key={`${i}${option.value}`}
|
||||
onClick={createHandlerSelect(option.value)}
|
||||
onClick={createHandlerSelect(option)}
|
||||
onMouseEnter={createHandlerMouseEnter(i)}
|
||||
onMouseLeave={handlerMouseLeave}
|
||||
>
|
||||
|
||||
@@ -25,6 +25,7 @@ import { QueryStats } from "../../../api/types";
|
||||
import { usePrettifyQuery } from "./hooks/usePrettifyQuery";
|
||||
import QueryHistory from "../QueryHistory/QueryHistory";
|
||||
import AnomalyConfig from "../../../components/ExploreAnomaly/AnomalyConfig";
|
||||
import QueryEditorAutocomplete from "../../../components/Configurators/QueryEditor/QueryEditorAutocomplete";
|
||||
|
||||
export interface QueryConfiguratorProps {
|
||||
queryErrors: string[];
|
||||
@@ -216,6 +217,7 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
|
||||
<QueryEditor
|
||||
value={stateQuery[i]}
|
||||
autocomplete={!hideButtons?.autocomplete && (autocomplete || autocompleteQuick)}
|
||||
autocompleteEl={QueryEditorAutocomplete}
|
||||
error={queryErrors[i]}
|
||||
stats={stats[i]}
|
||||
onArrowUp={createHandlerArrow(-1, i)}
|
||||
|
||||
@@ -6,6 +6,9 @@ import useDeviceDetect from "../../../hooks/useDeviceDetect";
|
||||
import Button from "../../../components/Main/Button/Button";
|
||||
import QueryEditor from "../../../components/Configurators/QueryEditor/QueryEditor";
|
||||
import TextField from "../../../components/Main/TextField/TextField";
|
||||
import LogsQueryEditorAutocomplete from "../../../components/Configurators/QueryEditor/LogsQL/LogsQueryEditorAutocomplete";
|
||||
import { useQueryDispatch, useQueryState } from "../../../state/query/QueryStateContext";
|
||||
import Switch from "../../../components/Main/Switch/Switch";
|
||||
|
||||
export interface ExploreLogHeaderProps {
|
||||
query: string;
|
||||
@@ -27,6 +30,8 @@ const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({
|
||||
onRun,
|
||||
}) => {
|
||||
const { isMobile } = useDeviceDetect();
|
||||
const { autocomplete } = useQueryState();
|
||||
const queryDispatch = useQueryDispatch();
|
||||
|
||||
const [errorLimit, setErrorLimit] = useState("");
|
||||
const [limitInput, setLimitInput] = useState(limit);
|
||||
@@ -42,6 +47,10 @@ const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({
|
||||
}
|
||||
};
|
||||
|
||||
const onChangeAutocomplete = () => {
|
||||
queryDispatch({ type: "TOGGLE_AUTOCOMPLETE" });
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setLimitInput(limit);
|
||||
}, [limit]);
|
||||
@@ -57,7 +66,8 @@ const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({
|
||||
<div className="vm-explore-logs-header-top">
|
||||
<QueryEditor
|
||||
value={query}
|
||||
autocomplete={false}
|
||||
autocomplete={autocomplete}
|
||||
autocompleteEl={LogsQueryEditorAutocomplete}
|
||||
onArrowUp={() => null}
|
||||
onArrowDown={() => null}
|
||||
onEnter={onRun}
|
||||
@@ -75,7 +85,14 @@ const ExploreLogsHeader: FC<ExploreLogHeaderProps> = ({
|
||||
/>
|
||||
</div>
|
||||
<div className="vm-explore-logs-header-bottom">
|
||||
<div className="vm-explore-logs-header-bottom-contols"></div>
|
||||
<div className="vm-explore-logs-header-bottom-contols">
|
||||
<Switch
|
||||
label={"Autocomplete"}
|
||||
value={autocomplete}
|
||||
onChange={onChangeAutocomplete}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
</div>
|
||||
<div className="vm-explore-logs-header-bottom-helpful">
|
||||
<a
|
||||
className="vm-link vm-link_with-icon"
|
||||
|
||||
@@ -26,6 +26,9 @@
|
||||
}
|
||||
|
||||
&-contols {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: flex-start;
|
||||
flex-grow: 1;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,15 +1,20 @@
|
||||
import { getFromStorage, saveToStorage } from "../../utils/storage";
|
||||
import { LogsFiledValues } from "../../api/types";
|
||||
import { AUTOCOMPLETE_LIMITS } from "../../constants/queryAutocomplete";
|
||||
|
||||
export interface LogsState {
|
||||
markdownParsing: boolean;
|
||||
autocompleteCache: Map<string, LogsFiledValues[]>;
|
||||
}
|
||||
|
||||
export type LogsAction =
|
||||
| { type: "SET_MARKDOWN_PARSING", payload: boolean }
|
||||
| { type: "SET_AUTOCOMPLETE_CACHE", payload: { key: string, value: LogsFiledValues[] } }
|
||||
|
||||
|
||||
export const initialLogsState: LogsState = {
|
||||
markdownParsing: getFromStorage("LOGS_MARKDOWN") === "true",
|
||||
autocompleteCache: new Map<string, LogsFiledValues[]>(),
|
||||
};
|
||||
|
||||
export function reducer(state: LogsState, action: LogsAction): LogsState {
|
||||
@@ -20,6 +25,18 @@ export function reducer(state: LogsState, action: LogsAction): LogsState {
|
||||
...state,
|
||||
markdownParsing: action.payload
|
||||
};
|
||||
case "SET_AUTOCOMPLETE_CACHE": {
|
||||
if (state.autocompleteCache.size >= AUTOCOMPLETE_LIMITS.cacheLimit) {
|
||||
const firstKey = state.autocompleteCache.keys().next().value;
|
||||
state.autocompleteCache.delete(firstKey);
|
||||
}
|
||||
state.autocompleteCache.set(action.payload.key, action.payload.value);
|
||||
|
||||
return {
|
||||
...state,
|
||||
autocompleteCache: state.autocompleteCache,
|
||||
};
|
||||
}
|
||||
default:
|
||||
throw new Error();
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -103,6 +104,25 @@ func NewPrometheusAPIV1QueryResponse(t *testing.T, s string) *PrometheusAPIV1Que
|
||||
return res
|
||||
}
|
||||
|
||||
// Sort performs data.Result sort by metric labels
|
||||
func (pqr *PrometheusAPIV1QueryResponse) Sort() {
|
||||
sort.Slice(pqr.Data.Result, func(i, j int) bool {
|
||||
leftS := make([]string, 0, len(pqr.Data.Result[i].Metric))
|
||||
rightS := make([]string, 0, len(pqr.Data.Result[j].Metric))
|
||||
for k, v := range pqr.Data.Result[i].Metric {
|
||||
leftS = append(leftS, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
for k, v := range pqr.Data.Result[j].Metric {
|
||||
rightS = append(rightS, fmt.Sprintf("%s=%s", k, v))
|
||||
|
||||
}
|
||||
sort.Strings(leftS)
|
||||
sort.Strings(rightS)
|
||||
return strings.Join(leftS, ",") < strings.Join(rightS, ",")
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// QueryData holds the query result along with its type.
|
||||
type QueryData struct {
|
||||
ResultType string
|
||||
|
||||
318
apptest/tests/ingestprotocols_test.go
Normal file
318
apptest/tests/ingestprotocols_test.go
Normal file
@@ -0,0 +1,318 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
|
||||
at "github.com/VictoriaMetrics/VictoriaMetrics/apptest"
|
||||
pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
||||
)
|
||||
|
||||
// TestSingleIngestionProtocols verifies that a vmsingle instance accepts data
// via several ingestion protocols (Influx line format, Prometheus text
// exposition format, Prometheus remote write) and that the ingested samples
// can be read back via the /export API.
func TestSingleIngestionProtocols(t *testing.T) {
	os.RemoveAll(t.Name())
	tc := at.NewTestCase(t)
	defer tc.Stop()
	sut := tc.MustStartDefaultVmsingle()
	// opts describes one expected /export result: wantMetrics[idx] is the
	// label set whose single sample is wantSamples[idx].
	type opts struct {
		query       string
		wantMetrics []map[string]string
		wantSamples []*at.Sample
	}
	// f asserts that /export for opts.query returns exactly the expected series.
	f := func(sut at.PrometheusQuerier, opts *opts) {
		t.Helper()
		wantResult := []*at.QueryResult{}
		for idx, wm := range opts.wantMetrics {
			wantResult = append(wantResult, &at.QueryResult{
				Metric:  wm,
				Samples: []*at.Sample{opts.wantSamples[idx]},
			})

		}
		tc.Assert(&at.AssertOptions{
			Msg: "unexpected /export query response",
			Got: func() any {
				got := sut.PrometheusAPIV1Export(t, opts.query, at.QueryOpts{
					Start: "2024-02-05T08:50:00.700Z",
					End:   "2024-02-05T09:00:00.700Z",
				})
				// Sort for a deterministic comparison against wantResult.
				got.Sort()
				return got
			},
			Want: &at.PrometheusAPIV1QueryResponse{Data: &at.QueryData{Result: wantResult}},
			CmpOpts: []cmp.Option{
				cmpopts.IgnoreFields(at.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType"),
			},
		})
	}

	// influx line format
	sut.InfluxWrite(t, []string{
		`influxline series1=10 1707123456700`, // 2024-02-05T08:57:36.700Z
		`influxline,label=foo1,label1=value1,label2=value2 series2=40 1707123456800`, // 2024-02-05T08:57:36.800Z
	}, at.QueryOpts{})
	sut.ForceFlush(t)
	f(sut, &opts{
		query: `{__name__=~"influxline.+"}`,
		wantMetrics: []map[string]string{
			{
				"__name__": "influxline_series1",
			},
			{
				"__name__": "influxline_series2",
				"label":    "foo1",
				"label1":   "value1",
				"label2":   "value2",
			},
		},
		wantSamples: []*at.Sample{
			{Timestamp: 1707123456700, Value: 10},
			{Timestamp: 1707123456800, Value: 40},
		},
	})

	// prometheus text exposition format
	sut.PrometheusAPIV1ImportPrometheus(t, []string{
		`importprometheus_series 10 1707123456700`, // 2024-02-05T08:57:36.700Z
		`importprometheus_series2{label="foo",label1="value1"} 20 1707123456800`, // 2024-02-05T08:57:36.800Z
	}, at.QueryOpts{})
	sut.ForceFlush(t)
	f(sut, &opts{
		query: `{__name__=~"importprometheus.+"}`,
		wantMetrics: []map[string]string{
			{
				"__name__": "importprometheus_series",
			},
			{
				"__name__": "importprometheus_series2",
				"label":    "foo",
				"label1":   "value1",
			},
		},
		wantSamples: []*at.Sample{
			{Timestamp: 1707123456700, Value: 10},
			{Timestamp: 1707123456800, Value: 20},
		},
	})

	// prometheus remote write format
	pbData := []pb.TimeSeries{
		{
			Labels: []pb.Label{
				{
					Name:  "__name__",
					Value: "prometheusrw_series",
				},
			},
			Samples: []pb.Sample{
				{
					Value:     10,
					Timestamp: 1707123456700, // 2024-02-05T08:57:36.700Z

				},
			},
		},
		{
			Labels: []pb.Label{
				{
					Name:  "__name__",
					Value: "prometheusrw_series2",
				},
				{
					Name:  "label",
					Value: "foo2",
				},
				{
					Name:  "label1",
					Value: "value1",
				},
			},
			Samples: []pb.Sample{
				{
					Value:     20,
					Timestamp: 1707123456800, // 2024-02-05T08:57:36.800Z
				},
			},
		},
	}
	sut.PrometheusAPIV1Write(t, pbData, at.QueryOpts{})
	sut.ForceFlush(t)
	f(sut, &opts{
		query: `{__name__=~"prometheusrw.+"}`,
		wantMetrics: []map[string]string{
			{
				"__name__": "prometheusrw_series",
			},
			{
				"__name__": "prometheusrw_series2",
				"label":    "foo2",
				"label1":   "value1",
			},
		},
		wantSamples: []*at.Sample{
			{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
			{Timestamp: 1707123456800, Value: 20}, // 2024-02-05T08:57:36.700Z
		},
	})

}
|
||||
|
||||
func TestClusterIngestionProtocols(t *testing.T) {
|
||||
os.RemoveAll(t.Name())
|
||||
tc := at.NewTestCase(t)
|
||||
defer tc.Stop()
|
||||
vmstorage := tc.MustStartVmstorage("vmstorage", []string{
|
||||
"-storageDataPath=" + tc.Dir() + "/vmstorage",
|
||||
"-retentionPeriod=100y",
|
||||
})
|
||||
vminsert := tc.MustStartVminsert("vminsert", []string{
|
||||
"-storageNode=" + vmstorage.VminsertAddr(),
|
||||
})
|
||||
vmselect := tc.MustStartVmselect("vmselect", []string{
|
||||
"-storageNode=" + vmstorage.VmselectAddr(),
|
||||
})
|
||||
|
||||
type opts struct {
|
||||
query string
|
||||
wantMetrics []map[string]string
|
||||
wantSamples []*at.Sample
|
||||
}
|
||||
f := func(opts *opts) {
|
||||
t.Helper()
|
||||
wantResult := []*at.QueryResult{}
|
||||
for idx, wm := range opts.wantMetrics {
|
||||
wantResult = append(wantResult, &at.QueryResult{
|
||||
Metric: wm,
|
||||
Samples: []*at.Sample{opts.wantSamples[idx]},
|
||||
})
|
||||
|
||||
}
|
||||
tc.Assert(&at.AssertOptions{
|
||||
Msg: "unexpected /export query response",
|
||||
Got: func() any {
|
||||
got := vmselect.PrometheusAPIV1Export(t, opts.query, at.QueryOpts{
|
||||
Start: "2024-02-05T08:50:00.700Z",
|
||||
End: "2024-02-05T09:00:00.700Z",
|
||||
})
|
||||
got.Sort()
|
||||
return got
|
||||
},
|
||||
Want: &at.PrometheusAPIV1QueryResponse{Data: &at.QueryData{Result: wantResult}},
|
||||
CmpOpts: []cmp.Option{
|
||||
cmpopts.IgnoreFields(at.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType"),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// prometheus text exposition format
|
||||
vminsert.PrometheusAPIV1ImportPrometheus(t, []string{
|
||||
`importprometheus_series 10 1707123456700`, // 2024-02-05T08:57:36.700Z
|
||||
`importprometheus_series2{label="foo",label1="value1"} 20 1707123456800`, // 2024-02-05T08:57:36.800Z
|
||||
}, at.QueryOpts{})
|
||||
vmstorage.ForceFlush(t)
|
||||
f(&opts{
|
||||
query: `{__name__=~"importprometheus.+"}`,
|
||||
wantMetrics: []map[string]string{
|
||||
{
|
||||
"__name__": "importprometheus_series",
|
||||
},
|
||||
{
|
||||
"__name__": "importprometheus_series2",
|
||||
"label": "foo",
|
||||
"label1": "value1",
|
||||
},
|
||||
},
|
||||
wantSamples: []*at.Sample{
|
||||
{Timestamp: 1707123456700, Value: 10},
|
||||
{Timestamp: 1707123456800, Value: 20},
|
||||
},
|
||||
})
|
||||
|
||||
// influx line format
|
||||
vminsert.InfluxWrite(t, []string{
|
||||
`influxline series1=10 1707123456700`, // 2024-02-05T08:57:36.700Z
|
||||
`influxline,label=foo1,label1=value1,label2=value2 series2=40 1707123456800`, // 2024-02-05T08:57:36.800Z
|
||||
}, at.QueryOpts{})
|
||||
vmstorage.ForceFlush(t)
|
||||
f(&opts{
|
||||
query: `{__name__=~"influxline.+"}`,
|
||||
wantMetrics: []map[string]string{
|
||||
{
|
||||
"__name__": "influxline_series1",
|
||||
},
|
||||
{
|
||||
"__name__": "influxline_series2",
|
||||
"label": "foo1",
|
||||
"label1": "value1",
|
||||
"label2": "value2",
|
||||
},
|
||||
},
|
||||
wantSamples: []*at.Sample{
|
||||
{Timestamp: 1707123456700, Value: 10},
|
||||
{Timestamp: 1707123456800, Value: 40},
|
||||
},
|
||||
})
|
||||
|
||||
// prometheus remote write format
|
||||
pbData := []pb.TimeSeries{
|
||||
{
|
||||
Labels: []pb.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "prometheusrw_series",
|
||||
},
|
||||
},
|
||||
Samples: []pb.Sample{
|
||||
{
|
||||
Value: 10,
|
||||
Timestamp: 1707123456700, // 2024-02-05T08:57:36.700Z
|
||||
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Labels: []pb.Label{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "prometheusrw_series2",
|
||||
},
|
||||
{
|
||||
Name: "label",
|
||||
Value: "foo2",
|
||||
},
|
||||
{
|
||||
Name: "label1",
|
||||
Value: "value1",
|
||||
},
|
||||
},
|
||||
Samples: []pb.Sample{
|
||||
{
|
||||
Value: 20,
|
||||
Timestamp: 1707123456800, // 2024-02-05T08:57:36.800Z
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
vminsert.PrometheusAPIV1Write(t, pbData, at.QueryOpts{})
|
||||
vmstorage.ForceFlush(t)
|
||||
f(&opts{
|
||||
query: `{__name__=~"prometheusrw.+"}`,
|
||||
wantMetrics: []map[string]string{
|
||||
{
|
||||
"__name__": "prometheusrw_series",
|
||||
},
|
||||
{
|
||||
"__name__": "prometheusrw_series2",
|
||||
"label": "foo2",
|
||||
"label1": "value1",
|
||||
},
|
||||
},
|
||||
wantSamples: []*at.Sample{
|
||||
{Timestamp: 1707123456700, Value: 10}, // 2024-02-05T08:57:36.700Z
|
||||
{Timestamp: 1707123456800, Value: 20}, // 2024-02-05T08:57:36.700Z
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
@@ -50,7 +50,7 @@ func TestClusterKeyConceptsQueryData(t *testing.T) {
|
||||
testKeyConceptsQueryData(t, sut)
|
||||
}
|
||||
|
||||
// testClusterKeyConceptsQuery verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
|
||||
// testKeyConceptsQueryData verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
|
||||
func testKeyConceptsQueryData(t *testing.T, sut at.PrometheusWriteQuerier) {
|
||||
|
||||
// Insert example data from documentation.
|
||||
|
||||
@@ -78,6 +78,19 @@ func (app *Vminsert) ClusternativeListenAddr() string {
|
||||
return app.clusternativeListenAddr
|
||||
}
|
||||
|
||||
// InfluxWrite is a test helper function that inserts a
|
||||
// collection of records in Influx line format by sending a HTTP
|
||||
// POST request to /influx/write vmsingle endpoint.
|
||||
//
|
||||
// See https://docs.victoriametrics.com/url-examples/#influxwrite
|
||||
func (app *Vminsert) InfluxWrite(t *testing.T, records []string, opts QueryOpts) {
|
||||
t.Helper()
|
||||
|
||||
url := fmt.Sprintf("http://%s/insert/%s/influx/write", app.httpListenAddr, opts.getTenant())
|
||||
data := []byte(strings.Join(records, "\n"))
|
||||
app.cli.Post(t, url, "text/plain", data, http.StatusNoContent)
|
||||
}
|
||||
|
||||
// PrometheusAPIV1Write is a test helper function that inserts a
|
||||
// collection of records in Prometheus remote-write format by sending a HTTP
|
||||
// POST request to /prometheus/api/v1/write vminsert endpoint.
|
||||
|
||||
@@ -1,12 +1,7 @@
|
||||
FROM node:lts-alpine3.20
|
||||
|
||||
ENV PATH="/home/node/node_modules/.bin:$PATH"
|
||||
|
||||
WORKDIR /home/node
|
||||
|
||||
COPY package.json .
|
||||
COPY package-lock.json .
|
||||
|
||||
RUN npm ci
|
||||
|
||||
WORKDIR /victoriametrics
|
||||
WORKDIR /opt/node
|
||||
COPY yarn.lock package.json .
|
||||
RUN yarn install
|
||||
WORKDIR /vm
|
||||
ENTRYPOINT ["/opt/node/node_modules/.bin/cspell"]
|
||||
|
||||
@@ -2,27 +2,27 @@
|
||||
|
||||
# Builds cspell image.
|
||||
cspell-install:
|
||||
@ (docker inspect cspell > /dev/null) || (docker build cspell --tag cspell)
|
||||
@docker build cspell -t cspell
|
||||
|
||||
# Checks for spelling errors.
|
||||
cspell-check: cspell-install
|
||||
@CMD="cspell --no-progress" $(MAKE) cspell-run-command
|
||||
cspell-check: CMD="--no-progress -r /vm"
|
||||
cspell-check: cspell-install cspell-run
|
||||
|
||||
# Runs spelling error check.
|
||||
# A user facing alias to cspell-check command.
|
||||
spellcheck: cspell-check
|
||||
|
||||
# Runs cspell container commands.
|
||||
cspell-run-command:
|
||||
@cp cspell/cspell.json cspell.json
|
||||
cspell-run:
|
||||
@-docker run \
|
||||
--entrypoint /bin/sh \
|
||||
--mount type=bind,src=".",dst=/victoriametrics \
|
||||
--mount type=bind,src="$(PWD)",dst=/vm \
|
||||
--rm \
|
||||
--tty \
|
||||
cspell -c "$(CMD)"
|
||||
@rm cspell.json
|
||||
cspell -c cspell/cspell.json "$(CMD)"
|
||||
|
||||
cspell-update-deps: cspell-install
|
||||
@CMD="cd /victoriametrics/cspell && npm update && rm -rf ./node_modules" $(MAKE) cspell-run-command
|
||||
|
||||
@-docker run \
|
||||
--mount type=bind,src="$(PWD)",dst=/vm \
|
||||
--entrypoint=/bin/sh \
|
||||
--workdir=/vm/cspell \
|
||||
--rm \
|
||||
cspell -c "yarn install && yarn upgrade && rm -rf ./node_modules"
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
{
|
||||
"addWords": true,
|
||||
"name": "custom-dict",
|
||||
"path": "cspell/custom-dict.txt"
|
||||
"path": "custom-dict.txt"
|
||||
}
|
||||
],
|
||||
"dictionaries": [
|
||||
@@ -50,18 +50,18 @@
|
||||
"/vm[a-zA-Z0-9-_]+/i"
|
||||
],
|
||||
"import": [
|
||||
"/home/node/node_modules/@cspell/dict-aws/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-companies/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-data-science/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-en_us/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-fullstack/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-golang/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-k8s/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-people-names/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-ru_ru/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-software-terms/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-uk-ua/cspell-ext.json",
|
||||
"/home/node/node_modules/@cspell/dict-win32/cspell-ext.json"
|
||||
"@cspell/dict-aws/cspell-ext.json",
|
||||
"@cspell/dict-companies/cspell-ext.json",
|
||||
"@cspell/dict-data-science/cspell-ext.json",
|
||||
"@cspell/dict-en_us/cspell-ext.json",
|
||||
"@cspell/dict-fullstack/cspell-ext.json",
|
||||
"@cspell/dict-golang/cspell-ext.json",
|
||||
"@cspell/dict-k8s/cspell-ext.json",
|
||||
"@cspell/dict-people-names/cspell-ext.json",
|
||||
"@cspell/dict-ru_ru/cspell-ext.json",
|
||||
"@cspell/dict-software-terms/cspell-ext.json",
|
||||
"@cspell/dict-uk-ua/cspell-ext.json",
|
||||
"@cspell/dict-win32/cspell-ext.json"
|
||||
],
|
||||
"useGitignore": true
|
||||
}
|
||||
|
||||
@@ -145,3 +145,5 @@ SSZ
|
||||
DDZ
|
||||
DOKS
|
||||
iforest
|
||||
deltatocumulative
|
||||
TLSCA
|
||||
|
||||
1445
cspell/package-lock.json
generated
1445
cspell/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -11,6 +11,6 @@
|
||||
"@cspell/dict-software-terms": "^4.1.3",
|
||||
"@cspell/dict-uk-ua": "^4.0.1",
|
||||
"@cspell/dict-win32": "^2.0.3",
|
||||
"cspell": "^8.14.2"
|
||||
"cspell": "^8.17.1"
|
||||
}
|
||||
}
|
||||
|
||||
793
cspell/yarn.lock
Normal file
793
cspell/yarn.lock
Normal file
@@ -0,0 +1,793 @@
|
||||
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||
# yarn lockfile v1
|
||||
|
||||
|
||||
"@cspell/cspell-bundled-dicts@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/cspell-bundled-dicts/-/cspell-bundled-dicts-8.17.1.tgz#61adad73f1bb1e12b182ffa04423d6052b18f0fc"
|
||||
integrity sha512-HmkXS5uX4bk/XxsRS4Q+zRvhgRa81ddGiR2/Xfag9MIi5L5UnEJ4g21EpmIlXkMxYrTu2fp69SZFss5NfcFF9Q==
|
||||
dependencies:
|
||||
"@cspell/dict-ada" "^4.0.5"
|
||||
"@cspell/dict-al" "^1.0.3"
|
||||
"@cspell/dict-aws" "^4.0.7"
|
||||
"@cspell/dict-bash" "^4.1.8"
|
||||
"@cspell/dict-companies" "^3.1.8"
|
||||
"@cspell/dict-cpp" "^6.0.2"
|
||||
"@cspell/dict-cryptocurrencies" "^5.0.3"
|
||||
"@cspell/dict-csharp" "^4.0.5"
|
||||
"@cspell/dict-css" "^4.0.16"
|
||||
"@cspell/dict-dart" "^2.2.4"
|
||||
"@cspell/dict-django" "^4.1.3"
|
||||
"@cspell/dict-docker" "^1.1.11"
|
||||
"@cspell/dict-dotnet" "^5.0.8"
|
||||
"@cspell/dict-elixir" "^4.0.6"
|
||||
"@cspell/dict-en-common-misspellings" "^2.0.7"
|
||||
"@cspell/dict-en-gb" "1.1.33"
|
||||
"@cspell/dict-en_us" "^4.3.28"
|
||||
"@cspell/dict-filetypes" "^3.0.9"
|
||||
"@cspell/dict-flutter" "^1.0.3"
|
||||
"@cspell/dict-fonts" "^4.0.3"
|
||||
"@cspell/dict-fsharp" "^1.0.4"
|
||||
"@cspell/dict-fullstack" "^3.2.3"
|
||||
"@cspell/dict-gaming-terms" "^1.0.9"
|
||||
"@cspell/dict-git" "^3.0.3"
|
||||
"@cspell/dict-golang" "^6.0.17"
|
||||
"@cspell/dict-google" "^1.0.4"
|
||||
"@cspell/dict-haskell" "^4.0.4"
|
||||
"@cspell/dict-html" "^4.0.10"
|
||||
"@cspell/dict-html-symbol-entities" "^4.0.3"
|
||||
"@cspell/dict-java" "^5.0.10"
|
||||
"@cspell/dict-julia" "^1.0.4"
|
||||
"@cspell/dict-k8s" "^1.0.9"
|
||||
"@cspell/dict-latex" "^4.0.3"
|
||||
"@cspell/dict-lorem-ipsum" "^4.0.3"
|
||||
"@cspell/dict-lua" "^4.0.6"
|
||||
"@cspell/dict-makefile" "^1.0.3"
|
||||
"@cspell/dict-markdown" "^2.0.7"
|
||||
"@cspell/dict-monkeyc" "^1.0.9"
|
||||
"@cspell/dict-node" "^5.0.5"
|
||||
"@cspell/dict-npm" "^5.1.17"
|
||||
"@cspell/dict-php" "^4.0.13"
|
||||
"@cspell/dict-powershell" "^5.0.13"
|
||||
"@cspell/dict-public-licenses" "^2.0.11"
|
||||
"@cspell/dict-python" "^4.2.13"
|
||||
"@cspell/dict-r" "^2.0.4"
|
||||
"@cspell/dict-ruby" "^5.0.7"
|
||||
"@cspell/dict-rust" "^4.0.10"
|
||||
"@cspell/dict-scala" "^5.0.6"
|
||||
"@cspell/dict-software-terms" "^4.1.19"
|
||||
"@cspell/dict-sql" "^2.1.8"
|
||||
"@cspell/dict-svelte" "^1.0.5"
|
||||
"@cspell/dict-swift" "^2.0.4"
|
||||
"@cspell/dict-terraform" "^1.0.6"
|
||||
"@cspell/dict-typescript" "^3.1.11"
|
||||
"@cspell/dict-vue" "^3.0.3"
|
||||
|
||||
"@cspell/cspell-json-reporter@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/cspell-json-reporter/-/cspell-json-reporter-8.17.1.tgz#c1678665f183589e5fc19a1c0933b8d362165a43"
|
||||
integrity sha512-EV9Xkh42Xw3aORvDZfxusICX91DDbqQpYdGKBdPGuhgxWOUYYZKpLXsHCmDkhruMPo2m5gDh++/OqjLRPZofKQ==
|
||||
dependencies:
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
|
||||
"@cspell/cspell-pipe@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/cspell-pipe/-/cspell-pipe-8.17.1.tgz#c247d4bd1c8ec43c49c46dc4458f00489e98232b"
|
||||
integrity sha512-uhC99Ox+OH3COSgShv4fpVHiotR70dNvAOSkzRvKVRzV6IGyFnxHjmyVVPEV0dsqzVLxltwYTqFhwI+UOwm45A==
|
||||
|
||||
"@cspell/cspell-resolver@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/cspell-resolver/-/cspell-resolver-8.17.1.tgz#6377c9c8c05c940fee675c74e31f893b7b2f38ab"
|
||||
integrity sha512-XEK2ymTdQNgsV3ny60VkKzWskbICl4zNXh/DbxsoRXHqIRg43MXFpTNkEJ7j873EqdX7BU4opQQ+5D4stWWuhQ==
|
||||
dependencies:
|
||||
global-directory "^4.0.1"
|
||||
|
||||
"@cspell/cspell-service-bus@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/cspell-service-bus/-/cspell-service-bus-8.17.1.tgz#8d6d82ea3ab0fc9d7efed8523b070e4842780bd1"
|
||||
integrity sha512-2sFWQtMEWZ4tdz7bw0bAx4NaV1t0ynGfjpuKWdQppsJFKNb+ZPZZ6Ah1dC13AdRRMZaG194kDRFwzNvRaCgWkQ==
|
||||
|
||||
"@cspell/cspell-types@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/cspell-types/-/cspell-types-8.17.1.tgz#5512030b4c2e7881a8822ab3afabbd4f5ddffb6f"
|
||||
integrity sha512-NJbov7Jp57fh8addoxesjb8atg/APQfssCH5Q9uZuHBN06wEJDgs7fhfE48bU+RBViC9gltblsYZzZZQKzHYKg==
|
||||
|
||||
"@cspell/dict-ada@^4.0.5":
|
||||
version "4.0.5"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-ada/-/dict-ada-4.0.5.tgz#c14aae2faaecbad2d99f0d701e4700a48c68ef60"
|
||||
integrity sha512-6/RtZ/a+lhFVmrx/B7bfP7rzC4yjEYe8o74EybXcvu4Oue6J4Ey2WSYj96iuodloj1LWrkNCQyX5h4Pmcj0Iag==
|
||||
|
||||
"@cspell/dict-al@^1.0.3":
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-al/-/dict-al-1.0.3.tgz#09e288b5ab56b126dce895d3301faf7c0dd732d6"
|
||||
integrity sha512-V1HClwlfU/qwSq2Kt+MkqRAsonNu3mxjSCDyGRecdLGIHmh7yeEeaxqRiO/VZ4KP+eVSiSIlbwrb5YNFfxYZbw==
|
||||
|
||||
"@cspell/dict-aws@^4.0.4", "@cspell/dict-aws@^4.0.7":
|
||||
version "4.0.7"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-aws/-/dict-aws-4.0.7.tgz#f96f3b70cd52a25b895eb08e297de5a5cc3fc5b6"
|
||||
integrity sha512-PoaPpa2NXtSkhGIMIKhsJUXB6UbtTt6Ao3x9JdU9kn7fRZkwD4RjHDGqulucIOz7KeEX/dNRafap6oK9xHe4RA==
|
||||
|
||||
"@cspell/dict-bash@^4.1.8":
|
||||
version "4.1.8"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-bash/-/dict-bash-4.1.8.tgz#26dc898e06eddea069cf1ad475ee0e867c89e632"
|
||||
integrity sha512-I2CM2pTNthQwW069lKcrVxchJGMVQBzru2ygsHCwgidXRnJL/NTjAPOFTxN58Jc1bf7THWghfEDyKX/oyfc0yg==
|
||||
|
||||
"@cspell/dict-companies@^3.1.8":
|
||||
version "3.1.10"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-companies/-/dict-companies-3.1.10.tgz#1351959ba14a2c1f0678dd99069240748a724aa2"
|
||||
integrity sha512-KpRLiVDCpTkF+IjWnuYc31B0gyHVh0TSf/MDrWPobl9oYNQRWFUMACAJO9FP+kHI0jzLjTyLC1KpKwqte/88iA==
|
||||
|
||||
"@cspell/dict-cpp@^6.0.2":
|
||||
version "6.0.2"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-cpp/-/dict-cpp-6.0.2.tgz#e4549ee1bdf4b6402c0b978eb9dd3deac0eb05df"
|
||||
integrity sha512-yw5eejWvY4bAnc6LUA44m4WsFwlmgPt2uMSnO7QViGMBDuoeopMma4z9XYvs4lSjTi8fIJs/A1YDfM9AVzb8eg==
|
||||
|
||||
"@cspell/dict-cryptocurrencies@^5.0.3":
|
||||
version "5.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-cryptocurrencies/-/dict-cryptocurrencies-5.0.3.tgz#502f9fffcb2835a3379668ddebdc487678ce6207"
|
||||
integrity sha512-bl5q+Mk+T3xOZ12+FG37dB30GDxStza49Rmoax95n37MTLksk9wBo1ICOlPJ6PnDUSyeuv4SIVKgRKMKkJJglA==
|
||||
|
||||
"@cspell/dict-csharp@^4.0.5":
|
||||
version "4.0.5"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-csharp/-/dict-csharp-4.0.5.tgz#c677c50be09ca5bb3a2cc0be15f3cd05141fd2f7"
|
||||
integrity sha512-c/sFnNgtRwRJxtC3JHKkyOm+U3/sUrltFeNwml9VsxKBHVmvlg4tk4ar58PdpW9/zTlGUkWi2i85//DN1EsUCA==
|
||||
|
||||
"@cspell/dict-css@^4.0.16":
|
||||
version "4.0.16"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-css/-/dict-css-4.0.16.tgz#b7b87b5ea0f1157b023205bdb00070a7d231e367"
|
||||
integrity sha512-70qu7L9z/JR6QLyJPk38fNTKitlIHnfunx0wjpWQUQ8/jGADIhMCrz6hInBjqPNdtGpYm8d1dNFyF8taEkOgrQ==
|
||||
|
||||
"@cspell/dict-dart@^2.2.4":
|
||||
version "2.2.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-dart/-/dict-dart-2.2.4.tgz#8b877161ccdc65cead912b742b71aa55099c1706"
|
||||
integrity sha512-of/cVuUIZZK/+iqefGln8G3bVpfyN6ZtH+LyLkHMoR5tEj+2vtilGNk9ngwyR8L4lEqbKuzSkOxgfVjsXf5PsQ==
|
||||
|
||||
"@cspell/dict-data-science@^2.0.1", "@cspell/dict-data-science@^2.0.5":
|
||||
version "2.0.5"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-data-science/-/dict-data-science-2.0.5.tgz#816e9b394c2a423d14cdc9a5de5d6fc6141d3900"
|
||||
integrity sha512-nNSILXmhSJox9/QoXICPQgm8q5PbiSQP4afpbkBqPi/u/b3K9MbNH5HvOOa6230gxcGdbZ9Argl2hY/U8siBlg==
|
||||
|
||||
"@cspell/dict-django@^4.1.3":
|
||||
version "4.1.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-django/-/dict-django-4.1.3.tgz#a02a4a9ef8c9f47344f2d4a0c3964bcb62069ef5"
|
||||
integrity sha512-yBspeL3roJlO0a1vKKNaWABURuHdHZ9b1L8d3AukX0AsBy9snSggc8xCavPmSzNfeMDXbH+1lgQiYBd3IW03fg==
|
||||
|
||||
"@cspell/dict-docker@^1.1.11":
|
||||
version "1.1.11"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-docker/-/dict-docker-1.1.11.tgz#6fce86eb6d86d73f77e18d3e7b9747bad3ca98de"
|
||||
integrity sha512-s0Yhb16/R+UT1y727ekbR/itWQF3Qz275DR1ahOa66wYtPjHUXmhM3B/LT3aPaX+hD6AWmK23v57SuyfYHUjsw==
|
||||
|
||||
"@cspell/dict-dotnet@^5.0.8":
|
||||
version "5.0.8"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-dotnet/-/dict-dotnet-5.0.8.tgz#8a110ca302946025e0273a9940079483ec33a88a"
|
||||
integrity sha512-MD8CmMgMEdJAIPl2Py3iqrx3B708MbCIXAuOeZ0Mzzb8YmLmiisY7QEYSZPg08D7xuwARycP0Ki+bb0GAkFSqg==
|
||||
|
||||
"@cspell/dict-elixir@^4.0.6":
|
||||
version "4.0.6"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-elixir/-/dict-elixir-4.0.6.tgz#3d8965c558d8afd190356e9a900b02c546741feb"
|
||||
integrity sha512-TfqSTxMHZ2jhiqnXlVKM0bUADtCvwKQv2XZL/DI0rx3doG8mEMS8SGPOmiyyGkHpR/pGOq18AFH3BEm4lViHIw==
|
||||
|
||||
"@cspell/dict-en-common-misspellings@^2.0.7":
|
||||
version "2.0.7"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-en-common-misspellings/-/dict-en-common-misspellings-2.0.7.tgz#62861cc9e813c947ebd71c7a50fc720767b4b543"
|
||||
integrity sha512-qNFo3G4wyabcwnM+hDrMYKN9vNVg/k9QkhqSlSst6pULjdvPyPs1mqz1689xO/v9t8e6sR4IKc3CgUXDMTYOpA==
|
||||
|
||||
"@cspell/dict-en-gb@1.1.33":
|
||||
version "1.1.33"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-en-gb/-/dict-en-gb-1.1.33.tgz#7f1fd90fc364a5cb77111b5438fc9fcf9cc6da0e"
|
||||
integrity sha512-tKSSUf9BJEV+GJQAYGw5e+ouhEe2ZXE620S7BLKe3ZmpnjlNG9JqlnaBhkIMxKnNFkLY2BP/EARzw31AZnOv4g==
|
||||
|
||||
"@cspell/dict-en_us@^4.3.23", "@cspell/dict-en_us@^4.3.28":
|
||||
version "4.3.28"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-en_us/-/dict-en_us-4.3.28.tgz#41169e1ed18465e7ff367a4f4488d4cbc6cf0baa"
|
||||
integrity sha512-BN1PME7cOl7DXRQJ92pEd1f0Xk5sqjcDfThDGkKcsgwbSOY7KnTc/czBW6Pr3WXIchIm6cT12KEfjNqx7U7Rrw==
|
||||
|
||||
"@cspell/dict-filetypes@^3.0.9":
|
||||
version "3.0.9"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-filetypes/-/dict-filetypes-3.0.9.tgz#f4d5c35c341e6c3b77c08aec00678412641e1504"
|
||||
integrity sha512-U7ycC1cE32A5aEgwzp/iE0TVabonUFnVt+Ygbf6NsIWqEuFWZgZChC7gfztA4T1fpuj602nFdp7eOnTWKORsnQ==
|
||||
|
||||
"@cspell/dict-flutter@^1.0.3":
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-flutter/-/dict-flutter-1.0.3.tgz#23e552209ab2238733d30ca3f2a141359756af51"
|
||||
integrity sha512-52C9aUEU22ptpgYh6gQyIdA4MP6NPwzbEqndfgPh3Sra191/kgs7CVqXiO1qbtZa9gnYHUoVApkoxRE7mrXHfg==
|
||||
|
||||
"@cspell/dict-fonts@^4.0.3":
|
||||
version "4.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-fonts/-/dict-fonts-4.0.3.tgz#abf578c10a2e7b2bd8f4374002677625288560d9"
|
||||
integrity sha512-sPd17kV5qgYXLteuHFPn5mbp/oCHKgitNfsZLFC3W2fWEgZlhg4hK+UGig3KzrYhhvQ8wBnmZrAQm0TFKCKzsA==
|
||||
|
||||
"@cspell/dict-fsharp@^1.0.4":
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-fsharp/-/dict-fsharp-1.0.4.tgz#19a7263a61ca89cd3ec9c17537e424907b81ef38"
|
||||
integrity sha512-G5wk0o1qyHUNi9nVgdE1h5wl5ylq7pcBjX8vhjHcO4XBq20D5eMoXjwqMo/+szKAqzJ+WV3BgAL50akLKrT9Rw==
|
||||
|
||||
"@cspell/dict-fullstack@^3.2.0", "@cspell/dict-fullstack@^3.2.3":
|
||||
version "3.2.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-fullstack/-/dict-fullstack-3.2.3.tgz#f6fff74eff00c6759cba510168acada0619004cc"
|
||||
integrity sha512-62PbndIyQPH11mAv0PyiyT0vbwD0AXEocPpHlCHzfb5v9SspzCCbzQ/LIBiFmyRa+q5LMW35CnSVu6OXdT+LKg==
|
||||
|
||||
"@cspell/dict-gaming-terms@^1.0.9":
|
||||
version "1.0.9"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-gaming-terms/-/dict-gaming-terms-1.0.9.tgz#6b920386d281b89f70857e6dacea10ab89e88658"
|
||||
integrity sha512-AVIrZt3YiUnxsUzzGYTZ1XqgtkgwGEO0LWIlEf+SiDUEVLtv4CYmmyXFQ+WXDN0pyJ0wOwDazWrP0Cu7avYQmQ==
|
||||
|
||||
"@cspell/dict-git@^3.0.3":
|
||||
version "3.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-git/-/dict-git-3.0.3.tgz#3a3805ab9902bffc9255ec48f648145b957eb30b"
|
||||
integrity sha512-LSxB+psZ0qoj83GkyjeEH/ZViyVsGEF/A6BAo8Nqc0w0HjD2qX/QR4sfA6JHUgQ3Yi/ccxdK7xNIo67L2ScW5A==
|
||||
|
||||
"@cspell/dict-golang@^6.0.12", "@cspell/dict-golang@^6.0.17":
|
||||
version "6.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-golang/-/dict-golang-6.0.17.tgz#8f3c11189b869db7216cb4496514b9882d1e30a5"
|
||||
integrity sha512-uDDLEJ/cHdLiqPw4+5BnmIo2i/TSR+uDvYd6JlBjTmjBKpOCyvUgYRztH7nv5e7virsN5WDiUWah4/ATQGz4Pw==
|
||||
|
||||
"@cspell/dict-google@^1.0.4":
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-google/-/dict-google-1.0.4.tgz#e15a7ea2dee73800231a81840a59d3b50d49346f"
|
||||
integrity sha512-JThUT9eiguCja1mHHLwYESgxkhk17Gv7P3b1S7ZJzXw86QyVHPrbpVoMpozHk0C9o+Ym764B7gZGKmw9uMGduQ==
|
||||
|
||||
"@cspell/dict-haskell@^4.0.4":
|
||||
version "4.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-haskell/-/dict-haskell-4.0.4.tgz#37e9cb9a7f5be337a697bcffd0a0d25e80aab50d"
|
||||
integrity sha512-EwQsedEEnND/vY6tqRfg9y7tsnZdxNqOxLXSXTsFA6JRhUlr8Qs88iUUAfsUzWc4nNmmzQH2UbtT25ooG9x4nA==
|
||||
|
||||
"@cspell/dict-html-symbol-entities@^4.0.3":
|
||||
version "4.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-html-symbol-entities/-/dict-html-symbol-entities-4.0.3.tgz#bf2887020ca4774413d8b1f27c9b6824ba89e9ef"
|
||||
integrity sha512-aABXX7dMLNFdSE8aY844X4+hvfK7977sOWgZXo4MTGAmOzR8524fjbJPswIBK7GaD3+SgFZ2yP2o0CFvXDGF+A==
|
||||
|
||||
"@cspell/dict-html@^4.0.10":
|
||||
version "4.0.10"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-html/-/dict-html-4.0.10.tgz#7b536b2adca4b58ed92752c9d3c7ffc724dd5991"
|
||||
integrity sha512-I9uRAcdtHbh0wEtYZlgF0TTcgH0xaw1B54G2CW+tx4vHUwlde/+JBOfIzird4+WcMv4smZOfw+qHf7puFUbI5g==
|
||||
|
||||
"@cspell/dict-java@^5.0.10":
|
||||
version "5.0.10"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-java/-/dict-java-5.0.10.tgz#e6383ca645046b9f05a04a2c2e858fcc80c6fc63"
|
||||
integrity sha512-pVNcOnmoGiNL8GSVq4WbX/Vs2FGS0Nej+1aEeGuUY9CU14X8yAVCG+oih5ZoLt1jaR8YfR8byUF8wdp4qG4XIw==
|
||||
|
||||
"@cspell/dict-julia@^1.0.4":
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-julia/-/dict-julia-1.0.4.tgz#e478c20d742cd6857b6de41dc61a92036dafb4bc"
|
||||
integrity sha512-bFVgNX35MD3kZRbXbJVzdnN7OuEqmQXGpdOi9jzB40TSgBTlJWA4nxeAKV4CPCZxNRUGnLH0p05T/AD7Aom9/w==
|
||||
|
||||
"@cspell/dict-k8s@^1.0.6", "@cspell/dict-k8s@^1.0.9":
|
||||
version "1.0.9"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-k8s/-/dict-k8s-1.0.9.tgz#e9392a002797c67ffc3e96893156cc15af3774d1"
|
||||
integrity sha512-Q7GELSQIzo+BERl2ya/nBEnZeQC+zJP19SN1pI6gqDYraM51uYJacbbcWLYYO2Y+5joDjNt/sd/lJtLaQwoSlA==
|
||||
|
||||
"@cspell/dict-latex@^4.0.3":
|
||||
version "4.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-latex/-/dict-latex-4.0.3.tgz#a1254c7d9c3a2d70cd6391a9f2f7694431b1b2cb"
|
||||
integrity sha512-2KXBt9fSpymYHxHfvhUpjUFyzrmN4c4P8mwIzweLyvqntBT3k0YGZJSriOdjfUjwSygrfEwiuPI1EMrvgrOMJw==
|
||||
|
||||
"@cspell/dict-lorem-ipsum@^4.0.3":
|
||||
version "4.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-lorem-ipsum/-/dict-lorem-ipsum-4.0.3.tgz#c5fc631d934f1daf8b10c88b795278701a2469ec"
|
||||
integrity sha512-WFpDi/PDYHXft6p0eCXuYnn7mzMEQLVeqpO+wHSUd+kz5ADusZ4cpslAA4wUZJstF1/1kMCQCZM6HLZic9bT8A==
|
||||
|
||||
"@cspell/dict-lua@^4.0.6":
|
||||
version "4.0.6"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-lua/-/dict-lua-4.0.6.tgz#7de412bfaead794445e26d566aec222e20ad69ba"
|
||||
integrity sha512-Jwvh1jmAd9b+SP9e1GkS2ACbqKKRo9E1f9GdjF/ijmooZuHU0hPyqvnhZzUAxO1egbnNjxS/J2T6iUtjAUK2KQ==
|
||||
|
||||
"@cspell/dict-makefile@^1.0.3":
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-makefile/-/dict-makefile-1.0.3.tgz#08d3349bf7cbd8f5dacf8641f3d35092ca0b8b38"
|
||||
integrity sha512-R3U0DSpvTs6qdqfyBATnePj9Q/pypkje0Nj26mQJ8TOBQutCRAJbr2ZFAeDjgRx5EAJU/+8txiyVF97fbVRViw==
|
||||
|
||||
"@cspell/dict-markdown@^2.0.7":
|
||||
version "2.0.7"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-markdown/-/dict-markdown-2.0.7.tgz#15d6f9eed6bd1b33921b4332426ff387961163f1"
|
||||
integrity sha512-F9SGsSOokFn976DV4u/1eL4FtKQDSgJHSZ3+haPRU5ki6OEqojxKa8hhj4AUrtNFpmBaJx/WJ4YaEzWqG7hgqg==
|
||||
|
||||
"@cspell/dict-monkeyc@^1.0.9":
|
||||
version "1.0.9"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-monkeyc/-/dict-monkeyc-1.0.9.tgz#58b5f6f15fc7c11ce0eeffd0742fba4b39fc0b8b"
|
||||
integrity sha512-Jvf6g5xlB4+za3ThvenYKREXTEgzx5gMUSzrAxIiPleVG4hmRb/GBSoSjtkGaibN3XxGx5x809gSTYCA/IHCpA==
|
||||
|
||||
"@cspell/dict-node@^5.0.5":
|
||||
version "5.0.5"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-node/-/dict-node-5.0.5.tgz#11653612ebdd833208432e8b3cbe61bd6dd35dc3"
|
||||
integrity sha512-7NbCS2E8ZZRZwlLrh2sA0vAk9n1kcTUiRp/Nia8YvKaItGXLfxYqD2rMQ3HpB1kEutal6hQLVic3N2Yi1X7AaA==
|
||||
|
||||
"@cspell/dict-npm@^5.1.17":
|
||||
version "5.1.20"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-npm/-/dict-npm-5.1.20.tgz#5e54b428d7609267263d426d0edc9d51d53f0a6f"
|
||||
integrity sha512-vE9pFIifCDChsVhhUDuVtnwxygOdtHNluDm+8FkgC84M6LwiUVJr/CuSOI/SCR0oI9iiFp0VvMz194B6XwMv3g==
|
||||
|
||||
"@cspell/dict-people-names@^1.1.1":
|
||||
version "1.1.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-people-names/-/dict-people-names-1.1.4.tgz#e5a9bf51f0d6aae7f51cd1f2d2eb218f8f5bdba8"
|
||||
integrity sha512-YgnstmeGj/FoIEX996w6SpzTHyNHNxBinwbgozciEo4KJEQkdR7K1GIvJJdRh5Bh81T7xY7PCb6l4nStCFyclA==
|
||||
|
||||
"@cspell/dict-php@^4.0.13":
|
||||
version "4.0.13"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-php/-/dict-php-4.0.13.tgz#86f1e6fb2174b2b0fa012baf86c448b2730f04f9"
|
||||
integrity sha512-P6sREMZkhElzz/HhXAjahnICYIqB/HSGp1EhZh+Y6IhvC15AzgtDP8B8VYCIsQof6rPF1SQrFwunxOv8H1e2eg==
|
||||
|
||||
"@cspell/dict-powershell@^5.0.13":
|
||||
version "5.0.13"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-powershell/-/dict-powershell-5.0.13.tgz#f557aa04ee9bda4fe091308a0bcaea09ed12fa76"
|
||||
integrity sha512-0qdj0XZIPmb77nRTynKidRJKTU0Fl+10jyLbAhFTuBWKMypVY06EaYFnwhsgsws/7nNX8MTEQuewbl9bWFAbsg==
|
||||
|
||||
"@cspell/dict-public-licenses@^2.0.11":
|
||||
version "2.0.11"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-public-licenses/-/dict-public-licenses-2.0.11.tgz#37550c4e0cd445991caba528bf4ba58ce7a935c3"
|
||||
integrity sha512-rR5KjRUSnVKdfs5G+gJ4oIvQvm8+NJ6cHWY2N+GE69/FSGWDOPHxulCzeGnQU/c6WWZMSimG9o49i9r//lUQyA==
|
||||
|
||||
"@cspell/dict-python@^4.2.13":
|
||||
version "4.2.13"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-python/-/dict-python-4.2.13.tgz#c3dbaa7e2434c835e11540345e2168e5e685190a"
|
||||
integrity sha512-mZIcmo9qif8LkJ6N/lqTZawcOk2kVTcuWIUOSbMcjyomO0XZ7iWz15TfONyr03Ea/l7o5ULV+MZ4vx76bAUb7w==
|
||||
dependencies:
|
||||
"@cspell/dict-data-science" "^2.0.5"
|
||||
|
||||
"@cspell/dict-r@^2.0.4":
|
||||
version "2.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-r/-/dict-r-2.0.4.tgz#31b5abd91cc12aebfffdde4be4d2902668789311"
|
||||
integrity sha512-cBpRsE/U0d9BRhiNRMLMH1PpWgw+N+1A2jumgt1if9nBGmQw4MUpg2u9I0xlFVhstTIdzXiLXMxP45cABuiUeQ==
|
||||
|
||||
"@cspell/dict-ru_ru@^2.2.1":
|
||||
version "2.2.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-ru_ru/-/dict-ru_ru-2.2.4.tgz#a0b4a9272ca360b1721ef999e02559dc636d4043"
|
||||
integrity sha512-Ub5Y318ZAaFJDAPgeImcLg8ksfthGhxMHsyHGkn9Uf3g9AZUlYsabs1HwgLmh9NtqDNjMlF52S9R11GFDdaWIw==
|
||||
|
||||
"@cspell/dict-ruby@^5.0.7":
|
||||
version "5.0.7"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-ruby/-/dict-ruby-5.0.7.tgz#3593a955baaffe3c5d28fb178b72fdf93c7eec71"
|
||||
integrity sha512-4/d0hcoPzi5Alk0FmcyqlzFW9lQnZh9j07MJzPcyVO62nYJJAGKaPZL2o4qHeCS/od/ctJC5AHRdoUm0ktsw6Q==
|
||||
|
||||
"@cspell/dict-rust@^4.0.10":
|
||||
version "4.0.10"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-rust/-/dict-rust-4.0.10.tgz#8ae6eaf31a0ebce9dc8fd8dd68e5925e1d5290ee"
|
||||
integrity sha512-6o5C8566VGTTctgcwfF3Iy7314W0oMlFFSQOadQ0OEdJ9Z9ERX/PDimrzP3LGuOrvhtEFoK8pj+BLnunNwRNrw==
|
||||
|
||||
"@cspell/dict-scala@^5.0.6":
|
||||
version "5.0.6"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-scala/-/dict-scala-5.0.6.tgz#5e925def2fe6dc27ee2ad1c452941c3d6790fb6d"
|
||||
integrity sha512-tl0YWAfjUVb4LyyE4JIMVE8DlLzb1ecHRmIWc4eT6nkyDqQgHKzdHsnusxFEFMVLIQomgSg0Zz6hJ5S1E4W4ww==
|
||||
|
||||
"@cspell/dict-software-terms@^4.1.19", "@cspell/dict-software-terms@^4.1.3":
|
||||
version "4.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-software-terms/-/dict-software-terms-4.2.0.tgz#c16e9d3326c36195dadea812bc035817054bf0c1"
|
||||
integrity sha512-cTLTNdP9RM6nruZ01FThEFKRi7C4TPN8ndc+FpvCqis9J8iSg4Cr4YQemT/DxXoXz0527NbBPCARunxA0qIgTA==
|
||||
|
||||
"@cspell/dict-sql@^2.1.8":
|
||||
version "2.1.8"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-sql/-/dict-sql-2.1.8.tgz#45ea53b3e57fd2cc5f839f49b644aa743dac4990"
|
||||
integrity sha512-dJRE4JV1qmXTbbGm6WIcg1knmR6K5RXnQxF4XHs5HA3LAjc/zf77F95i5LC+guOGppVF6Hdl66S2UyxT+SAF3A==
|
||||
|
||||
"@cspell/dict-svelte@^1.0.5":
|
||||
version "1.0.5"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-svelte/-/dict-svelte-1.0.5.tgz#09752e01ff6667e737566d9dfc704c8dcc9a6492"
|
||||
integrity sha512-sseHlcXOqWE4Ner9sg8KsjxwSJ2yssoJNqFHR9liWVbDV+m7kBiUtn2EB690TihzVsEmDr/0Yxrbb5Bniz70mA==
|
||||
|
||||
"@cspell/dict-swift@^2.0.4":
|
||||
version "2.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-swift/-/dict-swift-2.0.4.tgz#bc19522418ed68cf914736b612c4e4febbf07e8d"
|
||||
integrity sha512-CsFF0IFAbRtYNg0yZcdaYbADF5F3DsM8C4wHnZefQy8YcHP/qjAF/GdGfBFBLx+XSthYuBlo2b2XQVdz3cJZBw==
|
||||
|
||||
"@cspell/dict-terraform@^1.0.6":
|
||||
version "1.0.7"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-terraform/-/dict-terraform-1.0.7.tgz#815a523c86f647cb7695d48b69e4a793c49f875e"
|
||||
integrity sha512-Ip7tOlAt/qUVdWYyDMA7DlKMpQ6sjtrsXk4vcpqXoYpoJlzMoDce7pw+fPhHshtNOFBAZ4nOrszlLu6APuy+HQ==
|
||||
|
||||
"@cspell/dict-typescript@^3.1.11":
|
||||
version "3.1.11"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-typescript/-/dict-typescript-3.1.11.tgz#40586f13b0337bd9cba958e0661b35888580b249"
|
||||
integrity sha512-FwvK5sKbwrVpdw0e9+1lVTl8FPoHYvfHRuQRQz2Ql5XkC0gwPPkpoyD1zYImjIyZRoYXk3yp9j8ss4iz7A7zoQ==
|
||||
|
||||
"@cspell/dict-uk-ua@^4.0.1":
|
||||
version "4.0.4"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-uk-ua/-/dict-uk-ua-4.0.4.tgz#19ecb8eaa70a363fd53a2adab0ea462f6e57e055"
|
||||
integrity sha512-4n+ImjsKVWkcZC9O/zQY4oZ9KWm0qkyTZY5/aW1KZsP8sx0cK5IUaA/375R3i4l8SDc0WDZN8K6Wy71cggDUEA==
|
||||
|
||||
"@cspell/dict-vue@^3.0.3":
|
||||
version "3.0.3"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-vue/-/dict-vue-3.0.3.tgz#295c288f6fd363879898223202ec3be048663b98"
|
||||
integrity sha512-akmYbrgAGumqk1xXALtDJcEcOMYBYMnkjpmGzH13Ozhq1mkPF4VgllFQlm1xYde+BUKNnzMgPEzxrL2qZllgYA==
|
||||
|
||||
"@cspell/dict-win32@^2.0.3":
|
||||
version "2.0.6"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dict-win32/-/dict-win32-2.0.6.tgz#3edaa576906061584b470aca1b41b6d76548eeca"
|
||||
integrity sha512-znBvliYtgXIpMW1/T/VnrvA2CwEqfJd06I57laVQsa/ST7dUeUU2kTbk4A4ICCr3eemVMPl9NQtR+t3SQu+ogQ==
|
||||
|
||||
"@cspell/dynamic-import@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/dynamic-import/-/dynamic-import-8.17.1.tgz#2b3f3325b6013a067a1a49cda8b69ae73aaed36a"
|
||||
integrity sha512-XQtr2olYOtqbg49E+8SISd6I5DzfxmsKINDn0ZgaTFeLalnNdF3ewDU4gOEbApIzGffRa1mW9t19MsiVrznSDw==
|
||||
dependencies:
|
||||
"@cspell/url" "8.17.1"
|
||||
import-meta-resolve "^4.1.0"
|
||||
|
||||
"@cspell/filetypes@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/filetypes/-/filetypes-8.17.1.tgz#d193afc5029364334f005ff23f4c4cb80170c374"
|
||||
integrity sha512-AxYw6j7EPYtDFAFjwybjFpMc9waXQzurfBXmEVfQ5RQRlbylujLZWwR6GnMqofeNg4oGDUpEjcAZFrgdkvMQlA==
|
||||
|
||||
"@cspell/strong-weak-map@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/strong-weak-map/-/strong-weak-map-8.17.1.tgz#2fa88f283ef10222fad25134b5ebb54edaad985f"
|
||||
integrity sha512-8cY3vLAKdt5gQEMM3Gr57BuQ8sun2NjYNh9qTdrctC1S9gNC7XzFghTYAfHSWR4VrOUcMFLO/izMdsc1KFvFOA==
|
||||
|
||||
"@cspell/url@8.17.1":
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/@cspell/url/-/url-8.17.1.tgz#e7daec1597fa31b4d0a7a685e7a24a11b0c8a193"
|
||||
integrity sha512-LMvReIndW1ckvemElfDgTt282fb2C3C/ZXfsm0pJsTV5ZmtdelCHwzmgSBmY5fDr7D66XDp8EurotSE0K6BTvw==
|
||||
|
||||
array-timsort@^1.0.3:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/array-timsort/-/array-timsort-1.0.3.tgz#3c9e4199e54fb2b9c3fe5976396a21614ef0d926"
|
||||
integrity sha512-/+3GRL7dDAGEfM6TseQk/U+mi18TU2Ms9I3UlLdUMhz2hbvGNTKdj9xniwXfUqgYhHxRx0+8UnKkvlNwVU+cWQ==
|
||||
|
||||
braces@^3.0.3:
|
||||
version "3.0.3"
|
||||
resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789"
|
||||
integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==
|
||||
dependencies:
|
||||
fill-range "^7.1.1"
|
||||
|
||||
callsites@^3.0.0, callsites@^3.1.0:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
|
||||
integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==
|
||||
|
||||
chalk-template@^1.1.0:
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/chalk-template/-/chalk-template-1.1.0.tgz#ffc55db6dd745e9394b85327c8ac8466edb7a7b1"
|
||||
integrity sha512-T2VJbcDuZQ0Tb2EWwSotMPJjgpy1/tGee1BTpUNsGZ/qgNjV2t7Mvu+d4600U564nbLesN1x2dPL+xii174Ekg==
|
||||
dependencies:
|
||||
chalk "^5.2.0"
|
||||
|
||||
chalk@^5.2.0, chalk@^5.3.0:
|
||||
version "5.4.1"
|
||||
resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.4.1.tgz#1b48bf0963ec158dce2aacf69c093ae2dd2092d8"
|
||||
integrity sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==
|
||||
|
||||
clear-module@^4.1.2:
|
||||
version "4.1.2"
|
||||
resolved "https://registry.yarnpkg.com/clear-module/-/clear-module-4.1.2.tgz#5a58a5c9f8dccf363545ad7284cad3c887352a80"
|
||||
integrity sha512-LWAxzHqdHsAZlPlEyJ2Poz6AIs384mPeqLVCru2p0BrP9G/kVGuhNyZYClLO6cXlnuJjzC8xtsJIuMjKqLXoAw==
|
||||
dependencies:
|
||||
parent-module "^2.0.0"
|
||||
resolve-from "^5.0.0"
|
||||
|
||||
commander@^12.1.0:
|
||||
version "12.1.0"
|
||||
resolved "https://registry.yarnpkg.com/commander/-/commander-12.1.0.tgz#01423b36f501259fdaac4d0e4d60c96c991585d3"
|
||||
integrity sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==
|
||||
|
||||
comment-json@^4.2.5:
|
||||
version "4.2.5"
|
||||
resolved "https://registry.yarnpkg.com/comment-json/-/comment-json-4.2.5.tgz#482e085f759c2704b60bc6f97f55b8c01bc41e70"
|
||||
integrity sha512-bKw/r35jR3HGt5PEPm1ljsQQGyCrR8sFGNiN5L+ykDHdpO8Smxkrkla9Yi6NkQyUrb8V54PGhfMs6NrIwtxtdw==
|
||||
dependencies:
|
||||
array-timsort "^1.0.3"
|
||||
core-util-is "^1.0.3"
|
||||
esprima "^4.0.1"
|
||||
has-own-prop "^2.0.0"
|
||||
repeat-string "^1.6.1"
|
||||
|
||||
core-util-is@^1.0.3:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85"
|
||||
integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==
|
||||
|
||||
cspell-config-lib@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-config-lib/-/cspell-config-lib-8.17.1.tgz#a87973b91d51bf23a2018042c25aeaaa8a4e69c0"
|
||||
integrity sha512-x1S7QWprgUcwuwiJB1Ng0ZTBC4G50qP9qQyg/aroMkcdMsHfk26E8jUGRPNt4ftHFzS4YMhwtXuJQ9IgRUuNPA==
|
||||
dependencies:
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
comment-json "^4.2.5"
|
||||
yaml "^2.6.1"
|
||||
|
||||
cspell-dictionary@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-dictionary/-/cspell-dictionary-8.17.1.tgz#bfc9bfdbd3720d1425260a98091acffab7b03dd5"
|
||||
integrity sha512-zSl9l3wii+x16yc2NVZl/+CMLeLBAiuEd5YoFkOYPcbTJnfPwdjMNcj71u7wBvNJ+qwbF+kGbutEt15yHW3NBw==
|
||||
dependencies:
|
||||
"@cspell/cspell-pipe" "8.17.1"
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
cspell-trie-lib "8.17.1"
|
||||
fast-equals "^5.0.1"
|
||||
|
||||
cspell-gitignore@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-gitignore/-/cspell-gitignore-8.17.1.tgz#38f3213a40ba86480bb5f66a91198db6e0ef37c0"
|
||||
integrity sha512-bk727Zf4FBCjm9Mwvyreyhgjwe+YhPQEW7PldkHiinKd+Irfez4s8GXLQb1EgV0UpvViqaqBqLmngjZdS30BTA==
|
||||
dependencies:
|
||||
"@cspell/url" "8.17.1"
|
||||
cspell-glob "8.17.1"
|
||||
cspell-io "8.17.1"
|
||||
find-up-simple "^1.0.0"
|
||||
|
||||
cspell-glob@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-glob/-/cspell-glob-8.17.1.tgz#23d1be46b32fb4933487e4edff347d34db446f5a"
|
||||
integrity sha512-cUwM5auSt0RvLX7UkP2GEArJRWc85l51B1voArl+3ZIKeMZwcJpJgN3qvImtF8yRTZwYeYCs1sgsihb179q+mg==
|
||||
dependencies:
|
||||
"@cspell/url" "8.17.1"
|
||||
micromatch "^4.0.8"
|
||||
|
||||
cspell-grammar@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-grammar/-/cspell-grammar-8.17.1.tgz#8f6619fbfaebff6aeee63b13d17898b4d0c09136"
|
||||
integrity sha512-H5tLcBuW7aUj9L0rR+FSbnWPEsWb8lWppHVidtqw9Ll1CUHWOZC9HTB2RdrhJZrsz/8DJbM2yNbok0Xt0VAfdw==
|
||||
dependencies:
|
||||
"@cspell/cspell-pipe" "8.17.1"
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
|
||||
cspell-io@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-io/-/cspell-io-8.17.1.tgz#b91f1cac1c64a6fa2b61a388d0dc67437fcf3ada"
|
||||
integrity sha512-liIOsblt7oVItifzRAbuxiYrwlgw1VOqKppMxVKtYoAn2VUuuEpjCj6jLWpoTqSszR/38o7ChsHY1LHakhJZmw==
|
||||
dependencies:
|
||||
"@cspell/cspell-service-bus" "8.17.1"
|
||||
"@cspell/url" "8.17.1"
|
||||
|
||||
cspell-lib@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-lib/-/cspell-lib-8.17.1.tgz#21c76f1ea4e91c90245e55acddbf452d055a6607"
|
||||
integrity sha512-66n83Q7bK5tnvkDH7869/pBY/65AKmZVfCOAlsbhJn3YMDbNHFCHR0d1oNMlqG+n65Aco89VGwYfXxImZY+/mA==
|
||||
dependencies:
|
||||
"@cspell/cspell-bundled-dicts" "8.17.1"
|
||||
"@cspell/cspell-pipe" "8.17.1"
|
||||
"@cspell/cspell-resolver" "8.17.1"
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
"@cspell/dynamic-import" "8.17.1"
|
||||
"@cspell/filetypes" "8.17.1"
|
||||
"@cspell/strong-weak-map" "8.17.1"
|
||||
"@cspell/url" "8.17.1"
|
||||
clear-module "^4.1.2"
|
||||
comment-json "^4.2.5"
|
||||
cspell-config-lib "8.17.1"
|
||||
cspell-dictionary "8.17.1"
|
||||
cspell-glob "8.17.1"
|
||||
cspell-grammar "8.17.1"
|
||||
cspell-io "8.17.1"
|
||||
cspell-trie-lib "8.17.1"
|
||||
env-paths "^3.0.0"
|
||||
fast-equals "^5.0.1"
|
||||
gensequence "^7.0.0"
|
||||
import-fresh "^3.3.0"
|
||||
resolve-from "^5.0.0"
|
||||
vscode-languageserver-textdocument "^1.0.12"
|
||||
vscode-uri "^3.0.8"
|
||||
xdg-basedir "^5.1.0"
|
||||
|
||||
cspell-trie-lib@8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell-trie-lib/-/cspell-trie-lib-8.17.1.tgz#618e5cc671b0a24cf7ec27a9a9b834b197e17392"
|
||||
integrity sha512-13WNa5s75VwOjlGzWprmfNbBFIfXyA7tYYrbV+LugKkznyNZJeJPojHouEudcLq3SYb2Q6tJ7qyWcuT5bR9qPA==
|
||||
dependencies:
|
||||
"@cspell/cspell-pipe" "8.17.1"
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
gensequence "^7.0.0"
|
||||
|
||||
cspell@^8.17.1:
|
||||
version "8.17.1"
|
||||
resolved "https://registry.yarnpkg.com/cspell/-/cspell-8.17.1.tgz#be3c79a5b0b2e374ac0df8f921eb30ddca170110"
|
||||
integrity sha512-D0lw8XTXrTycNzOn5DkfPJNUT00X53OgvFDm+0SzhBr1r+na8LEh3CnQ6zKYVU0fL0x8vU82vs4jmGjDho9mPg==
|
||||
dependencies:
|
||||
"@cspell/cspell-json-reporter" "8.17.1"
|
||||
"@cspell/cspell-pipe" "8.17.1"
|
||||
"@cspell/cspell-types" "8.17.1"
|
||||
"@cspell/dynamic-import" "8.17.1"
|
||||
"@cspell/url" "8.17.1"
|
||||
chalk "^5.3.0"
|
||||
chalk-template "^1.1.0"
|
||||
commander "^12.1.0"
|
||||
cspell-dictionary "8.17.1"
|
||||
cspell-gitignore "8.17.1"
|
||||
cspell-glob "8.17.1"
|
||||
cspell-io "8.17.1"
|
||||
cspell-lib "8.17.1"
|
||||
fast-json-stable-stringify "^2.1.0"
|
||||
file-entry-cache "^9.1.0"
|
||||
get-stdin "^9.0.0"
|
||||
semver "^7.6.3"
|
||||
tinyglobby "^0.2.10"
|
||||
|
||||
env-paths@^3.0.0:
|
||||
version "3.0.0"
|
||||
resolved "https://registry.yarnpkg.com/env-paths/-/env-paths-3.0.0.tgz#2f1e89c2f6dbd3408e1b1711dd82d62e317f58da"
|
||||
integrity sha512-dtJUTepzMW3Lm/NPxRf3wP4642UWhjL2sQxc+ym2YMj1m/H2zDNQOlezafzkHwn6sMstjHTwG6iQQsctDW/b1A==
|
||||
|
||||
esprima@^4.0.1:
|
||||
version "4.0.1"
|
||||
resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
|
||||
integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
|
||||
|
||||
fast-equals@^5.0.1:
|
||||
version "5.0.1"
|
||||
resolved "https://registry.yarnpkg.com/fast-equals/-/fast-equals-5.0.1.tgz#a4eefe3c5d1c0d021aeed0bc10ba5e0c12ee405d"
|
||||
integrity sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ==
|
||||
|
||||
fast-json-stable-stringify@^2.1.0:
|
||||
version "2.1.0"
|
||||
resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633"
|
||||
integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
|
||||
|
||||
fdir@^6.4.2:
|
||||
version "6.4.2"
|
||||
resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.4.2.tgz#ddaa7ce1831b161bc3657bb99cb36e1622702689"
|
||||
integrity sha512-KnhMXsKSPZlAhp7+IjUkRZKPb4fUyccpDrdFXbi4QL1qkmFh9kVY09Yox+n4MaOb3lHZ1Tv829C3oaaXoMYPDQ==
|
||||
|
||||
file-entry-cache@^9.1.0:
|
||||
version "9.1.0"
|
||||
resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-9.1.0.tgz#2e66ad98ce93f49aed1b178c57b0b5741591e075"
|
||||
integrity sha512-/pqPFG+FdxWQj+/WSuzXSDaNzxgTLr/OrR1QuqfEZzDakpdYE70PwUxL7BPUa8hpjbvY1+qvCl8k+8Tq34xJgg==
|
||||
dependencies:
|
||||
flat-cache "^5.0.0"
|
||||
|
||||
fill-range@^7.1.1:
|
||||
version "7.1.1"
|
||||
resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292"
|
||||
integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==
|
||||
dependencies:
|
||||
to-regex-range "^5.0.1"
|
||||
|
||||
find-up-simple@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/find-up-simple/-/find-up-simple-1.0.0.tgz#21d035fde9fdbd56c8f4d2f63f32fd93a1cfc368"
|
||||
integrity sha512-q7Us7kcjj2VMePAa02hDAF6d+MzsdsAWEwYyOpwUtlerRBkOEPBCRZrAV4XfcSN8fHAgaD0hP7miwoay6DCprw==
|
||||
|
||||
flat-cache@^5.0.0:
|
||||
version "5.0.0"
|
||||
resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-5.0.0.tgz#26c4da7b0f288b408bb2b506b2cb66c240ddf062"
|
||||
integrity sha512-JrqFmyUl2PnPi1OvLyTVHnQvwQ0S+e6lGSwu8OkAZlSaNIZciTY2H/cOOROxsBA1m/LZNHDsqAgDZt6akWcjsQ==
|
||||
dependencies:
|
||||
flatted "^3.3.1"
|
||||
keyv "^4.5.4"
|
||||
|
||||
flatted@^3.3.1:
|
||||
version "3.3.2"
|
||||
resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.2.tgz#adba1448a9841bec72b42c532ea23dbbedef1a27"
|
||||
integrity sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==
|
||||
|
||||
gensequence@^7.0.0:
|
||||
version "7.0.0"
|
||||
resolved "https://registry.yarnpkg.com/gensequence/-/gensequence-7.0.0.tgz#bb6aedec8ff665e3a6c42f92823121e3a6ea7718"
|
||||
integrity sha512-47Frx13aZh01afHJTB3zTtKIlFI6vWY+MYCN9Qpew6i52rfKjnhCF/l1YlC8UmEMvvntZZ6z4PiCcmyuedR2aQ==
|
||||
|
||||
get-stdin@^9.0.0:
|
||||
version "9.0.0"
|
||||
resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-9.0.0.tgz#3983ff82e03d56f1b2ea0d3e60325f39d703a575"
|
||||
integrity sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA==
|
||||
|
||||
global-directory@^4.0.1:
|
||||
version "4.0.1"
|
||||
resolved "https://registry.yarnpkg.com/global-directory/-/global-directory-4.0.1.tgz#4d7ac7cfd2cb73f304c53b8810891748df5e361e"
|
||||
integrity sha512-wHTUcDUoZ1H5/0iVqEudYW4/kAlN5cZ3j/bXn0Dpbizl9iaUVeWSHqiOjsgk6OW2bkLclbBjzewBz6weQ1zA2Q==
|
||||
dependencies:
|
||||
ini "4.1.1"
|
||||
|
||||
has-own-prop@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/has-own-prop/-/has-own-prop-2.0.0.tgz#f0f95d58f65804f5d218db32563bb85b8e0417af"
|
||||
integrity sha512-Pq0h+hvsVm6dDEa8x82GnLSYHOzNDt7f0ddFa3FqcQlgzEiptPqL+XrOJNavjOzSYiYWIrgeVYYgGlLmnxwilQ==
|
||||
|
||||
import-fresh@^3.3.0:
|
||||
version "3.3.0"
|
||||
resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b"
|
||||
integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==
|
||||
dependencies:
|
||||
parent-module "^1.0.0"
|
||||
resolve-from "^4.0.0"
|
||||
|
||||
import-meta-resolve@^4.1.0:
|
||||
version "4.1.0"
|
||||
resolved "https://registry.yarnpkg.com/import-meta-resolve/-/import-meta-resolve-4.1.0.tgz#f9db8bead9fafa61adb811db77a2bf22c5399706"
|
||||
integrity sha512-I6fiaX09Xivtk+THaMfAwnA3MVA5Big1WHF1Dfx9hFuvNIWpXnorlkzhcQf6ehrqQiiZECRt1poOAkPmer3ruw==
|
||||
|
||||
ini@4.1.1:
|
||||
version "4.1.1"
|
||||
resolved "https://registry.yarnpkg.com/ini/-/ini-4.1.1.tgz#d95b3d843b1e906e56d6747d5447904ff50ce7a1"
|
||||
integrity sha512-QQnnxNyfvmHFIsj7gkPcYymR8Jdw/o7mp5ZFihxn6h8Ci6fh3Dx4E1gPjpQEpIuPo9XVNY/ZUwh4BPMjGyL01g==
|
||||
|
||||
is-number@^7.0.0:
|
||||
version "7.0.0"
|
||||
resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
|
||||
integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==
|
||||
|
||||
json-buffer@3.0.1:
|
||||
version "3.0.1"
|
||||
resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
|
||||
integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==
|
||||
|
||||
keyv@^4.5.4:
|
||||
version "4.5.4"
|
||||
resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93"
|
||||
integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==
|
||||
dependencies:
|
||||
json-buffer "3.0.1"
|
||||
|
||||
micromatch@^4.0.8:
|
||||
version "4.0.8"
|
||||
resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
|
||||
integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
|
||||
dependencies:
|
||||
braces "^3.0.3"
|
||||
picomatch "^2.3.1"
|
||||
|
||||
parent-module@^1.0.0:
|
||||
version "1.0.1"
|
||||
resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
|
||||
integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==
|
||||
dependencies:
|
||||
callsites "^3.0.0"
|
||||
|
||||
parent-module@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-2.0.0.tgz#fa71f88ff1a50c27e15d8ff74e0e3a9523bf8708"
|
||||
integrity sha512-uo0Z9JJeWzv8BG+tRcapBKNJ0dro9cLyczGzulS6EfeyAdeC9sbojtW6XwvYxJkEne9En+J2XEl4zyglVeIwFg==
|
||||
dependencies:
|
||||
callsites "^3.1.0"
|
||||
|
||||
picomatch@^2.3.1:
|
||||
version "2.3.1"
|
||||
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42"
|
||||
integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
|
||||
|
||||
picomatch@^4.0.2:
|
||||
version "4.0.2"
|
||||
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-4.0.2.tgz#77c742931e8f3b8820946c76cd0c1f13730d1dab"
|
||||
integrity sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==
|
||||
|
||||
repeat-string@^1.6.1:
|
||||
version "1.6.1"
|
||||
resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
|
||||
integrity sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==
|
||||
|
||||
resolve-from@^4.0.0:
|
||||
version "4.0.0"
|
||||
resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6"
|
||||
integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==
|
||||
|
||||
resolve-from@^5.0.0:
|
||||
version "5.0.0"
|
||||
resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69"
|
||||
integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==
|
||||
|
||||
semver@^7.6.3:
|
||||
version "7.6.3"
|
||||
resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143"
|
||||
integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==
|
||||
|
||||
tinyglobby@^0.2.10:
|
||||
version "0.2.10"
|
||||
resolved "https://registry.yarnpkg.com/tinyglobby/-/tinyglobby-0.2.10.tgz#e712cf2dc9b95a1f5c5bbd159720e15833977a0f"
|
||||
integrity sha512-Zc+8eJlFMvgatPZTl6A9L/yht8QqdmUNtURHaKZLmKBE12hNPSrqNkUp2cs3M/UKmNVVAMFQYSjYIVHDjW5zew==
|
||||
dependencies:
|
||||
fdir "^6.4.2"
|
||||
picomatch "^4.0.2"
|
||||
|
||||
to-regex-range@^5.0.1:
|
||||
version "5.0.1"
|
||||
resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4"
|
||||
integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==
|
||||
dependencies:
|
||||
is-number "^7.0.0"
|
||||
|
||||
vscode-languageserver-textdocument@^1.0.12:
|
||||
version "1.0.12"
|
||||
resolved "https://registry.yarnpkg.com/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz#457ee04271ab38998a093c68c2342f53f6e4a631"
|
||||
integrity sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==
|
||||
|
||||
vscode-uri@^3.0.8:
|
||||
version "3.0.8"
|
||||
resolved "https://registry.yarnpkg.com/vscode-uri/-/vscode-uri-3.0.8.tgz#1770938d3e72588659a172d0fd4642780083ff9f"
|
||||
integrity sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==
|
||||
|
||||
xdg-basedir@^5.1.0:
|
||||
version "5.1.0"
|
||||
resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-5.1.0.tgz#1efba19425e73be1bc6f2a6ceb52a3d2c884c0c9"
|
||||
integrity sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==
|
||||
|
||||
yaml@^2.6.1:
|
||||
version "2.6.1"
|
||||
resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.6.1.tgz#42f2b1ba89203f374609572d5349fb8686500773"
|
||||
integrity sha512-7r0XPzioN/Q9kXBro/XPnA6kznR73DHq+GXh5ON7ZozRO6aMjbmiBuKste2wslTFkC5d1dw0GooOCepZXJ2SAg==
|
||||
@@ -578,7 +578,7 @@
|
||||
"type": "victoriametrics-datasource",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"expr": "sum(vm_persistentqueue_bytes_pending{job=~\"$job\", instance=~\"$instance\"})",
|
||||
"expr": "sum(vmagent_remotewrite_pending_data_bytes{job=~\"$job\", instance=~\"$instance\"})",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
|
||||
@@ -143,7 +143,7 @@
|
||||
"type": "victoriametrics-datasource",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"description": "Shows if the last configuration update was successful. \"Not Ok\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"description": "Shows if the last configuration update was successful. \"Not OK\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
@@ -152,7 +152,7 @@
|
||||
"0": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Ok"
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
@@ -163,11 +163,21 @@
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 1,
|
||||
"text": "Not Ok"
|
||||
"text": "Not OK"
|
||||
},
|
||||
"to": 999999
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"index": 2,
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
@@ -214,7 +224,7 @@
|
||||
"uid": "$ds"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "count(vmalert_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 ) or 0",
|
||||
"expr": "count(vmalert_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 )",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
@@ -359,6 +369,7 @@
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"min": 0,
|
||||
"noValue": "0",
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
@@ -407,7 +418,7 @@
|
||||
"uid": "$ds"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "(sum(increase(vmalert_alerting_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval])) or vector(0)) + \n(sum(increase(vmalert_recording_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval])) or vector(0))",
|
||||
"expr": "sum(increase(vmalert_alerting_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval])) + \nsum(increase(vmalert_recording_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval]))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
@@ -426,6 +437,7 @@
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"min": 0,
|
||||
"noValue": "0",
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
@@ -475,7 +487,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "count(vmalert_recording_rules_last_evaluation_samples{job=~\"$job\", instance=~\"$instance\"} < 1) or 0",
|
||||
"expr": "count(vmalert_recording_rules_last_evaluation_samples{job=~\"$job\", instance=~\"$instance\"} < 1)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"range": true,
|
||||
|
||||
@@ -225,7 +225,7 @@
|
||||
"type": "victoriametrics-datasource",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"description": "Shows if the last configuration update was successful. \"Not Ok\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"description": "Shows if the last configuration update was successful. \"Not OK\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
@@ -234,7 +234,7 @@
|
||||
"0": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Ok"
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
@@ -245,11 +245,21 @@
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 1,
|
||||
"text": "Not Ok"
|
||||
"text": "Not OK"
|
||||
},
|
||||
"to": 999999
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"index": 2,
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
@@ -296,7 +306,7 @@
|
||||
"uid": "$ds"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "count(vmauth_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 ) or 0",
|
||||
"expr": "count(vmauth_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 )",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
|
||||
@@ -577,7 +577,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"expr": "sum(vm_persistentqueue_bytes_pending{job=~\"$job\", instance=~\"$instance\"})",
|
||||
"expr": "sum(vmagent_remotewrite_pending_data_bytes{job=~\"$job\", instance=~\"$instance\"})",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
|
||||
@@ -142,7 +142,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"description": "Shows if the last configuration update was successful. \"Not Ok\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"description": "Shows if the last configuration update was successful. \"Not OK\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
@@ -151,7 +151,7 @@
|
||||
"0": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Ok"
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
@@ -162,11 +162,21 @@
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 1,
|
||||
"text": "Not Ok"
|
||||
"text": "Not OK"
|
||||
},
|
||||
"to": 999999
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"index": 2,
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
@@ -213,7 +223,7 @@
|
||||
"uid": "$ds"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "count(vmalert_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 ) or 0",
|
||||
"expr": "count(vmalert_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 )",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
@@ -358,6 +368,7 @@
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"min": 0,
|
||||
"noValue": "0",
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
@@ -406,7 +417,7 @@
|
||||
"uid": "$ds"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "(sum(increase(vmalert_alerting_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval])) or vector(0)) + \n(sum(increase(vmalert_recording_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval])) or vector(0))",
|
||||
"expr": "sum(increase(vmalert_alerting_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval])) + \nsum(increase(vmalert_recording_rules_errors_total{job=~\"$job\", instance=~\"$instance\", group=~\"$group\", file=~\"$file\"}[$__rate_interval]))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
@@ -425,6 +436,7 @@
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"min": 0,
|
||||
"noValue": "0",
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
@@ -474,7 +486,7 @@
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "count(vmalert_recording_rules_last_evaluation_samples{job=~\"$job\", instance=~\"$instance\"} < 1) or 0",
|
||||
"expr": "count(vmalert_recording_rules_last_evaluation_samples{job=~\"$job\", instance=~\"$instance\"} < 1)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"range": true,
|
||||
|
||||
@@ -224,7 +224,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds"
|
||||
},
|
||||
"description": "Shows if the last configuration update was successful. \"Not Ok\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"description": "Shows if the last configuration update was successful. \"Not OK\" means there was an unsuccessful attempt to update the configuration due to some error. Check the log for details.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
@@ -233,7 +233,7 @@
|
||||
"0": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Ok"
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
@@ -244,11 +244,21 @@
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 1,
|
||||
"text": "Not Ok"
|
||||
"text": "Not OK"
|
||||
},
|
||||
"to": 999999
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"index": 2,
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
@@ -295,7 +305,7 @@
|
||||
"uid": "$ds"
|
||||
},
|
||||
"exemplar": false,
|
||||
"expr": "count(vmauth_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 ) or 0",
|
||||
"expr": "count(vmauth_config_last_reload_successful{job=~\"$job\", instance=~\"$instance\"} < 1 )",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
|
||||
@@ -66,6 +66,12 @@ package-via-docker: package-base
|
||||
$(DOCKER_BUILD) \
|
||||
--build-arg src_binary=$(APP_NAME)$(APP_SUFFIX)-prod \
|
||||
--build-arg base_image=$(BASE_IMAGE) \
|
||||
--label "org.opencontainers.image.source=https://github.com/VictoriaMetrics/VictoriaMetrics" \
|
||||
--label "org.opencontainers.image.documentation=https://docs.victoriametrics.com/" \
|
||||
--label "org.opencontainers.image.title=$(APP_NAME)" \
|
||||
--label "org.opencontainers.image.vendor=VictoriaMetrics" \
|
||||
--label "org.opencontainers.image.version=$(PKG_TAG)" \
|
||||
--label "org.opencontainers.image.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")" \
|
||||
--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(APP_SUFFIX)$(RACE) \
|
||||
-f app/$(APP_NAME)/deployment/Dockerfile bin)
|
||||
|
||||
@@ -80,6 +86,12 @@ publish-via-docker:
|
||||
--build-arg certs_image=$(CERTS_IMAGE) \
|
||||
--build-arg root_image=$(ROOT_IMAGE) \
|
||||
--build-arg APP_NAME=$(APP_NAME) \
|
||||
--label "org.opencontainers.image.source=https://github.com/VictoriaMetrics/VictoriaMetrics" \
|
||||
--label "org.opencontainers.image.documentation=https://docs.victoriametrics.com/" \
|
||||
--label "org.opencontainers.image.title=$(APP_NAME)" \
|
||||
--label "org.opencontainers.image.vendor=VictoriaMetrics" \
|
||||
--label "org.opencontainers.image.version=$(PKG_TAG)" \
|
||||
--label "org.opencontainers.image.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")" \
|
||||
--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
|
||||
--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(LATEST_TAG)$(RACE) \
|
||||
-o type=image \
|
||||
@@ -92,6 +104,12 @@ publish-via-docker:
|
||||
--build-arg certs_image=$(CERTS_IMAGE) \
|
||||
--build-arg root_image=$(ROOT_IMAGE_SCRATCH) \
|
||||
--build-arg APP_NAME=$(APP_NAME) \
|
||||
--label "org.opencontainers.image.source=https://github.com/VictoriaMetrics/VictoriaMetrics" \
|
||||
--label "org.opencontainers.image.documentation=https://docs.victoriametrics.com/" \
|
||||
--label "org.opencontainers.image.title=$(APP_NAME)" \
|
||||
--label "org.opencontainers.image.vendor=VictoriaMetrics" \
|
||||
--label "org.opencontainers.image.version=$(PKG_TAG)" \
|
||||
--label "org.opencontainers.image.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")" \
|
||||
--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE)-scratch \
|
||||
--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(LATEST_TAG)$(RACE)-scratch \
|
||||
-o type=image \
|
||||
|
||||
@@ -720,8 +720,8 @@ Some workloads may need fine-grained resource usage limits. In these cases the f
|
||||
This means that the maximum memory usage and CPU usage a single query can use at `vmstorage` is proportional to `-search.maxUniqueTimeseries`.
|
||||
By default, `vmstorage` calculates this limit automatically based on the available memory and the maximum number of concurrent read requests (see `-search.maxConcurrentRequests`).
|
||||
The calculated limit will be printed during process start-up logs and exposed as `vm_search_max_unique_timeseries` metric.
|
||||
- `-search.maxUniqueTimeseries` at `vmselect` adjusts the limit with the same name at `vmstorage`. The vmstorage limit can be adjusted
|
||||
only to **lower value** and can't exceed it. By default, vmselect doesn't apply limit adjustments.
|
||||
- `-search.maxUniqueTimeseries` at `vmselect` adjusts the limit with the same name at `vmstorage`. The limit cannot exceed the
|
||||
value set in vmstorage if the `-search.maxUniqueTimeseries` flag is explicitly defined there. By default, vmselect doesn't apply limit adjustments.
|
||||
- `-search.maxQueryDuration` at `vmselect` limits the duration of a single query. If the query takes longer than the given duration, then it is canceled.
|
||||
This allows saving CPU and RAM at `vmselect` and `vmstorage` when executing unexpectedly heavy queries.
|
||||
The limit can be overridden to a smaller value by passing `timeout` GET parameter.
|
||||
@@ -1576,6 +1576,9 @@ Below is the output for `/path/to/vmselect -help`:
|
||||
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)
|
||||
-search.logSlowQueryDuration duration
|
||||
Log queries with execution time exceeding this value. Zero disables slow query logging. See also -search.logQueryMemoryUsage (default 5s)
|
||||
-search.maxBinaryOpPushdownLabelValues int
|
||||
The maximum number of values for a label in the first expression that can be extracted as a common label filter and pushed down to the second expression in a binary operation.
|
||||
A larger value makes the pushed-down filter more complex but fewer time series will be returned. This flag is useful when selective label contains numerous values, for example `instance`, and storage resources are abundant. (default 100)
|
||||
-search.maxConcurrentRequests int
|
||||
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. See also -search.maxQueueDuration and -search.maxMemoryPerQuery (default 16)
|
||||
-search.maxDeleteSeries int
|
||||
@@ -1633,7 +1636,7 @@ Below is the output for `/path/to/vmselect -help`:
|
||||
-search.maxTagValueSuffixesPerSearch int
|
||||
The maximum number of tag value suffixes returned from /metrics/find (default 100000)
|
||||
-search.maxUniqueTimeseries int
|
||||
The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage. The limit can't exceed the corresponding -search.maxUniqueTimeseries limit on vmstorage, it can be only set to lower values. (default 0)
|
||||
The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage. The limit can't exceed the corresponding value set in vmstorage if the `-search.maxUniqueTimeseries` flag is explicitly defined there. (default 0)
|
||||
-search.maxWorkersPerQuery int
|
||||
The maximum number of CPU cores a single query can use. The default value should work good for most cases. The flag can be set to lower values for improving performance of big number of concurrently executed queries. The flag can be set to bigger values for improving performance of heavy queries, which scan big number of time series (>10K) and/or big number of samples (>100M). There is no sense in setting this flag to values bigger than the number of CPU cores available on the system (default 16)
|
||||
-search.minStalenessInterval duration
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
||||
[](https://hub.docker.com/r/victoriametrics/victoria-metrics)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
|
||||
[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
|
||||
See [case studies for VictoriaMetrics](https://docs.victoriametrics.com/casestudies/).
|
||||
@@ -226,27 +228,35 @@ and then install it as a service according to the following guide:
|
||||
|
||||
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3781) for more details.
|
||||
|
||||
## Playgrounds
|
||||
|
||||
VictoriaMetrics has the following publicly available resources:
|
||||
1. [https://play.victoriametrics.com/](https://play.victoriametrics.com/) - [VMUI](#vmui) of VictoriaMetrics cluster installation.
|
||||
It is available for testing the query engine, relabeling playground, other tools and pages provided by VMUI.
|
||||
1. [https://play-grafana.victoriametrics.com/](https://play-grafana.victoriametrics.com/) - Grafana configured with many
|
||||
typical dashboards using VictoriaMetrics and VictoriaLogs as datasource. It contains VictoriaMetrics cluster dashboard with
|
||||
   3 cluster installations for the recent OS and LTS versions running under the constant benchmark.
|
||||
1. [https://play-vmlogs.victoriametrics.com/](https://play-vmlogs.victoriametrics.com/) - [VMUI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui) of VictoriaLogs installation.
|
||||
It is available for testing the query engine on demo logs set.
|
||||
|
||||
Additionally, we provide a [docker-compose environment](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker#docker-compose-environment-for-victoriametrics)
|
||||
for VictoriaMetrics and VictoriaLogs components. They are already configured, provisioned and interconnected.
|
||||
It can be used as an example for a [quick start](https://docs.victoriametrics.com/quick-start/).
|
||||
|
||||
## Prometheus setup
|
||||
|
||||
Add the following lines to Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`) in order to send data to VictoriaMetrics:
|
||||
|
||||
|
||||
```yaml
|
||||
remote_write:
|
||||
- url: http://<victoriametrics-addr>:8428/api/v1/write
|
||||
```
|
||||
|
||||
|
||||
Substitute `<victoriametrics-addr>` with hostname or IP address of VictoriaMetrics.
|
||||
Then apply new config via the following command:
|
||||
|
||||
|
||||
```sh
|
||||
kill -HUP `pidof prometheus`
|
||||
```
|
||||
|
||||
|
||||
Prometheus writes incoming data to local storage and replicates it to remote storage in parallel.
|
||||
This means that data remains available in local storage for `--storage.tsdb.retention.time` duration
|
||||
even if remote storage is unavailable.
|
||||
@@ -265,8 +275,6 @@ The label name can be arbitrary - `datacenter` is just an example. The label val
|
||||
across Prometheus instances, so time series could be filtered and grouped by this label.
|
||||
|
||||
For highly loaded Prometheus instances (200k+ samples per second) the following tuning may be applied:
|
||||
|
||||
|
||||
```yaml
|
||||
remote_write:
|
||||
- url: http://<victoriametrics-addr>:8428/api/v1/write
|
||||
@@ -276,7 +284,6 @@ remote_write:
|
||||
max_shards: 30
|
||||
```
|
||||
|
||||
|
||||
Using remote write increases memory usage for Prometheus by up to ~25%. If you are experiencing issues with
|
||||
too high memory consumption of Prometheus, then try to lower `max_samples_per_send` and `capacity` params.
|
||||
Keep in mind that these two params are tightly connected.
|
||||
@@ -293,7 +300,6 @@ which can be used as faster and less resource-hungry alternative to Prometheus.
|
||||
|
||||
Create [Prometheus datasource](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure-prometheus-data-source/)
|
||||
in Grafana with the following url:
|
||||
|
||||
```url
|
||||
http://<victoriametrics-addr>:8428
|
||||
```
|
||||
@@ -393,7 +399,7 @@ can be loaded in VMUI via `Query Analyzer` tool.
|
||||
|
||||
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
|
||||
|
||||
## Top queries
|
||||
### Top queries
|
||||
|
||||
[VMUI](#vmui) provides `top queries` tab, which can help determine the following query types:
|
||||
|
||||
@@ -403,7 +409,7 @@ See the [example VMUI at VictoriaMetrics playground](https://play.victoriametric
|
||||
|
||||
This information is obtained from the `/api/v1/status/top_queries` HTTP endpoint.
|
||||
|
||||
## Active queries
|
||||
### Active queries
|
||||
|
||||
[VMUI](#vmui) provides `active queries` tab, which shows currently executed queries.
|
||||
It provides the following information per each query:
|
||||
@@ -414,7 +420,7 @@ It provides the following information per each query:
|
||||
|
||||
This information is obtained from the `/api/v1/status/active_queries` HTTP endpoint.
|
||||
|
||||
## Metrics explorer
|
||||
### Metrics explorer
|
||||
|
||||
[VMUI](#vmui) provides an ability to explore metrics exported by a particular `job` / `instance` in the following way:
|
||||
|
||||
@@ -426,7 +432,7 @@ This information is obtained from the `/api/v1/status/active_queries` HTTP endpo
|
||||
|
||||
It is possible to change the selected time range for the graphs in the top right corner.
|
||||
|
||||
## Cardinality explorer
|
||||
### Cardinality explorer
|
||||
|
||||
VictoriaMetrics provides an ability to explore time series cardinality at `Explore cardinality` tab in [vmui](#vmui) in the following ways:
|
||||
|
||||
@@ -448,7 +454,7 @@ Cardinality explorer is built on top of [/api/v1/status/tsdb](#tsdb-stats).
|
||||
See [cardinality explorer playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/cardinality).
|
||||
See the example of using the cardinality explorer [here](https://victoriametrics.com/blog/cardinality-explorer/).
|
||||
|
||||
## Cardinality explorer statistic inaccuracy
|
||||
### Cardinality explorer statistic inaccuracy
|
||||
|
||||
In [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/cluster-victoriametrics/) each vmstorage tracks the stored time series individually.
|
||||
vmselect requests stats via [/api/v1/status/tsdb](#tsdb-stats) API from each vmstorage node and merges the results by summing per-series stats.
|
||||
@@ -2065,6 +2071,10 @@ Retention filters configuration can be tested in enterprise version of vmui on t
|
||||
It is safe updating `-retentionFilter` during VictoriaMetrics restarts - the updated retention filters are applied eventually
|
||||
to historical data.
|
||||
|
||||
It's expected that resource usage will temporarily increase when `-retentionFilter` is applied.
|
||||
This is because additional operations are required to read the data, filter and apply retention to partitions,
|
||||
which will cost extra CPU and memory.
|
||||
|
||||
See [how to configure multiple retentions in VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#retention-filters).
|
||||
|
||||
See also [downsampling](#downsampling).
|
||||
@@ -2121,6 +2131,10 @@ or [recording rules in vmalert](https://docs.victoriametrics.com/vmalert/#rules)
|
||||
Downsampling is performed during [background merges](https://docs.victoriametrics.com/#storage).
|
||||
It cannot be performed if there is not enough of free disk space or if vmstorage is in [read-only mode](https://docs.victoriametrics.com/cluster-victoriametrics/#readonly-mode).
|
||||
|
||||
It's expected that resource usage will temporarily increase when **downsampling with filters** is applied.
|
||||
This is because additional operations are required to read historical data, downsample, and persist it back,
|
||||
which will cost extra CPU and memory.
|
||||
|
||||
Please, note that intervals of `-downsampling.period` must be multiples of each other.
|
||||
In case [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled, value of `-dedup.minScrapeInterval` command-line flag must also
|
||||
be multiple of `-downsampling.period` intervals. This is required to ensure consistency of deduplication and downsampling results.
|
||||
|
||||
@@ -17,6 +17,7 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
|
||||
## tip
|
||||
|
||||
* FEATURE: [Datadog data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/datadog-agent/): added `-datadog.streamFields` and `-datadog.ignoreFields` flags to configure default stream and ignore fields. Useful for Datadog serverless plugin, which doesn't allow to provide extra headers or query args.
|
||||
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add support for autocomplete in LogsQL queries. This feature provides suggestions for field names, field values, and pipe names.
|
||||
|
||||
* BUGFIX: [Datadog data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/datadog-agent/): accepts `message` field as both string and object type to fix compatibility with Datadog serverless extension, which sends logs data in format, which is not documented. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7761).
|
||||
* BUGFIX: [vlinsert](https://docs.victoriametrics.com/victorialogs/): order of VL-Msg-Field values now defines a priority of these fields and it's now obvious for a user which field will be picked if multiple msg_field values exist in a row.
|
||||
@@ -60,7 +61,7 @@ Released at 2024-12-08
|
||||
|
||||
* FEATURE: add [`collapse_nums` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#collapse_nums-pipe), which replaces all the decimal and hexadecimal numbers with `<N>` in the given [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model). This can be useful for locating the most frequently seen log message patterns if log messages differ only by decimal and hexadecimal numbers (this is very frequent case). For example, the following query returns top 5 log message patterns seen over the last hour: `_time:1h | collapse_nums | top 5 by (_msg)`.
|
||||
* FEATURE: improve performance for [`stream_context` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stream_context-pipe) over [log streams](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) with big number of logs (millions and more). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7637).
|
||||
* FEATURE: [`stream_context` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stream_context-pipe) allow changing the time window for search for surrounding logs via `time_window` option. For example, the following query searches for surrouning [log stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) logs on the one week window: `_time:5m error | stream_context before 10 time_window 1w`. Thanks to @worker24h for [the idea](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7637#issuecomment-2523313740).
|
||||
* FEATURE: [`stream_context` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#stream_context-pipe) allow changing the time window for search for surrounding logs via `time_window` option. For example, the following query searches for surrounding [log stream](https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields) logs on the one week window: `_time:5m error | stream_context before 10 time_window 1w`. Thanks to @worker24h for [the idea](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7637#issuecomment-2523313740).
|
||||
|
||||
## [v1.2.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.2.0-victorialogs)
|
||||
|
||||
|
||||
@@ -318,7 +318,7 @@ See also:
|
||||
|
||||
### Querying facets
|
||||
|
||||
VictoriaLogs provides `/select/logsql/facets?query=<query>&start=<start>&end=<end>` HTTP endpoint, which returns the most fequent values
|
||||
VictoriaLogs provides `/select/logsql/facets?query=<query>&start=<start>&end=<end>` HTTP endpoint, which returns the most frequent values
|
||||
per each [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) seen in the logs returned
|
||||
by the given [`<query>`](https://docs.victoriametrics.com/victorialogs/logsql/) on the given `[<start> ... <end>]` time range.
|
||||
|
||||
|
||||
@@ -80,7 +80,7 @@ Please find the example of provisioning Grafana instance with VictoriaLogs datas
|
||||
grafana:
|
||||
image: grafana/grafana:11.0.0
|
||||
environment:
|
||||
- GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.12.0/victoriametrics-logs-datasource-v0.12.0.zip;victoriametrics-logs-datasource
|
||||
- GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.1/victoriametrics-logs-datasource-v0.13.1.zip;victoriametrics-logs-datasource
|
||||
- GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-logs-datasource
|
||||
ports:
|
||||
- 3000:3000/tcp
|
||||
@@ -108,7 +108,7 @@ Option 1. Using Grafana provisioning:
|
||||
|
||||
``` yaml
|
||||
env:
|
||||
GF_INSTALL_PLUGINS: "https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.12.0/victoriametrics-logs-datasource-v0.12.0.zip;victoriametrics-logs-datasource"
|
||||
GF_INSTALL_PLUGINS: "https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.1/victoriametrics-logs-datasource-v0.13.1.zip;victoriametrics-logs-datasource"
|
||||
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: "victoriametrics-logs-datasource"
|
||||
```
|
||||
|
||||
@@ -116,7 +116,7 @@ Option 2. Using Grafana plugins section in `values.yaml`:
|
||||
|
||||
``` yaml
|
||||
plugins:
|
||||
- https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.12.0/victoriametrics-logs-datasource-v0.12.0.zip;victoriametrics-logs-datasource
|
||||
- https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.1/victoriametrics-logs-datasource-v0.13.1.zip;victoriametrics-logs-datasource
|
||||
```
|
||||
|
||||
Option 3. Using init container:
|
||||
@@ -215,7 +215,7 @@ This example uses init container to download and install plugin.
|
||||
|
||||
1. To download plugin build and move contents into Grafana plugins directory:
|
||||
|
||||
``` bash
|
||||
``` sh
|
||||
ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/victorialogs-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
|
||||
curl -L https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/$ver/victoriametrics-logs-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vl-plugin.tar.gz
|
||||
tar -xf /var/lib/grafana/plugins/vl-plugin.tar.gz -C /var/lib/grafana/plugins/
|
||||
@@ -296,12 +296,9 @@ This command will build frontend part and backend part or the plugin and locate
|
||||
## How to make new release
|
||||
|
||||
1. Make sure there are no open security issues.
|
||||
1. Create a release tag:
|
||||
* `git tag -s v1.xx.y` in `master` branch
|
||||
1. Run `TAG=v1.xx.y make build-release` to build and package binaries in `*.tar.gz` release archives.
|
||||
1. Run `git push origin v1.xx.y` to push the tag created `v1.xx.y` at step 2 to public GitHub repository
|
||||
1. Go to <https://github.com/VictoriaMetrics/victorialogs-datasource/releases> and verify that draft release with the name `TAG` has been created and this release contains all the needed binaries and checksums.
|
||||
1. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
||||
1. Change version in package.json in a `main` branch
|
||||
1. Trigger [release pipeline](https://github.com/VictoriaMetrics/victorialogs-datasource/actions/workflows/release.yaml).
|
||||
1. Go to [releases page](https://github.com/VictoriaMetrics/victorialogs-datasource/releases) once pipeline is finished and verify release with the name `TAG` has been created and has all the needed binaries and checksums attached.
|
||||
|
||||
## Notes
|
||||
|
||||
|
||||
@@ -23,9 +23,18 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
|
||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add `markdown` support for comments during data export. [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7828).
|
||||
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/) and [Single-node VictoriaMetrics](https://docs.victoriametrics.com/): added `min` and `max` metrics for Datadog Sketches API metrics, changed `_` metric name separator to `.` if metrics are not sanitized for consistency.
|
||||
* FEATURE: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/): support `-maxIngestionRate` cmd-line flag to ratelimit samples/sec ingested. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7377) for details.
|
||||
* FEATURE: [vminsert](https://docs.victoriametrics.com/vminsert/): Storage nodes defined in `-storageNode` are now sorted, ensuring that varying node orders across different vminsert instances do not result in inconsistent replication.
|
||||
* FEATURE: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): improve query performance on systems with high number of CPU cores. See [this PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7416) for details.
|
||||
* FEATURE: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): add command-line flag `-search.maxBinaryOpPushdownLabelValues` to allow using labels with more candidate values as push down filter in binary operation. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7243). Thanks to @tydhot for implementation.
|
||||
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): properly set `host` field at debug information formatted with `dump_request_on_errors: true` setting.
|
||||
* BUGFIX: [dashboards](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards): consistently use `vmagent_remotewrite_pending_data_bytes` on vmagent dashboard to represent persistent queue size.
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert/): fix the auto-generated metrics `ALERTS` and `ALERTS_FOR_STATE` for alerting rules. Previously, metrics might have incorrect labels and affect the restore process. See this [issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7796).
|
||||
* BUGFIX: [vmalert](https://docs.victoriametrics.com/victorialogs/vmalert/): do not append tenant info to VictoriaLogs datasource request path in [clusterMode](https://docs.victoriametrics.com/vmalert/#multitenancy). See [this doc](https://docs.victoriametrics.com/victorialogs/vmalert/#how-to-use-multitenancy-in-rules) for how to use multitenancy in VictoriaLogs.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): properly set `host` field at debug information formatted with `dump_request_on_errors: true` setting.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): properly handle discovery for ipv6 addresses. Thanks to @badie for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7955).
|
||||
* BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl/): fix support for migrating influx series without any tag. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7921). Thanks to @bitbidu for reporting.
|
||||
* BUGFIX: [vminsert](https://docs.victoriametrics.com/vminsert/): storage nodes defined in `-storageNode` are now sorted, ensuring that varying node orders across different vminsert instances do not result in inconsistent replication.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) and `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly ingest `influx` line protocol metrics with empty tags. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7933) for details.
|
||||
* BUGFIX: [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): allow to override the default unique time series limit in vmstorage with command-line flags like `-search.maxUniqueTimeseries`, `-search.maxLabelsAPISeries`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7852).
|
||||
|
||||
## [v1.108.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.108.1)
|
||||
|
||||
@@ -58,7 +67,6 @@ Released at 2024-12-13
|
||||
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth/): allow to start `vmauth` with empty configuration file. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6467) for details.
|
||||
* FEATURE: [vmalert-tool](https://docs.victoriametrics.com/vmalert-tool/): support debug mode for alerting rule. See [this doc](https://docs.victoriametrics.com/vmalert-tool/#debug-mode).
|
||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): update error messages for Clipboard API issues with docs links. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7677).
|
||||
* FEATURE: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/): support `-maxIngestionRate` flag to ratelimit samples/sec ingested
|
||||
|
||||
* BUGFIX: all VictoriaMetrics components: consistently deduplicate values with stale markers within deduplication interval. Previously, deduplication could randomly prefer stale marker or value on the deduplication interval. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7674) for details. Thanks to @tIGO for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7675).
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent/) and [Single-node VictoriaMetrics](https://docs.victoriametrics.com/): add missing common service labels for docker swarm service discovery when `role` is set to `tasks`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7800).
|
||||
@@ -750,7 +758,7 @@ The v1.97.x line will be supported for at least 12 months since [v1.97.0](https:
|
||||
* SECURITY: upgrade Go builder from Go1.23.3 to Go1.23.4. See the list of issues addressed in [Go1.23.4](https://github.com/golang/go/issues?q=milestone%3AGo1.23.4+label%3ACherryPickApproved).
|
||||
|
||||
* BUGFIX: all VictoriaMetrics components: consistently deduplicate values with stale markers within deduplication interval. Previously, deduplication could randomly prefer stale marker or value on the deduplication interval. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7674) for details. Thanks to @tIGO for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7675).
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly parse the query rolloup window specified in milliseconds. Previous implementation could lead to precision issues resulting in the parsed window being smaller by 1ms. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5796) for details.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly parse the query rollup window specified in milliseconds. Previous implementation could lead to precision issues resulting in the parsed window being smaller by 1ms. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5796) for details.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): fix requests routing by host when using `src_hosts`. Previously, request header could be ignored.
|
||||
* BUGFIX: [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager/): prevent backup scheduler from scheduling two backups immediately one after another.
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): prevent accordion from collapsing when selecting text in headers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7742).
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
[](https://artifacthub.io/packages/search?repo=victoriametrics&verified_publisher=true)
|
||||
[](https://github.com/VictoriaMetrics/helm-charts/blob/master/LICENSE)
|
||||

|
||||
[](https://slack.victoriametrics.com/)
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
This repository contains helm charts for VictoriaMetrics and VictoriaLogs.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
## Next release
|
||||
|
||||
- updated common dependency 0.0.34 -> 0.0.35
|
||||
- updated common dependency 0.0.34 -> 0.0.36
|
||||
- Exclude markdown files from package
|
||||
|
||||
## 0.8.11
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||

|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-logs-single)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Logs Single version - high-performance, cost-effective and scalable logs storage
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
## Next release
|
||||
|
||||
- Exclude markdown files from package
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.15.2
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-agent)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Agent - collects metrics from various sources and stores them to VictoriaMetrics
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
## Next release
|
||||
|
||||
- Exclude markdown files from package
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.13.4
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-alert)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Alert - executes a list of given MetricsQL expressions (rules) and sends alerts to Alert Manager.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
## Next release
|
||||
|
||||
- updated common dependency 0.0.34 -> 0.0.35
|
||||
- updated common dependency 0.0.34 -> 0.0.36
|
||||
- Exclude markdown files from package
|
||||
|
||||
## 1.6.11
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||

|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-anomaly)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
[](https://github.com/VictoriaMetrics/helm-charts/blob/master/LICENSE)
|
||||

|
||||

|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Anomaly Detection - a service that continuously scans Victoria Metrics time series and detects unexpected changes within data patterns in real-time.
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
## Next release
|
||||
|
||||
- Exclude markdown files from package
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.8.2
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-auth)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Auth - is a simple auth proxy and router for VictoriaMetrics.
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
## Next release
|
||||
|
||||
- Exclude markdown files from package
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.16.1
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-cluster)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Cluster version - high-performance, cost-effective and scalable TSDB, long-term remote storage for Prometheus
|
||||
|
||||
|
||||
@@ -2,7 +2,16 @@
|
||||
|
||||
## Next release
|
||||
|
||||
- TODO
|
||||
|
||||
## 0.0.36
|
||||
|
||||
**Release date:** 24 Dec 2024
|
||||
|
||||

|
||||
|
||||
- Exclude markdown files from package
|
||||
- Unset empty registry in `vm.image` template to fix global registry propagation
|
||||
|
||||
## 0.0.35
|
||||
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-common)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Common - contains shared templates for all Victoria Metrics helm charts
|
||||
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
## Next release
|
||||
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.7.0
|
||||
|
||||
**Release date:** 19 Dec 2024
|
||||
|
||||
 
|
||||
|
||||
- upgraded operator, it's required to [update CRDs manually](../victoriametrics-k8s-stack/#upgrade-guide)
|
||||
- bump version of VM components to [v1.108.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.108.1)
|
||||
- Exclude markdown files from package
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-distributed)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
A Helm chart for Running VMCluster on Multiple Availability Zones
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
## Next release
|
||||
|
||||
- Exclude markdown files from package
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.6.2
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-gateway)
|
||||
[](https://slack.victoriametrics.com/)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Gateway - Auth & Rate-Limiting proxy for Victoria Metrics
|
||||
|
||||
|
||||
@@ -2,6 +2,15 @@
|
||||
|
||||
- TODO
|
||||
|
||||
## 0.33.1
|
||||
|
||||
**Release date:** 24 Dec 2024
|
||||
|
||||
 
|
||||
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
- updates operator to [v0.51.2](https://github.com/VictoriaMetrics/operator/releases/tag/v0.51.2) version
|
||||
|
||||
## 0.33.0
|
||||
|
||||
**Release date:** 19 Dec 2024
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-k8s-stack)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Kubernetes monitoring on VictoriaMetrics stack. Includes VictoriaMetrics Operator, Grafana dashboards, ServiceScrapes and VMRules
|
||||
|
||||
|
||||
@@ -2,6 +2,16 @@
|
||||
|
||||
- TODO
|
||||
|
||||
## 0.40.2
|
||||
|
||||
**Release date:** 24 Dec 2024
|
||||
|
||||
 
|
||||
|
||||
- add option to enable hostNetwork for custom CNI based deployments
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
- updates operator to [v0.51.2](https://github.com/VictoriaMetrics/operator/releases/tag/v0.51.2) version
|
||||
|
||||
## 0.40.1
|
||||
|
||||
**Release date:** 19 Dec 2024
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-operator)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Operator
|
||||
|
||||
@@ -221,6 +227,15 @@ extraVolumeMounts:
|
||||
|
||||
This configuration disables the automatic ServiceAccount token mount and mounts the token explicitly.
|
||||
|
||||
## Enable hostNetwork on operator
|
||||
|
||||
When running managed Kubernetes such as EKS with custom CNI solution like Cilium or Calico, EKS control plane cannot communicate with CNI's pod CIDR.
|
||||
In that scenario, we need to run the webhook service, i.e. the operator, with hostNetwork so that it can share the node's network namespace.
|
||||
|
||||
```yaml
|
||||
hostNetwork: true
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The following table lists the configurable parameters of the chart and their default values.
|
||||
@@ -559,6 +574,17 @@ requests:
|
||||
</pre>
|
||||
</td>
|
||||
<td><p>Image pull secrets, that can be shared across multiple helm charts</p>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>hostNetwork</td>
|
||||
<td>bool</td>
|
||||
<td><pre class="helm-vars-default-value language-yaml" lang="">
|
||||
<code class="language-yaml">false
|
||||
</code>
|
||||
</pre>
|
||||
</td>
|
||||
<td><p>Enable hostNetwork on operator deployment</p>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
|
||||
|
||||
 
|
||||
|
||||
A subchart stores victoriametrics operator CRDs.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
## Next release
|
||||
|
||||
- Exclude markdown files from package
|
||||
- updated common dependency 0.0.35 -> 0.0.36
|
||||
|
||||
## 0.13.3
|
||||
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
 
|
||||
[](https://artifacthub.io/packages/helm/victoriametrics/victoria-metrics-single)
|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Victoria Metrics Single version - high-performance, cost-effective and scalable TSDB, long-term remote storage for Prometheus
|
||||
|
||||
|
||||
@@ -13,6 +13,26 @@ aliases:
|
||||
|
||||
## tip
|
||||
|
||||
## [v0.51.3](https://github.com/VictoriaMetrics/operator/releases/tag/v0.51.3)
|
||||
|
||||
**Release date:** 8 Jan 2025
|
||||
|
||||

|
||||

|
||||
|
||||
* BUGFIX: [vmagent](https://docs.victoriametrics.com/operator/resources/vmagent/): properly route headless service traffic to vmagent `pods` with `statefulMode` and `shardCount` defined.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/operator/resources/vmsingle/): properly add `volumeMount` for external `storageDataPath` `volume`.
|
||||
* BUGFIX: [vmcluster](https://docs.victoriametrics.com/operator/resources/vmcluster/): properly mount `serviceAccount` for `requestsLoadBalancer` `Deployment`. See [this issue](https://github.com/VictoriaMetrics/operator/issues/1210) for details.
|
||||
|
||||
## [v0.51.2](https://github.com/VictoriaMetrics/operator/releases/tag/v0.51.2)
|
||||
|
||||
**Release date:** 23 Dec 2024
|
||||
|
||||

|
||||

|
||||
|
||||
* SECURITY: upgrade Go builder from Go1.23 to Go1.23.4. See the list of issues addressed in [Go1.23.2](https://github.com/golang/go/issues?q=milestone%3AGo1.23.2+label%3ACherryPickApproved) and [Go1.23.3](https://github.com/golang/go/issues?q=milestone%3AGo1.23.3+label%3ACherryPickApproved).
|
||||
|
||||
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): keep `resourceVersion` and other significant `metadata` fields during `update` objects requests. See [this issue](https://github.com/VictoriaMetrics/operator/issues/1200) for details.
|
||||
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): properly update `updateStatus: failed` field. It fixes excessive errors logging and amount of created Kubernetes `Events`.
|
||||
* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): Properly rollback incorrect object configuration. Previously, the diff for objects could be calculated incorrectly and the update request could be skipped.
|
||||
|
||||
@@ -374,7 +374,7 @@ _Appears in:_
|
||||
| `host_aliases` | HostAliasesUnderScore provides mapping for ip and hostname,<br />that would be propagated to pod,<br />cannot be used with HostNetwork.<br />Has Priority over hostAliases field | _[HostAlias](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#hostalias-v1-core) array_ | false |
|
||||
| `imagePullSecrets` | ImagePullSecrets An optional list of references to secrets in the same namespace<br />to use for pulling images from registries<br />see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#localobjectreference-v1-core) array_ | false |
|
||||
| `initContainers` | InitContainers allows adding initContainers to the pod definition.<br />Any errors during the execution of an initContainer will lead to a restart of the Pod.<br />More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ | _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#container-v1-core) array_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `priorityClassName` | PriorityClassName class assigned to the Pods | _string_ | false |
|
||||
@@ -2045,7 +2045,7 @@ _Appears in:_
|
||||
| --- | --- | --- | --- |
|
||||
| `conditions` | Known .status.conditions.type are: "Available", "Progressing", and "Degraded" | _[Condition](#condition) array_ | true |
|
||||
| `observedGeneration` | ObservedGeneration defines current generation picked by operator for the<br />reconcile | _integer_ | true |
|
||||
| `reason` | Reason defines human readadble error reason | _string_ | true |
|
||||
| `reason` | Reason defines human readable error reason | _string_ | true |
|
||||
| `updateStatus` | UpdateStatus defines a status for update rollout | _[UpdateStatus](#updatestatus)_ | true |
|
||||
|
||||
|
||||
@@ -2526,7 +2526,7 @@ _Appears in:_
|
||||
| `logLevel` | LogLevel for VictoriaLogs to be configured with. | _string_ | false |
|
||||
| `logNewStreams` | LogNewStreams Whether to log creation of new streams; this can be useful for debugging of high cardinality issues with log streams; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields | _boolean_ | true |
|
||||
| `managedMetadata` | ManagedMetadata defines metadata that will be added to the all objects<br />created by operator for the given CustomResource | _[ManagedObjectsMetadata](#managedobjectsmetadata)_ | true |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podMetadata` | PodMetadata configures Labels and Annotations which are propagated to the VLogs pods. | _[EmbeddedObjectMetadata](#embeddedobjectmetadata)_ | false |
|
||||
@@ -2694,7 +2694,7 @@ _Appears in:_
|
||||
| `logLevel` | LogLevel for VMAgent to be configured with.<br />INFO, WARN, ERROR, FATAL, PANIC | _string_ | false |
|
||||
| `managedMetadata` | ManagedMetadata defines metadata that will be added to the all objects<br />created by operator for the given CustomResource | _[ManagedObjectsMetadata](#managedobjectsmetadata)_ | true |
|
||||
| `maxScrapeInterval` | MaxScrapeInterval allows limiting maximum scrape interval for VMServiceScrape, VMPodScrape and other scrapes<br />If interval is higher than defined limit, `maxScrapeInterval` will be used. | _string_ | true |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minScrapeInterval` | MinScrapeInterval allows limiting minimal scrape interval for VMServiceScrape, VMPodScrape and other scrapes<br />If interval is lower than defined limit, `minScrapeInterval` will be used. | _string_ | true |
|
||||
| `nodeScrapeNamespaceSelector` | NodeScrapeNamespaceSelector defines Namespaces to be selected for VMNodeScrape discovery.<br />Works in combination with Selector.<br />NamespaceSelector nil - only objects at VMAgent namespace.<br />Selector nil - only objects at NamespaceSelector namespaces.<br />If both nil - behaviour controlled by selectAllByDefault | _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#labelselector-v1-meta)_ | false |
|
||||
| `nodeScrapeRelabelTemplate` | NodeScrapeRelabelTemplate defines relabel config, that will be added to each VMNodeScrape.<br />it's useful for adding specific labels to all targets | _[RelabelConfig](#relabelconfig) array_ | false |
|
||||
@@ -2901,7 +2901,7 @@ _Appears in:_
|
||||
| `logFormat` | LogFormat for VMAlert to be configured with.<br />default or json | _string_ | false |
|
||||
| `logLevel` | LogLevel for VMAlert to be configured with. | _string_ | false |
|
||||
| `managedMetadata` | ManagedMetadata defines metadata that will be added to the all objects<br />created by operator for the given CustomResource | _[ManagedObjectsMetadata](#managedobjectsmetadata)_ | true |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `notifier` | Notifier prometheus alertmanager endpoint spec. Required at least one of notifier or notifiers when there are alerting rules. e.g. http://127.0.0.1:9093<br />If specified both notifier and notifiers, notifier will be added as last element to notifiers.<br />only one of notifier options could be chosen: notifierConfigRef or notifiers + notifier | _[VMAlertNotifierSpec](#vmalertnotifierspec)_ | false |
|
||||
| `notifierConfigRef` | NotifierConfigRef reference for secret with notifier configuration for vmalert<br />only one of notifier options could be chosen: notifierConfigRef or notifiers + notifier | _[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#secretkeyselector-v1-core)_ | false |
|
||||
@@ -3048,7 +3048,7 @@ _Appears in:_
|
||||
| `logFormat` | LogFormat for VMAlertmanager to be configured with. | _string_ | false |
|
||||
| `logLevel` | Log level for VMAlertmanager to be configured with. | _string_ | false |
|
||||
| `managedMetadata` | ManagedMetadata defines metadata that will be added to the all objects<br />created by operator for the given CustomResource | _[ManagedObjectsMetadata](#managedobjectsmetadata)_ | true |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podDisruptionBudget` | PodDisruptionBudget created by operator | _[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec)_ | false |
|
||||
@@ -3154,7 +3154,7 @@ _Appears in:_
|
||||
| `initContainers` | InitContainers allows adding initContainers to the pod definition.<br />Any errors during the execution of an initContainer will lead to a restart of the Pod.<br />More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ | _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#container-v1-core) array_ | false |
|
||||
| `logFormat` | LogFormat for vmauth<br />default or json | _string_ | false |
|
||||
| `logLevel` | LogLevel for vmauth container. | _string_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podDisruptionBudget` | PodDisruptionBudget created by operator | _[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec)_ | false |
|
||||
@@ -3225,7 +3225,7 @@ _Appears in:_
|
||||
| `logLevel` | LogLevel for victoria metrics single to be configured with. | _string_ | false |
|
||||
| `managedMetadata` | ManagedMetadata defines metadata that will be added to the all objects<br />created by operator for the given CustomResource | _[ManagedObjectsMetadata](#managedobjectsmetadata)_ | true |
|
||||
| `max_concurrent_requests` | MaxConcurrentRequests defines max concurrent requests per user<br />300 is default value for vmauth | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podDisruptionBudget` | PodDisruptionBudget created by operator | _[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec)_ | false |
|
||||
@@ -3411,7 +3411,7 @@ _Appears in:_
|
||||
| `insertPorts` | InsertPorts - additional listen ports for data ingestion. | _[InsertPorts](#insertports)_ | true |
|
||||
| `logFormat` | LogFormat for VMInsert to be configured with.<br />default or json | _string_ | false |
|
||||
| `logLevel` | LogLevel for VMInsert to be configured with. | _string_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podDisruptionBudget` | PodDisruptionBudget created by operator | _[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec)_ | false |
|
||||
@@ -3853,7 +3853,7 @@ _Appears in:_
|
||||
| `initContainers` | InitContainers allows adding initContainers to the pod definition.<br />Any errors during the execution of an initContainer will lead to a restart of the Pod.<br />More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ | _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#container-v1-core) array_ | false |
|
||||
| `logFormat` | LogFormat for VMSelect to be configured with.<br />default or json | _string_ | false |
|
||||
| `logLevel` | LogLevel for VMSelect to be configured with. | _string_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `persistentVolume` | Storage - add persistent volume for cacheMountPath<br />its useful for persistent cache<br />use storage instead of persistentVolume. | _[StorageSpec](#storagespec)_ | false |
|
||||
@@ -3988,7 +3988,7 @@ _Appears in:_
|
||||
| `logFormat` | LogFormat for VMSingle to be configured with. | _string_ | false |
|
||||
| `logLevel` | LogLevel for victoria metrics single to be configured with. | _string_ | false |
|
||||
| `managedMetadata` | ManagedMetadata defines metadata that will be added to the all objects<br />created by operator for the given CustomResource | _[ManagedObjectsMetadata](#managedobjectsmetadata)_ | true |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podMetadata` | PodMetadata configures Labels and Annotations which are propagated to the VMSingle pods. | _[EmbeddedObjectMetadata](#embeddedobjectmetadata)_ | false |
|
||||
@@ -4092,7 +4092,7 @@ _Appears in:_
|
||||
| `logLevel` | LogLevel for VMStorage to be configured with. | _string_ | false |
|
||||
| `maintenanceInsertNodeIDs` | MaintenanceInsertNodeIDs - excludes given node ids from insert requests routing, must contain pod suffixes - for pod-0, id will be 0 and etc.<br />lets say, you have pod-0, pod-1, pod-2, pod-3. to exclude pod-0 and pod-3 from insert routing, define nodeIDs: [0,3].<br />Useful at storage expanding, when you want to rebalance some data at cluster. | _integer array_ | false |
|
||||
| `maintenanceSelectNodeIDs` | MaintenanceSelectNodeIDs - excludes given node ids from select requests routing, must contain pod suffixes - for pod-0, id will be 0 and etc. | _integer array_ | true |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minim number os seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `minReadySeconds` | MinReadySeconds defines a minimum number of seconds to wait before starting update next pod<br />if previous in healthy state<br />Has no effect for VLogs and VMSingle | _integer_ | false |
|
||||
| `nodeSelector` | NodeSelector Define which Nodes the Pods are scheduled on. | _object (keys:string, values:string)_ | false |
|
||||
| `paused` | Paused If set to true all actions on the underlying managed objects are not<br />going to be performed, except for delete actions. | _boolean_ | false |
|
||||
| `podDisruptionBudget` | PodDisruptionBudget created by operator | _[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec)_ | false |
|
||||
|
||||
134
docs/operator/resources/vlogs.md
Normal file
134
docs/operator/resources/vlogs.md
Normal file
@@ -0,0 +1,134 @@
|
||||
---
|
||||
weight: 20
|
||||
title: VLogs
|
||||
menu:
|
||||
docs:
|
||||
identifier: operator-cr-vlogs
|
||||
parent: operator-cr
|
||||
weight: 20
|
||||
aliases:
|
||||
- /operator/resources/vlogs/
|
||||
- /operator/resources/vlogs/index.html
|
||||
---
|
||||
`VLogs` represents database for storing logs.
|
||||
The `VLogs` CRD declaratively defines a [single-node VictoriaLogs](https://docs.victoriametrics.com/victorialogs/)
|
||||
installation to run in a Kubernetes cluster.
|
||||
|
||||
For each `VLogs` resource, the Operator deploys a properly configured `Deployment` in the same namespace.
|
||||
The VLogs `Pod`s are configured to mount an empty dir or `PersistentVolumeClaimSpec` for storing data.
|
||||
Deployment update strategy set to [recreate](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#recreate-deployment).
|
||||
No more than one replica allowed.
|
||||
|
||||
For each `VLogs` resource, the Operator adds `Service` and `VMServiceScrape` in the same namespace prefixed with name from `VLogs.metadata.name`.
|
||||
|
||||
## Specification
|
||||
|
||||
You can see the full actual specification of the `VLogs` resource in the **[API docs -> VLogs](https://docs.victoriametrics.com/operator/api#vlogs)**.
|
||||
|
||||
If you can't find necessary field in the specification of the custom resource,
|
||||
see [Extra arguments section](./#extra-arguments).
|
||||
|
||||
Also, you can check out the [examples](#examples) section.
|
||||
|
||||
## High availability
|
||||
|
||||
`VLogs` doesn't support high availability. Consider using [`victorialogs-single chart`](https://docs.victoriametrics.com/helm/victorialogs-single/), where it's possible to configure replica count in statefulset mode for such purpose.
|
||||
|
||||
## Version management
|
||||
|
||||
To set `VLogs` version add `spec.image.tag` name from [releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
|
||||
|
||||
```yaml
|
||||
apiVersion: operator.victoriametrics.com/v1beta1
|
||||
kind: VLogs
|
||||
metadata:
|
||||
name: example-vlogs
|
||||
spec:
|
||||
image:
|
||||
repository: victoriametrics/victoria-logs
|
||||
tag: v1.4.0
|
||||
pullPolicy: Always
|
||||
# ...
|
||||
```
|
||||
|
||||
Also, you can specify `imagePullSecrets` if you are pulling images from private repo:
|
||||
|
||||
```yaml
|
||||
apiVersion: operator.victoriametrics.com/v1beta1
|
||||
kind: VLogs
|
||||
metadata:
|
||||
name: example-vlogs
|
||||
spec:
|
||||
image:
|
||||
repository: victoriametrics/victoria-logs
|
||||
tag: v1.4.0
|
||||
pullPolicy: Always
|
||||
imagePullSecrets:
|
||||
- name: my-repo-secret
|
||||
# ...
|
||||
```
|
||||
|
||||
## Resource management
|
||||
|
||||
You can specify resources for each `VLogs` resource in the `spec` section of the `VLogs` CRD.
|
||||
|
||||
```yaml
|
||||
apiVersion: operator.victoriametrics.com/v1beta1
|
||||
kind: VLogs
|
||||
metadata:
|
||||
name: resources-example
|
||||
spec:
|
||||
# ...
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Mi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "128Mi"
|
||||
cpu: "500m"
|
||||
# ...
|
||||
```
|
||||
|
||||
If these parameters are not specified, then,
|
||||
by default all `VLogs` pods have resource requests and limits from the default values of the following [operator parameters](https://docs.victoriametrics.com/operator/configuration):
|
||||
|
||||
- `VM_VLOGSDEFAULT_RESOURCE_LIMIT_MEM` - default memory limit for `VLogs` pods,
|
||||
- `VM_VLOGSDEFAULT_RESOURCE_LIMIT_CPU` - default CPU limit for `VLogs` pods,
|
||||
- `VM_VLOGSDEFAULT_RESOURCE_REQUEST_MEM` - default memory request for `VLogs` pods,
|
||||
- `VM_VLOGSDEFAULT_RESOURCE_REQUEST_CPU` - default CPU request for `VLogs` pods.
|
||||
|
||||
These default parameters will be used if:
|
||||
|
||||
- `VM_VLOGSDEFAULT_USEDEFAULTRESOURCES` is set to `true` (default value),
|
||||
- `VLogs` CR doesn't have `resources` field in `spec` section.
|
||||
|
||||
Field `resources` in `VLogs` spec has higher priority than operator parameters.
|
||||
|
||||
If you set `VM_VLOGSDEFAULT_USEDEFAULTRESOURCES` to `false` and don't specify `resources` in `VLogs` CRD,
|
||||
then `VLogs` pods will be created without resource requests and limits.
|
||||
|
||||
Also, you can specify requests without limits - in this case default values for limits will not be used.
|
||||
|
||||
## Examples
|
||||
|
||||
```yaml
|
||||
kind: VLogs
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
retentionPeriod: "12"
|
||||
removePvcAfterDelete: true
|
||||
storage:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
||||
resources:
|
||||
requests:
|
||||
memory: 500Mi
|
||||
cpu: 500m
|
||||
limits:
|
||||
memory: 10Gi
|
||||
cpu: 5
|
||||
```
|
||||
@@ -88,7 +88,7 @@ func getDedicatedServerLabels(cfg *apiConfig) ([]*promutils.Labels, error) {
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// getVPSDetails get properties of a dedicated server.
|
||||
// getDedicatedServerDetails get properties of a dedicated server.
|
||||
// Also see: https://eu.api.ovh.com/console/#/dedicated/server/%7BserviceName%7D~GET
|
||||
func getDedicatedServerDetails(cfg *apiConfig, dedicatedServerName string) (*dedicatedServer, error) {
|
||||
// get properties.
|
||||
|
||||
@@ -146,7 +146,7 @@ func ParseTimeAt(s string, currentTimestamp int64) (int64, error) {
|
||||
return t.UnixNano(), nil
|
||||
}
|
||||
|
||||
// parseNumericTimestamps parses timestamp at s in seconds, milliseconds, microseconds or nanoseconds.
|
||||
// parseNumericTimestamp parses timestamp at s in seconds, milliseconds, microseconds or nanoseconds.
|
||||
//
|
||||
// It returns nanoseconds for the parsed timestamp.
|
||||
func parseNumericTimestamp(s string) (int64, error) {
|
||||
|
||||
Reference in New Issue
Block a user