docs: fixed typos (#8878)

### Describe Your Changes

fixed typos in docs and code
fixed collision in cloud docs

### Checklist

The following checks are **mandatory**:

- [ ] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/).
This commit is contained in:
Andrii Chubatiuk
2025-05-06 13:03:56 +03:00
committed by GitHub
parent 0a4e7912fd
commit ac414d8b93
26 changed files with 45 additions and 44 deletions

View File

@@ -104,7 +104,7 @@ func parseInputValue(input string, origin bool) ([]sequenceValue, error) {
continue
}
if strings.Contains(item, "stale") {
return nil, fmt.Errorf("stale metric doesnt support operations")
return nil, fmt.Errorf("stale metric doesn't support operations")
}
vals := numReg.FindAllString(item, -1)
switch len(vals) {

View File

@@ -77,7 +77,7 @@ func TestPrometheusProcessorRun(t *testing.T) {
LabelValue: matchValue,
}
runnner, err := prometheus.NewClient(prometheus.Config{
runner, err := prometheus.NewClient(prometheus.Config{
Snapshot: testSnapshot,
Filter: filter,
})
@@ -85,7 +85,7 @@ func TestPrometheusProcessorRun(t *testing.T) {
t.Fatalf("cannot create prometheus client: %s", err)
}
p := &prometheusProcessor{
cl: runnner,
cl: runner,
im: importer,
cc: 1,
}

View File

@@ -319,14 +319,14 @@ type MetricNamesStatsRecord struct {
QueryRequestsCount uint64
}
// SnapshotCreateResponse is an in-memory reprensentation of the json response
// SnapshotCreateResponse is an in-memory representation of the json response
// returned by the /snapshot/create endpoint.
type SnapshotCreateResponse struct {
Status string
Snapshot string
}
// APIV1AdminTSDBSnapshotResponse is an in-memory reprensentation of the json
// APIV1AdminTSDBSnapshotResponse is an in-memory representation of the json
// response returned by the /api/v1/admin/tsdb/snapshot endpoint.
type APIV1AdminTSDBSnapshotResponse struct {
Status string
@@ -339,27 +339,27 @@ type SnapshotData struct {
Name string
}
// SnapshotListResponse is an in-memory reprensentation of the json response
// SnapshotListResponse is an in-memory representation of the json response
// returned by the /snapshot/list endpoint.
type SnapshotListResponse struct {
Status string
Snapshots []string
}
// SnapshotDeleteResponse is an in-memory reprensentation of the json response
// SnapshotDeleteResponse is an in-memory representation of the json response
// returned by the /snapshot/delete endpoint.
type SnapshotDeleteResponse struct {
Status string
Msg string
}
// SnapshotDeleteAllResponse is an in-memory reprensentation of the json response
// SnapshotDeleteAllResponse is an in-memory representation of the json response
// returned by the /snapshot/delete_all endpoint.
type SnapshotDeleteAllResponse struct {
Status string
}
// TSDBStatusResponse is an in-memory reprensentation of the json response
// TSDBStatusResponse is an in-memory representation of the json response
// returned by the /prometheus/api/v1/status/tsdb endpoint.
type TSDBStatusResponse struct {
IsPartial bool

View File

@@ -9,7 +9,7 @@ import (
"github.com/google/go-cmp/cmp"
)
// snapshotNameRE convers years 1970-2099.
// snapshotNameRE covers years 1970-2099.
// Corner case examples:
// - 19700101000000-0000000000000000
// - 20991231235959-38EECC8925ED5FFF

View File

@@ -11,7 +11,7 @@ tags:
---
Loki provides [LogQL](https://grafana.com/docs/loki/latest/query/) query language, while VictoriaLogs provides [LogsQL](https://docs.victoriametrics.com/victorialogs/logsql/)
query language. Both langauges are optimized for querying logs. The docs below show how to convert typical LogQL queries to LogsQL queries.
query language. Both languages are optimized for querying logs. The docs below show how to convert typical LogQL queries to LogsQL queries.
## Data model
@@ -73,7 +73,7 @@ Loki allows filtering log lines (log messages) with the following filters:
* [Regexp filter](https://docs.victoriametrics.com/victorialogs/logsql/#regexp-filter), which matches the given regexp at any position of the log line.
* Negative substring filter - `{...} != "some_text"`. It selects logs with lines without the `some_text` substring.
This query can be written as `{...} -"some_text"` in VictoriaLogs, e.g. just pre-pend the `"some_text"` with `-`.
This query can be written as `{...} -"some_text"` in VictoriaLogs, e.g. just prepend the `"some_text"` with `-`.
See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#logical-filter) for details.
* Regexp filter - `{...} |~ "regexp"`. It selects logs with lines matching the given `regexp`.
@@ -167,7 +167,7 @@ See [JSON parser](#json-parser) docs for more details.
Loki supports parsing log lines according to the provided pattern with the `{...} | pattern "..."` syntax according to [these docs](https://grafana.com/docs/loki/latest/query/log_queries/#pattern).
Such a query can be replaced with `{...} | extract "..."` at VictoriaLogs. See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#extract-pipe).
## Regluar expression parser
## Regular expression parser
Loki supports parsing log lines according to the provided regexp with the `{...} | regexp "..."` syntax.
Such a query can be replaced with `{...} | extract_regexp "..."` at VictoriaLogs. See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#extract_regexp-pipe).
@@ -277,11 +277,11 @@ for obtaining results grouped by log stream in VictoriaLogs. See [these docs](ht
Loki allows selecting top K metrics with the biggest values via `topk(K, (func_name({...} | unwrap label_name))` syntax.
This query can be translated to `... | first K (label_name desc)` at VictoriaLogs. See [these docs](https://docs.victoriametrics.com/victorialogs/logsql/#first-pipe).
The `bottomk(K, func_name({...} | unwrap label_name))` query at Loki can be translated to `... | fisrt K (label_name)` at VictoriaLogs.
The `bottomk(K, func_name({...} | unwrap label_name))` query at Loki can be translated to `... | first K (label_name)` at VictoriaLogs.
### Approximate calculations
Loki provides [`approx_topk(K, ...)`](https://grafana.com/docs/loki/latest/query/metric_queries/#probabilistic-aggregation) for probabalistic
Loki provides [`approx_topk(K, ...)`](https://grafana.com/docs/loki/latest/query/metric_queries/#probabilistic-aggregation) for probabilistic
selecting up to K metrics with the biggest values. VictoriaLogs provides [`sample` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sample-pipe),
which can be used for probabilistic calculations.

View File

@@ -925,7 +925,7 @@ All the [HTTP querying APIs](#http-api) provided by VictoriaLogs support the fol
All the filters across all the `extra_filters` args are applied to the `query` then.
- `extra_stream_filters` - this arg may contain extra [stream filters](https://docs.victoriametrics.com/victorialogs/logsql/#stream-filter),
which must be applied to the `query` before returning results. Multiple `extra_stream_filters` args may be passed in a single request.
All the stream filters accross all the `extra_stream_filters` args are applied to the `query` then.
All the stream filters across all the `extra_stream_filters` args are applied to the `query` then.
The `extra_filters` and `extra_stream_filters` values can have the following format:

View File

@@ -44,7 +44,7 @@ Organization `Admins` can perform the following `Actions` on other existing user
{{% collapse name="API Keys" %}}
[API Keys](https://docs.victoriametrics.com/victoriametrics-cloud/api/) are needed to enforce
authentication in programatic actions (for example, in scripts) to interact with VictoriaMetrics Cloud.
authentication in programmatic actions (for example, in scripts) to interact with VictoriaMetrics Cloud.
The API itself is documented in the [api-docs](https://console.victoriametrics.cloud/api-docs) page.
In the [API Keys](https://console.victoriametrics.cloud/api_keys) page, Organization Admins can:

View File

@@ -2,6 +2,7 @@
title : "Vector"
menu:
docs:
identifier: victoriametrics-cloud-integrations-vector
parent: "integrations"
---

View File

@@ -484,7 +484,7 @@ additional_endpoints:
Disable logs (logs ingestion is not supported by VictoriaMetrics) and set a custom endpoint in `serverless.yaml`:
```
```yaml
custom:
datadog:
enableDDLogs: false # Disabled not supported DD logs

View File

@@ -50,7 +50,7 @@ Released at 2025-04-25
* FEATURE: `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): log client network errors (EOFs, timeouts) during handshake as warnings, since they are not actionable from server point of view.
* BUGFIX: all the VictoriaMetrics components: properly detect `cgroupv2` [CPU limits](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#cpu). See [#8808](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8808) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): properly init [enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/) version for `linux/arm` and non-CGO buids. See [#6019](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6019) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): properly init [enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/) version for `linux/arm` and non-CGO builds. See [#6019](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6019) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): remote write client sets correct content encoding header based on actual body content, rather than relying on configuration. See [#8650](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8650).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/) and [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): properly configure s3 client if `configFilePath` is set. See [#8668](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8668) for details.
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/), [vmrestore](https://docs.victoriametrics.com/victoriametrics/vmrestore/), [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): enable support of HTTP/2 for connections to S3-compatible storage endpoints. It was disabled in [v1.115.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.115.0) and could lead to connection errors with some S3-compatible storage providers.
@@ -103,7 +103,7 @@ Released at 2025-03-21
* FEATURE: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and [vmselect](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): improve performance of `or` binary operator. The performance was degraded in [v1.111.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.111.0). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7759) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8382) issues for details.
* FEATURE: [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/): expose `vmalert_alerts_send_duration_seconds` metric to measure the time taken to send alerts to the specified `-notifier.url`. Thanks to @eyazici90 for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8468).
* FEATURE: [dashboards/single](https://grafana.com/grafana/dashboards/10229), [dashboards/cluster](https://grafana.com/grafana/dashboards/11176), [dashboards/vmagent](https://grafana.com/grafana/dashboards/12683), [dashboards/vmalert](https://grafana.com/grafana/dashboards/14950): add panel `Memory allocations rate` to ResourceUsage tab, that shows the rate of allocations in memory and can help identify issues with increased pressure on GC.
* FEATURE: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/), [vmrestore](https://docs.victoriametrics.com/victoriametrics/vmrestore/), [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): improve resilience to network issues by retrying requests failing due to `IncompleteBody`. Previously, such requests were not retried and leaded to restore/backup process failure. See [this PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8547) for details.
* FEATURE: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/), [vmrestore](https://docs.victoriametrics.com/victoriametrics/vmrestore/), [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): improve resilience to network issues by retrying requests failing due to `IncompleteBody`. Previously, such requests were not retried and led to restore/backup process failure. See [this PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8547) for details.
* FEATURE: [vmui](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#vmui): move legend customization settings, such as `Hide common labels` and `Table view`, closer to the legend area. This change should improve UX and make it easier for users to adjust legend visualization. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8031)
* BUGFIX: [stream aggregation](https://docs.victoriametrics.com/victoriametrics/stream-aggregation/): fix panic on `rate` output. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8469).
@@ -222,7 +222,7 @@ All these fixes are also included in [the latest community release](https://gith
The v1.110.x line will be supported for at least 12 months since [v1.110.0](https://docs.victoriametrics.com/victoriametrics/changelog/#v11100) release**
* BUGFIX: all the VictoriaMetrics components: properly detect `cgroupv2` [CPU limits](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#cpu). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8808) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): properly init [enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/) version for `linux/arm` and non-CGO buids. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6019) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): properly init [enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/) version for `linux/arm` and non-CGO builds. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6019) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): remote write client sets correct content encoding header based on actual body content, rather than relying on configuration. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8650).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/) and [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): properly configure s3 client if `configFilePath` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8668) for details.
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/), `vmstorage` and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): allow using `-downsampling.period=filter:0s:0s` to skip downsampling for time series that match the specified `filter`. See [this doc](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#downsampling).
@@ -419,7 +419,7 @@ All these fixes are also included in [the latest community release](https://gith
The v1.102.x line will be supported for at least 12 months since [v1.102.0](https://docs.victoriametrics.com/victoriametrics/changelog/#v11020) release**
* BUGFIX: all the VictoriaMetrics components: properly detect `cgroupv2` [CPU limits](https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#cpu). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8808) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): properly init [enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/) version for `linux/arm` and non-CGO buids. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6019) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): properly init [enterprise](https://docs.victoriametrics.com/victoriametrics/enterprise/) version for `linux/arm` and non-CGO builds. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6019) for details.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/): remote write client sets correct content encoding header based on actual body content, rather than relying on configuration. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8650).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/victoriametrics/vmbackup/) and [vmbackupmanager](https://docs.victoriametrics.com/victoriametrics/vmbackupmanager/): properly configure s3 client if `configFilePath` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8668) for details.
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/), `vmstorage` and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): allow using `-downsampling.period=filter:0s:0s` to skip downsampling for time series that match the specified `filter`. See [this doc](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#downsampling).

View File

@@ -1829,7 +1829,7 @@ See the docs at https://docs.victoriametrics.com/victoriametrics/vmagent/ .
Supports array of values separated by comma or specified via multiple flags.
Empty values are set to false.
-gcp.pubsub.subscribe.topicSubscription.messageFormat array
Message format for the corresponding -gcp.pubsub.subcribe.topicSubscription. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline . See https://docs.victoriametrics.com/victoriametrics/vmagent/#reading-metrics-from-pubsub . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Message format for the corresponding -gcp.pubsub.subscribe.topicSubscription. Valid formats: influx, prometheus, promremotewrite, graphite, jsonline . See https://docs.victoriametrics.com/victoriametrics/vmagent/#reading-metrics-from-pubsub . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/victoriametrics/enterprise/
Supports an array of values separated by comma or specified via multiple flags.
Value can contain comma inside single-quoted or double-quoted string, {}, [] and () braces.
-graphite.sanitizeMetricName

View File

@@ -833,7 +833,7 @@ _To disable explore phase and switch to the old way of data migration via single
`--vm-native-disable-per-metric-migration` cmd-line flag. Please note, in this mode vmctl won't be able to retry failed requests._
_Migration speed via vmctl is limited by available resources on `--vm-native-src-addr` and `--vm-native-dst-addr`,
and network between `src`=>vmctl=>`dst`. See the expeted migration speed [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5366#issuecomment-1854251938)._
and network between `src`=>vmctl=>`dst`. See the expected migration speed [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5366#issuecomment-1854251938)._
Importing tips:

View File

@@ -25,7 +25,7 @@ func Get() *Buffer {
return v.(*Buffer)
}
// Put returns cb to the pool, so it could be re-used via Get() call.
// Put returns cb to the pool, so it could be reused via Get() call.
//
// The cb cannot be used after Put() call.
func Put(cb *Buffer) {

View File

@@ -30,7 +30,7 @@ func GetReader(r io.Reader) *Reader {
return zr
}
// PutReader returns zr to the pool, so it could be re-used via GetReader.
// PutReader returns zr to the pool, so it could be reused via GetReader.
func PutReader(zr *Reader) {
// Do not call zr.Reset() in order to avoid CGO call.
// The zr.Reset() is automatically called when zr is destroyed by Go GC.
@@ -61,10 +61,10 @@ func GetWriter(w io.Writer, compressLevel int) *Writer {
return zw
}
// PutWriter returns zw to the pool, so it could be re-used via GetWriter.
// PutWriter returns zw to the pool, so it could be reused via GetWriter.
func PutWriter(zw *Writer) {
// Do not call zw.Reset() in order to avoid CGO call.
// The zw.Reset() is automaticall called when zw is destroyed by Go GC.
// The zw.Reset() is automatically called when zw is destroyed by Go GC.
writerPool.Put(zw)
}

View File

@@ -53,7 +53,7 @@ func GetReader(r io.Reader) *Reader {
return zr
}
// PutReader returns zr to the pool, so it could be re-used via GetReader.
// PutReader returns zr to the pool, so it could be reused via GetReader.
func PutReader(zr *Reader) {
if err := zr.d.Reset(nil); err != nil {
logger.Panicf("BUG: unexpected error when resetting ZSTD reader: %s", err)
@@ -118,7 +118,7 @@ func GetWriter(w io.Writer, level int) *Writer {
return zw
}
// PutWriter returns zw to the pool, so it could be re-used via GetWriter.
// PutWriter returns zw to the pool, so it could be reused via GetWriter.
func PutWriter(zw *Writer) {
zw.e.Reset(nil)

View File

@@ -108,7 +108,7 @@ func (lr *logRows) mustAddRows(src *LogRows) {
return
}
// a hint for the compiler for preventing from unnesesary bounds checks
// a hint for the compiler for preventing from unnecessary bounds checks
_ = streamIDs[len(rows)-1]
_ = timestamps[len(rows)-1]
@@ -580,7 +580,7 @@ func EstimatedJSONRowLen(fields []Field) int {
// GetInsertRow returns InsertRow from a pool.
//
// Pass the returned row to PutInsertRow when it is no longer needed, so it could be re-used.
// Pass the returned row to PutInsertRow when it is no longer needed, so it could be reused.
func GetInsertRow() *InsertRow {
v := insertRowsPool.Get()
if v == nil {
@@ -589,7 +589,7 @@ func GetInsertRow() *InsertRow {
return v.(*InsertRow)
}
// PutInsertRow returns r to the pool, so it could be re-used via GetInsertRow.
// PutInsertRow returns r to the pool, so it could be reused via GetInsertRow.
func PutInsertRow(r *InsertRow) {
r.Reset()
insertRowsPool.Put(r)

View File

@@ -87,7 +87,7 @@ func (pfp *pipeFieldValuesLocalProcessor) writeBlock(workerID uint, br *blockRes
for i, value := range values {
hits64, ok := tryParseUint64(hits[i])
if !ok {
logger.Panicf("BUG: unexpected hits recevied from the remote storage for %q: %q; it must be uint64", value, hits[i])
logger.Panicf("BUG: unexpected hits received from the remote storage for %q: %q; it must be uint64", value, hits[i])
}
shard.vhs = append(shard.vhs, ValueWithHits{
Value: strings.Clone(value),

View File

@@ -97,7 +97,7 @@ func (pup *pipeUniqLocalProcessor) writeBlock(workerID uint, br *blockResult) {
value := string(buf)
hits64, ok := tryParseUint64(hits[rowIdx])
if !ok {
logger.Panicf("BUG: unexpected hits recevied from the remote storage at the column %q: %q; it must be uint64", pu.hitsFieldName, hits[rowIdx])
logger.Panicf("BUG: unexpected hits received from the remote storage at the column %q: %q; it must be uint64", pu.hitsFieldName, hits[rowIdx])
}
shard.vhs = append(shard.vhs, ValueWithHits{
Value: value,

View File

@@ -700,7 +700,7 @@ func (sup *statsCountUniqProcessor) importState(src []byte, stopCh <-chan struct
return 0, fmt.Errorf("cannot read uniqValues state: %w", err)
}
if len(tail) > 0 {
return 0, fmt.Errorf("unexpected tail left after imporing uniqValues state; len(tail)=%d", len(tail))
return 0, fmt.Errorf("unexpected tail left after importing uniqValues state; len(tail)=%d", len(tail))
}
return stateSize, nil
}

View File

@@ -528,7 +528,7 @@ func (sup *statsCountUniqHashProcessor) importState(src []byte, stopCh <-chan st
return 0, fmt.Errorf("cannot read uniqValues state: %w", err)
}
if len(tail) > 0 {
return 0, fmt.Errorf("unexpected tail left after imporing uniqValues state; len(tail)=%d", len(tail))
return 0, fmt.Errorf("unexpected tail left after importing uniqValues state; len(tail)=%d", len(tail))
}
return stateSize, nil
}

View File

@@ -513,7 +513,7 @@ func TestStatsCountUniq_ExportImportState(t *testing.T) {
}
f(sup, 82, 11)
// boths shards and shardss initialized
// both shards and shardss initialized
sup = newStatsCountUniqProcessor()
sup.shardss = [][]statsCountUniqSet{
{

View File

@@ -479,7 +479,7 @@ type roundTripper struct {
trBase *http.Transport
getTLSConfigCached getTLSConfigFunc
// mu protects acces to rootCAPrev and trPrev
// mu protects access to rootCAPrev and trPrev
mu sync.Mutex
rootCAPrev *x509.CertPool
trPrev *http.Transport

View File

@@ -165,7 +165,7 @@ func TestClientProxyReadOk(t *testing.T) {
t.Fatalf("unexpected error at ReadData: %s", err)
}
if isGzipped {
t.Fatalf("the response musn't be gzipped")
t.Fatalf("the response mustn't be gzipped")
}
got, err := io.ReadAll(cb.NewReader())
if err != nil {

View File

@@ -597,7 +597,7 @@ func TestScrapeWorkScrapeInternalStreamConcurrency(t *testing.T) {
return w.String()
}
// process one serie: one batch of data, plus auto metrics pushed
// process one series: one batch of data, plus auto metrics pushed
f(generateScrape(1), &ScrapeWork{
StreamParse: true,
ScrapeTimeout: time.Second * 42,

View File

@@ -197,11 +197,11 @@ func TestMustCreatePartition(t *testing.T) {
ts := time.Date(2025, 3, 23, 14, 07, 56, 999_999_999, time.UTC).UnixMilli()
smallPath := filepath.Join(t.Name(), "small")
if fs.IsPathExist(smallPath) {
t.Errorf("small parition directory must not exist: %s", smallPath)
t.Errorf("small partition directory must not exist: %s", smallPath)
}
bigPath := filepath.Join(t.Name(), "big")
if fs.IsPathExist(bigPath) {
t.Errorf("big parition directory must not exist: %s", bigPath)
t.Errorf("big partition directory must not exist: %s", bigPath)
}
s := &Storage{}

View File

@@ -3993,7 +3993,7 @@ func TestStorageSearchTagValueSuffixes_maxTagValueSuffixes(t *testing.T) {
}
}
// First, check that all the suffixes are returned if tht limit is higher
// First, check that all the suffixes are returned if the limit is higher
// than numMetrics.
maxTagValueSuffixes := numMetrics + 1
wantCount := numMetrics