Compare commits

...

13 Commits

Author SHA1 Message Date
Vadim Alekseev
de3690671b lib/stringsutil: optimize AppendLowercase
The optimization includes the following improvements:
- Implementation of a function that processes 8 bytes per loop iteration to locate ASCII characters using bitwise manipulations.
- Implementation of the ToLowercaseFunc function that prevents string copying if the string is already in lowercase.
- Use of a lookup table for converting ASCII characters to lowercase, with logic copied from the VictoriaLogs repository.
2026-04-16 02:22:45 +04:00
Max Kotliar
cc3a14b16b docs/changelog: fix feature indention 2026-04-15 17:34:22 +03:00
Aliaksandr Valialkin
7ef08b1781 vendor: update github.com/VictoriaMetrics/VictoriaLogs from v1.50.1-0.20260415114444-d5b5febe4954 to github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0
This is needed for vmalert, so it accepts LogsQL queries with 'limit' and 'offset' pipes.

See https://github.com/VictoriaMetrics/VictoriaLogs/issues/1296#issuecomment-4252036978
2026-04-15 14:45:01 +02:00
Aliaksandr Valialkin
969cb5b4ae vendor: run make vendor-update 2026-04-15 14:03:53 +02:00
Aliaksandr Valialkin
b9f0e614bd vendor: update github.com/VictoriaMetrics/VictoriaLogs from v0.0.0-20260218111324-95b48d57d032 to v1.50.1-0.20260415114444-d5b5febe4954 2026-04-15 13:54:46 +02:00
Aliaksandr Valialkin
ed44c08f5f docs/Makefile: avoid creating a docker image with docs server at make docs-update-version
Just run a simple bash command without the heavyweight Docker image

While at it, rely on TAG environment variable instead of PKG_TAG env variable
for `make docs-update-version`, in order to be consistent with other Make commands.
2026-04-15 13:24:52 +02:00
f41gh7
3ae44e734b docs: remove promscrape.dropOriginalLabels from relabeling-debug section
Follow-up for ef507d372b.

 It's no longer needed to manually set promscrape.dropOriginalLabels
 flag, since it has false value by default.
2026-04-15 12:34:07 +02:00
Pablo (Tomas) Fernandez
d3264bd78f docs/guides: fix broken links (#10800)
Fix broken or moved links in guides.

### Checklist

The following checks are **mandatory**:

- [X] My change adheres to [VictoriaMetrics contributing
guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
- [X] My change adheres to [VictoriaMetrics development
goals](https://docs.victoriametrics.com/victoriametrics/goals/).
2026-04-15 10:18:48 +02:00
hagen1778
1f87faafec docs/articles: add new 3rd party article about stream aggregation
https://medium.com/airbnb-engineering/building-a-high-volume-metrics-pipeline-with-opentelemetry-and-vmagent-c714d6910b45
Signed-off-by: hagen1778 <roman@victoriametrics.com>
2026-04-15 10:11:57 +02:00
hagen1778
521b73dfc5 docs/vmagent: move relabeling section higher
The change is needed to group splitting/sharding section of the documentation,
so they go one after another. This should improve readability.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2026-04-15 10:10:53 +02:00
hagen1778
61db79c10a docs/vmagent: mention ability to filter scrape targets
The previous description didn't mention that relabeling can be used
for filtering scrape targets. Adding this mention.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2026-04-15 10:08:31 +02:00
hagen1778
460ac6468c docs/relabeling: restore links to articles about relabeling internals
These links were removed in 134501bf99
without adding complete substitution to their content.

Restoring these links as they can be useful for readers to learn about relabeling.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2026-04-15 10:07:07 +02:00
hagen1778
c42023c586 docs/playgrounds: add aliases for old links
The old links were removed in #10754
mistakenly thinking that Google didn't index it. However, it did. And users can get 404
when searching in Google for VM playgrounds.

Restoring the links via aliases. It means hugo will serve the `/playgrounds` page when
user requests `/playgrounds/victoriametrics/`.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
2026-04-15 10:04:46 +02:00
203 changed files with 5485 additions and 1982 deletions

View File

@@ -41,18 +41,8 @@ docs-debug: docs docs-image
$(foreach dir,$(wildcard ./docs/$(dir)/*), -v ./docs/$(notdir $(dir)):/opt/docs/content/$(notdir $(dir))) \
vmdocs-docker-package
docs-update-version: docs-image
$(if $(filter v%,$(PKG_TAG)), \
docker run \
--rm \
--entrypoint /usr/bin/find \
--platform $(DOCKER_PLATFORM) \
--name vmdocs-docker-container \
-v ./docs:/opt/docs/content/victoriametrics vmdocs-docker-package \
content \
-regex ".*\.md" \
-exec sed -i 's/{{% available_from "#" %}}/{{% available_from "$(PKG_TAG)" %}}/g' {} \;, \
$(info "Skipping docs version update, invalid $$PKG_TAG: $(PKG_TAG)"))
docs-update-version:
find docs/victoriametrics/ -name '*.md' -exec sed -i 's/{{% available_from "#" %}}/{{% available_from "$(TAG)" %}}/g' {} \;
# Converts images at docs folder to webp format
# See https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#images-in-documentation
@@ -342,4 +332,4 @@ endif
$(MAKE) docs-update-vmagent-flags && git checkout "$$orig_branch" && \
$(MAKE) docs-update-vmselect-flags && git checkout "$$orig_branch" && \
$(MAKE) docs-update-vminsert-flags && git checkout "$$orig_branch" && \
$(MAKE) docs-update-vmstorage-flags && git checkout "$$orig_branch"
$(MAKE) docs-update-vmstorage-flags && git checkout "$$orig_branch"

View File

@@ -420,7 +420,7 @@ Create two Prometheus datasources in Grafana with the following URLs: `http://vm
![Prometheus datasource](grafana-datasource-prometheus.webp)
You can also use the VictoriaMetrics [Grafana datasource](https://github.com/VictoriaMetrics/victoriametrics-datasource) plugin.
See installation instructions in [Grafana datasource - Installation](https://docs.victoriametrics.com/victoriametrics/victoriametrics-datasource/#installation).
See installation instructions in [Grafana datasource - Installation](https://docs.victoriametrics.com/victoriametrics/integrations/grafana/#victoriametrics-datasource).
Users with the `vm_access` claim will be able to query metrics from the specified tenant with extra filters applied.

View File

@@ -198,7 +198,7 @@ If you open the sidebar and select **Alerting** > **Alert rules**, you should be
Open the sidebar again and go to **Alerting** > **Active notifications** to see the active alert reported by Alertmanager.
![Screenshot of Grafana Active notifications Page](grafana-active-notifications.webp)
![Screenshot of Grafana Active notifications Page](grafana-notifications.webp)
You can also see the alerts in VMUI by opening the browser in `http://localhost:8428/vmui/?#/rules`. This is possible only when we have configured `-vmalert.proxyURL` in VictoriaMetrics.

View File

@@ -10,5 +10,11 @@ tags:
- logs
- traces
- playground
aliases:
- /playgrounds/victoriametrics/
- /playgrounds/victorialogs/
- /playgrounds/victoriatraces/
- /playgrounds/cloud/
- /playgrounds/vmanomaly/
---
{{% content "README.md" %}}

View File

@@ -113,6 +113,7 @@ See also [case studies](https://docs.victoriametrics.com/victoriametrics/casestu
* [FreeBSD: monitoring with VictoriaMetrics and Grafana](https://setevoy.medium.com/freebsd-monitoring-with-victoriametrics-and-grafana-f789904f2628)
* [QCon London 2026: Wrangling Telemetry at Scale, a Guide to Self-Hosted Observability](https://www.infoq.com/news/2026/03/self-hosted-observability/)
* [How We Made Telemetry Queries 10x Faster: Chunk-Split Caching for Metrics, Logs, and Traces](https://mirastacklabs.ai/blog/chunk-split-caching/)
* [Building a high-volume metrics pipeline with OpenTelemetry and vmagent](https://medium.com/airbnb-engineering/building-a-high-volume-metrics-pipeline-with-opentelemetry-and-vmagent-c714d6910b45)
## Third-party articles and slides about VictoriaLogs

View File

@@ -113,7 +113,7 @@ and the candidate is deployed to the sandbox environment.
1. Make sure that the release branches have no security issues.
1. Update release versions if needed in [SECURITY.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/SECURITY.md).
1. Run `PKG_TAG=v1.xx.y make docs-update-version` command to update version help tooltips.
1. Run `TAG=v1.xx.y make docs-update-version` command to update version help tooltips.
1. Cut new version in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/victoriametrics/changelog/CHANGELOG.md) and commit it. See example in this [commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/b771152039d23b5ccd637a23ea748bc44a9511a7).
1. Create the following release tags:
* `git tag -s v1.xx.y` in `master` branch

View File

@@ -176,7 +176,7 @@ The v1.136.x line will be supported for at least 12 months since [v1.136.0](http
* SECURITY: upgrade Go builder from Go1.26.0 to Go1.26.1. See [the list of issues addressed in Go1.26.1](https://github.com/golang/go/issues?q=milestone%3AGo1.26.1%20label%3ACherryPickApproved).
FEATURE: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): Disable `/graphite/tags/tagSeries` and `/graphite/tags/tagMultiSeries` for Graphite tag registration since it is unlikely it is used in context of VictoriaMetrics. See [10544](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10544).
* FEATURE: [vmsingle](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/): Disable `/graphite/tags/tagSeries` and `/graphite/tags/tagMultiSeries` for Graphite tag registration since it is unlikely it is used in context of VictoriaMetrics. See [10544](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/10544).
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/): add [histogram_fraction](https://docs.victoriametrics.com/victoriametrics/metricsql/#histogram_fraction) function to calculate the fraction of buckets falling between lowerLe and upperLe. See [#5346](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5346).
* BUGFIX: all VictoriaMetrics components: replace `histogram` with `untyped` metric metadata type for [VictoriaMetrics histograms](https://docs.victoriametrics.com/victoriametrics/keyconcepts/#histogram) when `-metrics.exposeMetadata` is set. See [#82](https://github.com/VictoriaMetrics/metrics/issues/82).

View File

@@ -24,6 +24,11 @@ VictoriaMetrics and vmagent support Prometheus-style relabeling with
The following articles contain useful information about Prometheus relabeling:
- [How to use Relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2)
- [Life of a label](https://www.robustperception.io/life-of-a-label)
- [Discarding targets and timeseries with relabeling](https://www.robustperception.io/relabelling-can-discard-targets-timeseries-and-alerts)
- [Dropping labels at scrape time](https://www.robustperception.io/dropping-metrics-at-scrape-time-with-prometheus)
- [Extracting labels from legacy metric names](https://www.robustperception.io/extracting-labels-from-legacy-metric-names)
- [relabel_configs vs metric_relabel_configs](https://www.robustperception.io/relabel_configs-vs-metric_relabel_configs)
## Relabeling Stages
@@ -352,9 +357,6 @@ see two types of targets:
service discovery, before any relabeling rules are applied. This includes
targets that may later be dropped.
_This option is only available when the component is started with the
`-promscrape.dropOriginalLabels=false` flag._
{{% collapse name="How to use `/targets` page?" %}}
This `/targets` page helps answer the following questions:
@@ -374,18 +376,12 @@ to all metrics scraped from that target.
You can click the label column of the target to see the original labels
**before** any relabeling was applied.
_This option is only available when the component is started with the
`-promscrape.dropOriginalLabels=false` flag._
**3. Why does a target have a certain set of labels?**
Click the `target` link in the `debug relabeling` column. This opens a
step-by-step view of how the relabeling rules were applied to the original
labels.
_This option is only available when the component is started with the
`-promscrape.dropOriginalLabels=false` flag._
**4. How are metric relabeling rules applied to scraped metrics?**
Click the `metrics` link in the `debug relabeling` column. This shows how the
@@ -408,9 +404,6 @@ Each column on the page shows important details:
This page shows all
[discovered targets](https://docs.victoriametrics.com/victoriametrics/sd_configs/).
_This option is only available when the component is started with the
`-promscrape.dropOriginalLabels=false` flag._
It helps answer the following questions:
**1. Why are some targets dropped during service discovery or showing unexpected

View File

@@ -146,6 +146,12 @@ and then it sends the buffered data to the remote storage in order to prevent da
so there is no need to specify multiple `-remoteWrite.url` flags when writing data to the same cluster.
See [these docs](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#replication-and-data-safety).
### Relabeling and filtering
`vmagent` can add, remove or update labels on the collected data before sending it to the remote storage.
It can filter scrape targets or remove unwanted samples via Prometheus-like relabeling.
Please see [Relabeling cookbook](https://docs.victoriametrics.com/victoriametrics/relabeling/) for details.
### Sharding among remote storages
By default `vmagent` replicates data to remote storage systems via the `-remoteWrite.url` command-line flag.
@@ -188,12 +194,6 @@ except for the labels `instance` and `pod` must be routed to the same backend. I
See also [how to scrape large number of targets](#scraping-big-number-of-targets).
### Relabeling and filtering
`vmagent` can add, remove or update labels on the collected data before sending it to the remote storage. Additionally,
it can remove unwanted samples via Prometheus-like relabeling before sending the collected data to remote storage.
Please see [Relabeling cookbook](https://docs.victoriametrics.com/victoriametrics/relabeling/) for details.
### Splitting data streams among multiple systems
`vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`,

60
go.mod
View File

@@ -3,11 +3,11 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.26.2
require (
cloud.google.com/go/storage v1.62.0
cloud.google.com/go/storage v1.62.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20260218111324-95b48d57d032
github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0
github.com/VictoriaMetrics/easyproto v1.2.0
github.com/VictoriaMetrics/fastcache v1.13.3
github.com/VictoriaMetrics/metrics v1.43.1
@@ -22,10 +22,10 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v1.0.0
github.com/google/go-cmp v0.7.0
github.com/googleapis/gax-go/v2 v2.21.0
github.com/influxdata/influxdb v1.12.3
github.com/googleapis/gax-go/v2 v2.22.0
github.com/influxdata/influxdb v1.12.4
github.com/klauspost/compress v1.18.5
github.com/prometheus/prometheus v0.311.1
github.com/prometheus/prometheus v0.311.2
github.com/urfave/cli/v2 v2.27.7
github.com/valyala/fastjson v1.6.10
github.com/valyala/fastrand v1.1.0
@@ -33,10 +33,10 @@ require (
github.com/valyala/gozstd v1.24.0
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.8.0
golang.org/x/net v0.52.0
golang.org/x/net v0.53.0
golang.org/x/oauth2 v0.36.0
golang.org/x/sys v0.43.0
google.golang.org/api v0.275.0
google.golang.org/api v0.276.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -46,8 +46,8 @@ require (
cloud.google.com/go/auth v0.20.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
cloud.google.com/go/iam v1.7.0 // indirect
cloud.google.com/go/monitoring v1.25.0 // indirect
cloud.google.com/go/iam v1.9.0 // indirect
cloud.google.com/go/monitoring v1.27.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.12.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.7.1 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.32.0 // indirect
@@ -110,14 +110,14 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/oklog/ulid/v2 v2.1.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.149.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.149.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.149.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.150.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.150.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.150.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_golang/exp v0.0.0-20260408213824-a4984284cf47 // indirect
github.com/prometheus/client_golang/exp v0.0.0-20260411065453-32d05ffab50a // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/otlptranslator v1.0.0 // indirect
@@ -131,15 +131,15 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/collector/component v1.55.0 // indirect
go.opentelemetry.io/collector/confmap v1.55.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.149.0 // indirect
go.opentelemetry.io/collector/consumer v1.55.0 // indirect
go.opentelemetry.io/collector/featuregate v1.55.0 // indirect
go.opentelemetry.io/collector/internal/componentalias v0.149.0 // indirect
go.opentelemetry.io/collector/pdata v1.55.0 // indirect
go.opentelemetry.io/collector/pipeline v1.55.0 // indirect
go.opentelemetry.io/collector/processor v1.55.0 // indirect
go.opentelemetry.io/collector/component v1.56.0 // indirect
go.opentelemetry.io/collector/confmap v1.56.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.150.0 // indirect
go.opentelemetry.io/collector/consumer v1.56.0 // indirect
go.opentelemetry.io/collector/featuregate v1.56.0 // indirect
go.opentelemetry.io/collector/internal/componentalias v0.150.0 // indirect
go.opentelemetry.io/collector/pdata v1.56.0 // indirect
go.opentelemetry.io/collector/pipeline v1.56.0 // indirect
go.opentelemetry.io/collector/processor v1.56.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.43.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.68.0 // indirect
@@ -155,15 +155,15 @@ require (
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.4 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.49.0 // indirect
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect
golang.org/x/crypto v0.50.0 // indirect
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/term v0.41.0 // indirect
golang.org/x/text v0.35.0 // indirect
golang.org/x/term v0.42.0 // indirect
golang.org/x/text v0.36.0 // indirect
golang.org/x/time v0.15.0 // indirect
google.golang.org/genproto v0.0.0-20260406210006-6f92a3bedf2d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260406210006-6f92a3bedf2d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d // indirect
google.golang.org/genproto v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 // indirect
google.golang.org/grpc v1.80.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
@@ -171,7 +171,7 @@ require (
k8s.io/apimachinery v0.35.3 // indirect
k8s.io/client-go v0.35.3 // indirect
k8s.io/klog/v2 v2.140.0 // indirect
k8s.io/kube-openapi v0.0.0-20260330154417-16be699c7b31 // indirect
k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f // indirect
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect

180
go.sum
View File

@@ -8,18 +8,18 @@ cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIi
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
cloud.google.com/go/iam v1.7.0 h1:JD3zh0C6LHl16aCn5Akff0+GELdp1+4hmh6ndoFLl8U=
cloud.google.com/go/iam v1.7.0/go.mod h1:tetWZW1PD/m6vcuY2Zj/aU0eCHNPuxedbnbRTyKXvdY=
cloud.google.com/go/logging v1.14.0 h1:xpPpY8cVT6n9DgIRgrWyE+YEsGlO/994pWnbc7o5Eh4=
cloud.google.com/go/logging v1.14.0/go.mod h1:jmI+Try/fZeOTOAer3wVYOuPf9WX9PyzhlSDoBAi4HM=
cloud.google.com/go/longrunning v0.9.0 h1:0EzbDEGsAvOZNbqXopgniY0w0a1phvu5IdUFq8grmqY=
cloud.google.com/go/longrunning v0.9.0/go.mod h1:pkTz846W7bF4o2SzdWJ40Hu0Re+UoNT6Q5t+igIcb8E=
cloud.google.com/go/monitoring v1.25.0 h1:HnsTIOxTN6BCSkt1P/Im23r1m7MHTTpmSYCzPkW7NK4=
cloud.google.com/go/monitoring v1.25.0/go.mod h1:wlj6rX+JGyusw/8+2duW4cJ6kmDHGmde3zMTJuG3Jpc=
cloud.google.com/go/storage v1.62.0 h1:w2pQJhpUqVerMON45vatE2FpCYsNTf7OHjkn6ux5mMU=
cloud.google.com/go/storage v1.62.0/go.mod h1:T5hz3qzcpnxZ5LdKc7y8Tw7lh4v9zeeVyrD/cLJAzZU=
cloud.google.com/go/trace v1.12.0 h1:XvWHYfr9q88cX4pZyou6qCcSagnuASyUq2ej1dB6NzQ=
cloud.google.com/go/trace v1.12.0/go.mod h1:TOYfyeoyCGsSH0ifXD6Aius24uQI9xV3RyvOdljFIyg=
cloud.google.com/go/iam v1.9.0 h1:89wyjxT6DL4b5rk/Nk8eBC9DHqf+JiMstrn5IEYxFw4=
cloud.google.com/go/iam v1.9.0/go.mod h1:KP+nKGugNJW4LcLx1uEZcq1ok5sQHFaQehQNl4QDgV4=
cloud.google.com/go/logging v1.15.0 h1:6ooUEBNT6jdWh2b36+iuPn6b/R9qN/tHCbvGS5255gg=
cloud.google.com/go/logging v1.15.0/go.mod h1:ZGKnpBaURITh+g/uom2VhbiFoFWvejcrHPDhxFtU/gI=
cloud.google.com/go/longrunning v0.10.0 h1:4OWvp1BjCvoeSZTog3sRFDu6j4IrI9TI4/Y9N+8h25g=
cloud.google.com/go/longrunning v0.10.0/go.mod h1:8nqFBPOO1U/XkhWl0I19AMZEphrHi73VNABIpKYaTwM=
cloud.google.com/go/monitoring v1.27.0 h1:BhYwMqao+e5Nn7JtWMM9m6zRtKtVUK6kJWMizXChkLU=
cloud.google.com/go/monitoring v1.27.0/go.mod h1:72NOVjJXHY/HBfoLT0+qlCZBT059+9VXLeAnL2PeeVM=
cloud.google.com/go/storage v1.62.1 h1:Os0G3XbUbjZumkpDUf2Y0rLoXJTCF1kU2kWUujKYXD8=
cloud.google.com/go/storage v1.62.1/go.mod h1:cpYz/kRVZ+UQAF1uHeea10/9ewcRbxGoGNKsS9daSXA=
cloud.google.com/go/trace v1.13.0 h1:RfqsqPOiSCG8ql50UZt5F65KrVa1zbY9mJrO7xvZfbE=
cloud.google.com/go/trace v1.13.0/go.mod h1:r+bdAn16dKLSV1G2D5v3e58IlQlizfxWrUfjx7kM7X0=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
@@ -52,8 +52,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.56.0/go.mod h1:6ZZMQhZKDvUvkJw2rc+oDP90tMMzuU/J+5HG1ZmPOmE=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20260218111324-95b48d57d032 h1:kKVeXC+HAcMeMLefoKCWf934y9MoLU8V3Da7k6WP4K8=
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20260218111324-95b48d57d032/go.mod h1:WQ8hGgfKx1lXCCcS1SJSOklN9fToSbshtvKHp3xsv4w=
github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0 h1:2x1Tszv41PnCdSMumEtejz/On1RQ45kHQ+hhKT53sOk=
github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0/go.mod h1:fQtmzaSUL+HJmHozeAKmnTJTOMBT+vBccv/VWQEwhUQ=
github.com/VictoriaMetrics/easyproto v1.2.0 h1:FJT9uNXA2isppFuJErbLqD306KoFlehl7Wn2dg/6oIE=
github.com/VictoriaMetrics/easyproto v1.2.0/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
github.com/VictoriaMetrics/fastcache v1.13.3 h1:rBabE0iIxcqKEMCwUmwHZ9dgEqXerg8FRbRDUvC7OVc=
@@ -246,8 +246,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8=
github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg=
github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI=
github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4=
github.com/googleapis/gax-go/v2 v2.22.0 h1:PjIWBpgGIVKGoCXuiCoP64altEJCj3/Ei+kSU5vlZD4=
github.com/googleapis/gax-go/v2 v2.22.0/go.mod h1:irWBbALSr0Sk3qlqb9SyJ1h68WjgeFuiOzI4Rqw5+aY=
github.com/gophercloud/gophercloud/v2 v2.11.1 h1:jCs4vLH8sJgRqrPzqVfWgl7uI6JnIIlsgeIRM0uHjxY=
github.com/gophercloud/gophercloud/v2 v2.11.1/go.mod h1:Rm0YvKQ4QYX2rY9XaDKnjRzSGwlG5ge4h6ABYnmkKQM=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
@@ -282,8 +282,8 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.36.0 h1:HlLL/aaVXUulqe+rsjoJmrxKhPi1MflL5O9iq5QEtvo=
github.com/hetznercloud/hcloud-go/v2 v2.36.0/go.mod h1:MnN/QJEa/RYNQiiVoJjNHPntM7Z1wlYPgJ2HA40/cDE=
github.com/influxdata/influxdb v1.12.3 h1:nrqbOazMNQt969yQ7fXepY9hvy7xyg+efN1eb0bihfg=
github.com/influxdata/influxdb v1.12.3/go.mod h1:czsGl4TCm2kWtzEHsGh74Nye77o/KgmKsLtF4/L9QVc=
github.com/influxdata/influxdb v1.12.4 h1:vn/1rvFYkYpg9efRw79+PUPPnMX7HwyJV+hDIB9IrOQ=
github.com/influxdata/influxdb v1.12.4/go.mod h1:czsGl4TCm2kWtzEHsGh74Nye77o/KgmKsLtF4/L9QVc=
github.com/ionos-cloud/sdk-go/v6 v6.3.6 h1:l/TtKgdQ1wUH3DDe2SfFD78AW+TJWdEbDpQhHkWd6CM=
github.com/ionos-cloud/sdk-go/v6 v6.3.6/go.mod h1:nUGHP4kZHAZngCVr4v6C8nuargFrtvt7GrzH/hqn7c4=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@@ -342,12 +342,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.149.0 h1:Zovdium/2408dqJzSxA5XebZBxGBnDkfrai1HKT5Omc=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.149.0/go.mod h1:ughjuka9JQd81X6we9PmdvaiIjeOWtKK04BladDtzZc=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.149.0 h1:OZKthV+cLQO5MCFhBQme3AveZ5vorqaFwb0Qn8jvSQQ=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.149.0/go.mod h1:eB74l+/1nW5tofwCjD5TKRqHFYnBSWo0j0xWD8BHYuE=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.149.0 h1:4QJFwu4guYeLNnlHLYWJQx8Dps6ii1rwjE9B9dekYdY=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.149.0/go.mod h1:K/+3geevCDJiJew7MuQU481B9JNlc7eLEFv4t59WGRM=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.150.0 h1:kRIR4xDIE94IGdHMrYlfMjzlxInL8f9UO9jz+R4iHEc=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.150.0/go.mod h1:aOBH3hR2zVgVFUbpTSjxuHTCkx+rvg7OWDNnTYUDB3w=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.150.0 h1:eMU3aV8tUWhTlZwrW/eHbw3JCxsUDXqLKqswqj/vtlQ=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.150.0/go.mod h1:qYh2PcJsgK0qJ0+vjLYow+E2DAJ7o1MuZ3r0hakgxiE=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.150.0 h1:Jg2Ets7VwgAZUmnaptx7TJTnjPG7nYZhGOeaveTpC/8=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.150.0/go.mod h1:SdJHLLrcNFtcPIY4NUTLyx4+0gZZRyOTJfrVLUVxILQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -366,8 +366,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_golang/exp v0.0.0-20260408213824-a4984284cf47 h1:T3e9v0JkIsRw9GKIw8kumjtPeroVzD44+dqpWzXQD84=
github.com/prometheus/client_golang/exp v0.0.0-20260408213824-a4984284cf47/go.mod h1:xA9/W/d/j+zeg2Kp3UgOF+E2rbq7KNVa5PLJPJBE0lw=
github.com/prometheus/client_golang/exp v0.0.0-20260411065453-32d05ffab50a h1:HhtTz23wnlOur6WC5/j6+MXPweyJrkarM0sj+sg6OKI=
github.com/prometheus/client_golang/exp v0.0.0-20260411065453-32d05ffab50a/go.mod h1:xA9/W/d/j+zeg2Kp3UgOF+E2rbq7KNVa5PLJPJBE0lw=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
@@ -376,8 +376,8 @@ github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEo
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc=
github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo=
github.com/prometheus/prometheus v0.311.1 h1:15uKGfULPFWIIvrY46PiqyaXTHU+4HO3c/SFz7Z1sEY=
github.com/prometheus/prometheus v0.311.1/go.mod h1:gjsCxTKtHO1Q8T9333u1s+lUR1OjPyM7ruuGH8RvVyo=
github.com/prometheus/prometheus v0.311.2 h1:6fBxp93y08GAZGNT1o3bIhgV/AMYvBFfU+ltDNEsHg8=
github.com/prometheus/prometheus v0.311.2/go.mod h1:gjsCxTKtHO1Q8T9333u1s+lUR1OjPyM7ruuGH8RvVyo=
github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs=
github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU=
github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo=
@@ -432,42 +432,42 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/collector/component v1.55.0 h1:45nb42/UqPDhRdS8FgGRDybRsWSuvS+r6WC2VTVqIRw=
go.opentelemetry.io/collector/component v1.55.0/go.mod h1:7EpGxVpqFkZ2HidyiE9MLvh4cuKU7ye6i5OtxxiYKps=
go.opentelemetry.io/collector/component/componentstatus v0.149.0 h1:6UM+yHoMtZmyu1Sz8Gy9B27eBtURR5sFinWf2LRdE7Y=
go.opentelemetry.io/collector/component/componentstatus v0.149.0/go.mod h1:6jTQab606D+ICobKO/q4UrPy6hwvU3ZY+LcJnPrurds=
go.opentelemetry.io/collector/component/componenttest v0.149.0 h1:7SSYIiLpe84LGfYAp7RCkzYuYLuYVSZVn/K/qsJZgHY=
go.opentelemetry.io/collector/component/componenttest v0.149.0/go.mod h1:8xPU3XMsI+J4vfy87YG1bsCVTeedligKWgBcPEZ0yzw=
go.opentelemetry.io/collector/confmap v1.55.0 h1:pBJbjWfIT3q8cy+eVcHCCYXx984NxOjaGTHqIWsXC1A=
go.opentelemetry.io/collector/confmap v1.55.0/go.mod h1:rSKNE5ztWU6fS0pT8rwACn573r4jJc4QzJyoQzZIVtE=
go.opentelemetry.io/collector/confmap/xconfmap v0.149.0 h1:D/WzrxKOKedRztoY/MiAj9z8W0/2unpTCbANFCwvuuY=
go.opentelemetry.io/collector/confmap/xconfmap v0.149.0/go.mod h1:lJ1nHIQbH6L5wnj5vTWGr7RWi5Kib2KX5stAxar13Jo=
go.opentelemetry.io/collector/consumer v1.55.0 h1:7Per8P4J0nlBrFVSXb+nwZ+egiel1BRtggZngyykGsM=
go.opentelemetry.io/collector/consumer v1.55.0/go.mod h1:Qrn5fDp/HpDmUp+l2RGKsdKyOPlgGlaZPKvw/z9FfEc=
go.opentelemetry.io/collector/consumer/consumertest v0.149.0 h1:IxOkDInfuUM8mT+rMNGtdUuuDlV9X2VS4WAQ/dZSYqg=
go.opentelemetry.io/collector/consumer/consumertest v0.149.0/go.mod h1:ZMvFzch5IRjYBvj6WPc30HRy19smS0WFBXaOu16Wac0=
go.opentelemetry.io/collector/consumer/xconsumer v0.149.0 h1:2z0wRTDsWqPdcC8xp9HJIAJej+07g4/yJrS0xkJJ4hA=
go.opentelemetry.io/collector/consumer/xconsumer v0.149.0/go.mod h1:AG9w3bk38dq3Rk7C2JGf3jw4ldxR063ujYBm3eiMJ7k=
go.opentelemetry.io/collector/featuregate v1.55.0 h1:s/bE8135+8GZpVlQ9qLXQjvprE9KNOGsLhNkqm+EDEU=
go.opentelemetry.io/collector/featuregate v1.55.0/go.mod h1:PS7zY/zaCb28EqciePVwRHVhc3oKortTFXsi3I6ee4g=
go.opentelemetry.io/collector/internal/componentalias v0.149.0 h1:0cH1hCy4vujhnAc6z4baLM0mauFZPfyqF9HtQF6YvGo=
go.opentelemetry.io/collector/internal/componentalias v0.149.0/go.mod h1:8oIpxyFLZECp6O7zFDTGeWw72CQ67C8wb6FqAL9wvCo=
go.opentelemetry.io/collector/internal/testutil v0.149.0 h1:OWfUPO3NFKSaJtz/SBZph/2ENHbr/VbzzlBadKUhm8o=
go.opentelemetry.io/collector/internal/testutil v0.149.0/go.mod h1:Jkjs6rkqs973LqgZ0Fe3zrokQRKULYXPIf4HuqStiEE=
go.opentelemetry.io/collector/pdata v1.55.0 h1:WBgye8bo8koUyV9Vmp/r2Q3lgDezdsgfKDQAaM1oT2I=
go.opentelemetry.io/collector/pdata v1.55.0/go.mod h1:6jPrbM4tuliCPACDznjFtxnnHisfKfzwrBVoeuESYuk=
go.opentelemetry.io/collector/pdata/pprofile v0.149.0 h1:4/uI7wsgMnmBZm6Z/VNY6sWnaFN09+Nk3jr7XEmTtOk=
go.opentelemetry.io/collector/pdata/pprofile v0.149.0/go.mod h1:4uprs5wMp4MI1/bcP5mYERfobFxBn+QoeNFQBUSVk/U=
go.opentelemetry.io/collector/pdata/testdata v0.149.0 h1:Y9WCJpr9fvpCGmvh6wK0i+QtOn0OyGXnoOkLfq7xtok=
go.opentelemetry.io/collector/pdata/testdata v0.149.0/go.mod h1:5BscHKM7cy9lzPMpnaIFaTOMI8SI02AsEF4rH3aRJBg=
go.opentelemetry.io/collector/pipeline v1.55.0 h1:jxFicLy3QYWQaQZp2f+wdCfHpOYb3mKNTqHR1KIut+U=
go.opentelemetry.io/collector/pipeline v1.55.0/go.mod h1:RD90NG3Jbk965Xaqym3JyHkuol4uZJjQVUkD9ddXJIs=
go.opentelemetry.io/collector/processor v1.55.0 h1:d4bCnvtAVTjy1/3JOj3ud6eEZCMsaz2C9lVStB1FM/8=
go.opentelemetry.io/collector/processor v1.55.0/go.mod h1:ruMOb0N76S+H8rhzkLoIzALAMCe7XI9qFONDWsR5IOA=
go.opentelemetry.io/collector/processor/processortest v0.149.0 h1:J73vvUuqyG2Ojnc2CQd6yNqI/wqoWuZuNo/JQH7WTsU=
go.opentelemetry.io/collector/processor/processortest v0.149.0/go.mod h1:z7YKrOnM/y6h7ovZ3JKpCLqzRlE+xoeAhf9PU76EGtg=
go.opentelemetry.io/collector/processor/xprocessor v0.149.0 h1:hmQS3HfO9VqSVsf5h1qIwg5DRYzP1WHxg32tBkHW2Es=
go.opentelemetry.io/collector/processor/xprocessor v0.149.0/go.mod h1:kMEqqiVkTFedwNmFRg2wjA9A+CE44+CrM1wHb5Vfu0k=
go.opentelemetry.io/collector/component v1.56.0 h1:fOCs36Dxg95w2RQCVI2i5IsHc5IbZ99vmbipK9FM7pQ=
go.opentelemetry.io/collector/component v1.56.0/go.mod h1:MkAjcSc2T0BiYf/uARZdTlfnxBB9BwmvY6v08D+qeY4=
go.opentelemetry.io/collector/component/componentstatus v0.150.0 h1:Jy/9quAWwDN9BqMEzZn0BEzVxxWARa1b/wvBQW6yPxs=
go.opentelemetry.io/collector/component/componentstatus v0.150.0/go.mod h1:FFmyHgPqwtvkezi9Z9NYgXxY0m3N0oUMMd/HIAEq8vY=
go.opentelemetry.io/collector/component/componenttest v0.150.0 h1:pT7avT/Pfn8tAOOlmFWgtOaGvXY0nxSwrivnhOl/LH0=
go.opentelemetry.io/collector/component/componenttest v0.150.0/go.mod h1:D+7mfbcZ/TfneQRZNtVwH+/YKQdalc1joa9NhH1BGPk=
go.opentelemetry.io/collector/confmap v1.56.0 h1:YjLll5L77Z3up94t/pdOMaH35kwd28EtjBORewfIjmA=
go.opentelemetry.io/collector/confmap v1.56.0/go.mod h1:iprN8aL/euBXig6bpLZSZqi+8CZIgE9/Pm6y3qb1QWY=
go.opentelemetry.io/collector/confmap/xconfmap v0.150.0 h1:PR+c4/Ly4Plx862jJ1Cg+HFewMrHsWaN9eKxrYBhtK4=
go.opentelemetry.io/collector/confmap/xconfmap v0.150.0/go.mod h1:WDLyne6Zmoi5OZ46Hfg4z/5KhsBG1mFuYjoK20VcDcA=
go.opentelemetry.io/collector/consumer v1.56.0 h1:olhuaTI3cic6VfcraXt3qqsv1v4Qxf55gHxOO1uIVXw=
go.opentelemetry.io/collector/consumer v1.56.0/go.mod h1:FpnfeTLQAdcOtzrkQ36Z+E5aconIymkv9xpJuAdLvy0=
go.opentelemetry.io/collector/consumer/consumertest v0.150.0 h1:DQtVy0BUTQqHKKOyM0hYnxV8H2kKHjayc8aMMa2fow0=
go.opentelemetry.io/collector/consumer/consumertest v0.150.0/go.mod h1:2mgIllFOgoq+SQ7QfXzaZn65pa6OZWobcy3yj+Ik9Ug=
go.opentelemetry.io/collector/consumer/xconsumer v0.150.0 h1:URO73bAV00wTH9bJeloqaiLgS3Q80GNci+nm1iZ3W6Q=
go.opentelemetry.io/collector/consumer/xconsumer v0.150.0/go.mod h1:BMcOInfcRUpVZ2R4qa3vNglvU6mWL+0dhAayH87YSB8=
go.opentelemetry.io/collector/featuregate v1.56.0 h1:NjcbOZkdCSXddAJmFLdO+pv1gmAgrU6sC5PBga2KlKI=
go.opentelemetry.io/collector/featuregate v1.56.0/go.mod h1:4ga1QBMPEejXXmpyJS8lmaRpknJ3Lb9Bvk6e420bUFU=
go.opentelemetry.io/collector/internal/componentalias v0.150.0 h1:qvcJr0m/fFgsc3x6Oya3RNDOZp/WyfmOKIv9jtvoLYw=
go.opentelemetry.io/collector/internal/componentalias v0.150.0/go.mod h1:abuQP8ELgPpCSq6xbHM1b2hPOGqaKxUeLgHHdU/XGP0=
go.opentelemetry.io/collector/internal/testutil v0.150.0 h1:J4PLQGPfbLVaL5eI1aMc0m0TMixV9wzBhNhoHU00J0I=
go.opentelemetry.io/collector/internal/testutil v0.150.0/go.mod h1:Jkjs6rkqs973LqgZ0Fe3zrokQRKULYXPIf4HuqStiEE=
go.opentelemetry.io/collector/pdata v1.56.0 h1:W+QAfN2Iz8SNss1T5JNzRWFnw+7oP1vXBQH9ZuOJkXY=
go.opentelemetry.io/collector/pdata v1.56.0/go.mod h1:usR9utboXufbD1rp1oJy+3smQXXpZ+CsI3WN7QsiOs0=
go.opentelemetry.io/collector/pdata/pprofile v0.150.0 h1:Ae+FxmYXDdcqeLqIAdNSO3YGxco7RS2mIMTdjvavfso=
go.opentelemetry.io/collector/pdata/pprofile v0.150.0/go.mod h1:tEBeGysY/LpIh39NLoQQl3qmUBOF9wyH5p/fmn7smzM=
go.opentelemetry.io/collector/pdata/testdata v0.150.0 h1:nZE3UNuDYd9lfXTk/n5UplPwXBD4tptDIZH5PvWhHKQ=
go.opentelemetry.io/collector/pdata/testdata v0.150.0/go.mod h1:RPOOH2KNevfhu7adoEXVTNtPPZsHwbrSOQKeFZE/220=
go.opentelemetry.io/collector/pipeline v1.56.0 h1:KfyCes/EPC2hpBhU28z9WnJzSRlBYS5FfMHOYAXHbXw=
go.opentelemetry.io/collector/pipeline v1.56.0/go.mod h1:RD90NG3Jbk965Xaqym3JyHkuol4uZJjQVUkD9ddXJIs=
go.opentelemetry.io/collector/processor v1.56.0 h1:5UGXZorhoWg0gnhvDZWYIOUp2dEWfWH28vE+/wV3cMA=
go.opentelemetry.io/collector/processor v1.56.0/go.mod h1:ptpRRg6r9YdXGSKdQcsM6ePH6ZkkkMnlNY8OxIC4Q7c=
go.opentelemetry.io/collector/processor/processortest v0.150.0 h1:M3p/ZcAAKnh/3aZgTgXngWU+9C0Yx/sQLgZNPBP/ZSo=
go.opentelemetry.io/collector/processor/processortest v0.150.0/go.mod h1:szMO3iQ+CDQLCq6y8+deTUc6FbLKi9zNqwtm5+umZk8=
go.opentelemetry.io/collector/processor/xprocessor v0.150.0 h1:tyIM+WT6NRRkynTGtPwPYDRvWlv3YVt9PDteLscW2mU=
go.opentelemetry.io/collector/processor/xprocessor v0.150.0/go.mod h1:m7uoLC+a9G3EOr9v2+fItUMV0EOFrL+RUkPAspBLK1w=
go.opentelemetry.io/contrib/detectors/gcp v1.43.0 h1:62yY3dT7/ShwOxzA0RsKRgshBmfElKI4d/Myu2OxDFU=
go.opentelemetry.io/contrib/detectors/gcp v1.43.0/go.mod h1:RyaZMFY7yi1kAs45S6mbFGz8O8rqB0dTY14uzvG4LCs=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 h1:0Qx7VGBacMm9ZENQ7TnNObTYI4ShC+lHI16seduaxZo=
@@ -478,8 +478,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8V
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.42.0 h1:lSZHgNHfbmQTPfuTmWVkEu8J8qXaQwuV30pjCcAUvP8=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.42.0/go.mod h1:so9ounLcuoRDu033MW/E0AD4hhUjVqswrMF5FoZlBcw=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 h1:TC+BewnDpeiAmcscXbGMfxkO+mwYUwE/VySwvw88PfA=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0/go.mod h1:J/ZyF4vfPwsSr9xJSPyQ4LqtcTPULFR64KwTikGLe+A=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
@@ -509,20 +509,20 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f h1:W3F4c+6OLc6H2lb//N1q4WpJkhzJCK5J6kUi1NTVXfM=
golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
golang.org/x/mod v0.35.0 h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM=
golang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -536,34 +536,34 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U=
golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
golang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c=
golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/api v0.275.0 h1:vfY5d9vFVJeWEZT65QDd9hbndr7FyZ2+6mIzGAh71NI=
google.golang.org/api v0.275.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw=
google.golang.org/genproto v0.0.0-20260406210006-6f92a3bedf2d h1:N1Ec54vZnIPd7MnxRiYLW+oY4fDR4BOS/LrssdD9+ek=
google.golang.org/genproto v0.0.0-20260406210006-6f92a3bedf2d/go.mod h1:c2hJ1grtnH0xUiEKGDGkjGNTJ1Hy2LrblyKOHF0sqRM=
google.golang.org/genproto/googleapis/api v0.0.0-20260406210006-6f92a3bedf2d h1:/aDRtSZJjyLQzm75d+a1wOJaqyKBMvIAfeQmoa3ORiI=
google.golang.org/genproto/googleapis/api v0.0.0-20260406210006-6f92a3bedf2d/go.mod h1:etfGUgejTiadZAUaEP14NP97xi1RGeawqkjDARA/UOs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d h1:wT2n40TBqFY6wiwazVK9/iTWbsQrgk5ZfCSVFLO9LQA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY=
google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw=
google.golang.org/genproto v0.0.0-20260414002931-afd174a4e478 h1:aLsVTW0lZ8+IY5u/ERjZSCvAmhuR7slKzyha3YikDNA=
google.golang.org/genproto v0.0.0-20260414002931-afd174a4e478/go.mod h1:YJAzKjfHIUHb9T+bfu8L7mthAp7VVXQBUs1PLdBWS7M=
google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 h1:yQugLulqltosq0B/f8l4w9VryjV+N/5gcW0jQ3N8Qec=
google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478/go.mod h1:C6ADNqOxbgdUUeRTU+LCHDPB9ttAMCTff6auwCVa4uc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 h1:RmoJA1ujG+/lRGNfUnOMfhCy5EipVMyvUE+KNbPbTlw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
@@ -590,8 +590,8 @@ k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg=
k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c=
k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc=
k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0=
k8s.io/kube-openapi v0.0.0-20260330154417-16be699c7b31 h1:V+sn9a/1fEYDGwnllCmqXBk8x7obZ+hl869Q3Abumkg=
k8s.io/kube-openapi v0.0.0-20260330154417-16be699c7b31/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0=
k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f h1:4Qiq0YAoQATdgmHALJWz9rJ4fj20pB3xebpB4CFNhYM=
k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0=
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 h1:kBawHLSnx/mYHmRnNUf9d4CpjREbeZuxoSGOX/J+aYM=
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=

View File

@@ -1,8 +1,11 @@
package stringsutil
import (
"slices"
"sync"
"unicode"
"unicode/utf8"
"unsafe"
)
// LimitStringLen limits the length of s with maxLen.
@@ -21,9 +24,24 @@ func LimitStringLen(s string, maxLen int) string {
}
// AppendLowercase appends the lowercase version of s to dst and returns the result.
//
// It is a faster alternative to strings.ToLower. Prefer ToLowercaseFunc when
// possible, since it avoids copying s entirely when s is already lowercase.
func AppendLowercase(dst []byte, s string) []byte {
	// Locate the first character that actually needs lowercasing.
	idx := uppercaseIndex(s)
	if idx < 0 {
		// Fast path: s contains no uppercase characters - copy it verbatim.
		return append(dst, s...)
	}
	// Slow path: copy the already-lowercase prefix as-is,
	// then convert the remainder starting at the first uppercase character.
	dst = slices.Grow(dst, len(s))
	dst = append(dst, s[:idx]...)
	return appendLowercaseInternal(dst, s[idx:])
}
func appendLowercaseInternal(dst []byte, s string) []byte {
dstLen := len(dst)
// Try fast path at first by assuming that s contains only ASCII chars.
@@ -49,3 +67,115 @@ func AppendLowercase(dst []byte, s string) []byte {
}
return dst
}
// ToLowercaseFunc calls f with a lowercase version of s.
//
// The string passed to f is valid only for the duration of the f call,
// since it may alias a pooled buffer that is reused afterwards.
func ToLowercaseFunc(s string, f func(s string)) {
	idx := uppercaseIndex(s)
	if idx < 0 {
		// Fast path: s is already lowercase, so hand it to f without copying.
		f(s)
		return
	}
	// Slow path: build the lowercase copy inside a pooled buffer
	// in order to avoid a per-call allocation.
	sb := getStringBuilder()
	defer putStringBuilder(sb)
	sb.buf = slices.Grow(sb.buf, len(s))
	sb.appendString(s[:idx])
	sb.buf = appendLowercaseInternal(sb.buf, s[idx:])
	f(sb.string())
}
// IsLowercase returns true if the given string does not contain uppercase characters.
func IsLowercase(s string) bool {
	return uppercaseIndex(s) < 0
}

// uppercaseIndex returns the index of the first uppercase character in s,
// or -1 if s does not contain uppercase characters.
func uppercaseIndex(s string) int {
	i := 0
	// Fast path for ASCII-only prefixes - inspect 8 bytes per iteration.
	for i+8 <= len(s) {
		w := uint64FromString(s[i:])
		// Every ASCII byte has its high bit clear. The check below is the
		// 8-bytes-at-once form of s[i] < utf8.RuneSelf.
		if w&0x8080808080808080 != 0 {
			// Non-ASCII byte in this window - fall through to the generic loop.
			break
		}
		// A byte with bit 0x20 clear may be an ASCII uppercase letter
		// (it may also be '@', '[', '\', ']', '^', '_' or a control char),
		// so scan the window byte by byte only in that case.
		if ^w&0x2020202020202020 != 0 {
			for j := 0; j < 8; j++ {
				if c := s[i+j]; 'A' <= c && c <= 'Z' {
					return i + j
				}
			}
		}
		i += 8
	}
	// Generic loop: handles the tail shorter than 8 bytes and any non-ASCII runes.
	for i < len(s) {
		c := s[i]
		if c < utf8.RuneSelf {
			if 'A' <= c && c <= 'Z' {
				return i
			}
			i++
			continue
		}
		r, size := utf8.DecodeRuneInString(s[i:])
		if unicode.ToLower(r) != r {
			return i
		}
		i += size
	}
	return -1
}

// uint64FromString interprets the first 8 bytes of string b as a little-endian uint64.
// The same as binary.LittleEndian.Uint64, but operates on strings.
//
// This function is a bit slower than the (*uint64)(unsafe.Pointer(ptr)) alternative,
// but it does not suffer from data alignment issues.
// See: https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3927
func uint64FromString(b string) uint64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// stringBuilder is a reusable byte buffer for constructing strings without
// extra copies. Obtain instances via getStringBuilder and return them
// via putStringBuilder.
type stringBuilder struct {
	buf []byte
}

// appendString appends s to the accumulated buffer.
func (sb *stringBuilder) appendString(s string) {
	sb.buf = append(sb.buf, s...)
}

// reset truncates the buffer, keeping its capacity for reuse.
func (sb *stringBuilder) reset() {
	sb.buf = sb.buf[:0]
}

// string returns the accumulated bytes as a string without copying them.
//
// The returned string aliases the internal buffer, so it must not be used
// after the buffer is modified or the builder is returned to the pool.
func (sb *stringBuilder) string() string {
	return unsafe.String(unsafe.SliceData(sb.buf), len(sb.buf))
}

// stringBuilderPool recycles stringBuilder instances in order to reduce
// allocations on hot paths.
var stringBuilderPool = sync.Pool{
	New: func() any {
		return new(stringBuilder)
	},
}

// getStringBuilder returns an empty stringBuilder from the pool.
// Pass it to putStringBuilder once it is no longer needed.
func getStringBuilder() *stringBuilder {
	sb := stringBuilderPool.Get().(*stringBuilder)
	return sb
}

// putStringBuilder resets sb and places it back into the pool for reuse.
func putStringBuilder(sb *stringBuilder) {
	sb.reset()
	stringBuilderPool.Put(sb)
}

View File

@@ -23,18 +23,255 @@ func TestLimitStringLen(t *testing.T) {
f("abcde", 5, "abcde")
}
func TestAppendLowercase(t *testing.T) {
f := func(s, resultExpected string) {
func TestAppendLowercaseToLowercaseFunc(t *testing.T) {
f := func(s, expected string) {
t.Helper()
result := AppendLowercase(nil, s)
if string(result) != resultExpected {
t.Fatalf("unexpected result; got %q; want %q", result, resultExpected)
got := AppendLowercase(nil, s)
if string(got) != expected {
t.Fatalf("unexpected result; got %q; want %q", got, expected)
}
ToLowercaseFunc(s, func(s string) {
if s != expected {
t.Fatalf("unexpected result; got %q; want %q", got, expected)
}
})
}
// Empty string
f("", "")
// ASCII lowercase
f("hello", "hello")
f("world", "world")
f("abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz")
// ASCII uppercase
f("HELLO", "hello")
f("WORLD", "world")
f("ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz")
// ASCII mixed case
f("Hello", "hello")
f("heLLo", "hello")
f("WOrld", "world")
f("HeLLo WoRLd", "hello world")
// Unicode Cyrillic
f("привіт", "привіт")
f("світ", "світ")
f("ПРИВІТ", "привіт")
f("СВІТ", "світ")
f("Привіт", "привіт")
f("приВіт", "привіт")
// Unicode Greek
f("αβγδε", "αβγδε")
f("ΑΒΓΔΕ", "αβγδε")
f("Αβγδε", "αβγδε")
// Latin Extended
f("café", "café")
f("naïve", "naïve")
f("niño", "niño")
f("ærøå", "ærøå")
f("ñüöäß", "ñüöäß")
f("CAFÉ", "café")
f("NAÏVE", "naïve")
f("NIÑO", "niño")
f("ÆRØÅ", "ærøå")
f("ÑÜÖÄ", "ñüöä")
f("Café", "café")
f("naÏve", "naïve")
f("Niño", "niño")
// Thai
f("สวัสดี", "สวัสดี")
f("โลก", "โลก")
// Japanese Hiragana
f("こんにちは", "こんにちは")
f("せかい", "せかい")
// Japanese Katakana
f("コンニチハ", "コンニチハ")
f("セカイ", "セカイ")
// Chinese
f("你好", "你好")
f("世界", "世界")
// Devanagari
f("नमस्ते", "नमस्ते")
f("दुनिया", "दुनिया")
// Georgian
f("გამარჯობა", "გამარჯობა")
f("ᲒᲐᲛᲐᲠᲯᲝᲑᲐ", "გამარჯობა")
// Armenian
f("բարեւ", "բարեւ")
f("ԲԱՐԵՒ", "բարեւ")
// Turkish
f("İSTANBUL", "istanbul")
// Mixed languages
f("hello世界", "hello世界")
f("привет123", "привет123")
f("test你好", "test你好")
f("Hello世界", "hello世界")
f(ривет123", "привет123")
f("Test你好", "test你好")
// Emoji and symbols
f("hello😀world", "hello😀world")
f("test✨case", "test✨case")
f("foo🎉bar", "foo🎉bar")
f("HELLO😀WORLD", "hello😀world")
// Digits
f("hello123", "hello123")
f("test456world", "test456world")
f("abc123def456", "abc123def456")
f("123", "123")
f("456789", "456789")
f("0", "0")
f("HELLO123", "hello123")
f("TEST456WORLD", "test456world")
f("ABC123DEF456", "abc123def456")
// Special characters
f("hello-world", "hello-world")
f("test_case", "test_case")
f("foo.bar", "foo.bar")
f("a@b#c$d", "a@b#c$d")
f("!@#$%", "!@#$%")
f(".,;:-_", ".,;:-_")
f("()[]{}", "()[]{}")
f("HELLO-WORLD", "hello-world")
f("TEST_CASE", "test_case")
f("FOO.BAR", "foo.bar")
f("A@B#C$D", "a@b#c$d")
}
func TestIsLower(t *testing.T) {
f := func(s string, want bool) {
t.Helper()
if IsLowercase(s) != want {
t.Fatalf("unexpected result; got %v; want %v for %q", IsLowercase(s), want, s)
}
}
f("", "")
f("foo", "foo")
f("FOO", "foo")
f("foo БаР baz 123", "foo бар baz 123")
// Empty string
f("", true)
// ASCII lowercase
f("hello", true)
f("world", true)
f("abcdefghijklmnopqrstuvwxyz", true)
// ASCII uppercase
f("HELLO", false)
f("WORLD", false)
f("ABCDEFGHIJKLMNOPQRSTUVWXYZ", false)
// ASCII mixed case
f("Hello", false)
f("heLLo", false)
f("WOrld", false)
// Unicode Cyrillic
f("привіт", true)
f("світ", true)
f("ПРИВІТ", false)
f("СВІТ", false)
f("Привіт", false)
f("приВіт", false)
// Unicode Greek
f("αβγδε", true)
f("ΑΒΓΔΕ", false)
f("Αβγδε", false)
// Latin Extended with diacritics
f("café", true)
f("naïve", true)
f("niño", true)
f("ærøå", true)
f("ñüöäß", true)
f("CAFÉ", false)
f("NAÏVE", false)
f("NIÑO", false)
f("ÆRØÅ", false)
f("ÑÜÖÄ", false)
f("Café", false)
f("naÏve", false)
f("Niño", false)
// Thai
f("สวัสดี", true)
f("โลก", true)
// Japanese Hiragana
f("こんにちは", true)
f("せかい", true)
// Japanese Katakana
f("コンニチハ", true)
f("セカイ", true)
// Chinese characters
f("你好", true)
f("世界", true)
// Devanagari
f("नमस्ते", true)
f("दुनिया", true)
// Georgian
f("გამარჯობა", true)
f("ᲒᲐᲛᲐᲠᲯᲝᲑᲐ", false)
// Armenian
f("բարեւ", true)
f("ԲԱՐԵՒ", false)
// Mixed languages
f("hello世界", true)
f("привет123", true)
f("test你好", true)
f("Hello世界", false)
f(ривет123", false)
f("Test你好", false)
// Emoji and symbols
f("hello😀world", true)
f("test✨case", true)
f("foo🎉bar", true)
// Digits
f("hello123", true)
f("test456world", true)
f("abc123def456", true)
f("123", true)
f("456789", true)
f("0", true)
f("HELLO123", false)
f("TEST456WORLD", false)
f("ABC123DEF456", false)
// Special characters
f("hello-world", true)
f("test_case", true)
f("foo.bar", true)
f("a@b#c$d", true)
f("!@#$%", true)
f(".,;:-_", true)
f("()[]{}", true)
f("HELLO-WORLD", false)
f("TEST_CASE", false)
f("FOO.BAR", false)
f("A@B#C$D", false)
}

View File

@@ -1,96 +1,132 @@
package stringsutil
import (
"strings"
"sync/atomic"
"testing"
)
func BenchmarkAppendLowercase(b *testing.B) {
b.Run("ascii-all-lowercase", func(b *testing.B) {
benchmarkAppendLowercase(b, []string{"foo bar baz abc def", "23k umlkds", "lq, poweri2349)"})
b.Run("ascii-full-lowercase", func(b *testing.B) {
data := `started kubernetes log collector for node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`
benchmarkToLower(b, data)
})
b.Run("ascii-some-uppercase", func(b *testing.B) {
benchmarkAppendLowercase(b, []string{"Foo Bar baz ABC def", "23k umlKDs", "lq, Poweri2349)"})
b.Run("ascii-partial-lowercase", func(b *testing.B) {
data := `started Kubernetes log collector for Node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`
benchmarkToLower(b, data)
})
b.Run("ascii-all-uppercase", func(b *testing.B) {
benchmarkAppendLowercase(b, []string{"FOO BAR BAZ ABC DEF", "23K UMLKDS", "LQ, POWERI2349)"})
b.Run("ascii-full-uppercase", func(b *testing.B) {
data := `STARTED KUBERNETES LOG COLLECTOR FOR NODE "GKE-SANDBOX-E2-STANDARD-8-20250715071-5B0A2CE9-VYKO"`
benchmarkToLower(b, data)
})
b.Run("unicode-all-lowercase", func(b *testing.B) {
benchmarkAppendLowercase(b, []string{"хщцукодл длобючф дл", "23и юбывлц", "лф, длощшу2349)"})
b.Run("ascii-partial-uppercase", func(b *testing.B) {
data := `started KUBERNETES log collector FOR NODE "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`
benchmarkToLower(b, data)
})
b.Run("unicode-some-uppercase", func(b *testing.B) {
benchmarkAppendLowercase(b, []string{"Хщцукодл Длобючф ДЛ", "23и юбыВЛц", "лф, Длощшу2349)"})
b.Run("ascii-full-title", func(b *testing.B) {
data := `Started Kubernetes Log Collector For Node "Gke-Sandbox-E2-Standard-8-20250715071-5b0a2ce9-Vyko"`
benchmarkToLower(b, data)
})
b.Run("unicode-all-uppercase", func(b *testing.B) {
benchmarkAppendLowercase(b, []string{"ХЩЦУКОДЛ ДЛОБЮЧФ ДЛ", "23И ЮБЫВЛЦ", "ЛФ, ДЛОЩШУ2349)"})
b.Run("ascii-partial-title", func(b *testing.B) {
data := `started Kubernetes log Collector for Node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`
benchmarkToLower(b, data)
})
b.Run("ascii-mixcase", func(b *testing.B) {
data := `Started Kubernetes log COLLECTOR for nodE "GKE-Sandbox-E2-Standard-8-20250715071-5b0a2ce9-VYKO"`
benchmarkToLower(b, data)
})
b.Run("unicode-full-lowercase", func(b *testing.B) {
data := `запущен кубернетес лог коллектор на ноде гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`
benchmarkToLower(b, data)
})
b.Run("unicode-partial-lowercase", func(b *testing.B) {
data := `запущен КубернеТЕС лОг кОллектор нА НодЕ гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`
benchmarkToLower(b, data)
})
b.Run("unicode-full-uppercase", func(b *testing.B) {
data := `ЗАПУЩЕН КУБЕРНЕТЕС ЛОГ КОЛЛЕКТОР НА НОДЕ ГКЕ-СЕНДБОКС-Е2-СТАНДАРТ-8-20250715071-5В0А2СЕ9-ВИКО`
benchmarkToLower(b, data)
})
b.Run("unicode-partial-uppercase", func(b *testing.B) {
data := `запущен КУБЕРНЕТЕС лог коллектор НА НОДЕ гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`
benchmarkToLower(b, data)
})
b.Run("unicode-full-title", func(b *testing.B) {
data := `Запущен Кубернетес Лог Коллектор На Ноде Гке-Сендбокс-Е2-Стандарт-8-20250715071-5В0а2се9-Вико`
benchmarkToLower(b, data)
})
b.Run("unicode-partial-title", func(b *testing.B) {
data := `запущен Кубернетес лог Коллектор на Ноде гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`
benchmarkToLower(b, data)
})
b.Run("unicode-mixcase", func(b *testing.B) {
data := `Запущен Кубернетес лог КОЛЛЕКТОР на нодЕ гке-Сендбокс-Е2-Стандарт-8-20250715071-5В0а2се9-ВИКО`
benchmarkToLower(b, data)
})
}
func benchmarkAppendLowercase(b *testing.B, a []string) {
n := 0
for _, s := range a {
n += len(s)
}
func benchmarkToLower(b *testing.B, s string) {
b.Helper()
b.ReportAllocs()
b.SetBytes(int64(n))
b.SetBytes(int64(len(s)))
b.RunParallel(func(pb *testing.PB) {
var buf []byte
var n uint64
for pb.Next() {
buf = buf[:0]
for _, s := range a {
buf = AppendLowercase(buf, s)
}
n += uint64(len(buf))
buf = AppendLowercase(buf[:0], s)
}
GlobalSink.Add(n)
})
}
func BenchmarkStringsToLower(b *testing.B) {
b.Run("ascii-all-lowercase", func(b *testing.B) {
benchmarkStringsToLower(b, []string{"foo bar baz abc def", "23k umlkds", "lq, poweri2349)"})
})
b.Run("ascii-some-uppercase", func(b *testing.B) {
benchmarkStringsToLower(b, []string{"Foo Bar baz ABC def", "23k umlKDs", "lq, Poweri2349)"})
})
b.Run("ascii-all-uppercase", func(b *testing.B) {
benchmarkStringsToLower(b, []string{"FOO BAR BAZ ABC DEF", "23K UMLKDS", "LQ, POWERI2349)"})
})
b.Run("unicode-all-lowercase", func(b *testing.B) {
benchmarkStringsToLower(b, []string{"хщцукодл длобючф дл", "23и юбывлц", "лф, длощшу2349)"})
})
b.Run("unicode-some-uppercase", func(b *testing.B) {
benchmarkStringsToLower(b, []string{"Хщцукодл Длобючф ДЛ", "23и юбыВЛц", "лф, Длощшу2349)"})
})
b.Run("unicode-all-uppercase", func(b *testing.B) {
benchmarkStringsToLower(b, []string{"ХЩЦУКОДЛ ДЛОБЮЧФ ДЛ", "23И ЮБЫВЛЦ", "ЛФ, ДЛОЩШУ2349)"})
})
}
func benchmarkStringsToLower(b *testing.B, a []string) {
n := 0
for _, s := range a {
n += len(s)
}
b.ReportAllocs()
b.SetBytes(int64(n))
b.RunParallel(func(pb *testing.PB) {
var buf []byte
var n uint64
for pb.Next() {
buf = buf[:0]
for _, s := range a {
sLower := strings.ToLower(s)
buf = append(buf, sLower...)
}
n += uint64(len(buf))
}
GlobalSink.Add(n)
GlobalSink.Add(uint64(len(buf)))
})
}
var GlobalSink atomic.Uint64
// BenchmarkIsLowercase benchmarks IsLowercase on ASCII and Unicode inputs.
// The "mismatch" cases contain no uppercase characters; the "match-*" cases
// place the first uppercase character at the start, middle or end of the input.
func BenchmarkIsLowercase(b *testing.B) {
	benchCases := []struct {
		name     string
		data     string
		expected bool
	}{
		{"ascii-mismatch", `started kubernetes log collector for node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`, true},
		{"ascii-match-start", `started Kubernetes log collector for Node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`, false},
		{"ascii-match-middle", `started kubernetes log collector for Node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyko"`, false},
		{"ascii-match-end", `started kubernetes log collector for node "gke-sandbox-e2-standard-8-20250715071-5b0a2ce9-vyKo"`, false},
		{"unicode-mismatch", `запущен кубернетес лог коллектор на ноде гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`, true},
		{"unicode-match-start", `запущен Кубернетес лог коллектор на ноде гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`, false},
		{"unicode-match-middle", `запущен кубернетес лог коллектор на Ноде гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-вико`, false},
		{"unicode-match-end", `запущен кубернетес лог коллектор на ноде гке-сендбокс-е2-стандарт-8-20250715071-5в0а2се9-виКо`, false},
	}
	for _, bc := range benchCases {
		b.Run(bc.name, func(b *testing.B) {
			benchmarkIsLowercase(b, bc.data, bc.expected)
		})
	}
}
// benchmarkIsLowercase measures IsLowercase on s, while also verifying that it
// returns the expected result on every iteration.
//
// Throughput is reported in bytes of input processed per second.
func benchmarkIsLowercase(b *testing.B, s string, expected bool) {
	b.Helper()
	b.ReportAllocs()
	b.SetBytes(int64(len(s)))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if IsLowercase(s) != expected {
				// Use Errorf instead of Fatalf: Fatal/FailNow must only be
				// called from the goroutine running the benchmark function,
				// and RunParallel executes this body in worker goroutines.
				b.Errorf("expected IsLowercase(%q) to return %v", s, expected)
				return
			}
		}
	})
}

View File

@@ -1,6 +1,10 @@
# Changes
## [1.9.0](https://github.com/googleapis/google-cloud-go/releases/tag/iam%2Fv1.9.0) (2026-04-13)
## [1.8.0](https://github.com/googleapis/google-cloud-go/releases/tag/iam%2Fv1.8.0) (2026-04-09)
## [1.7.0](https://github.com/googleapis/google-cloud-go/releases/tag/iam%2Fv1.7.0) (2026-04-02)
## [1.6.0](https://github.com/googleapis/google-cloud-go/releases/tag/iam%2Fv1.6.0) (2026-03-26)

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/iam/v1/iam_policy.proto
package iampb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/iam/v1/iam_policy.proto
package iampb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/iam/v1/options.proto
package iampb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/iam/v1/policy.proto
package iampb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/iam/v1/resource_policy_member.proto
package iampb

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -237,6 +238,16 @@ type alertPolicyGRPCClient struct {
// Cloud console (at https://console.cloud.google.com/).
func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
clientOpts := defaultAlertPolicyGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newAlertPolicyClientHook != nil {
hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{})
if err != nil {
@@ -258,6 +269,24 @@ func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*Al
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.ListAlertPolicies = append(client.CallOptions.ListAlertPolicies, gax.WithClientMetrics(metrics))
client.CallOptions.GetAlertPolicy = append(client.CallOptions.GetAlertPolicy, gax.WithClientMetrics(metrics))
client.CallOptions.CreateAlertPolicy = append(client.CallOptions.CreateAlertPolicy, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteAlertPolicy = append(client.CallOptions.DeleteAlertPolicy, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateAlertPolicy = append(client.CallOptions.UpdateAlertPolicy, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -294,6 +323,12 @@ func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *moni
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.AlertPolicyService/ListAlertPolicies")
}
opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...)
it := &AlertPolicyIterator{}
req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest)
@@ -340,6 +375,12 @@ func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitor
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.AlertPolicyService/GetAlertPolicy")
}
opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...)
var resp *monitoringpb.AlertPolicy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -358,6 +399,12 @@ func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *moni
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.AlertPolicyService/CreateAlertPolicy")
}
opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...)
var resp *monitoringpb.AlertPolicy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -376,6 +423,12 @@ func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *moni
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy")
}
opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -390,6 +443,9 @@ func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *moni
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy")
}
opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...)
var resp *monitoringpb.AlertPolicy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -258,6 +259,16 @@ type groupGRPCClient struct {
// from the infrastructure.
func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) {
clientOpts := defaultGroupGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newGroupClientHook != nil {
hookOpts, err := newGroupClientHook(ctx, clientHookParams{})
if err != nil {
@@ -279,6 +290,25 @@ func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupCli
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.ListGroups = append(client.CallOptions.ListGroups, gax.WithClientMetrics(metrics))
client.CallOptions.GetGroup = append(client.CallOptions.GetGroup, gax.WithClientMetrics(metrics))
client.CallOptions.CreateGroup = append(client.CallOptions.CreateGroup, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateGroup = append(client.CallOptions.UpdateGroup, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteGroup = append(client.CallOptions.DeleteGroup, gax.WithClientMetrics(metrics))
client.CallOptions.ListGroupMembers = append(client.CallOptions.ListGroupMembers, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -315,6 +345,12 @@ func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.List
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.GroupService/ListGroups")
}
opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...)
it := &GroupIterator{}
req = proto.Clone(req).(*monitoringpb.ListGroupsRequest)
@@ -361,6 +397,12 @@ func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGro
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.GroupService/GetGroup")
}
opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...)
var resp *monitoringpb.Group
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -379,6 +421,12 @@ func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.Cre
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.GroupService/CreateGroup")
}
opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...)
var resp *monitoringpb.Group
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -397,6 +445,9 @@ func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.Upd
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.GroupService/UpdateGroup")
}
opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...)
var resp *monitoringpb.Group
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -415,6 +466,12 @@ func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.Del
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.GroupService/DeleteGroup")
}
opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -429,6 +486,12 @@ func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringp
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.GroupService/ListGroupMembers")
}
opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...)
it := &MonitoredResourceIterator{}
req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest)

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -296,6 +297,16 @@ type metricGRPCClient struct {
// time series data.
func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) {
clientOpts := defaultMetricGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newMetricClientHook != nil {
hookOpts, err := newMetricClientHook(ctx, clientHookParams{})
if err != nil {
@@ -317,6 +328,28 @@ func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricC
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.ListMonitoredResourceDescriptors = append(client.CallOptions.ListMonitoredResourceDescriptors, gax.WithClientMetrics(metrics))
client.CallOptions.GetMonitoredResourceDescriptor = append(client.CallOptions.GetMonitoredResourceDescriptor, gax.WithClientMetrics(metrics))
client.CallOptions.ListMetricDescriptors = append(client.CallOptions.ListMetricDescriptors, gax.WithClientMetrics(metrics))
client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor, gax.WithClientMetrics(metrics))
client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteMetricDescriptor = append(client.CallOptions.DeleteMetricDescriptor, gax.WithClientMetrics(metrics))
client.CallOptions.ListTimeSeries = append(client.CallOptions.ListTimeSeries, gax.WithClientMetrics(metrics))
client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries, gax.WithClientMetrics(metrics))
client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -353,6 +386,12 @@ func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context,
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors")
}
opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...)
it := &MonitoredResourceDescriptorIterator{}
req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
@@ -399,6 +438,12 @@ func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, r
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor")
}
opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...)
var resp *monitoredrespb.MonitoredResourceDescriptor
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -417,6 +462,12 @@ func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monit
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/ListMetricDescriptors")
}
opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...)
it := &MetricDescriptorIterator{}
req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest)
@@ -463,6 +514,12 @@ func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitor
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/GetMetricDescriptor")
}
opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...)
var resp *metricpb.MetricDescriptor
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -481,6 +538,12 @@ func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *moni
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/CreateMetricDescriptor")
}
opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...)
var resp *metricpb.MetricDescriptor
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -499,6 +562,12 @@ func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *moni
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/DeleteMetricDescriptor")
}
opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -513,6 +582,12 @@ func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/ListTimeSeries")
}
opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...)
it := &TimeSeriesIterator{}
req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest)
@@ -559,6 +634,12 @@ func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoring
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/CreateTimeSeries")
}
opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -573,6 +654,12 @@ func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *mon
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.MetricService/CreateServiceTimeSeries")
}
opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/alert.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/alert_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/alert_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/common.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/dropped_labels.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/group.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/group_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/group_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/metric.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/metric_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/metric_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/mutation_record.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/notification.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/notification_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/notification_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/query_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/query_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/service_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/service_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/snooze.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/snooze_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/snooze_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/span_context.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/uptime.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/monitoring/v3/uptime_service.proto
package monitoringpb

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/monitoring/v3/uptime_service.proto
package monitoringpb

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -342,6 +343,16 @@ type notificationChannelGRPCClient struct {
// controls how messages related to incidents are sent.
func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
clientOpts := defaultNotificationChannelGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newNotificationChannelClientHook != nil {
hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{})
if err != nil {
@@ -363,6 +374,29 @@ func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOpti
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.ListNotificationChannelDescriptors = append(client.CallOptions.ListNotificationChannelDescriptors, gax.WithClientMetrics(metrics))
client.CallOptions.GetNotificationChannelDescriptor = append(client.CallOptions.GetNotificationChannelDescriptor, gax.WithClientMetrics(metrics))
client.CallOptions.ListNotificationChannels = append(client.CallOptions.ListNotificationChannels, gax.WithClientMetrics(metrics))
client.CallOptions.GetNotificationChannel = append(client.CallOptions.GetNotificationChannel, gax.WithClientMetrics(metrics))
client.CallOptions.CreateNotificationChannel = append(client.CallOptions.CreateNotificationChannel, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateNotificationChannel = append(client.CallOptions.UpdateNotificationChannel, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteNotificationChannel = append(client.CallOptions.DeleteNotificationChannel, gax.WithClientMetrics(metrics))
client.CallOptions.SendNotificationChannelVerificationCode = append(client.CallOptions.SendNotificationChannelVerificationCode, gax.WithClientMetrics(metrics))
client.CallOptions.GetNotificationChannelVerificationCode = append(client.CallOptions.GetNotificationChannelVerificationCode, gax.WithClientMetrics(metrics))
client.CallOptions.VerifyNotificationChannel = append(client.CallOptions.VerifyNotificationChannel, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -399,6 +433,12 @@ func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx c
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors")
}
opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
it := &NotificationChannelDescriptorIterator{}
req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest)
@@ -445,6 +485,12 @@ func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx con
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor")
}
opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...)
var resp *monitoringpb.NotificationChannelDescriptor
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -463,6 +509,12 @@ func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Con
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/ListNotificationChannels")
}
opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...)
it := &NotificationChannelIterator{}
req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest)
@@ -509,6 +561,12 @@ func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Conte
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/GetNotificationChannel")
}
opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...)
var resp *monitoringpb.NotificationChannel
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -527,6 +585,12 @@ func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/CreateNotificationChannel")
}
opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...)
var resp *monitoringpb.NotificationChannel
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -545,6 +609,9 @@ func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel")
}
opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...)
var resp *monitoringpb.NotificationChannel
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -563,6 +630,12 @@ func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel")
}
opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -577,6 +650,12 @@ func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode")
}
opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -591,6 +670,12 @@ func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(c
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode")
}
opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...)
var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -609,6 +694,12 @@ func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel")
}
opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...)
var resp *monitoringpb.NotificationChannel
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -25,6 +25,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -144,6 +145,16 @@ type queryGRPCClient struct {
// the time-varying values of a metric.
func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) {
clientOpts := defaultQueryGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newQueryClientHook != nil {
hookOpts, err := newQueryClientHook(ctx, clientHookParams{})
if err != nil {
@@ -165,6 +176,20 @@ func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryCli
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.QueryTimeSeries = append(client.CallOptions.QueryTimeSeries, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -201,6 +226,9 @@ func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.QueryService/QueryTimeSeries")
}
opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...)
it := &TimeSeriesDataIterator{}
req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest)

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -289,6 +290,16 @@ type serviceMonitoringGRPCClient struct {
// taxonomy of categorized Health Metrics.
func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) {
clientOpts := defaultServiceMonitoringGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newServiceMonitoringClientHook != nil {
hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{})
if err != nil {
@@ -310,6 +321,29 @@ func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.CreateService = append(client.CallOptions.CreateService, gax.WithClientMetrics(metrics))
client.CallOptions.GetService = append(client.CallOptions.GetService, gax.WithClientMetrics(metrics))
client.CallOptions.ListServices = append(client.CallOptions.ListServices, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateService = append(client.CallOptions.UpdateService, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteService = append(client.CallOptions.DeleteService, gax.WithClientMetrics(metrics))
client.CallOptions.CreateServiceLevelObjective = append(client.CallOptions.CreateServiceLevelObjective, gax.WithClientMetrics(metrics))
client.CallOptions.GetServiceLevelObjective = append(client.CallOptions.GetServiceLevelObjective, gax.WithClientMetrics(metrics))
client.CallOptions.ListServiceLevelObjectives = append(client.CallOptions.ListServiceLevelObjectives, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateServiceLevelObjective = append(client.CallOptions.UpdateServiceLevelObjective, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteServiceLevelObjective = append(client.CallOptions.DeleteServiceLevelObjective, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -346,6 +380,12 @@ func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *mo
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/CreateService")
}
opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...)
var resp *monitoringpb.Service
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -364,6 +404,12 @@ func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monit
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/GetService")
}
opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...)
var resp *monitoringpb.Service
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -382,6 +428,12 @@ func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *mon
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/ListServices")
}
opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...)
it := &ServiceIterator{}
req = proto.Clone(req).(*monitoringpb.ListServicesRequest)
@@ -428,6 +480,9 @@ func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *mo
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/UpdateService")
}
opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...)
var resp *monitoringpb.Service
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -446,6 +501,12 @@ func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *mo
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/DeleteService")
}
opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -460,6 +521,12 @@ func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective")
}
opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...)
var resp *monitoringpb.ServiceLevelObjective
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -478,6 +545,12 @@ func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Conte
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective")
}
opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...)
var resp *monitoringpb.ServiceLevelObjective
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -496,6 +569,12 @@ func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Con
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives")
}
opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...)
it := &ServiceLevelObjectiveIterator{}
req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest)
@@ -542,6 +621,9 @@ func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective")
}
opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...)
var resp *monitoringpb.ServiceLevelObjective
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -560,6 +642,12 @@ func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Co
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective")
}
opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -195,6 +196,16 @@ type snoozeGRPCClient struct {
// or more alert policies should not fire alerts for the specified duration.
func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) {
clientOpts := defaultSnoozeGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newSnoozeClientHook != nil {
hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{})
if err != nil {
@@ -216,6 +227,23 @@ func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeC
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.CreateSnooze = append(client.CallOptions.CreateSnooze, gax.WithClientMetrics(metrics))
client.CallOptions.ListSnoozes = append(client.CallOptions.ListSnoozes, gax.WithClientMetrics(metrics))
client.CallOptions.GetSnooze = append(client.CallOptions.GetSnooze, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateSnooze = append(client.CallOptions.UpdateSnooze, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -252,6 +280,12 @@ func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.C
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.SnoozeService/CreateSnooze")
}
opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...)
var resp *monitoringpb.Snooze
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -270,6 +304,12 @@ func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.Li
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.SnoozeService/ListSnoozes")
}
opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...)
it := &SnoozeIterator{}
req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest)
@@ -316,6 +356,12 @@ func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetS
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.SnoozeService/GetSnooze")
}
opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...)
var resp *monitoringpb.Snooze
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -334,6 +380,9 @@ func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.U
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.SnoozeService/UpdateSnooze")
}
opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...)
var resp *monitoringpb.Snooze
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

View File

@@ -26,6 +26,7 @@ import (
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
gax "github.com/googleapis/gax-go/v2"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -245,6 +246,16 @@ type uptimeCheckGRPCClient struct {
// Monitoring, and then clicking on “Uptime”.
func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) {
clientOpts := defaultUptimeCheckGRPCClientOptions()
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
clientOpts = append(clientOpts, internaloption.WithTelemetryAttributes(map[string]string{
"gcp.client.service": "monitoring",
"gcp.client.version": getVersionClient(),
"gcp.client.repo": "googleapis/google-cloud-go",
"gcp.client.artifact": "cloud.google.com/go/monitoring/apiv3/v2",
"gcp.client.language": "go",
"url.domain": "monitoring.googleapis.com",
}))
}
if newUptimeCheckClientHook != nil {
hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{})
if err != nil {
@@ -266,6 +277,25 @@ func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*Up
logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
if gax.IsFeatureEnabled("METRICS") {
metrics := gax.NewClientMetrics(
gax.WithTelemetryLogger(c.logger),
gax.WithTelemetryAttributes(map[string]string{
gax.ClientService: "monitoring",
gax.ClientVersion: getVersionClient(),
gax.ClientArtifact: "cloud.google.com/go/monitoring/apiv3/v2",
gax.RPCSystem: "grpc",
gax.URLDomain: "monitoring.googleapis.com",
}),
)
client.CallOptions.ListUptimeCheckConfigs = append(client.CallOptions.ListUptimeCheckConfigs, gax.WithClientMetrics(metrics))
client.CallOptions.GetUptimeCheckConfig = append(client.CallOptions.GetUptimeCheckConfig, gax.WithClientMetrics(metrics))
client.CallOptions.CreateUptimeCheckConfig = append(client.CallOptions.CreateUptimeCheckConfig, gax.WithClientMetrics(metrics))
client.CallOptions.UpdateUptimeCheckConfig = append(client.CallOptions.UpdateUptimeCheckConfig, gax.WithClientMetrics(metrics))
client.CallOptions.DeleteUptimeCheckConfig = append(client.CallOptions.DeleteUptimeCheckConfig, gax.WithClientMetrics(metrics))
client.CallOptions.ListUptimeCheckIps = append(client.CallOptions.ListUptimeCheckIps, gax.WithClientMetrics(metrics))
}
client.internalClient = c
@@ -302,6 +332,12 @@ func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs")
}
opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...)
it := &UptimeCheckConfigIterator{}
req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest)
@@ -348,6 +384,12 @@ func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *m
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig")
}
opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...)
var resp *monitoringpb.UptimeCheckConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -366,6 +408,12 @@ func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetParent()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig")
}
opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...)
var resp *monitoringpb.UptimeCheckConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -384,6 +432,9 @@ func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig")
}
opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...)
var resp *monitoringpb.UptimeCheckConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -402,6 +453,12 @@ func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
if gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "resource_name", fmt.Sprintf("//monitoring.googleapis.com/%v", req.GetName()))
}
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig")
}
opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@@ -413,6 +470,9 @@ func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req
func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
if gax.IsFeatureEnabled("METRICS") || gax.IsFeatureEnabled("TRACING") || gax.IsFeatureEnabled("LOGGING") {
ctx = callctx.WithTelemetryContext(ctx, "rpc_method", "google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps")
}
opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...)
it := &UptimeCheckIpIterator{}
req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest)

View File

@@ -17,4 +17,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.25.0"
const Version = "1.27.0"

View File

@@ -1,6 +1,14 @@
# Changes
## [1.62.1](https://github.com/googleapis/google-cloud-go/releases/tag/storage%2Fv1.62.1) (2026-04-13)
### Bug Fixes
* Add retry logic for "http2: client connection lost" ([df22e9e](https://github.com/googleapis/google-cloud-go/commit/df22e9e38ea6de00d858ef00475ae286d3fc6834))
* Remove redundant gRPC imports from grpc_dp_diag.go ([c5511e2](https://github.com/googleapis/google-cloud-go/commit/c5511e2101cefbf8b6e54193e39382e86b49d822))
## [1.62.0](https://github.com/googleapis/google-cloud-go/releases/tag/storage%2Fv1.62.0) (2026-04-06)
### Features

View File

@@ -124,6 +124,10 @@ type grpcStorageClient struct {
settings *settings
config *storageConfig
dpDiag string
// configFeatureAttributes tracks client-level features that are enabled for this
// client instance.
configFeatureAttributes uint32
}
func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) {
@@ -240,6 +244,17 @@ func (c *grpcStorageClient) prepareDirectPathMetadata(ctx context.Context, targe
md.Set(requestParamsHeaderKey, reason)
}
}
// Client level feature tracking.
features := featureAttributes(ctx)
features |= c.configFeatureAttributes
// Merge all existing headers for this key from metadata.
features |= mergeFeatureAttributes(md[featureTrackerHeaderName])
if features > 0 {
md.Set(featureTrackerHeaderName, encodeUint32(features))
}
return metadata.NewOutgoingContext(ctx, md), nil
}

View File

@@ -22,8 +22,6 @@ import (
"cloud.google.com/go/compute/metadata"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
_ "google.golang.org/grpc/balancer/rls"
_ "google.golang.org/grpc/xds/googledirectpath"
)
const (

View File

@@ -45,6 +45,8 @@ import (
// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
// storageClient interface.
//
// TODO(b/498422946): Add client feature tracker in HTTP client.
type httpStorageClient struct {
creds *auth.Credentials
hc *http.Client

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v4.25.7
// protoc v6.31.0
// source: google/storage/v2/storage.proto
package storagepb
@@ -2782,9 +2782,11 @@ type WriteObjectResponse struct {
//
// *WriteObjectResponse_PersistedSize
// *WriteObjectResponse_Resource
WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
// If persisted_size is set, contains checksums of persisted data.
PersistedDataChecksums *ObjectChecksums `protobuf:"bytes,3,opt,name=persisted_data_checksums,json=persistedDataChecksums,proto3" json:"persisted_data_checksums,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriteObjectResponse) Reset() {
@@ -2842,6 +2844,13 @@ func (x *WriteObjectResponse) GetResource() *Object {
return nil
}
func (x *WriteObjectResponse) GetPersistedDataChecksums() *ObjectChecksums {
if x != nil {
return x.PersistedDataChecksums
}
return nil
}
type isWriteObjectResponse_WriteStatus interface {
isWriteObjectResponse_WriteStatus()
}
@@ -3008,9 +3017,9 @@ type BidiWriteObjectRequest struct {
// *BidiWriteObjectRequest_ChecksummedData
Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
// Optional. Checksums for the complete object. If the checksums computed by
// the service don't match the specified checksums the call fails. Might only
// be provided in the first request or the last request (with finish_write
// set).
// the service don't match the specified checksums the call fails. May be
// provided in the last request (with finish_write set). For non-appendable
// objects only, may also be provided in the first request.
ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
// Optional. For each `BidiWriteObjectRequest` where `state_lookup` is `true`
// or the client closes the stream, the service sends a
@@ -3213,6 +3222,8 @@ type BidiWriteObjectResponse struct {
// *BidiWriteObjectResponse_PersistedSize
// *BidiWriteObjectResponse_Resource
WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
// If persisted_size is set, contains checksums of persisted data.
PersistedDataChecksums *ObjectChecksums `protobuf:"bytes,4,opt,name=persisted_data_checksums,json=persistedDataChecksums,proto3" json:"persisted_data_checksums,omitempty"`
// An optional write handle that is returned periodically in response
// messages. Clients should save it for later use in establishing a new stream
// if a connection is interrupted.
@@ -3276,6 +3287,13 @@ func (x *BidiWriteObjectResponse) GetResource() *Object {
return nil
}
func (x *BidiWriteObjectResponse) GetPersistedDataChecksums() *ObjectChecksums {
if x != nil {
return x.PersistedDataChecksums
}
return nil
}
func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle {
if x != nil {
return x.WriteHandle
@@ -3562,9 +3580,11 @@ type QueryWriteStatusResponse struct {
//
// *QueryWriteStatusResponse_PersistedSize
// *QueryWriteStatusResponse_Resource
WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
// If persisted_size is set, contains checksums of persisted data.
PersistedDataChecksums *ObjectChecksums `protobuf:"bytes,3,opt,name=persisted_data_checksums,json=persistedDataChecksums,proto3" json:"persisted_data_checksums,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *QueryWriteStatusResponse) Reset() {
@@ -3622,6 +3642,13 @@ func (x *QueryWriteStatusResponse) GetResource() *Object {
return nil
}
func (x *QueryWriteStatusResponse) GetPersistedDataChecksums() *ObjectChecksums {
if x != nil {
return x.PersistedDataChecksums
}
return nil
}
type isQueryWriteStatusResponse_WriteStatus interface {
isQueryWriteStatusResponse_WriteStatus()
}
@@ -8156,10 +8183,11 @@ const file_google_storage_v2_storage_proto_rawDesc = "" +
"\ffinish_write\x18\a \x01(\bB\x03\xe0A\x01R\vfinishWrite\x12r\n" +
"\x1ccommon_object_request_params\x18\b \x01(\v2,.google.storage.v2.CommonObjectRequestParamsB\x03\xe0A\x01R\x19commonObjectRequestParamsB\x0f\n" +
"\rfirst_messageB\x06\n" +
"\x04data\"\x87\x01\n" +
"\x04data\"\xe5\x01\n" +
"\x13WriteObjectResponse\x12'\n" +
"\x0epersisted_size\x18\x01 \x01(\x03H\x00R\rpersistedSize\x127\n" +
"\bresource\x18\x02 \x01(\v2\x19.google.storage.v2.ObjectH\x00R\bresourceB\x0e\n" +
"\bresource\x18\x02 \x01(\v2\x19.google.storage.v2.ObjectH\x00R\bresource\x12\\\n" +
"\x18persisted_data_checksums\x18\x03 \x01(\v2\".google.storage.v2.ObjectChecksumsR\x16persistedDataChecksumsB\x0e\n" +
"\fwrite_status\"\xe9\x03\n" +
"\x10AppendObjectSpec\x12=\n" +
"\x06bucket\x18\x01 \x01(\tB%\xe0A\x02\xfaA\x1f\n" +
@@ -8189,10 +8217,11 @@ const file_google_storage_v2_storage_proto_rawDesc = "" +
"\x1ccommon_object_request_params\x18\n" +
" \x01(\v2,.google.storage.v2.CommonObjectRequestParamsB\x03\xe0A\x01R\x19commonObjectRequestParamsB\x0f\n" +
"\rfirst_messageB\x06\n" +
"\x04data\"\xe8\x01\n" +
"\x04data\"\xc6\x02\n" +
"\x17BidiWriteObjectResponse\x12'\n" +
"\x0epersisted_size\x18\x01 \x01(\x03H\x00R\rpersistedSize\x127\n" +
"\bresource\x18\x02 \x01(\v2\x19.google.storage.v2.ObjectH\x00R\bresource\x12J\n" +
"\bresource\x18\x02 \x01(\v2\x19.google.storage.v2.ObjectH\x00R\bresource\x12\\\n" +
"\x18persisted_data_checksums\x18\x04 \x01(\v2\".google.storage.v2.ObjectChecksumsR\x16persistedDataChecksums\x12J\n" +
"\fwrite_handle\x18\x03 \x01(\v2\".google.storage.v2.BidiWriteHandleH\x01R\vwriteHandle\x88\x01\x01B\x0e\n" +
"\fwrite_statusB\x0f\n" +
"\r_write_handle\"\x9e\x05\n" +
@@ -8219,10 +8248,11 @@ const file_google_storage_v2_storage_proto_rawDesc = "" +
"_read_mask\"\xaf\x01\n" +
"\x17QueryWriteStatusRequest\x12 \n" +
"\tupload_id\x18\x01 \x01(\tB\x03\xe0A\x02R\buploadId\x12r\n" +
"\x1ccommon_object_request_params\x18\x02 \x01(\v2,.google.storage.v2.CommonObjectRequestParamsB\x03\xe0A\x01R\x19commonObjectRequestParams\"\x8c\x01\n" +
"\x1ccommon_object_request_params\x18\x02 \x01(\v2,.google.storage.v2.CommonObjectRequestParamsB\x03\xe0A\x01R\x19commonObjectRequestParams\"\xea\x01\n" +
"\x18QueryWriteStatusResponse\x12'\n" +
"\x0epersisted_size\x18\x01 \x01(\x03H\x00R\rpersistedSize\x127\n" +
"\bresource\x18\x02 \x01(\v2\x19.google.storage.v2.ObjectH\x00R\bresourceB\x0e\n" +
"\bresource\x18\x02 \x01(\v2\x19.google.storage.v2.ObjectH\x00R\bresource\x12\\\n" +
"\x18persisted_data_checksums\x18\x03 \x01(\v2\".google.storage.v2.ObjectChecksumsR\x16persistedDataChecksumsB\x0e\n" +
"\fwrite_status\"\xea\x0e\n" +
"\x14RewriteObjectRequest\x121\n" +
"\x10destination_name\x18\x18 \x01(\tB\x06\xe0A\x02\xe0A\x05R\x0fdestinationName\x12W\n" +
@@ -8827,149 +8857,152 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
48, // 37: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
43, // 38: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
52, // 39: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
27, // 40: google.storage.v2.AppendObjectSpec.write_handle:type_name -> google.storage.v2.BidiWriteHandle
28, // 41: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
31, // 42: google.storage.v2.BidiWriteObjectRequest.append_object_spec:type_name -> google.storage.v2.AppendObjectSpec
47, // 43: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
48, // 44: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
43, // 45: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
52, // 46: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
27, // 47: google.storage.v2.BidiWriteObjectResponse.write_handle:type_name -> google.storage.v2.BidiWriteHandle
88, // 48: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
43, // 49: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
52, // 50: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
52, // 51: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
43, // 52: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
48, // 53: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
52, // 54: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
28, // 55: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
43, // 56: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
48, // 57: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
52, // 58: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
88, // 59: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
43, // 60: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
46, // 61: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
53, // 62: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
64, // 63: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
90, // 64: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
61, // 65: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
90, // 66: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
75, // 67: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
70, // 68: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
69, // 69: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
65, // 70: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
56, // 71: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
62, // 72: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
60, // 73: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
67, // 74: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
63, // 75: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
71, // 76: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
72, // 77: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
74, // 78: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
68, // 79: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
66, // 80: google.storage.v2.Bucket.object_retention:type_name -> google.storage.v2.Bucket.ObjectRetention
73, // 81: google.storage.v2.Bucket.ip_filter:type_name -> google.storage.v2.Bucket.IpFilter
55, // 82: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
90, // 83: google.storage.v2.ObjectCustomContextPayload.create_time:type_name -> google.protobuf.Timestamp
90, // 84: google.storage.v2.ObjectCustomContextPayload.update_time:type_name -> google.protobuf.Timestamp
85, // 85: google.storage.v2.ObjectContexts.custom:type_name -> google.storage.v2.ObjectContexts.CustomEntry
53, // 86: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
90, // 87: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
90, // 88: google.storage.v2.Object.finalize_time:type_name -> google.protobuf.Timestamp
90, // 89: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
48, // 90: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
90, // 91: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
90, // 92: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
90, // 93: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
87, // 94: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
50, // 95: google.storage.v2.Object.contexts:type_name -> google.storage.v2.ObjectContexts
56, // 96: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
51, // 97: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
90, // 98: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
90, // 99: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
90, // 100: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
86, // 101: google.storage.v2.Object.retention:type_name -> google.storage.v2.Object.Retention
55, // 102: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
52, // 103: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
59, // 104: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
76, // 105: google.storage.v2.Bucket.Encryption.google_managed_encryption_enforcement_config:type_name -> google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig
77, // 106: google.storage.v2.Bucket.Encryption.customer_managed_encryption_enforcement_config:type_name -> google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig
78, // 107: google.storage.v2.Bucket.Encryption.customer_supplied_encryption_enforcement_config:type_name -> google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig
79, // 108: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
80, // 109: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
90, // 110: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
91, // 111: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
91, // 112: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration
90, // 113: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp
90, // 114: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
90, // 115: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp
83, // 116: google.storage.v2.Bucket.IpFilter.public_network_source:type_name -> google.storage.v2.Bucket.IpFilter.PublicNetworkSource
84, // 117: google.storage.v2.Bucket.IpFilter.vpc_network_sources:type_name -> google.storage.v2.Bucket.IpFilter.VpcNetworkSource
90, // 118: google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig.effective_time:type_name -> google.protobuf.Timestamp
90, // 119: google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig.effective_time:type_name -> google.protobuf.Timestamp
90, // 120: google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig.effective_time:type_name -> google.protobuf.Timestamp
90, // 121: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
81, // 122: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action
82, // 123: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition
92, // 124: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date
92, // 125: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date
92, // 126: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date
49, // 127: google.storage.v2.ObjectContexts.CustomEntry.value:type_name -> google.storage.v2.ObjectCustomContextPayload
1, // 128: google.storage.v2.Object.Retention.mode:type_name -> google.storage.v2.Object.Retention.Mode
90, // 129: google.storage.v2.Object.Retention.retain_until_time:type_name -> google.protobuf.Timestamp
2, // 130: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest
3, // 131: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest
4, // 132: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest
5, // 133: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest
7, // 134: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest
93, // 135: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
94, // 136: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
95, // 137: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
8, // 138: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
9, // 139: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
10, // 140: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
11, // 141: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
12, // 142: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
15, // 143: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
14, // 144: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
18, // 145: google.storage.v2.Storage.BidiReadObject:input_type -> google.storage.v2.BidiReadObjectRequest
42, // 146: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
29, // 147: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
32, // 148: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
34, // 149: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
37, // 150: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
40, // 151: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
35, // 152: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
39, // 153: google.storage.v2.Storage.MoveObject:input_type -> google.storage.v2.MoveObjectRequest
96, // 154: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
45, // 155: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
45, // 156: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
6, // 157: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
45, // 158: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
97, // 159: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
97, // 160: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
98, // 161: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
45, // 162: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
52, // 163: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
96, // 164: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
52, // 165: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
13, // 166: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
52, // 167: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
16, // 168: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
19, // 169: google.storage.v2.Storage.BidiReadObject:output_type -> google.storage.v2.BidiReadObjectResponse
52, // 170: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
30, // 171: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
33, // 172: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
54, // 173: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
38, // 174: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
41, // 175: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
36, // 176: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
52, // 177: google.storage.v2.Storage.MoveObject:output_type -> google.storage.v2.Object
154, // [154:178] is the sub-list for method output_type
130, // [130:154] is the sub-list for method input_type
130, // [130:130] is the sub-list for extension type_name
130, // [130:130] is the sub-list for extension extendee
0, // [0:130] is the sub-list for field type_name
48, // 40: google.storage.v2.WriteObjectResponse.persisted_data_checksums:type_name -> google.storage.v2.ObjectChecksums
27, // 41: google.storage.v2.AppendObjectSpec.write_handle:type_name -> google.storage.v2.BidiWriteHandle
28, // 42: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
31, // 43: google.storage.v2.BidiWriteObjectRequest.append_object_spec:type_name -> google.storage.v2.AppendObjectSpec
47, // 44: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
48, // 45: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
43, // 46: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
52, // 47: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
48, // 48: google.storage.v2.BidiWriteObjectResponse.persisted_data_checksums:type_name -> google.storage.v2.ObjectChecksums
27, // 49: google.storage.v2.BidiWriteObjectResponse.write_handle:type_name -> google.storage.v2.BidiWriteHandle
88, // 50: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
43, // 51: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
52, // 52: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
48, // 53: google.storage.v2.QueryWriteStatusResponse.persisted_data_checksums:type_name -> google.storage.v2.ObjectChecksums
52, // 54: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
43, // 55: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
48, // 56: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
52, // 57: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
28, // 58: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
43, // 59: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
48, // 60: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
52, // 61: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
88, // 62: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
43, // 63: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
46, // 64: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
53, // 65: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
64, // 66: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
90, // 67: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
61, // 68: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
90, // 69: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
75, // 70: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
70, // 71: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
69, // 72: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
65, // 73: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
56, // 74: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
62, // 75: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
60, // 76: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
67, // 77: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
63, // 78: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
71, // 79: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
72, // 80: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
74, // 81: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
68, // 82: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
66, // 83: google.storage.v2.Bucket.object_retention:type_name -> google.storage.v2.Bucket.ObjectRetention
73, // 84: google.storage.v2.Bucket.ip_filter:type_name -> google.storage.v2.Bucket.IpFilter
55, // 85: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
90, // 86: google.storage.v2.ObjectCustomContextPayload.create_time:type_name -> google.protobuf.Timestamp
90, // 87: google.storage.v2.ObjectCustomContextPayload.update_time:type_name -> google.protobuf.Timestamp
85, // 88: google.storage.v2.ObjectContexts.custom:type_name -> google.storage.v2.ObjectContexts.CustomEntry
53, // 89: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
90, // 90: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
90, // 91: google.storage.v2.Object.finalize_time:type_name -> google.protobuf.Timestamp
90, // 92: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
48, // 93: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
90, // 94: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
90, // 95: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
90, // 96: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
87, // 97: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
50, // 98: google.storage.v2.Object.contexts:type_name -> google.storage.v2.ObjectContexts
56, // 99: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
51, // 100: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
90, // 101: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
90, // 102: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
90, // 103: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
86, // 104: google.storage.v2.Object.retention:type_name -> google.storage.v2.Object.Retention
55, // 105: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
52, // 106: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
59, // 107: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
76, // 108: google.storage.v2.Bucket.Encryption.google_managed_encryption_enforcement_config:type_name -> google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig
77, // 109: google.storage.v2.Bucket.Encryption.customer_managed_encryption_enforcement_config:type_name -> google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig
78, // 110: google.storage.v2.Bucket.Encryption.customer_supplied_encryption_enforcement_config:type_name -> google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig
79, // 111: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
80, // 112: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
90, // 113: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
91, // 114: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
91, // 115: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration
90, // 116: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp
90, // 117: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
90, // 118: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp
83, // 119: google.storage.v2.Bucket.IpFilter.public_network_source:type_name -> google.storage.v2.Bucket.IpFilter.PublicNetworkSource
84, // 120: google.storage.v2.Bucket.IpFilter.vpc_network_sources:type_name -> google.storage.v2.Bucket.IpFilter.VpcNetworkSource
90, // 121: google.storage.v2.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig.effective_time:type_name -> google.protobuf.Timestamp
90, // 122: google.storage.v2.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig.effective_time:type_name -> google.protobuf.Timestamp
90, // 123: google.storage.v2.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig.effective_time:type_name -> google.protobuf.Timestamp
90, // 124: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
81, // 125: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action
82, // 126: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition
92, // 127: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date
92, // 128: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date
92, // 129: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date
49, // 130: google.storage.v2.ObjectContexts.CustomEntry.value:type_name -> google.storage.v2.ObjectCustomContextPayload
1, // 131: google.storage.v2.Object.Retention.mode:type_name -> google.storage.v2.Object.Retention.Mode
90, // 132: google.storage.v2.Object.Retention.retain_until_time:type_name -> google.protobuf.Timestamp
2, // 133: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest
3, // 134: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest
4, // 135: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest
5, // 136: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest
7, // 137: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest
93, // 138: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
94, // 139: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
95, // 140: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
8, // 141: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
9, // 142: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
10, // 143: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
11, // 144: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
12, // 145: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
15, // 146: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
14, // 147: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
18, // 148: google.storage.v2.Storage.BidiReadObject:input_type -> google.storage.v2.BidiReadObjectRequest
42, // 149: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
29, // 150: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
32, // 151: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
34, // 152: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
37, // 153: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
40, // 154: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
35, // 155: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
39, // 156: google.storage.v2.Storage.MoveObject:input_type -> google.storage.v2.MoveObjectRequest
96, // 157: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
45, // 158: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
45, // 159: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
6, // 160: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
45, // 161: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
97, // 162: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
97, // 163: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
98, // 164: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
45, // 165: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
52, // 166: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
96, // 167: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
52, // 168: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
13, // 169: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
52, // 170: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
16, // 171: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
19, // 172: google.storage.v2.Storage.BidiReadObject:output_type -> google.storage.v2.BidiReadObjectResponse
52, // 173: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
30, // 174: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
33, // 175: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
54, // 176: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
38, // 177: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
41, // 178: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
36, // 179: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
52, // 180: google.storage.v2.Storage.MoveObject:output_type -> google.storage.v2.Object
157, // [157:181] is the sub-list for method output_type
133, // [133:157] is the sub-list for method input_type
133, // [133:133] is the sub-list for extension type_name
133, // [133:133] is the sub-list for extension extendee
0, // [0:133] is the sub-list for field type_name
}
// init eagerly registers the google.storage.v2 proto file descriptors so the
// types are available before any message in this package is used.
func init() {
	file_google_storage_v2_storage_proto_init()
}

View File

@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.7
// - protoc v6.31.0
// source: google/storage/v2/storage.proto
package storagepb

View File

@@ -17,4 +17,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.62.0"
const Version = "1.62.1"

View File

@@ -209,7 +209,7 @@ func ShouldRetry(err error) bool {
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
// Unfortunately the error type is unexported, so we resort to string
// matching.
retriable := []string{"connection refused", "connection reset", "broken pipe"}
retriable := []string{"connection refused", "connection reset", "broken pipe", "client connection lost"}
for _, s := range retriable {
if strings.Contains(e.Error(), s) {
return true

View File

@@ -179,6 +179,9 @@ func (w *Writer) initPCU(ctx context.Context) error {
s := newPCUSettings(cfg.MaxConcurrency)
// Track PCU operations using client feature tracking header.
ctx = addFeatureAttributes(ctx, featurePCU)
pCtx, cancel := context.WithCancel(ctx)
state := &pcuState{

View File

@@ -265,7 +265,9 @@ func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context, opts ...MRDO
for _, opt := range opts {
opt.apply(params)
}
if params.minConnections > 1 || params.maxConnections > 1 {
spanCtx = addFeatureAttributes(spanCtx, featureMultistreamInMRD)
}
// This call will return the *MultiRangeDownloader with the .impl field set.
return o.c.tc.NewMultiRangeDownloader(spanCtx, params, storageOpts...)
}

View File

@@ -16,7 +16,7 @@ package storage
// trackedFeature represents a specific client feature being tracked, represented
// as a bit in a bitmask. Each feature corresponds to a specific bit position.
type trackedFeature uint
type trackedFeature uint32
const (
featureMultistreamInMRD trackedFeature = 0

63
vendor/cloud.google.com/go/storage/tracker.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"github.com/googleapis/gax-go/v2/callctx"
)
const featureTrackerHeaderName = "x-goog-storage-go-features"
// addFeatureAttributes records the given feature codes in the context.
// Features are accumulated as a bitmask stored in the callctx headers and
// will be injected into the outgoing request headers by the transport.
func addFeatureAttributes(ctx context.Context, features ...trackedFeature) context.Context {
	if len(features) == 0 {
		return ctx
	}
	prev := featureAttributes(ctx)
	mask := prev
	for _, feature := range features {
		mask |= 1 << feature
	}
	// Nothing new was set — keep the original context untouched.
	if mask == prev {
		return ctx
	}
	return callctx.SetHeaders(ctx, featureTrackerHeaderName, encodeUint32(uint32(mask)))
}
// featureAttributes extracts and merges all feature attributes present in the context.
// It returns a bitmask represented as a uint32.
func featureAttributes(ctx context.Context) uint32 {
ctxHeaders := callctx.HeadersFromContext(ctx)
// If multiple values are present in the context (e.g. from nested calls),
// merge them into a single bitmask.
return mergeFeatureAttributes(ctxHeaders[featureTrackerHeaderName])
}
// mergeFeatureAttributes ORs together all valid encoded bitmasks in vals.
// Values that fail to decode are silently skipped.
func mergeFeatureAttributes(vals []string) uint32 {
	var merged uint32
	for _, v := range vals {
		decoded, err := decodeUint32(v)
		if err != nil {
			// Ignore malformed header values.
			continue
		}
		merged |= decoded
	}
	return merged
}

View File

@@ -206,7 +206,7 @@ func (br *blockResult) addValues(values []string) {
}
}
func (br *blockResult) addValue(v string) {
func (br *blockResult) addValue(v string) string {
valuesBuf := br.valuesBuf
if len(valuesBuf) > 0 && v == valuesBuf[len(valuesBuf)-1] {
v = valuesBuf[len(valuesBuf)-1]
@@ -214,6 +214,7 @@ func (br *blockResult) addValue(v string) {
v = br.a.copyString(v)
}
br.valuesBuf = append(br.valuesBuf, v)
return v
}
// sizeBytes returns the size of br in bytes.
@@ -229,7 +230,7 @@ func (br *blockResult) sizeBytes() int {
return n
}
func (br *blockResult) initFromDataBlock(db *DataBlock) {
func (br *blockResult) mustInitFromDataBlock(db *DataBlock) {
br.reset()
br.rowsLen = db.RowsCount()
@@ -253,6 +254,75 @@ func (br *blockResult) initFromDataBlock(db *DataBlock) {
}
}
func (br *blockResult) mustInitFromRows(rows [][]Field) {
br.reset()
br.rowsLen = len(rows)
if len(rows) == 0 {
// Nothing to do.
return
}
if areSameFieldsInRows(rows) {
// Fast path - all the rows have the same fields
fields := rows[0]
for i := range fields {
name := br.addValue(fields[i].Name)
valuesBufLen := len(br.valuesBuf)
for _, row := range rows {
br.addValue(row[i].Value)
}
values := br.valuesBuf[valuesBufLen:]
br.addResultColumn(resultColumn{
name: name,
values: values,
})
}
return
}
// Slow path - rows have different fields.
// Create common columns across all the fields seen in the rows.
columnIdxs := getColumnIdxs()
for _, fields := range rows {
for j := range fields {
name := br.addValue(fields[j].Name)
if _, ok := columnIdxs[name]; !ok {
columnIdxs[name] = len(columnIdxs)
}
}
}
// Initialize columns
csBufLen := len(br.csBuf)
br.csBuf = slicesutil.SetLength(br.csBuf, csBufLen+len(columnIdxs))
cs := br.csBuf[csBufLen:]
for name, idx := range columnIdxs {
valuesBufLen := len(br.valuesBuf)
br.valuesBuf = slicesutil.SetLength(br.valuesBuf, valuesBufLen+len(rows))
values := br.valuesBuf[valuesBufLen:]
c := &cs[idx]
c.name = name
c.valueType = valueTypeString
c.valuesEncoded = values
}
// Add values to columns
for i := range rows {
for _, f := range rows[i] {
idx := columnIdxs[f.Name]
value := br.addValue(f.Value)
cs[idx].valuesEncoded[i] = value
}
}
putColumnIdxs(columnIdxs)
}
// setResultColumns sets the given rcs as br columns.
//
// The br is valid only until rcs are modified.
@@ -374,13 +444,14 @@ func (br *blockResult) initColumnsByFilter(pf *prefixfilter.Filter) {
}
// Add other const columns
csh := br.bs.getColumnsHeader()
bs := br.bs
csh := bs.getColumnsHeader()
for _, cc := range csh.constColumns {
if cc.Name == "" {
// We already added _msg column above
if isSpecialColumn(cc.Name) {
// Special columns have been added above.
continue
}
if pf.MatchString(cc.Name) {
if pf.MatchString(cc.Name) && !bs.isHiddenField(cc.Name) {
br.addConstColumn(cc.Name, cc.Value)
}
}
@@ -389,16 +460,29 @@ func (br *blockResult) initColumnsByFilter(pf *prefixfilter.Filter) {
chs := csh.columnHeaders
for i := range chs {
ch := &chs[i]
if ch.name == "" {
// We already added _msg column above
if isSpecialColumn(ch.name) {
// Special columns have been added above.
continue
}
if pf.MatchString(ch.name) {
if pf.MatchString(ch.name) && !bs.isHiddenField(ch.name) {
br.addColumn(ch)
}
}
}
// isSpecialColumn reports whether c refers to one of the special columns that
// are handled separately: the _msg column (referenced by the empty name),
// _time, _stream and _stream_id.
func isSpecialColumn(c string) bool {
	switch c {
	case "":
		// The _msg column is referenced by the empty column name.
		return true
	case "_time", "_stream", "_stream_id":
		return true
	}
	return false
}
var specialColumns = []string{"_msg", "_time", "_stream", "_stream_id"}
// mustInit initializes br with the given bs and bm.
//
// br is valid until bs or bm changes.
@@ -423,6 +507,8 @@ func (br *blockResult) getMinTimestamp(minTimestamp int64) int64 {
if br.bs != nil {
th := &br.bs.bsw.bh.timestampsHeader
if br.isFull() {
// Fast path - all the rows in the br are present, so return the minTimestamp
// from blockHeader without the need to read the actual timestamps.
return min(minTimestamp, th.minTimestamp)
}
if minTimestamp <= th.minTimestamp {
@@ -430,8 +516,18 @@ func (br *blockResult) getMinTimestamp(minTimestamp int64) int64 {
}
}
// Slow path - need to scan timestamps
timestamps := br.getTimestamps()
c := br.getColumnByName("_time")
if c.isTime {
// Slower path - some of the rows in the br are filtered out,
// so try obtaining the _time column and return the first timestamp from there.
if len(timestamps) > 0 {
return min(minTimestamp, timestamps[0])
}
return minTimestamp
}
// Slow path - need to scan timestamps, since they may be not sorted.
for _, timestamp := range timestamps {
if timestamp < minTimestamp {
minTimestamp = timestamp
@@ -444,6 +540,8 @@ func (br *blockResult) getMaxTimestamp(maxTimestamp int64) int64 {
if br.bs != nil {
th := &br.bs.bsw.bh.timestampsHeader
if br.isFull() {
// Fast path - all the rows in the br are present, so return the maxTimestamp
// from blockHeader without the need to read the actual timestamps.
return max(maxTimestamp, th.maxTimestamp)
}
if maxTimestamp >= th.maxTimestamp {
@@ -451,8 +549,18 @@ func (br *blockResult) getMaxTimestamp(maxTimestamp int64) int64 {
}
}
// Slow path - need to scan timestamps
timestamps := br.getTimestamps()
c := br.getColumnByName("_time")
if c.isTime {
// Slower path - some of the rows in the br are filtered out,
// so try obtaining the _time column and return the last timestamp from there.
if len(timestamps) > 0 {
return max(maxTimestamp, timestamps[len(timestamps)-1])
}
return maxTimestamp
}
// Slow path - need to scan timestamps, since they may be not sorted.
for i := len(timestamps) - 1; i >= 0; i-- {
if timestamps[i] > maxTimestamp {
maxTimestamp = timestamps[i]

View File

@@ -99,7 +99,7 @@ type blockSearch struct {
// qs is updated by the blockSearch.search with various search stats
qs *QueryStats
// bsw is the actual work to perform on the given block pointed by bsw.ph
// bsw is the actual work to perform on the given block pointed by bsw.bh
bsw *blockSearchWork
// br contains result for the search in the block after search() call

View File

@@ -12,11 +12,14 @@ import (
//
// chunkedAllocator cannot be used from concurrently running goroutines.
type chunkedAllocator struct {
anyProcessors []statsAnyProcessor
avgProcessors []statsAvgProcessor
countProcessors []statsCountProcessor
countEmptyProcessors []statsCountEmptyProcessor
countUniqProcessors []statsCountUniqProcessor
countUniqHashProcessors []statsCountUniqHashProcessor
fieldMaxProcessors []statsFieldMaxProcessor
fieldMinProcessors []statsFieldMinProcessor
histogramProcessors []statsHistogramProcessor
jsonValuesProcessors []statsJSONValuesProcessor
jsonValuesSortedProcessors []statsJSONValuesSortedProcessor
@@ -31,6 +34,7 @@ type chunkedAllocator struct {
rowMaxProcessors []statsRowMaxProcessor
rowMinProcessors []statsRowMinProcessor
sumProcessors []statsSumProcessor
stddevProcessors []statsStddevProcessor
sumLenProcessors []statsSumLenProcessor
uniqValuesProcessors []statsUniqValuesProcessor
valuesProcessors []statsValuesProcessor
@@ -52,6 +56,10 @@ type chunkedAllocator struct {
bytesAllocated int
}
// newStatsAnyProcessor allocates a statsAnyProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsAnyProcessor() (p *statsAnyProcessor) {
return addNewItem(&a.anyProcessors, a)
}
// newStatsAvgProcessor allocates a statsAvgProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsAvgProcessor() (p *statsAvgProcessor) {
return addNewItem(&a.avgProcessors, a)
}
@@ -72,6 +80,14 @@ func (a *chunkedAllocator) newStatsCountUniqHashProcessor() (p *statsCountUniqHa
return addNewItem(&a.countUniqHashProcessors, a)
}
// newStatsFieldMaxProcessor allocates a statsFieldMaxProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsFieldMaxProcessor() (p *statsFieldMaxProcessor) {
return addNewItem(&a.fieldMaxProcessors, a)
}
// newStatsFieldMinProcessor allocates a statsFieldMinProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsFieldMinProcessor() (p *statsFieldMinProcessor) {
return addNewItem(&a.fieldMinProcessors, a)
}
// newStatsHistogramProcessor allocates a statsHistogramProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsHistogramProcessor() (p *statsHistogramProcessor) {
return addNewItem(&a.histogramProcessors, a)
}
@@ -124,6 +140,10 @@ func (a *chunkedAllocator) newStatsRowMinProcessor() (p *statsRowMinProcessor) {
return addNewItem(&a.rowMinProcessors, a)
}
// newStatsStddevProcessor allocates a statsStddevProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsStddevProcessor() (p *statsStddevProcessor) {
return addNewItem(&a.stddevProcessors, a)
}
// newStatsSumProcessor allocates a statsSumProcessor from the chunked allocator arena.
func (a *chunkedAllocator) newStatsSumProcessor() (p *statsSumProcessor) {
return addNewItem(&a.sumProcessors, a)
}

View File

@@ -539,9 +539,12 @@ func (ddb *datadb) mustMergePartsInternal(pws []*partWrapper, isFinal bool, drop
// Fast path: flush a single in-memory part to disk.
mp := pws[0].mp
mp.MustStoreToDisk(dstPartPath)
srcRowsCount := mp.ph.RowsCount
dstSize := mp.ph.CompressedSizeBytes
pwNew := ddb.openCreatedPart(&mp.ph, pws, nil, dstPartPath)
ddb.swapSrcWithDstParts(pws, pwNew, dstPartType)
ddb.updateMergeMetrics(dstPartType, mp.ph.RowsCount, startTime, mp.ph.CompressedSizeBytes)
ddb.updateMergeMetrics(dstPartType, srcRowsCount, startTime, dstSize)
return true
}
@@ -1355,8 +1358,7 @@ func mustRemoveUnusedDirs(path string, partNames []string) {
}
}
// appendPartsToMerge finds optimal parts to merge from src,
// appends them to dst and returns the result.
// appendPartsToMerge finds optimal parts to merge from src, appends them to dst and returns the result.
func appendPartsToMerge(dst, src []*partWrapper, maxOutBytes uint64) []*partWrapper {
if len(src) < 2 {
// There is no need in merging zero or one part :)
@@ -1501,27 +1503,28 @@ func (ddb *datadb) deleteRows(pso *partitionSearchOptions, stopCh <-chan struct{
// Search for parts, which contain logs matching pso for the deletion and which aren't in merge at the moment.
var pwsToMerge []*partWrapper
needRepeat := false
for _, pw := range pws {
if !pw.p.hasMatchingRows(pso, stopCh) {
continue
}
ddb.partsLock.Lock()
ok := !pw.isInMerge
if ok {
if !pw.isInMerge {
pw.isInMerge = true
pwsToMerge = append(pwsToMerge, pw)
} else {
// The pw is in merge now, so it must be processed again for the rows' deletion in the future.
needRepeat = true
}
ddb.partsLock.Unlock()
if !ok {
ddb.releasePartsToMerge(pwsToMerge)
return false
}
}
// merge pwsToMerge while dropping logs matching pso.
return ddb.mustMergePartsInternal(pwsToMerge, false, pso, stopCh)
if !ddb.mustMergePartsInternal(pwsToMerge, false, pso, stopCh) {
return false
}
return !needRepeat
}
func appendAllPartsForMergeLocked(dst, src []*partWrapper) []*partWrapper {

View File

@@ -43,7 +43,7 @@ func (dt *DeleteTask) String() string {
return string(data)
}
func newDeleteTask(taskID string, tenantIDs []TenantID, filter string, startTime int64) *DeleteTask {
func newDeleteTask(taskID string, startTime int64, tenantIDs []TenantID, filter string) *DeleteTask {
return &DeleteTask{
TaskID: taskID,
TenantIDs: tenantIDs,

View File

@@ -22,6 +22,21 @@ type filter interface {
applyToBlockResult(br *blockResult, bm *bitmap)
}
// fieldFilter must implement filtering for log entries by the given fieldName
type fieldFilter interface {
// String returns string representation of the filter
String() string
// matchRowByField must return true if the current filter for the given fieldName matches a row with the given fields
matchRowByField(fields []Field, fieldName string) bool
// applyToBlockSearch must update bm according to the filter for the given fieldName applied to the given bs block
applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string)
// applyToBlockResult must update bm according to the filter for the given fieldName applied to the given br block
applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string)
}
// visitFilterRecursive recursively calls visitFunc for filters inside f.
//
// It stops calling visitFunc on the remaining filters as soon as visitFunc returns true.
@@ -82,27 +97,21 @@ func copyFilterInternal(f filter, visitFunc func(f filter) bool, copyFunc func(f
if err != nil {
return nil, err
}
fa := &filterAnd{
filters: filters,
}
fa := newFilterAnd(filters)
return fa, nil
case *filterOr:
filters, err := copyFilters(t.filters, visitFunc, copyFunc)
if err != nil {
return nil, err
}
fo := &filterOr{
filters: filters,
}
fo := newFilterOr(filters)
return fo, nil
case *filterNot:
f, err := copyFilter(t.f, visitFunc, copyFunc)
if err != nil {
return nil, err
}
fn := &filterNot{
f: f,
}
fn := newFilterNot(f)
return fn, nil
default:
return f, nil

View File

@@ -19,6 +19,12 @@ type filterAnd struct {
byFieldTokens []fieldTokens
}
// newFilterAnd returns a filterAnd combining the given filters with AND logic.
func newFilterAnd(filters []filter) *filterAnd {
	fa := &filterAnd{}
	fa.filters = filters
	return fa
}
type fieldTokens struct {
field string
tokens []string
@@ -146,30 +152,11 @@ func getCommonTokensForAndFilters(filters []filter) []fieldTokens {
for _, f := range filters {
switch t := f.(type) {
case *filterExact:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterExactPrefix:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterPatternMatch:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterPhrase:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterPrefix:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterRegexp:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterSequence:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterSubstring:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterGeneric:
if !t.isWildcard {
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
}
case *filterOr:
bfts := t.getByFieldTokens()
for _, bft := range bfts {

View File

@@ -9,16 +9,13 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterAnyCasePhrase filters field entries by case-insensitive phrase match.
//
// An example LogsQL query: `fieldName:i(word)` or `fieldName:i("word1 ... wordN")`
// An example LogsQL query: `i(word)` or `i("word1 ... wordN")`
type filterAnyCasePhrase struct {
fieldName string
phrase string
phrase string
phraseLowercaseOnce sync.Once
phraseLowercase string
@@ -31,12 +28,15 @@ type filterAnyCasePhrase struct {
tokensHashesUppercase []uint64
}
func (fp *filterAnyCasePhrase) String() string {
return fmt.Sprintf("%si(%s)", quoteFieldNameIfNeeded(fp.fieldName), quoteTokenIfNeeded(fp.phrase))
// newFilterAnyCasePhrase returns a filter matching the given phrase
// case-insensitively in the given fieldName.
func newFilterAnyCasePhrase(fieldName, phrase string) *filterGeneric {
	fp := new(filterAnyCasePhrase)
	fp.phrase = phrase
	return newFilterGeneric(fieldName, fp)
}
func (fp *filterAnyCasePhrase) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fp.fieldName)
func (fp *filterAnyCasePhrase) String() string {
return fmt.Sprintf("i(%s)", quoteTokenIfNeeded(fp.phrase))
}
func (fp *filterAnyCasePhrase) getTokensHashes() []uint64 {
@@ -78,19 +78,18 @@ func (fp *filterAnyCasePhrase) initPhraseUppercase() {
fp.phraseUppercase = strings.ToUpper(fp.phrase)
}
func (fp *filterAnyCasePhrase) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fp.fieldName)
func (fp *filterAnyCasePhrase) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
phraseLowercase := fp.getPhraseLowercase()
return matchAnyCasePhrase(v, phraseLowercase)
}
func (fp *filterAnyCasePhrase) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fp *filterAnyCasePhrase) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
phraseLowercase := fp.getPhraseLowercase()
applyToBlockResultGeneric(br, bm, fp.fieldName, phraseLowercase, matchAnyCasePhrase)
applyToBlockResultGeneric(br, bm, fieldName, phraseLowercase, matchAnyCasePhrase)
}
func (fp *filterAnyCasePhrase) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fp.fieldName
func (fp *filterAnyCasePhrase) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
phraseLowercase := fp.getPhraseLowercase()
// Verify whether fp matches const column

View File

@@ -8,18 +8,15 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterAnyCasePrefix matches the given prefix in lower, upper and mixed case.
//
// Example LogsQL: `fieldName:i(prefix*)` or `fieldName:i("some prefix"*)`
// Example LogsQL: `i(prefix*)` or `i("some prefix"*)`
//
// A special case `fieldName:i(*)` equals to `fieldName:*` and matches non-empty value for the given `fieldName` field.
// A special case `i(*)` equals to `*` and matches non-empty value.
type filterAnyCasePrefix struct {
fieldName string
prefix string
prefix string
prefixLowercaseOnce sync.Once
prefixLowercase string
@@ -32,15 +29,18 @@ type filterAnyCasePrefix struct {
tokensUppercaseHashes []uint64
}
func (fp *filterAnyCasePrefix) String() string {
if fp.prefix == "" {
return quoteFieldNameIfNeeded(fp.fieldName) + "i(*)"
func newFilterAnyCasePrefix(fieldName, prefix string) *filterGeneric {
fp := &filterAnyCasePrefix{
prefix: prefix,
}
return fmt.Sprintf("%si(%s*)", quoteFieldNameIfNeeded(fp.fieldName), quoteTokenIfNeeded(fp.prefix))
return newFilterGeneric(fieldName, fp)
}
func (fp *filterAnyCasePrefix) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fp.fieldName)
func (fp *filterAnyCasePrefix) String() string {
if fp.prefix == "" {
return "i(*)"
}
return fmt.Sprintf("i(%s*)", quoteTokenIfNeeded(fp.prefix))
}
func (fp *filterAnyCasePrefix) getTokensHashes() []uint64 {
@@ -82,19 +82,18 @@ func (fp *filterAnyCasePrefix) initPrefixUppercase() {
fp.prefixUppercase = strings.ToUpper(fp.prefix)
}
func (fp *filterAnyCasePrefix) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fp.fieldName)
func (fp *filterAnyCasePrefix) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
prefixLowercase := fp.getPrefixLowercase()
return matchAnyCasePrefix(v, prefixLowercase)
}
func (fp *filterAnyCasePrefix) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fp *filterAnyCasePrefix) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
prefixLowercase := fp.getPrefixLowercase()
applyToBlockResultGeneric(br, bm, fp.fieldName, prefixLowercase, matchAnyCasePrefix)
applyToBlockResultGeneric(br, bm, fieldName, prefixLowercase, matchAnyCasePrefix)
}
func (fp *filterAnyCasePrefix) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fp.fieldName
func (fp *filterAnyCasePrefix) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
prefixLowercase := fp.getPrefixLowercase()
// Verify whether fp matches const column

View File

@@ -5,39 +5,37 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterContainsAll matches logs containing all the given values.
//
// Example LogsQL: `fieldName:contains_all("foo", "bar baz")`
// Example LogsQL: `contains_all("foo", "bar baz")`
type filterContainsAll struct {
fieldName string
values inValues
}
// newFilterContainsAllValues returns a filter matching rows whose fieldName
// value contains all the given values.
func newFilterContainsAllValues(fieldName string, values []string) *filterGeneric {
	fi := &filterContainsAll{}
	fi.values.values = values
	return newFilterGeneric(fieldName, fi)
}
func (fi *filterContainsAll) String() string {
args := fi.values.String()
return fmt.Sprintf("%scontains_all(%s)", quoteFieldNameIfNeeded(fi.fieldName), args)
return fmt.Sprintf("contains_all(%s)", args)
}
func (fi *filterContainsAll) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fi.fieldName)
}
func (fi *filterContainsAll) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fi.fieldName)
func (fi *filterContainsAll) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchAllPhrases(v, fi.values.values)
}
func (fi *filterContainsAll) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fi *filterContainsAll) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
if fi.values.isEmpty() || fi.values.isOnlyEmptyValue() {
return
}
c := br.getColumnByName(fi.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !matchAllPhrases(v, fi.values.values) {
@@ -125,12 +123,12 @@ func (fi *filterContainsAll) matchColumnByStringValues(br *blockResult, bm *bitm
})
}
func (fi *filterContainsAll) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
func (fi *filterContainsAll) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
if fi.values.isEmpty() || fi.values.isOnlyEmptyValue() {
return
}
v := bs.getConstColumnValue(fi.fieldName)
v := bs.getConstColumnValue(fieldName)
if v != "" {
if !matchAllPhrases(v, fi.values.values) {
bm.resetBits()
@@ -139,7 +137,7 @@ func (fi *filterContainsAll) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
}
// Verify whether filter matches other columns
ch := bs.getColumnHeader(fi.fieldName)
ch := bs.getColumnHeader(fieldName)
if ch == nil {
// Fast path - there are no matching columns.
// It matches anything only for empty phrase.

View File

@@ -5,34 +5,32 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterContainsAny matches any value from the values.
//
// Example LogsQL: `fieldName:contains_any("foo", "bar baz")`
// Example LogsQL: `contains_any("foo", "bar baz")`
type filterContainsAny struct {
fieldName string
values inValues
}
// newFilterContainsAnyValues returns a filter matching rows whose fieldName
// value contains at least one of the given values.
func newFilterContainsAnyValues(fieldName string, values []string) *filterGeneric {
	fi := &filterContainsAny{}
	fi.values.values = values
	return newFilterGeneric(fieldName, fi)
}
func (fi *filterContainsAny) String() string {
args := fi.values.String()
return fmt.Sprintf("%scontains_any(%s)", quoteFieldNameIfNeeded(fi.fieldName), args)
return fmt.Sprintf("contains_any(%s)", args)
}
func (fi *filterContainsAny) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fi.fieldName)
}
func (fi *filterContainsAny) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fi.fieldName)
func (fi *filterContainsAny) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchAnyPhrase(v, fi.values.values)
}
func (fi *filterContainsAny) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fi *filterContainsAny) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
if fi.values.isEmpty() {
bm.resetBits()
return
@@ -42,7 +40,7 @@ func (fi *filterContainsAny) applyToBlockResult(br *blockResult, bm *bitmap) {
return
}
c := br.getColumnByName(fi.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !matchAnyPhrase(v, fi.values.values) {
@@ -107,7 +105,7 @@ func (fi *filterContainsAny) matchColumnByStringValues(br *blockResult, bm *bitm
})
}
func (fi *filterContainsAny) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
func (fi *filterContainsAny) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
if fi.values.isEmpty() {
bm.resetBits()
return
@@ -117,7 +115,7 @@ func (fi *filterContainsAny) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
return
}
v := bs.getConstColumnValue(fi.fieldName)
v := bs.getConstColumnValue(fieldName)
if v != "" {
if !matchAnyPhrase(v, fi.values.values) {
bm.resetBits()
@@ -126,7 +124,7 @@ func (fi *filterContainsAny) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
}
// Verify whether filter matches other columns
ch := bs.getColumnHeader(fi.fieldName)
ch := bs.getColumnHeader(fieldName)
if ch == nil {
// Fast path - there are no matching columns.
// It matches anything only for empty phrase.

View File

@@ -6,8 +6,6 @@ import (
"strings"
"unicode"
"unicode/utf8"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterContainsCommonCase matches words and phrases where every capital letter
@@ -17,10 +15,10 @@ import (
type filterContainsCommonCase struct {
phrases []string
containsAny *filterContainsAny
containsAny filterContainsAny
}
func newFilterContainsCommonCase(fieldName string, phrases []string) (*filterContainsCommonCase, error) {
func newFilterContainsCommonCase(fieldName string, phrases []string) (*filterGeneric, error) {
commonCasePhrases, err := getCommonCasePhrases(phrases)
if err != nil {
return nil, err
@@ -28,13 +26,11 @@ func newFilterContainsCommonCase(fieldName string, phrases []string) (*filterCon
fi := &filterContainsCommonCase{
phrases: phrases,
containsAny: &filterContainsAny{
fieldName: fieldName,
},
}
fi.containsAny.values.values = commonCasePhrases
return fi, nil
fg := newFilterGeneric(fieldName, fi)
return fg, nil
}
func (fi *filterContainsCommonCase) String() string {
@@ -43,23 +39,19 @@ func (fi *filterContainsCommonCase) String() string {
a[i] = quoteTokenIfNeeded(phrase)
}
phrases := strings.Join(a, ",")
return fmt.Sprintf("%scontains_common_case(%s)", quoteFieldNameIfNeeded(fi.containsAny.fieldName), phrases)
return fmt.Sprintf("contains_common_case(%s)", phrases)
}
func (fi *filterContainsCommonCase) updateNeededFields(pf *prefixfilter.Filter) {
fi.containsAny.updateNeededFields(pf)
func (fi *filterContainsCommonCase) matchRowByField(fields []Field, fieldName string) bool {
return fi.containsAny.matchRowByField(fields, fieldName)
}
func (fi *filterContainsCommonCase) matchRow(fields []Field) bool {
return fi.containsAny.matchRow(fields)
func (fi *filterContainsCommonCase) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
fi.containsAny.applyToBlockResultByField(br, bm, fieldName)
}
func (fi *filterContainsCommonCase) applyToBlockResult(br *blockResult, bm *bitmap) {
fi.containsAny.applyToBlockResult(br, bm)
}
func (fi *filterContainsCommonCase) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fi.containsAny.applyToBlockSearch(bs, bm)
func (fi *filterContainsCommonCase) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
fi.containsAny.applyToBlockSearchByField(bs, bm, fieldName)
}
func getCommonCasePhrases(phrases []string) ([]string, error) {

View File

@@ -23,6 +23,15 @@ type filterDayRange struct {
stringRepr string
}
// newFilterDayRange returns a filter matching the [start, end] intra-day range
// shifted by offset. stringRepr is the original text used by String().
func newFilterDayRange(start, end, offset int64, stringRepr string) *filterDayRange {
	fr := &filterDayRange{
		start:      start,
		end:        end,
		offset:     offset,
		stringRepr: stringRepr,
	}
	return fr
}
func (fr *filterDayRange) String() string {
return "_time:day_range" + fr.stringRepr
}

View File

@@ -20,6 +20,13 @@ type filterEqField struct {
prefixFilterOnce sync.Once
}
// newFilterEqField returns a filter matching rows where the fieldName value
// equals the otherFieldName value. Both names are canonicalized first.
func newFilterEqField(fieldName, otherFieldName string) *filterEqField {
	fe := &filterEqField{}
	fe.fieldName = getCanonicalColumnName(fieldName)
	fe.otherFieldName = getCanonicalColumnName(otherFieldName)
	return fe
}
func (fe *filterEqField) String() string {
return fmt.Sprintf("%seq_field(%s)", quoteFieldNameIfNeeded(fe.fieldName), quoteTokenIfNeeded(fe.otherFieldName))
}

View File

@@ -3,8 +3,6 @@ package logstorage
import (
"fmt"
"strings"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterEqualsCommonCase matches words and phrases where every capital letter
@@ -14,10 +12,10 @@ import (
type filterEqualsCommonCase struct {
phrases []string
equalsAny *filterIn
equalsAny filterIn
}
func newFilterEqualsCommonCase(fieldName string, phrases []string) (*filterEqualsCommonCase, error) {
func newFilterEqualsCommonCase(fieldName string, phrases []string) (*filterGeneric, error) {
commonCasePhrases, err := getCommonCasePhrases(phrases)
if err != nil {
return nil, err
@@ -25,13 +23,11 @@ func newFilterEqualsCommonCase(fieldName string, phrases []string) (*filterEqual
fi := &filterEqualsCommonCase{
phrases: phrases,
equalsAny: &filterIn{
fieldName: fieldName,
},
}
fi.equalsAny.values.values = commonCasePhrases
return fi, nil
fg := newFilterGeneric(fieldName, fi)
return fg, nil
}
func (fi *filterEqualsCommonCase) String() string {
@@ -40,21 +36,17 @@ func (fi *filterEqualsCommonCase) String() string {
a[i] = quoteTokenIfNeeded(phrase)
}
phrases := strings.Join(a, ",")
return fmt.Sprintf("%sequals_common_case(%s)", quoteFieldNameIfNeeded(fi.equalsAny.fieldName), phrases)
return fmt.Sprintf("equals_common_case(%s)", phrases)
}
func (fi *filterEqualsCommonCase) updateNeededFields(pf *prefixfilter.Filter) {
fi.equalsAny.updateNeededFields(pf)
func (fi *filterEqualsCommonCase) matchRowByField(fields []Field, fieldName string) bool {
return fi.equalsAny.matchRowByField(fields, fieldName)
}
func (fi *filterEqualsCommonCase) matchRow(fields []Field) bool {
return fi.equalsAny.matchRow(fields)
func (fi *filterEqualsCommonCase) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
fi.equalsAny.applyToBlockResultByField(br, bm, fieldName)
}
func (fi *filterEqualsCommonCase) applyToBlockResult(br *blockResult, bm *bitmap) {
fi.equalsAny.applyToBlockResult(br, bm)
}
func (fi *filterEqualsCommonCase) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fi.equalsAny.applyToBlockSearch(bs, bm)
func (fi *filterEqualsCommonCase) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
fi.equalsAny.applyToBlockSearchByField(bs, bm, fieldName)
}

View File

@@ -7,28 +7,28 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterExact matches the exact value.
//
// Example LogsQL: `fieldName:exact("foo bar")` of `fieldName:="foo bar"
// Example LogsQL: `exact("foo bar")` of `="foo bar"
type filterExact struct {
fieldName string
value string
value string
tokensOnce sync.Once
tokens []string
tokensHashes []uint64
}
func (fe *filterExact) String() string {
return fmt.Sprintf("%s=%s", quoteFieldNameIfNeeded(fe.fieldName), quoteTokenIfNeeded(fe.value))
func newFilterExact(fieldName, value string) *filterGeneric {
fe := &filterExact{
value: value,
}
return newFilterGeneric(fieldName, fe)
}
func (fe *filterExact) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fe.fieldName)
func (fe *filterExact) String() string {
return fmt.Sprintf("=%s", quoteTokenIfNeeded(fe.value))
}
func (fe *filterExact) getTokens() []string {
@@ -46,15 +46,15 @@ func (fe *filterExact) initTokens() {
fe.tokensHashes = appendTokensHashes(nil, fe.tokens)
}
func (fe *filterExact) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fe.fieldName)
func (fe *filterExact) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return v == fe.value
}
func (fe *filterExact) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fe *filterExact) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
value := fe.value
c := br.getColumnByName(fe.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if v != value {
@@ -188,8 +188,7 @@ func matchColumnByExactValue(br *blockResult, bm *bitmap, c *blockResultColumn,
})
}
func (fe *filterExact) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fe.fieldName
func (fe *filterExact) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
value := fe.value
v := bs.getConstColumnValue(fieldName)

View File

@@ -6,28 +6,28 @@ import (
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterExactPrefix matches the exact prefix.
//
// Example LogsQL: `fieldName:exact("foo bar"*)
// Example LogsQL: `="foo bar"*`
type filterExactPrefix struct {
fieldName string
prefix string
prefix string
tokensOnce sync.Once
tokens []string
tokensHashes []uint64
}
func (fep *filterExactPrefix) String() string {
return fmt.Sprintf("%s=%s*", quoteFieldNameIfNeeded(fep.fieldName), quoteTokenIfNeeded(fep.prefix))
func newFilterExactPrefix(fieldName, prefix string) *filterGeneric {
fe := &filterExactPrefix{
prefix: prefix,
}
return newFilterGeneric(fieldName, fe)
}
func (fep *filterExactPrefix) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fep.fieldName)
func (fep *filterExactPrefix) String() string {
return fmt.Sprintf("=%s*", quoteTokenIfNeeded(fep.prefix))
}
func (fep *filterExactPrefix) getTokens() []string {
@@ -45,17 +45,16 @@ func (fep *filterExactPrefix) initTokens() {
fep.tokensHashes = appendTokensHashes(nil, fep.tokens)
}
func (fep *filterExactPrefix) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fep.fieldName)
func (fep *filterExactPrefix) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchExactPrefix(v, fep.prefix)
}
func (fep *filterExactPrefix) applyToBlockResult(br *blockResult, bm *bitmap) {
applyToBlockResultGeneric(br, bm, fep.fieldName, fep.prefix, matchExactPrefix)
func (fep *filterExactPrefix) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
applyToBlockResultGeneric(br, bm, fieldName, fep.prefix, matchExactPrefix)
}
func (fep *filterExactPrefix) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fep.fieldName
func (fep *filterExactPrefix) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
prefix := fep.prefix
v := bs.getConstColumnValue(fieldName)

View File

@@ -0,0 +1,270 @@
package logstorage
import (
"fmt"
"strings"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterGeneric applies the given filter f to the given fieldName
type filterGeneric struct {
	// fieldName is the name of the field to apply f to.
	//
	// It may end with '*' if isWildcard is true.
	fieldName string

	// isWildcard indicates whether fieldName is a wildcard ending with '*'
	//
	// In this case f is applied to all the fields with the given fieldName prefix until the first match.
	isWildcard bool

	// f is the filter to apply.
	f fieldFilter
}

// newFilterGeneric returns a filterGeneric, which applies f to the field(s) selected by fieldName.
//
// When fieldName is a wildcard filter (ends with '*'), the returned filter is applied
// to every field with the corresponding name prefix. Otherwise the field name is
// canonicalized first (presumably mapping aliases such as "" to their canonical
// column name — see getCanonicalColumnName).
func newFilterGeneric(fieldName string, f fieldFilter) *filterGeneric {
	if prefixfilter.IsWildcardFilter(fieldName) {
		return &filterGeneric{
			fieldName:  fieldName,
			isWildcard: true,
			f:          f,
		}
	}
	fieldNameCanonical := getCanonicalColumnName(fieldName)
	return &filterGeneric{
		fieldName: fieldNameCanonical,
		f:         f,
	}
}
// getTokens returns the search tokens of the underlying filter when the filter
// kind exposes tokens (used for bloom-filter pre-checks); it returns nil for
// filter kinds without per-value tokens.
func (fg *filterGeneric) getTokens() []string {
	switch t := fg.f.(type) {
	case *filterExact:
		return t.getTokens()
	case *filterExactPrefix:
		return t.getTokens()
	case *filterPhrase:
		return t.getTokens()
	case *filterPrefix:
		return t.getTokens()
	case *filterPatternMatch:
		return t.getTokens()
	case *filterRegexp:
		return t.getTokens()
	case *filterSequence:
		return t.getTokens()
	case *filterSubstring:
		return t.getTokens()
	default:
		// Set-based and range-based filters do not expose tokens.
		return nil
	}
}
// visitSubqueries calls visitFunc for subqueries held by the underlying filter.
//
// Only contains_all, contains_any and in filters may carry a subquery.
func (fg *filterGeneric) visitSubqueries(visitFunc func(q *Query)) {
	switch t := fg.f.(type) {
	case *filterContainsAll:
		t.values.q.visitSubqueries(visitFunc)
	case *filterContainsAny:
		t.values.q.visitSubqueries(visitFunc)
	case *filterIn:
		t.values.q.visitSubqueries(visitFunc)
	default:
		// nothing to do
	}
}

// hasFilterInWithQuery returns true when the underlying filter carries an
// in(...)-style subquery, which must be executed before fg can be applied.
func (fg *filterGeneric) hasFilterInWithQuery() bool {
	switch t := fg.f.(type) {
	case *filterContainsAll:
		return t.values.q != nil
	case *filterContainsAny:
		return t.values.q != nil
	case *filterIn:
		return t.values.q != nil
	default:
		return false
	}
}
// initFilterInValues replaces subquery-based filters (contains_all, contains_any, in)
// with equivalent explicit value-list filters by executing their subqueries via
// getFieldValues (results may be served from cache).
//
// Filters without subqueries are returned as is.
func (fg *filterGeneric) initFilterInValues(cache *inValuesCache, getFieldValues getFieldValuesFunc) (filter, error) {
	switch t := fg.f.(type) {
	case *filterContainsAll:
		values, err := getValuesForQuery(t.values.q, t.values.qFieldName, cache, getFieldValues)
		if err != nil {
			return nil, fmt.Errorf("cannot obtain unique values for %s: %w", t, err)
		}
		return newFilterContainsAllValues(fg.fieldName, values), nil
	case *filterContainsAny:
		values, err := getValuesForQuery(t.values.q, t.values.qFieldName, cache, getFieldValues)
		if err != nil {
			return nil, fmt.Errorf("cannot obtain unique values for %s: %w", t, err)
		}
		return newFilterContainsAnyValues(fg.fieldName, values), nil
	case *filterIn:
		values, err := getValuesForQuery(t.values.q, t.values.qFieldName, cache, getFieldValues)
		if err != nil {
			return nil, fmt.Errorf("cannot obtain unique values for %s: %w", t, err)
		}
		return newFilterInValues(fg.fieldName, values), nil
	default:
		// The filter has no subquery - nothing to resolve.
		return fg, nil
	}
}
// String returns string representation of the fg.
func (fg *filterGeneric) String() string {
	if !fg.isWildcard {
		// quoteFieldNameIfNeeded already appends the trailing ':' separator
		// (and yields "" for the default _msg field).
		return quoteFieldNameIfNeeded(fg.fieldName) + fg.f.String()
	}
	// Wildcard field filters are always rendered with an explicit ':' separator.
	return quoteFieldFilterIfNeeded(fg.fieldName) + ":" + fg.f.String()
}

// updateNeededFields registers fg.fieldName (possibly a wildcard) in pf,
// so the matching columns are read during query execution.
func (fg *filterGeneric) updateNeededFields(pf *prefixfilter.Filter) {
	pf.AddAllowFilter(fg.fieldName)
}
// matchRow returns true if fg matches the given row fields.
//
// For wildcard field names the row matches when the underlying filter matches
// at least one field whose name has the wildcard prefix.
func (fg *filterGeneric) matchRow(fields []Field) bool {
	if !fg.isWildcard {
		// Fast path - match the row by the given fieldName.
		return fg.f.matchRowByField(fields, fg.fieldName)
	}

	// Slow path - match the row by wildcard
	// fieldName ends with '*' when isWildcard is set; strip it to obtain the prefix.
	prefix := fg.fieldName[:len(fg.fieldName)-1]
	for _, f := range fields {
		if !strings.HasPrefix(f.Name, prefix) {
			continue
		}
		if fg.f.matchRowByField(fields, f.Name) {
			return true
		}
	}
	return false
}
// applyToBlockSearch clears bits in bm for rows not matching fg within the given block search.
//
// For wildcard field names the filter is applied per matching field and the results
// are OR-ed: a row stays set in bm when at least one prefixed field matches.
func (fg *filterGeneric) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
	if !fg.isWildcard {
		// Fast path - apply filter only to the given fieldName.
		fg.f.applyToBlockSearchByField(bs, bm, fg.fieldName)
		return
	}

	// Slow path - apply filter to all the matching fields.
	prefix := fg.fieldName[:len(fg.fieldName)-1]

	// bmResult tracks rows not yet matched by any field; bmTmp is per-field scratch.
	bmResult := getBitmap(bm.bitsLen)
	bmTmp := getBitmap(bm.bitsLen)
	defer putBitmap(bmTmp)
	defer putBitmap(bmResult)

	bmResult.copyFrom(bm)
	for _, fieldName := range specialColumns {
		if !strings.HasPrefix(fieldName, prefix) {
			continue
		}
		if bs.isHiddenField(fieldName) {
			continue
		}
		bmTmp.copyFrom(bmResult)
		fg.f.applyToBlockSearchByField(bs, bmTmp, fieldName)
		// Drop rows matched on this field from the "still unmatched" set.
		bmResult.andNot(bmTmp)
		if bmResult.isZero() {
			// Every row matched some field; bm needs no clearing - return early.
			return
		}
	}
	csh := bs.getColumnsHeader()
	for _, cc := range csh.constColumns {
		if isSpecialColumn(cc.Name) {
			continue
		}
		if !strings.HasPrefix(cc.Name, prefix) {
			continue
		}
		if bs.isHiddenField(cc.Name) {
			continue
		}
		bmTmp.copyFrom(bmResult)
		fg.f.applyToBlockSearchByField(bs, bmTmp, cc.Name)
		bmResult.andNot(bmTmp)
		if bmResult.isZero() {
			return
		}
	}
	chs := csh.columnHeaders
	for i := range chs {
		ch := &chs[i]
		if isSpecialColumn(ch.name) {
			continue
		}
		if !strings.HasPrefix(ch.name, prefix) {
			continue
		}
		if bs.isHiddenField(ch.name) {
			continue
		}
		bmTmp.copyFrom(bmResult)
		fg.f.applyToBlockSearchByField(bs, bmTmp, ch.name)
		bmResult.andNot(bmTmp)
		if bmResult.isZero() {
			return
		}
	}
	// Clear rows matched by no field.
	bm.andNot(bmResult)
}
// applyToBlockResult clears bits in bm for rows not matching fg within the given block result.
//
// For wildcard field names the per-field results are OR-ed: a row stays set in bm
// when at least one column with the wildcard prefix matches.
func (fg *filterGeneric) applyToBlockResult(br *blockResult, bm *bitmap) {
	if !fg.isWildcard {
		// Fast path - apply filter to the given fieldName
		fg.f.applyToBlockResultByField(br, bm, fg.fieldName)
		return
	}

	// Slow path - apply filter to all the matching fields.
	prefix := fg.fieldName[:len(fg.fieldName)-1]

	// bmResult tracks rows not yet matched by any column; bmTmp is per-column scratch.
	bmResult := getBitmap(bm.bitsLen)
	bmTmp := getBitmap(bm.bitsLen)
	defer putBitmap(bmTmp)
	defer putBitmap(bmResult)

	bmResult.copyFrom(bm)
	cs := br.getColumns()
	for _, c := range cs {
		if !strings.HasPrefix(c.name, prefix) {
			continue
		}
		bmTmp.copyFrom(bmResult)
		fg.f.applyToBlockResultByField(br, bmTmp, c.name)
		// Drop rows matched on this column from the "still unmatched" set.
		bmResult.andNot(bmTmp)
		if bmResult.isZero() {
			// Every row matched some column; bm needs no clearing - return early.
			return
		}
	}
	// Clear rows matched by no column.
	bm.andNot(bmResult)
}
// quoteFieldNameIfNeeded returns the `name:` prefix for the given field name,
// quoting the name when required. The default message field produces an empty
// prefix, since `_msg:` is implicit in LogsQL.
func quoteFieldNameIfNeeded(s string) string {
	if !isMsgFieldName(s) {
		return quoteTokenIfNeeded(s) + ":"
	}
	return ""
}
// isMsgFieldName reports whether fieldName refers to the default `_msg` field.
// The empty field name is an alias for `_msg`.
func isMsgFieldName(fieldName string) bool {
	switch fieldName {
	case "", "_msg":
		return true
	default:
		return false
	}
}

View File

@@ -5,42 +5,40 @@ import (
"slices"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterIn matches any exact value from the values map.
//
// Example LogsQL: `fieldName:in("foo", "bar baz")`
// Example LogsQL: `in("foo", "bar baz")`
type filterIn struct {
fieldName string
values inValues
}
func newFilterInValues(fieldName string, values []string) *filterGeneric {
var fi filterIn
fi.values.values = values
return newFilterGeneric(fieldName, &fi)
}
func (fi *filterIn) String() string {
args := fi.values.String()
return fmt.Sprintf("%sin(%s)", quoteFieldNameIfNeeded(fi.fieldName), args)
return fmt.Sprintf("in(%s)", args)
}
func (fi *filterIn) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fi.fieldName)
}
func (fi *filterIn) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fi.fieldName)
func (fi *filterIn) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
stringValues := fi.values.getStringValues()
_, ok := stringValues[v]
return ok
}
func (fi *filterIn) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fi *filterIn) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
if fi.values.isEmpty() {
bm.resetBits()
return
}
c := br.getColumnByName(fi.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
stringValues := fi.values.getStringValues()
v := c.valuesEncoded[0]
@@ -125,9 +123,7 @@ func matchColumnByBinValues(br *blockResult, bm *bitmap, c *blockResultColumn, b
})
}
func (fi *filterIn) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fi.fieldName
func (fi *filterIn) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
if fi.values.isEmpty() {
bm.resetBits()
return

View File

@@ -4,35 +4,36 @@ import (
"fmt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterIPv4Range matches the given ipv4 range [minValue..maxValue].
//
// Example LogsQL: `fieldName:ipv4_range(127.0.0.1, 127.0.0.255)`
// Example LogsQL: `ipv4_range(127.0.0.1, 127.0.0.255)`
type filterIPv4Range struct {
fieldName string
minValue uint32
maxValue uint32
minValue uint32
maxValue uint32
}
func newFilterIPv4Range(fieldName string, minValue, maxValue uint32) *filterGeneric {
fr := &filterIPv4Range{
minValue: minValue,
maxValue: maxValue,
}
return newFilterGeneric(fieldName, fr)
}
func (fr *filterIPv4Range) String() string {
minValue := marshalIPv4String(nil, fr.minValue)
maxValue := marshalIPv4String(nil, fr.maxValue)
return fmt.Sprintf("%sipv4_range(%s, %s)", quoteFieldNameIfNeeded(fr.fieldName), minValue, maxValue)
return fmt.Sprintf("ipv4_range(%s, %s)", minValue, maxValue)
}
func (fr *filterIPv4Range) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fr.fieldName)
}
func (fr *filterIPv4Range) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fr.fieldName)
func (fr *filterIPv4Range) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchIPv4Range(v, fr.minValue, fr.maxValue)
}
func (fr *filterIPv4Range) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fr *filterIPv4Range) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
minValue := fr.minValue
maxValue := fr.maxValue
@@ -41,7 +42,7 @@ func (fr *filterIPv4Range) applyToBlockResult(br *blockResult, bm *bitmap) {
return
}
c := br.getColumnByName(fr.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !matchIPv4Range(v, minValue, maxValue) {
@@ -101,8 +102,7 @@ func (fr *filterIPv4Range) applyToBlockResult(br *blockResult, bm *bitmap) {
}
}
func (fr *filterIPv4Range) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fr.fieldName
func (fr *filterIPv4Range) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
minValue := fr.minValue
maxValue := fr.maxValue

View File

@@ -7,17 +7,14 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterIPv6Range matches the given ipv6 range [minValue..maxValue].
//
// Example LogsQL: `fieldName:ipv6_range(::1, ::2)`
// Example LogsQL: `ipv6_range(::1, ::2)`
type filterIPv6Range struct {
fieldName string
minValue [16]byte
maxValue [16]byte
minValue [16]byte
maxValue [16]byte
minMaxIPv4ValuesOnce sync.Once
minIPv4Value uint32
@@ -25,10 +22,18 @@ type filterIPv6Range struct {
isIPv4 bool
}
func newFilterIPv6Range(fieldName string, minValue, maxValue [16]byte) *filterGeneric {
fr := &filterIPv6Range{
minValue: minValue,
maxValue: maxValue,
}
return newFilterGeneric(fieldName, fr)
}
func (fr *filterIPv6Range) String() string {
minValue := netip.AddrFrom16(fr.minValue).String()
maxValue := netip.AddrFrom16(fr.maxValue).String()
return fmt.Sprintf("%sipv6_range(%s, %s)", quoteFieldNameIfNeeded(fr.fieldName), minValue, maxValue)
return fmt.Sprintf("ipv6_range(%s, %s)", minValue, maxValue)
}
func (fr *filterIPv6Range) getMinMaxIPv4Values() (uint32, uint32, bool) {
@@ -70,16 +75,12 @@ func getIPv4ValueFrom16(a [16]byte) (uint32, bool) {
return encoding.UnmarshalUint32(ip4[:]), true
}
func (fr *filterIPv6Range) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fr.fieldName)
}
func (fr *filterIPv6Range) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fr.fieldName)
func (fr *filterIPv6Range) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchIPv6Range(v, fr.minValue, fr.maxValue)
}
func (fr *filterIPv6Range) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fr *filterIPv6Range) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
minValue := fr.minValue
maxValue := fr.maxValue
@@ -88,7 +89,7 @@ func (fr *filterIPv6Range) applyToBlockResult(br *blockResult, bm *bitmap) {
return
}
c := br.getColumnByName(fr.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !matchIPv6Range(v, minValue, maxValue) {
@@ -153,8 +154,7 @@ func (fr *filterIPv6Range) applyToBlockResult(br *blockResult, bm *bitmap) {
}
}
func (fr *filterIPv6Range) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fr.fieldName
func (fr *filterIPv6Range) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
minValue := fr.minValue
maxValue := fr.maxValue

View File

@@ -0,0 +1,268 @@
package logstorage
import (
"fmt"
"slices"
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/valyala/fastjson"
)
// filterJSONArrayContainsAny matches if the JSON array in the given field contains the given value.
//
// Example LogsQL: `tags:json_array_contains_any("prod","dev")`
type filterJSONArrayContainsAny struct {
	// values are the values to search for inside the JSON array.
	values []string

	// tokensOnce guards the lazy one-time initialization of tokenss and tokensHashess.
	tokensOnce sync.Once
	// tokenss holds per-value tokens used for fast substring pre-checks.
	tokenss [][]string
	// tokensHashess holds per-value token hashes used for bloom-filter checks.
	tokensHashess [][]uint64
}

// newFilterJSONArrayContainsAny returns a filter matching rows where the JSON array
// stored in the given fieldName contains at least one of the given values.
func newFilterJSONArrayContainsAny(fieldName string, values []string) *filterGeneric {
	fa := &filterJSONArrayContainsAny{
		values: values,
	}
	return newFilterGeneric(fieldName, fa)
}
// getTokenss returns per-value tokens, initializing them on first use.
func (fa *filterJSONArrayContainsAny) getTokenss() [][]string {
	fa.tokensOnce.Do(fa.initTokens)
	return fa.tokenss
}

// getTokensHashes returns per-value token hashes, initializing them on first use.
func (fa *filterJSONArrayContainsAny) getTokensHashes() [][]uint64 {
	fa.tokensOnce.Do(fa.initTokens)
	return fa.tokensHashess
}

// initTokens tokenizes every value in fa.values and pre-computes the token hashes.
//
// It is executed at most once via fa.tokensOnce.
func (fa *filterJSONArrayContainsAny) initTokens() {
	tokenss := make([][]string, len(fa.values))
	for i, v := range fa.values {
		// Tokenize each value separately, since a row matches when ANY single value matches.
		tokenss[i] = tokenizeStrings(nil, []string{v})
	}
	fa.tokenss = tokenss

	tokensHashess := make([][]uint64, len(tokenss))
	for i, tokens := range tokenss {
		tokensHashess[i] = appendTokensHashes(nil, tokens)
	}
	fa.tokensHashess = tokensHashess
}
// String returns the LogsQL representation of fa.
func (fa *filterJSONArrayContainsAny) String() string {
	quoted := make([]string, 0, len(fa.values))
	for _, v := range fa.values {
		quoted = append(quoted, quoteTokenIfNeeded(v))
	}
	return "json_array_contains_any(" + strings.Join(quoted, ",") + ")"
}
// matchRowByField returns true if the JSON array stored in the given field
// of the row contains any of fa.values.
func (fa *filterJSONArrayContainsAny) matchRowByField(fields []Field, fieldName string) bool {
	tokenss := fa.getTokenss()
	v := getFieldValueByName(fields, fieldName)
	return matchJSONArrayContainsAny(v, fa.values, tokenss)
}
// applyToBlockResultByField clears bits in bm for rows of the given column
// whose JSON array does not contain any of fa.values.
func (fa *filterJSONArrayContainsAny) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
	tokenss := fa.getTokenss()

	c := br.getColumnByName(fieldName)
	if c.isConst {
		// Const column: a single check decides for all rows.
		v := c.valuesEncoded[0]
		if !matchJSONArrayContainsAny(v, fa.values, tokenss) {
			bm.resetBits()
		}
		return
	}
	if c.isTime {
		// Timestamps cannot be JSON arrays.
		bm.resetBits()
		return
	}

	switch c.valueType {
	case valueTypeString:
		values := c.getValues(br)
		bm.forEachSetBit(func(idx int) bool {
			v := values[idx]
			return matchJSONArrayContainsAny(v, fa.values, tokenss)
		})
	case valueTypeDict:
		// Check each distinct dict value once, then map rows through the dict index.
		bb := bbPool.Get()
		for _, v := range c.dictValues {
			// NOTE(review): this inner `c` (a byte flag) shadows the column variable `c` above.
			c := byte(0)
			if matchJSONArrayContainsAny(v, fa.values, tokenss) {
				c = 1
			}
			bb.B = append(bb.B, c)
		}
		valuesEncoded := c.getValuesEncoded(br)
		bm.forEachSetBit(func(idx int) bool {
			n := valuesEncoded[idx][0]
			return bb.B[n] == 1
		})
		bbPool.Put(bb)
	default:
		// Numeric/binary-encoded columns cannot hold JSON arrays.
		bm.resetBits()
	}
}
// applyToBlockSearchByField clears bits in bm for rows of the given column
// whose JSON array does not contain any of fa.values.
func (fa *filterJSONArrayContainsAny) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
	tokenss := fa.getTokenss()

	v := bs.getConstColumnValue(fieldName)
	// NOTE(review): "" appears to mean "no const column with this name" — confirm
	// against getConstColumnValue; empty const values cannot be JSON arrays anyway.
	if v != "" {
		if !matchJSONArrayContainsAny(v, fa.values, tokenss) {
			bm.resetBits()
		}
		return
	}

	// Verify whether filter matches other columns
	ch := bs.getColumnHeader(fieldName)
	if ch == nil {
		// Fast path - there are no matching columns.
		bm.resetBits()
		return
	}

	switch ch.valueType {
	case valueTypeString:
		// Bloom-filter pre-check: skip the block when none of the values may be present.
		tokensHashess := fa.getTokensHashes()
		if !matchAnyTokensHashess(bs, ch, tokensHashess) {
			bm.resetBits()
			return
		}
		visitValues(bs, ch, bm, func(v string) bool {
			return matchJSONArrayContainsAny(v, fa.values, tokenss)
		})
	case valueTypeDict:
		// Check each distinct dict value once, then filter rows via the encoded dict index.
		bb := bbPool.Get()
		for _, v := range ch.valuesDict.values {
			c := byte(0)
			if matchJSONArrayContainsAny(v, fa.values, tokenss) {
				c = 1
			}
			bb.B = append(bb.B, c)
		}
		matchEncodedValuesDict(bs, ch, bm, bb.B)
		bbPool.Put(bb)
	default:
		// Numeric/binary-encoded columns cannot hold JSON arrays.
		bm.resetBits()
	}
}
// matchAnyTokensHashess reports whether the bloom filter of ch may contain all
// token hashes of at least one entry in tokensHashess.
func matchAnyTokensHashess(bs *blockSearch, ch *columnHeader, tokensHashess [][]uint64) bool {
	for _, hashes := range tokensHashess {
		if !matchBloomFilterAllTokens(bs, ch, hashes) {
			continue
		}
		return true
	}
	return false
}
// matchJSONArrayContainsAny returns true when s is a JSON array containing at
// least one of the given values compared by string representation.
//
// tokenss must hold the tokenized form of values (see initTokens); it is used
// for a cheap substring pre-check before the full JSON parse.
func matchJSONArrayContainsAny(s string, values []string, tokenss [][]string) bool {
	if s == "" {
		// Fast path for empty strings.
		return false
	}
	s = trimJSONWhitespace(s)
	if !strings.HasPrefix(s, "[") {
		// Fast path - s is not a JSON array.
		return false
	}
	if !matchAnyTokenss(s, tokenss) {
		// Fast path - s doesn't contain any of the given values.
		return false
	}

	// Slow path - parse JSON array at s and search for matching values.
	p := jspp.Get()
	defer jspp.Put(p)

	v, err := p.Parse(s)
	if err != nil {
		// Malformed JSON never matches.
		return false
	}
	if v.Type() != fastjson.TypeArray {
		return false
	}
	jsa, err := v.Array()
	if err != nil {
		logger.Panicf("BUG: v.Array() mustn't return error; got %s", err)
	}
	for _, e := range jsa {
		// We only support checking against string representation of values in the array.
		switch e.Type() {
		case fastjson.TypeString:
			b, err := e.StringBytes()
			if err != nil {
				logger.Panicf("BUG: e.StringBytes() mustn't return error; got %s", err)
			}
			// Zero-copy view into the parser buffer; valid only until p is reused.
			bs := bytesutil.ToUnsafeString(b)
			if slices.Contains(values, bs) {
				return true
			}
		case fastjson.TypeNumber, fastjson.TypeTrue, fastjson.TypeFalse, fastjson.TypeNull:
			// Non-string scalars are compared via their JSON text (e.g. "42", "true", "null").
			bb := bbPool.Get()
			bb.B = e.MarshalTo(bb.B[:0])
			bs := bytesutil.ToUnsafeString(bb.B)
			ok := slices.Contains(values, bs)
			bbPool.Put(bb)
			if ok {
				return true
			}
		}
	}
	return false
}
// matchAnyTokenss reports whether s contains all substrings of at least one
// entry in tokenss. An empty tokenss never matches.
func matchAnyTokenss(s string, tokenss [][]string) bool {
	for i := range tokenss {
		if matchAllSubstrings(s, tokenss[i]) {
			return true
		}
	}
	return false
}

// matchAllSubstrings reports whether every token occurs in s as a substring.
// An empty token list matches any s.
func matchAllSubstrings(s string, tokens []string) bool {
	for i := range tokens {
		if !strings.Contains(s, tokens[i]) {
			return false
		}
	}
	return true
}
// trimJSONWhitespace returns s with leading and trailing JSON whitespace removed.
//
// JSON whitespace is space, tab, newline and carriage return (RFC 8259, section 2).
// The hand-rolled prefix/suffix loops are replaced with the equivalent
// strings.Trim call: the cutset is all-ASCII, so byte-wise and rune-wise
// trimming coincide.
func trimJSONWhitespace(s string) string {
	return strings.Trim(s, " \t\n\r")
}

View File

@@ -23,6 +23,15 @@ type filterLeField struct {
prefixFilterOnce sync.Once
}
// newFilterLeField returns a filter comparing the value of fieldName against the
// value of otherFieldName (le_field / lt_field per the funcName logic below).
// When excludeEqualValues is set, rows with equal values do not match.
//
// Both field names are canonicalized before use.
func newFilterLeField(fieldName, otherFieldName string, excludeEqualValues bool) *filterLeField {
	return &filterLeField{
		fieldName:          getCanonicalColumnName(fieldName),
		otherFieldName:     getCanonicalColumnName(otherFieldName),
		excludeEqualValues: excludeEqualValues,
	}
}
func (fe *filterLeField) String() string {
funcName := "le_field"
if fe.excludeEqualValues {

View File

@@ -4,35 +4,38 @@ import (
"unicode/utf8"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterLenRange matches field values with the length in the given range [minLen, maxLen].
//
// Example LogsQL: `fieldName:len_range(10, 20)`
// Example LogsQL: `len_range(10, 20)`
type filterLenRange struct {
fieldName string
minLen uint64
maxLen uint64
minLen uint64
maxLen uint64
stringRepr string
}
func newFilterLenRange(fieldName string, minLen, maxLen uint64, stringRepr string) *filterGeneric {
fr := &filterLenRange{
minLen: minLen,
maxLen: maxLen,
stringRepr: stringRepr,
}
return newFilterGeneric(fieldName, fr)
}
func (fr *filterLenRange) String() string {
return quoteFieldNameIfNeeded(fr.fieldName) + "len_range" + fr.stringRepr
return "len_range" + fr.stringRepr
}
func (fr *filterLenRange) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fr.fieldName)
}
func (fr *filterLenRange) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fr.fieldName)
func (fr *filterLenRange) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchLenRange(v, fr.minLen, fr.maxLen)
}
func (fr *filterLenRange) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fr *filterLenRange) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
minLen := fr.minLen
maxLen := fr.maxLen
@@ -41,7 +44,7 @@ func (fr *filterLenRange) applyToBlockResult(br *blockResult, bm *bitmap) {
return
}
c := br.getColumnByName(fr.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !matchLenRange(v, minLen, maxLen) {
@@ -128,8 +131,7 @@ func matchColumnByLenRange(br *blockResult, bm *bitmap, c *blockResultColumn, mi
})
}
func (fr *filterLenRange) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fr.fieldName
func (fr *filterLenRange) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
minLen := fr.minLen
maxLen := fr.maxLen

View File

@@ -8,6 +8,12 @@ import (
type filterNoop struct {
}
func newFilterNoop() *filterNoop {
return &noopFilter
}
var noopFilter filterNoop
func (fn *filterNoop) String() string {
return "*"
}

View File

@@ -11,6 +11,12 @@ type filterNot struct {
f filter
}
func newFilterNot(f filter) *filterNot {
return &filterNot{
f: f,
}
}
func (fn *filterNot) String() string {
s := fn.f.String()
switch fn.f.(type) {

View File

@@ -17,6 +17,12 @@ type filterOr struct {
byFieldTokens []fieldTokens
}
func newFilterOr(filters []filter) *filterOr {
return &filterOr{
filters: filters,
}
}
func (fo *filterOr) String() string {
filters := fo.filters
a := make([]string, len(filters))
@@ -45,20 +51,19 @@ func (fo *filterOr) matchRow(fields []Field) bool {
func (fo *filterOr) applyToBlockResult(br *blockResult, bm *bitmap) {
bmResult := getBitmap(bm.bitsLen)
bmTmp := getBitmap(bm.bitsLen)
defer putBitmap(bmTmp)
defer putBitmap(bmResult)
bmResult.copyFrom(bm)
for _, f := range fo.filters {
bmTmp.copyFrom(bmResult)
f.applyToBlockResult(br, bmTmp)
bmResult.andNot(bmTmp)
if bmResult.isZero() {
putBitmap(bmTmp)
putBitmap(bmResult)
return
}
}
bm.andNot(bmResult)
putBitmap(bmTmp)
putBitmap(bmResult)
}
func (fo *filterOr) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
@@ -70,20 +75,19 @@ func (fo *filterOr) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
bmResult := getBitmap(bm.bitsLen)
bmTmp := getBitmap(bm.bitsLen)
defer putBitmap(bmTmp)
defer putBitmap(bmResult)
bmResult.copyFrom(bm)
for _, f := range fo.filters {
bmTmp.copyFrom(bmResult)
f.applyToBlockSearch(bs, bmTmp)
bmResult.andNot(bmTmp)
if bmResult.isZero() {
putBitmap(bmTmp)
putBitmap(bmResult)
return
}
}
bm.andNot(bmResult)
putBitmap(bmTmp)
putBitmap(bmResult)
}
func (fo *filterOr) matchBloomFilters(bs *blockSearch) bool {
@@ -150,30 +154,11 @@ func getCommonTokensForOrFilters(filters []filter) []fieldTokens {
for _, f := range filters {
switch t := f.(type) {
case *filterExact:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterExactPrefix:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterPatternMatch:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterPhrase:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterPrefix:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterRegexp:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterSequence:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterSubstring:
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
case *filterGeneric:
if !t.isWildcard {
tokens := t.getTokens()
mergeFieldTokens(t.fieldName, tokens)
}
case *filterAnd:
bfts := t.getByFieldTokens()
for _, bft := range bfts {

View File

@@ -5,16 +5,12 @@ import (
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterPatternMatch filters field entries by the given pattern, which could be generated by collapse_nums pipe.
//
// See https://docs.victoriametrics.com/victorialogs/logsql/#collapse_nums-pipe
type filterPatternMatch struct {
fieldName string
funcName string
pm *patternMatcher
@@ -23,12 +19,16 @@ type filterPatternMatch struct {
tokensHashes []uint64
}
func (fp *filterPatternMatch) String() string {
return fmt.Sprintf("%s%s(%s)", quoteFieldNameIfNeeded(fp.fieldName), fp.funcName, quoteTokenIfNeeded(fp.pm.String()))
func newFilterPatternMatch(fieldName, funcName string, pm *patternMatcher) *filterGeneric {
fp := &filterPatternMatch{
funcName: funcName,
pm: pm,
}
return newFilterGeneric(fieldName, fp)
}
func (fp *filterPatternMatch) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fp.fieldName)
func (fp *filterPatternMatch) String() string {
return fmt.Sprintf("%s(%s)", fp.funcName, quoteTokenIfNeeded(fp.pm.String()))
}
func (fp *filterPatternMatch) getTokens() []string {
@@ -157,13 +157,13 @@ func (fp *filterPatternMatch) initTokens() {
fp.tokensHashes = appendTokensHashes(nil, fp.tokens)
}
func (fp *filterPatternMatch) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fp.fieldName)
func (fp *filterPatternMatch) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return fp.pm.Match(v)
}
func (fp *filterPatternMatch) applyToBlockResult(br *blockResult, bm *bitmap) {
c := br.getColumnByName(fp.fieldName)
func (fp *filterPatternMatch) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !fp.pm.Match(v) {
@@ -222,9 +222,7 @@ func (fp *filterPatternMatch) matchColumnGeneric(br *blockResult, bm *bitmap, c
})
}
func (fp *filterPatternMatch) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fp.fieldName
func (fp *filterPatternMatch) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
// Verify whether fp matches const column
v := bs.getConstColumnValue(fieldName)
if v != "" {

View File

@@ -8,8 +8,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterPhrase filters field entries by phrase match (aka full text search).
@@ -17,26 +15,28 @@ import (
// A phrase consists of any number of words with delimiters between them.
//
// An empty phrase matches only an empty string.
// A single-word phrase is the simplest LogsQL query: `fieldName:word`
// A single-word phrase is the simplest LogsQL query: `word`
//
// Multi-word phrase is expressed as `fieldName:"word1 ... wordN"` in LogsQL.
// Multi-word phrase is expressed as `"word1 ... wordN"` in LogsQL.
//
// A special case `fieldName:""` matches any value without `fieldName` field.
// A special case `""` matches any log entry without the given `fieldName` field.
type filterPhrase struct {
fieldName string
phrase string
phrase string
tokensOnce sync.Once
tokens []string
tokensHashes []uint64
}
func (fp *filterPhrase) String() string {
return quoteFieldNameIfNeeded(fp.fieldName) + quoteTokenIfNeeded(fp.phrase)
func newFilterPhrase(fieldName, phrase string) *filterGeneric {
fp := &filterPhrase{
phrase: phrase,
}
return newFilterGeneric(fieldName, fp)
}
func (fp *filterPhrase) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fp.fieldName)
func (fp *filterPhrase) String() string {
return quoteTokenIfNeeded(fp.phrase)
}
func (fp *filterPhrase) getTokens() []string {
@@ -54,17 +54,16 @@ func (fp *filterPhrase) initTokens() {
fp.tokensHashes = appendTokensHashes(nil, fp.tokens)
}
func (fp *filterPhrase) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fp.fieldName)
func (fp *filterPhrase) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchPhrase(v, fp.phrase)
}
func (fp *filterPhrase) applyToBlockResult(br *blockResult, bm *bitmap) {
applyToBlockResultGeneric(br, bm, fp.fieldName, fp.phrase, matchPhrase)
func (fp *filterPhrase) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
applyToBlockResultGeneric(br, bm, fieldName, fp.phrase, matchPhrase)
}
func (fp *filterPhrase) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fp.fieldName
func (fp *filterPhrase) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
phrase := fp.phrase
// Verify whether fp matches const column
@@ -312,17 +311,6 @@ func matchBloomFilterAllTokens(bs *blockSearch, ch *columnHeader, tokens []uint6
return bf.containsAll(tokens)
}
func quoteFieldNameIfNeeded(s string) string {
if isMsgFieldName(s) {
return ""
}
return quoteTokenIfNeeded(s) + ":"
}
func isMsgFieldName(fieldName string) bool {
return fieldName == "" || fieldName == "_msg"
}
func toFloat64String(bs *blockSearch, bb *bytesutil.ByteBuffer, v string) string {
if len(v) != 8 {
logger.Panicf("FATAL: %s: unexpected length for binary representation of floating-point number: got %d; want 8", bs.partPath(), len(v))

View File

@@ -8,33 +8,33 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterPrefix matches the given prefix.
//
// Example LogsQL: `fieldName:prefix*` or `fieldName:"some prefix"*`
// Example LogsQL: `prefix*` or `"some prefix"*`
//
// A special case `fieldName:*` matches non-empty value for the given `fieldName` field
// A special case `*` matches non-empty value for the given `fieldName` field
type filterPrefix struct {
fieldName string
prefix string
prefix string
tokensOnce sync.Once
tokens []string
tokensHashes []uint64
}
func (fp *filterPrefix) String() string {
if fp.prefix == "" {
return quoteFieldNameIfNeeded(fp.fieldName) + "*"
func newFilterPrefix(fieldName, prefix string) *filterGeneric {
fp := &filterPrefix{
prefix: prefix,
}
return fmt.Sprintf("%s%s*", quoteFieldNameIfNeeded(fp.fieldName), quoteTokenIfNeeded(fp.prefix))
return newFilterGeneric(fieldName, fp)
}
func (fp *filterPrefix) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fp.fieldName)
func (fp *filterPrefix) String() string {
if fp.prefix == "" {
return "*"
}
return fmt.Sprintf("%s*", quoteTokenIfNeeded(fp.prefix))
}
func (fp *filterPrefix) getTokens() []string {
@@ -52,17 +52,16 @@ func (fp *filterPrefix) initTokens() {
fp.tokensHashes = appendTokensHashes(nil, fp.tokens)
}
func (fp *filterPrefix) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fp.fieldName)
func (fp *filterPrefix) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchPrefix(v, fp.prefix)
}
func (fp *filterPrefix) applyToBlockResult(bs *blockResult, bm *bitmap) {
applyToBlockResultGeneric(bs, bm, fp.fieldName, fp.prefix, matchPrefix)
func (fp *filterPrefix) applyToBlockResultByField(bs *blockResult, bm *bitmap, fieldName string) {
applyToBlockResultGeneric(bs, bm, fieldName, fp.prefix, matchPrefix)
}
func (fp *filterPrefix) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fp.fieldName
func (fp *filterPrefix) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
prefix := fp.prefix
// Verify whether fp matches const column

View File

@@ -4,36 +4,38 @@ import (
"math"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterRange matches the given range [minValue..maxValue].
//
// Example LogsQL: `fieldName:range(minValue, maxValue]`
// Example LogsQL: `range(minValue, maxValue]`
type filterRange struct {
fieldName string
minValue float64
maxValue float64
stringRepr string
}
func newFilterRange(fieldName string, minValue, maxValue float64, stringRepr string) *filterGeneric {
fr := &filterRange{
minValue: minValue,
maxValue: maxValue,
stringRepr: stringRepr,
}
return newFilterGeneric(fieldName, fr)
}
func (fr *filterRange) String() string {
return quoteFieldNameIfNeeded(fr.fieldName) + fr.stringRepr
return fr.stringRepr
}
func (fr *filterRange) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fr.fieldName)
}
func (fr *filterRange) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fr.fieldName)
func (fr *filterRange) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return matchRange(v, fr.minValue, fr.maxValue)
}
func (fr *filterRange) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fr *filterRange) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
minValue := fr.minValue
maxValue := fr.maxValue
@@ -42,7 +44,7 @@ func (fr *filterRange) applyToBlockResult(br *blockResult, bm *bitmap) {
return
}
c := br.getColumnByName(fr.fieldName)
c := br.getColumnByName(fieldName)
if c.isConst {
v := c.valuesEncoded[0]
if !matchRange(v, minValue, maxValue) {
@@ -182,8 +184,7 @@ func (fr *filterRange) applyToBlockResult(br *blockResult, bm *bitmap) {
}
}
func (fr *filterRange) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fr.fieldName
func (fr *filterRange) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
minValue := fr.minValue
maxValue := fr.maxValue

View File

@@ -7,28 +7,28 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
"github.com/VictoriaMetrics/VictoriaLogs/lib/prefixfilter"
)
// filterRegexp matches the given regexp
//
// Example LogsQL: `fieldName:re("regexp")`
// Example LogsQL: `re("regexp")`
type filterRegexp struct {
fieldName string
re *regexutil.Regex
re *regexutil.Regex
tokensOnce sync.Once
tokens []string
tokensHashes []uint64
}
func (fr *filterRegexp) String() string {
return fmt.Sprintf("%s~%s", quoteFieldNameIfNeeded(fr.fieldName), quoteTokenIfNeeded(fr.re.String()))
func newFilterRegexp(fieldName string, re *regexutil.Regex) *filterGeneric {
fp := &filterRegexp{
re: re,
}
return newFilterGeneric(fieldName, fp)
}
func (fr *filterRegexp) updateNeededFields(pf *prefixfilter.Filter) {
pf.AddAllowFilter(fr.fieldName)
func (fr *filterRegexp) String() string {
return fmt.Sprintf("~%s", quoteTokenIfNeeded(fr.re.String()))
}
func (fr *filterRegexp) getTokens() []string {
@@ -76,20 +76,19 @@ func skipLastToken(s string) string {
}
}
func (fr *filterRegexp) matchRow(fields []Field) bool {
v := getFieldValueByName(fields, fr.fieldName)
func (fr *filterRegexp) matchRowByField(fields []Field, fieldName string) bool {
v := getFieldValueByName(fields, fieldName)
return fr.re.MatchString(v)
}
func (fr *filterRegexp) applyToBlockResult(br *blockResult, bm *bitmap) {
func (fr *filterRegexp) applyToBlockResultByField(br *blockResult, bm *bitmap, fieldName string) {
re := fr.re
applyToBlockResultGeneric(br, bm, fr.fieldName, "", func(v, _ string) bool {
applyToBlockResultGeneric(br, bm, fieldName, "", func(v, _ string) bool {
return re.MatchString(v)
})
}
func (fr *filterRegexp) applyToBlockSearch(bs *blockSearch, bm *bitmap) {
fieldName := fr.fieldName
func (fr *filterRegexp) applyToBlockSearchByField(bs *blockSearch, bm *bitmap, fieldName string) {
re := fr.re
// Verify whether filter matches const column

Some files were not shown because too many files have changed in this diff Show More