vendor: run make vendor-update

This commit is contained in:
Aliaksandr Valialkin
2026-01-19 15:26:38 +01:00
parent bc8f6c5688
commit 34f242a6b8
1449 changed files with 75529 additions and 75978 deletions

View File

@@ -4,8 +4,10 @@ import (
"context"
"fmt"
"log"
"strings"
"sync"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -61,19 +63,19 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
var it chunkenc.Iterator
for ss.Next() {
var name string
var labels []vm.LabelPair
var labelPairs []vm.LabelPair
series := ss.At()
for _, label := range series.Labels() {
series.Labels().Range(func(label labels.Label) {
if label.Name == "__name__" {
name = label.Value
continue
return
}
labels = append(labels, vm.LabelPair{
Name: label.Name,
Value: label.Value,
labelPairs = append(labelPairs, vm.LabelPair{
Name: strings.Clone(label.Name),
Value: strings.Clone(label.Value),
})
}
})
if name == "" {
return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID)
}
@@ -99,7 +101,7 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
}
ts := vm.TimeSeries{
Name: name,
LabelPairs: labels,
LabelPairs: labelPairs,
Timestamps: timestamps,
Values: values,
}

View File

@@ -22,7 +22,17 @@ func NewPrometheusMockStorage(series []*prompb.TimeSeries) *PrometheusMockStorag
return &PrometheusMockStorage{store: series}
}
// Read implements the storage.Storage interface for reading time series data.
// ReadMultiple implements the storage.ReadClient interface for reading time series data.
func (ms *PrometheusMockStorage) ReadMultiple(ctx context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
if len(queries) != 1 {
panic(fmt.Errorf("reading multiple queries isn't implemented"))
}
query := queries[0]
return ms.Read(ctx, query, sortSeries)
}
// Read implements the storage.ReadClient interface for reading time series data.
func (ms *PrometheusMockStorage) Read(_ context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
if ms.query != nil {
return nil, fmt.Errorf("expected only one call to remote client got: %v", query)

View File

@@ -162,7 +162,7 @@ func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
var matchers []*labels.Matcher
cb := func() (int64, error) { return 0, nil }
c := remote.NewSampleAndChunkQueryableClient(rrs.storage, nil, matchers, true, cb)
c := remote.NewSampleAndChunkQueryableClient(rrs.storage, labels.New(), matchers, true, cb)
q, err := c.ChunkQuerier(startTs, endTs)
if err != nil {
@@ -317,13 +317,13 @@ func generateRemoteReadSamples(idx int, startTime, endTime, numOfSamples int64)
return samples
}
func labelsToLabelsProto(labels labels.Labels) []prompb.Label {
result := make([]prompb.Label, 0, len(labels))
for _, l := range labels {
func labelsToLabelsProto(ls labels.Labels) []prompb.Label {
result := make([]prompb.Label, 0, ls.Len())
ls.Range(func(l labels.Label) {
result = append(result, prompb.Label{
Name: l.Name,
Value: l.Value,
Name: strings.Clone(l.Name),
Value: strings.Clone(l.Value),
})
}
})
return result
}

161
go.mod
View File

@@ -3,29 +3,29 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.25.6
require (
cloud.google.com/go/storage v1.57.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
cloud.google.com/go/storage v1.59.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4
github.com/VictoriaMetrics/VictoriaLogs v1.36.2-0.20251008164716-21c0fb3de84d
github.com/VictoriaMetrics/easyproto v1.1.3
github.com/VictoriaMetrics/fastcache v1.13.2
github.com/VictoriaMetrics/metrics v1.40.2
github.com/VictoriaMetrics/metricsql v0.84.8
github.com/aws/aws-sdk-go-v2 v1.39.2
github.com/aws/aws-sdk-go-v2/config v1.31.12
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4
github.com/bmatcuk/doublestar/v4 v4.9.1
github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.32.7
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.19
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1
github.com/bmatcuk/doublestar/v4 v4.9.2
github.com/cespare/xxhash/v2 v2.3.0
github.com/cheggaaa/pb/v3 v3.1.7
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v1.0.0
github.com/google/go-cmp v0.7.0
github.com/googleapis/gax-go/v2 v2.15.0
github.com/googleapis/gax-go/v2 v2.16.0
github.com/influxdata/influxdb v1.12.2
github.com/klauspost/compress v1.18.0
github.com/prometheus/prometheus v0.303.1
github.com/klauspost/compress v1.18.3
github.com/prometheus/prometheus v0.309.1
github.com/urfave/cli/v2 v2.27.7
github.com/valyala/fastjson v1.6.7
github.com/valyala/fastrand v1.1.0
@@ -33,65 +33,67 @@ require (
github.com/valyala/gozstd v1.24.0
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.8.0
golang.org/x/net v0.47.0
golang.org/x/oauth2 v0.32.0
golang.org/x/sys v0.38.0
google.golang.org/api v0.252.0
golang.org/x/net v0.49.0
golang.org/x/oauth2 v0.34.0
golang.org/x/sys v0.40.0
google.golang.org/api v0.260.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cel.dev/expr v0.24.0 // indirect
cel.dev/expr v0.25.1 // indirect
cloud.google.com/go v0.123.0 // indirect
cloud.google.com/go/auth v0.17.0 // indirect
cloud.google.com/go/auth v0.18.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
cloud.google.com/go/iam v1.5.3 // indirect
cloud.google.com/go/monitoring v1.24.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.18.16 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 // indirect
github.com/aws/smithy-go v1.23.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/clipperhouse/stringish v0.1.1 // indirect
github.com/clipperhouse/uax29/v2 v2.3.1 // indirect
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/go-version v1.8.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
@@ -108,17 +110,19 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/oklog/ulid/v2 v2.1.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.137.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.137.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.137.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.143.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.143.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.143.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/prometheus/sigv4 v0.2.1 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/otlptranslator v1.0.0 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/prometheus/sigv4 v0.4.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
@@ -126,46 +130,43 @@ require (
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/collector/component v1.43.0 // indirect
go.opentelemetry.io/collector/confmap v1.43.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.137.0 // indirect
go.opentelemetry.io/collector/consumer v1.43.0 // indirect
go.opentelemetry.io/collector/featuregate v1.43.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.137.0 // indirect
go.opentelemetry.io/collector/pdata v1.43.0 // indirect
go.opentelemetry.io/collector/pipeline v1.43.0 // indirect
go.opentelemetry.io/collector/processor v1.43.0 // indirect
go.opentelemetry.io/collector/component v1.49.0 // indirect
go.opentelemetry.io/collector/confmap v1.49.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.143.0 // indirect
go.opentelemetry.io/collector/consumer v1.49.0 // indirect
go.opentelemetry.io/collector/featuregate v1.49.0 // indirect
go.opentelemetry.io/collector/pdata v1.49.0 // indirect
go.opentelemetry.io/collector/pipeline v1.49.0 // indirect
go.opentelemetry.io/collector/processor v1.49.0 // indirect
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/log v0.14.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/sdk v1.39.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect
go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
google.golang.org/genproto v0.0.0-20251007200510-49b9836ed3ff // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251007200510-49b9836ed3ff // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251007200510-49b9836ed3ff // indirect
google.golang.org/grpc v1.76.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
google.golang.org/genproto v0.0.0-20260114163908-3f89685c29c3 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.34.1 // indirect
k8s.io/client-go v0.34.1 // indirect
k8s.io/apimachinery v0.35.0 // indirect
k8s.io/client-go v0.35.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
k8s.io/utils v0.0.0-20260108192941-914a6e750570 // indirect
)

532
go.sum
View File

@@ -1,29 +1,29 @@
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE=
cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU=
cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0=
cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY=
cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw=
cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8=
cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk=
cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE=
cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI=
cloud.google.com/go/storage v1.57.0 h1:4g7NB7Ta7KetVbOMpCqy89C+Vg5VE8scqlSHUPm7Rds=
cloud.google.com/go/storage v1.57.0/go.mod h1:329cwlpzALLgJuu8beyJ/uvQznDHpa2U5lGjWednkzg=
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
cloud.google.com/go/storage v1.59.1 h1:DXAZLcTimtiXdGqDSnebROVPd9QvRsFVVlptz02Wk58=
cloud.google.com/go/storage v1.59.1/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI=
cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U=
cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
@@ -34,12 +34,12 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4 h1:jWQK1GI+LeGGUKBADtcH2rRqPxYB1Ljwms5gFA2LqrM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4/go.mod h1:8mwH4klAm9DUgR2EEHyEEAQlRDvLPyg5fQry3y+cDew=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=
@@ -70,60 +70,72 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I=
github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
github.com/aws/aws-sdk-go-v2/config v1.31.12 h1:pYM1Qgy0dKZLHX2cXslNacbcEFMkDMl+Bcj5ROuS6p8=
github.com/aws/aws-sdk-go-v2/config v1.31.12/go.mod h1:/MM0dyD7KSDPR+39p9ZNVKaHDLb9qnfDurvVS2KAhN8=
github.com/aws/aws-sdk-go-v2/credentials v1.18.16 h1:4JHirI4zp958zC026Sm+V4pSDwW4pwLefKrc0bF2lwI=
github.com/aws/aws-sdk-go-v2/credentials v1.18.16/go.mod h1:qQMtGx9OSw7ty1yLclzLxXCRbrkjWAM7JnObZjmCB7I=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 h1:ofHawDLJTI6ytDIji+g4dXQ6u2idzTb04tDlN9AS614=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12/go.mod h1:f5pL4iLDfbcxj1SZcdRdIokBB5eHbuYPS/Fs9DwUPRQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 h1:X0FveUndcZ3lKbSpIC6rMYGRiQTcUVRNH6X4yYtIrlU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 h1:mUI3b885qJgfqKDUSj6RgbRqLdX0wGmg8ruM03zNfQA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4/go.mod h1:6v8ukAxc7z4x4oBjGUsLnH7KGLY9Uhcgij19UJNkiMg=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 h1:A1oRkiSQOWstGh61y4Wc/yQ04sqrQZr1Si/oAXj20/s=
github.com/aws/aws-sdk-go-v2/service/sso v1.29.6/go.mod h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA=
github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8=
github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.19 h1:Gxj3kAlmM+a/VVO4YNsmgHGVUZhSxs0tuVwLIxZBCtM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.19/go.mod h1:XGq5kImVqQT4HUNbbG+0Y8O74URsPNH7CGPg1s1HW5E=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.277.0 h1:RHJSkRXDGkAKrV4CTEsZsZkOmSpxXKO4aKx4rXd94K4=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.277.0/go.mod h1:Wg68QRgy2gEGGdmTPU/UbVpdv8sM14bUZmF64KFwAsY=
github.com/aws/aws-sdk-go-v2/service/ecs v1.69.5 h1:5nkhwt0d/gjuT3AQ2LUK0aFRNB3MGlzB2elqy/ZsKP4=
github.com/aws/aws-sdk-go-v2/service/ecs v1.69.5/go.mod h1:LQMlcWBoiFVD3vUVEz42ST0yTiaDujv2dRE6sXt1yPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10 h1:MQuZZ6Tq1qQabPlkVxrCMdyVl70Ogl4AERZKo+y9Wzo=
github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10/go.mod h1:U5C3JME1ibKESmpzBAqlRpTYZfVbTqrb5ICJm+sVVd8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 h1:C2dUPSnEpy4voWFIq3JNd8gN0Y5vYGDo44eUE58a/p8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.9.2 h1:b0mc6WyRSYLjzofB2v/0cuDUZ+MqoGyH3r0dVij35GI=
github.com/bmatcuk/doublestar/v4 v4.9.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.1.7 h1:2FsIW307kt7A/rz/ZI2lvPO+v3wKazzE4K/0LtTWsOI=
github.com/cheggaaa/pb/v3 v3.1.7/go.mod h1:/Ji89zfVPeC/u5j8ukD0MBPHt2bzTYp74lQ7KlgFWTQ=
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.3.1 h1:RjM8gnVbFbgI67SBekIC7ihFpyXwRPYWXn9BZActHbw=
github.com/clipperhouse/uax29/v2 v2.3.1/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -132,12 +144,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.136.0 h1:DTxugljFJSMBPfEGq4KeXpnKeAHicggNqogcrw/YdZw=
github.com/digitalocean/godo v1.136.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/digitalocean/godo v1.171.0 h1:QwpkwWKr3v7yxc8D4NQG973NoR9APCEWjYnLOQeXVpQ=
github.com/digitalocean/godo v1.171.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -146,22 +158,22 @@ github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=
github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=
github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
@@ -171,16 +183,38 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4=
github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -206,20 +240,20 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
github.com/gophercloud/gophercloud/v2 v2.6.0 h1:XJKQ0in3iHOZHVAFMXq/OhjCuvvG+BKR0unOqRfG1EI=
github.com/gophercloud/gophercloud/v2 v2.6.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=
github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=
github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=
github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=
github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOxmsLmQki4LPM=
github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.31.2 h1:NicObVJHcCmyOIl7Z9iHPvvFrocgTYo9cITSGg0/7pw=
github.com/hashicorp/consul/api v1.31.2/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE=
github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
@@ -234,24 +268,20 @@ github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISH
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e h1:wGl06iy/H90NSbWjfXWeRwk9SJOks0u4voIryeJFlSA=
github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.19.1 h1:UU/7h3uc/rdgspM8xkQF7wokmwZXePWDXcLqrQRRzzY=
github.com/hetznercloud/hcloud-go/v2 v2.19.1/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
github.com/hetznercloud/hcloud-go/v2 v2.32.0 h1:BRe+k7ESdYv3xQLBGdKUfk+XBFRJNGKzq70nJI24ciM=
github.com/hetznercloud/hcloud-go/v2 v2.32.0/go.mod h1:hAanyyfn9M0cMmZ68CXzPCF54KRb9EXd8eiE2FHKGIE=
github.com/influxdata/influxdb v1.12.2 h1:Y0ZBu47gYVbDCRPMFOrlRRZ3grdqPGIJxerFysVSq+g=
github.com/influxdata/influxdb v1.12.2/go.mod h1:EwqFMB6GKV0Huug82Msa5f8QfXhqETUmC4L9A0QZJQM=
github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k=
github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/ionos-cloud/sdk-go/v6 v6.3.5 h1:6fHArdV1lf50iRhCkCP7wkvGwWzVwi+l9w1t5mwkOa8=
github.com/ionos-cloud/sdk-go/v6 v6.3.5/go.mod h1:nUGHP4kZHAZngCVr4v6C8nuargFrtvt7GrzH/hqn7c4=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -260,8 +290,8 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
@@ -276,18 +306,16 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U=
github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/linode/linodego v1.63.0 h1:MdjizfXNJDVJU6ggoJmMO5O9h4KGPGivNX0fzrAnstk=
github.com/linode/linodego v1.63.0/go.mod h1:GoiwLVuLdBQcAebxAVKVL3mMYUgJZR/puOUSla04xBE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc=
github.com/miekg/dns v1.1.69/go.mod h1:7OyjD9nEba5OkqQ/hB4fy3PIoxafSZJtducccIelz3g=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -310,18 +338,18 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.137.0 h1:yXGavfQt72MqJiwqv2hfSFX00t9M7lywUyC1Y6vKk34=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.137.0/go.mod h1:2o1cG7vPMb3wQk9rOaszPjK+1nd5uDOKP2O6jyuIR6s=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.137.0 h1:gzYqqK2ZOnbrEQfbS/2LnQa4t4oCofJdPKC9TkMJUQY=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.137.0/go.mod h1:unML3A0mPOFWZcDJkzNEmv46eUwFxN9FqMcaNWxLh4g=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.137.0 h1:kYhcFZ6wzwmvnQOXNnK0NS0F3CdFC6B9XK/gDs69WGg=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.137.0/go.mod h1:M2qsf2dhEKsnXjmwFqp7vrTCRvwusDCMBvtGaXYWafU=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.143.0 h1:upyPOkUAWpESV01xbGFqr4OHAVNh8wNHL543yh5gtKM=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.143.0/go.mod h1:pzxHDwrT3+38i6wa1AalpumYPnlkHYe0OVN5LSQ/CjY=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.143.0 h1:M2bfp6Dz3ENrsHG401rneY/A9PepsAEzi0rWsAtPQE4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.143.0/go.mod h1:MFCX7ipRa+GD7b+DBRSJd1ngZ3NXxwd5FTwPiCeUARE=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.143.0 h1:aW5LQYgioeXr36lbQbnNvQKNhuliSQ7kRYnyTyjqbGQ=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.143.0/go.mod h1:5xgvDQsVzmQaAbfbZPxACeF1evdZnQ1tH19M3TcwxsU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw=
github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
@@ -334,28 +362,34 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 h1:vwqZvuobg82U0gcG2eVrFH27806bUbNr32SvfRbvdsg=
github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI=
github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/prometheus/prometheus v0.303.1 h1:He/2jRE6sB23Ew38AIoR1WRR3fCMgPlJA2E0obD2WSY=
github.com/prometheus/prometheus v0.303.1/go.mod h1:WEq2ogBPZoLjj9x5K67VEk7ECR0nRD9XCjaOt1lsYck=
github.com/prometheus/sigv4 v0.2.1 h1:hl8D3+QEzU9rRmbKIRwMKRwaFGyLkbPdH5ZerglRHY0=
github.com/prometheus/sigv4 v0.2.1/go.mod h1:ySk6TahIlsR2sxADuHy4IBFhwEjRGGsfbbLGhFYFj6Q=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/prometheus/prometheus v0.309.1 h1:jutK6eCYDpWdPTUbVbkcQsNCMO9CCkSwjQRMLds4jSo=
github.com/prometheus/prometheus v0.309.1/go.mod h1:d+dOGiVhuNDa4MaFXHVdnUBy/CzqlcNTooR8oM1wdTU=
github.com/prometheus/sigv4 v0.4.0 h1:s8oiq+S4ORkpjftnBvzObLrz5Hw49YwEhumNGBdfg4M=
github.com/prometheus/sigv4 v0.4.0/go.mod h1:D6dQeKEsDyUWzoNGjby5HgXshiOAbsz7vuApHTCmOxA=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBwztRppfxFKlYqYte/c=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35 h1:8xfn1RzeI9yoCUuEwDy08F+No6PcKZGEDOQ6hrRyLts=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35/go.mod h1:47B1d/YXmSAxlJxUJxClzHR6b3T4M1WyCvwENPQNBWc=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
github.com/stackitcloud/stackit-sdk-go/core v0.20.1 h1:odiuhhRXmxvEvnVTeZSN9u98edvw2Cd3DcnkepncP3M=
github.com/stackitcloud/stackit-sdk-go/core v0.20.1/go.mod h1:fqto7M82ynGhEnpZU6VkQKYWYoFG5goC076JWXTUPRQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -394,82 +428,76 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/collector/component v1.43.0 h1:9dyOmV0UuIhrNSASMeDH125jhfv7+FhWMq0HtNHHCs8=
go.opentelemetry.io/collector/component v1.43.0/go.mod h1:Pw3qM5HhgnSMpebNRUiiJuEiXxZyHq83vl7wXqxD8hU=
go.opentelemetry.io/collector/component/componentstatus v0.137.0 h1:rs2p8Pc3b17xVe8rMKkfg8wdZnXqIYV35RaLLFxunNY=
go.opentelemetry.io/collector/component/componentstatus v0.137.0/go.mod h1:J8CVhqRVl1+2+1wJatY8zMJZmtfQaOKs2K9j4pJv1mQ=
go.opentelemetry.io/collector/component/componenttest v0.137.0 h1:QC9MZsYyzQqN9qMlleJb78wf7FeCjbr4jLeCuNlKHLU=
go.opentelemetry.io/collector/component/componenttest v0.137.0/go.mod h1:JuiX9pv7qE5G8keihhjM66LeidryEnziPND0sXuK9PQ=
go.opentelemetry.io/collector/confmap v1.43.0 h1:QVAnbS7A+2Ra61xsuG355vhlW6uOMaKWysrwLQzDUz4=
go.opentelemetry.io/collector/confmap v1.43.0/go.mod h1:N5GZpFCmwD1GynDu3IWaZW5Ycfc/7YxSU0q1/E3vLdg=
go.opentelemetry.io/collector/confmap/xconfmap v0.137.0 h1:IKzD6w4YuvBi6GvxZfhz7SJR6GR1UpSQRuxtx20/+9U=
go.opentelemetry.io/collector/confmap/xconfmap v0.137.0/go.mod h1:psXdQr13pVrCqNPdoER2QZZorvONAR5ZUEHURe4POh4=
go.opentelemetry.io/collector/consumer v1.43.0 h1:51pfN5h6PLlaBwGPtyHn6BdK0DgtVGRV0UYRPbbscbs=
go.opentelemetry.io/collector/consumer v1.43.0/go.mod h1:v3J2g+6IwOPbLsnzL9cQfvgpmmsZt1YS7aXSNDFmJfk=
go.opentelemetry.io/collector/consumer/consumertest v0.137.0 h1:tkqBk/DmJcrkRvHwNdDwvdiWfqyS6ymGgr9eyn6Vy6A=
go.opentelemetry.io/collector/consumer/consumertest v0.137.0/go.mod h1:6bKAlEgrAZ3NSn7ULLFZQMQtlW2xJlvVWkzIaGprucg=
go.opentelemetry.io/collector/consumer/xconsumer v0.137.0 h1:p3tkV3O9bL3bZl3RN2wmoxl22f8B8eMomKUqz656OPY=
go.opentelemetry.io/collector/consumer/xconsumer v0.137.0/go.mod h1:N+nRnP0ga4Scu8Ew87F+kxVajE/eGjRLbWC9H+elN5Q=
go.opentelemetry.io/collector/featuregate v1.43.0 h1:Aq8UR5qv1zNlbbkTyqv8kLJtnoQMq/sG1/jS9o1cCJI=
go.opentelemetry.io/collector/featuregate v1.43.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4=
go.opentelemetry.io/collector/internal/telemetry v0.137.0 h1:KlJcaBnIIn+QJzQIfA1eXbYUvHmgM7h/gLp/vjvUBMw=
go.opentelemetry.io/collector/internal/telemetry v0.137.0/go.mod h1:GWOiXBZ82kMzwGMEihJ5rEo5lFL7gurfHD++5q0XtI8=
go.opentelemetry.io/collector/pdata v1.43.0 h1:zVkj2hcjiMLwX+QDDNwb7iTh3LBjNXKv2qPSgj1Rzb4=
go.opentelemetry.io/collector/pdata v1.43.0/go.mod h1:KsJzdDG9e5BaHlmYr0sqdSEKeEiSfKzoF+rdWU7J//w=
go.opentelemetry.io/collector/pdata/pprofile v0.137.0 h1:bLVp8p8hpH81eQhhEQBkvLtS00GbnMU+ItNweBJLqZ8=
go.opentelemetry.io/collector/pdata/pprofile v0.137.0/go.mod h1:QfhMf7NnG+fTuwGGB1mXgcPzcXNxEYSW6CrVouOsF7Q=
go.opentelemetry.io/collector/pdata/testdata v0.137.0 h1:+oaGvbt0v7xryTX827szmyYWSAtvA0LbysEFV2nFjs0=
go.opentelemetry.io/collector/pdata/testdata v0.137.0/go.mod h1:3512FJaQsZz5EBlrY46xKjzoBc0MoMcQtAqYs2NaRQM=
go.opentelemetry.io/collector/pipeline v1.43.0 h1:IJjdqE5UCQlyVvFUUzlhSWhP4WIwpH6UyJQ9iWXpyww=
go.opentelemetry.io/collector/pipeline v1.43.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
go.opentelemetry.io/collector/processor v1.43.0 h1:JmsceK1UUFtXoe3CALb+/A09RUQBsCbcqA+fSs4O0c0=
go.opentelemetry.io/collector/processor v1.43.0/go.mod h1:w40CABuhIGpUoXtkIKik/5L5nfK2RTEjUuwl83n2PEo=
go.opentelemetry.io/collector/processor/processortest v0.137.0 h1:ArZ6fFzE7Fyyfy4A7/skOGJMnG6bZDkYzOb0XPWEj9o=
go.opentelemetry.io/collector/processor/processortest v0.137.0/go.mod h1:eBXM8LmHFsnMKfS441uYGGKMk0Lid189DVS9pLBwYSQ=
go.opentelemetry.io/collector/processor/xprocessor v0.137.0 h1:mN8ucEyZr9lUaTDx5h2nRTW5Tw43T9pv9SmZOweukLQ=
go.opentelemetry.io/collector/processor/xprocessor v0.137.0/go.mod h1:8G9DTxSA1v7anuTx2sq2VsJJnyntCeaEHCKYiDKyTy8=
go.opentelemetry.io/collector/component v1.49.0 h1:iJ56qiTWNtTyqafDx/X6zMukGEF8UZJA/+HNyPGVbks=
go.opentelemetry.io/collector/component v1.49.0/go.mod h1:EZd8hSQkzy/SJwahBKLF/NXsdhBEteiP4B6KXN7Ttpg=
go.opentelemetry.io/collector/component/componentstatus v0.143.0 h1:mtjfxahSl7LqreJ1fKrvmVLWv5wM6gNcmcAhFIBQLpo=
go.opentelemetry.io/collector/component/componentstatus v0.143.0/go.mod h1:7Is2U4lChyTtkOOpnPZy2bHVnj8kDETVUUnEX3UYIMY=
go.opentelemetry.io/collector/component/componenttest v0.143.0 h1:63Z2/UaFQSHnBs5fKLZ2BP9WTM7OL6CalMadq86PpeQ=
go.opentelemetry.io/collector/component/componenttest v0.143.0/go.mod h1:zUC76cTk9l+P7+0GPXgXgj8J+LxxrTD0j8EJHfX6Xa8=
go.opentelemetry.io/collector/confmap v1.49.0 h1:QUUymb4To6wgxDpD5USPkFqqsTe97vIEUmAmldXsvOM=
go.opentelemetry.io/collector/confmap v1.49.0/go.mod h1:nXdTzIrHuIJ6Q30Woy/JgeHRnCvEmao6AEFZJiP28T4=
go.opentelemetry.io/collector/confmap/xconfmap v0.143.0 h1:yhnDnSpB1snKv6kn7dthZYMiN9zwD0r6agDjHuamn7s=
go.opentelemetry.io/collector/confmap/xconfmap v0.143.0/go.mod h1:d0bg4cm1+Xf8/QOWEAdpxHmgS4EFLwYBiZluwV01Ceg=
go.opentelemetry.io/collector/consumer v1.49.0 h1:xNQxfM/5P+wYrwl6IaU35RsLA8ANM74okG1ahZdWO0c=
go.opentelemetry.io/collector/consumer v1.49.0/go.mod h1:LAzZPC8d2CpmLqXpn3K4zTM/z8a6VxA0hMGOE9MWXxo=
go.opentelemetry.io/collector/consumer/consumertest v0.143.0 h1:69w92MikFVvzV22VFkjmddELHV1V3BlIKWb4L+epcgM=
go.opentelemetry.io/collector/consumer/consumertest v0.143.0/go.mod h1:Qi4RlpzDuO/2+k+UrV9Nw0Km2UlunnN1RU8nIhsI/LA=
go.opentelemetry.io/collector/consumer/xconsumer v0.143.0 h1:m5NjAWhKczxWzsCENEmQoiKdIK0yfOR3Rn0c5J0puMQ=
go.opentelemetry.io/collector/consumer/xconsumer v0.143.0/go.mod h1:7hyToLEwxC4PwGjjTsSdLAiiABUh6Mg5poJb9BC/gP0=
go.opentelemetry.io/collector/featuregate v1.49.0 h1:4UfnqTvSvm6GkeD/w39LYLPmnZDfk4f+grkWuyl0NPU=
go.opentelemetry.io/collector/featuregate v1.49.0/go.mod h1:/1bclXgP91pISaEeNulRxzzmzMTm4I5Xih2SnI4HRSo=
go.opentelemetry.io/collector/internal/testutil v0.143.0 h1:rp3vIsOhXg/H3YXuStdggGTLuU+Udf1BdDIF/I7+Tyk=
go.opentelemetry.io/collector/internal/testutil v0.143.0/go.mod h1:YAD9EAkwh/l5asZNbEBEUCqEjoL1OKMjAMoPjPqH76c=
go.opentelemetry.io/collector/pdata v1.49.0 h1:h6V3rdLNxweI3K8B5SZzjMiVdsPPBB1TPAWwZkCtGZE=
go.opentelemetry.io/collector/pdata v1.49.0/go.mod h1:gidKN58CUnhd4DSM61UzPKWjXmG0vyoIn7dd+URZW9A=
go.opentelemetry.io/collector/pdata/pprofile v0.143.0 h1:qFrT+33PvKGr1F8yCpn3ysGWmEXYJjMvDKTGcwPKP1A=
go.opentelemetry.io/collector/pdata/pprofile v0.143.0/go.mod h1:RCZhNPEvZ1ctaPxDJ7tUdfVwGd0ee8uY4h4twq+01PE=
go.opentelemetry.io/collector/pdata/testdata v0.143.0 h1:csvYoOv8c6vD8pZ4dmkkfsjk1qVhaIUbNBWkSGx1VWo=
go.opentelemetry.io/collector/pdata/testdata v0.143.0/go.mod h1:DLjTEVsK9+lTsEuyjNKNaEdfWEM2wYeMCNl7waSlpfg=
go.opentelemetry.io/collector/pipeline v1.49.0 h1:JlczxvcgjnwMP2bm55lHt8A3eBE/qIv/Swv5twBOUpg=
go.opentelemetry.io/collector/pipeline v1.49.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
go.opentelemetry.io/collector/processor v1.49.0 h1:vALRR0gW+WIoE2ERTJo381FHLUfypOsJZw3mTPA2/hw=
go.opentelemetry.io/collector/processor v1.49.0/go.mod h1:fGWONigLHkkoDODevNv6BIZIfk/gZxxIBe0QZXL1pBI=
go.opentelemetry.io/collector/processor/processortest v0.143.0 h1:QPNLk7eRLQulS3EH9CMkuxV4+wte5BjlYGZoGlbz/74=
go.opentelemetry.io/collector/processor/processortest v0.143.0/go.mod h1:oGDwx8e2BeS8glxfkehswTRics/s8WGzN5LPKywoxWU=
go.opentelemetry.io/collector/processor/xprocessor v0.143.0 h1:8UXrve/Ak0c5jNI1VqTUiyxPMkMMwYEcqANgLX92SK8=
go.opentelemetry.io/collector/processor/xprocessor v0.143.0/go.mod h1:0pSR0Fj+gTMRgfOg6/Wg5AGE5GTIqAAVIPZwe7SiB/4=
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
go.opentelemetry.io/contrib/bridges/otelzap v0.13.0 h1:aBKdhLVieqvwWe9A79UHI/0vgp2t/s2euY8X59pGRlw=
go.opentelemetry.io/contrib/bridges/otelzap v0.13.0/go.mod h1:SYqtxLQE7iINgh6WFuVi2AI70148B8EI35DSk0Wr8m4=
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=
go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM=
go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno=
go.opentelemetry.io/otel/log/logtest v0.14.0 h1:BGTqNeluJDK2uIHAY8lRqxjVAYfqgcaTbVk1n3MWe5A=
go.opentelemetry.io/otel/log/logtest v0.14.0/go.mod h1:IuguGt8XVP4XA4d2oEEDMVDBBCesMg8/tSGWDjuKfoA=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/slim/otlp v1.8.0 h1:afcLwp2XOeCbGrjufT1qWyruFt+6C9g5SOuymrSPUXQ=
go.opentelemetry.io/proto/slim/otlp v1.8.0/go.mod h1:Yaa5fjYm1SMCq0hG0x/87wV1MP9H5xDuG/1+AhvBcsI=
go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0 h1:Uc+elixz922LHx5colXGi1ORbsW8DTIGM+gg+D9V7HE=
go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0/go.mod h1:VyU6dTWBWv6h9w/+DYgSZAPMabWbPTFTuxp25sM8+s0=
go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0 h1:i8YpvWGm/Uq1koL//bnbJ/26eV3OrKWm09+rDYo7keU=
go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0/go.mod h1:pQ70xHY/ZVxNUBPn+qUWPl8nwai87eWdqL3M37lNi9A=
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE=
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 h1:RN3ifU8y4prNWeEnQp2kRRHz8UwonAEYZl8tUzHEXAk=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0/go.mod h1:habDz3tEWiFANTo6oUE99EmaFUrCNYAAg3wiVmusm70=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0 h1:OXSUzgmIFkcC4An+mv+lqqZSndTffXpjAyoR+1f8k/A=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0/go.mod h1:1A4GVLFIm54HFqVdOpWmukap7rgb0frrE3zWXohLPdM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE=
go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI=
go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8=
go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA=
go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk=
go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -477,71 +505,71 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
google.golang.org/genproto v0.0.0-20251007200510-49b9836ed3ff h1:3jGSSqkLOAYU1gI52uHoj51zxEsGMEYatnBFU0m6pB8=
google.golang.org/genproto v0.0.0-20251007200510-49b9836ed3ff/go.mod h1:45Y7O/+fGjlhL8+FRpuLqM9YKvn+AU5dolRkE3DOaX8=
google.golang.org/genproto/googleapis/api v0.0.0-20251007200510-49b9836ed3ff h1:8Zg5TdmcbU8A7CXGjGXF1Slqu/nIFCRaR3S5gT2plIA=
google.golang.org/genproto/googleapis/api v0.0.0-20251007200510-49b9836ed3ff/go.mod h1:dbWfpVPvW/RqafStmRWBUpMN14puDezDMHxNYiRfQu0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251007200510-49b9836ed3ff h1:A90eA31Wq6HOMIQlLfzFwzqGKBTuaVztYu/g8sn+8Zc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251007200510-49b9836ed3ff/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/api v0.260.0 h1:XbNi5E6bOVEj/uLXQRlt6TKuEzMD7zvW/6tNwltE4P4=
google.golang.org/api v0.260.0/go.mod h1:Shj1j0Phr/9sloYrKomICzdYgsSDImpTxME8rGLaZ/o=
google.golang.org/genproto v0.0.0-20260114163908-3f89685c29c3 h1:rUamZFBwsWVWg4Yb7iTbwYp81XVHUvOXNdrFCoYRRNE=
google.golang.org/genproto v0.0.0-20260114163908-3f89685c29c3/go.mod h1:wE6SUYr3iNtF/D0GxVAjT+0CbDFktQNssYs9PVptCt4=
google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3 h1:X9z6obt+cWRX8XjDVOn+SZWhWe5kZHm46TThU9j+jss=
google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3/go.mod h1:dd646eSK+Dk9kxVBl1nChEOhJPtMXriCcVb4x3o6J+E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 h1:C4WAdL+FbjnGlpp2S+HMVhBeCq2Lcib4xZqfPNF6OoQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
@@ -551,20 +579,20 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY=
k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=

View File

@@ -16,7 +16,6 @@ go_library(
importpath = "cel.dev/expr",
visibility = ["//visibility:public"],
deps = [
"@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//runtime/protoimpl",
"@org_golang_google_protobuf//types/known/anypb",

View File

@@ -11,26 +11,9 @@ bazel_dep(
version = "0.39.1",
repo_name = "bazel_gazelle",
)
bazel_dep(
name = "googleapis",
version = "0.0.0-20241220-5e258e33.bcr.1",
repo_name = "com_google_googleapis",
)
bazel_dep(
name = "googleapis-cc",
version = "1.0.0",
)
bazel_dep(
name = "googleapis-java",
version = "1.0.0",
)
bazel_dep(
name = "googleapis-go",
version = "1.0.0",
)
bazel_dep(
name = "protobuf",
version = "27.0",
version = "27.1",
repo_name = "com_google_protobuf",
)
bazel_dep(
@@ -63,12 +46,11 @@ python.toolchain(
)
go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
go_sdk.download(version = "1.22.0")
go_sdk.download(version = "1.23.0")
go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
go_deps.from_file(go_mod = "//:go.mod")
use_repo(
go_deps,
"org_golang_google_genproto_googleapis_rpc",
"org_golang_google_protobuf",
)

749
vendor/cel.dev/expr/checked.pb.go generated vendored

File diff suppressed because it is too large Load Diff

77
vendor/cel.dev/expr/eval.pb.go generated vendored
View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: cel/expr/eval.proto
@@ -12,6 +12,7 @@ import (
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -373,58 +374,39 @@ func (x *EvalState_Result) GetValue() int64 {
var File_cel_expr_eval_proto protoreflect.FileDescriptor
var file_cel_expr_eval_proto_rawDesc = []byte{
0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
const file_cel_expr_eval_proto_rawDesc = "" +
"\n" +
"\x13cel/expr/eval.proto\x12\bcel.expr\x1a\x19google/protobuf/any.proto\x1a\x14cel/expr/value.proto\"\xa2\x01\n" +
"\tEvalState\x12+\n" +
"\x06values\x18\x01 \x03(\v2\x13.cel.expr.ExprValueR\x06values\x124\n" +
"\aresults\x18\x03 \x03(\v2\x1a.cel.expr.EvalState.ResultR\aresults\x1a2\n" +
"\x06Result\x12\x12\n" +
"\x04expr\x18\x01 \x01(\x03R\x04expr\x12\x14\n" +
"\x05value\x18\x02 \x01(\x03R\x05value\"\x9a\x01\n" +
"\tExprValue\x12'\n" +
"\x05value\x18\x01 \x01(\v2\x0f.cel.expr.ValueH\x00R\x05value\x12*\n" +
"\x05error\x18\x02 \x01(\v2\x12.cel.expr.ErrorSetH\x00R\x05error\x120\n" +
"\aunknown\x18\x03 \x01(\v2\x14.cel.expr.UnknownSetH\x00R\aunknownB\x06\n" +
"\x04kind\"4\n" +
"\bErrorSet\x12(\n" +
"\x06errors\x18\x01 \x03(\v2\x10.cel.expr.StatusR\x06errors\"f\n" +
"\x06Status\x12\x12\n" +
"\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12.\n" +
"\adetails\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\adetails\"\"\n" +
"\n" +
"UnknownSet\x12\x14\n" +
"\x05exprs\x18\x01 \x03(\x03R\x05exprsB,\n" +
"\fdev.cel.exprB\tEvalProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3"
var (
file_cel_expr_eval_proto_rawDescOnce sync.Once
file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
file_cel_expr_eval_proto_rawDescData []byte
)
func file_cel_expr_eval_proto_rawDescGZIP() []byte {
file_cel_expr_eval_proto_rawDescOnce.Do(func() {
file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_eval_proto_rawDesc), len(file_cel_expr_eval_proto_rawDesc)))
})
return file_cel_expr_eval_proto_rawDescData
}
@@ -470,7 +452,7 @@ func file_cel_expr_eval_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_cel_expr_eval_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_eval_proto_rawDesc), len(file_cel_expr_eval_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
@@ -481,7 +463,6 @@ func file_cel_expr_eval_proto_init() {
MessageInfos: file_cel_expr_eval_proto_msgTypes,
}.Build()
File_cel_expr_eval_proto = out.File
file_cel_expr_eval_proto_rawDesc = nil
file_cel_expr_eval_proto_goTypes = nil
file_cel_expr_eval_proto_depIdxs = nil
}

113
vendor/cel.dev/expr/explain.pb.go generated vendored
View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.5
// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: cel/expr/explain.proto
package expr
@@ -11,6 +11,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -20,23 +21,20 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Deprecated: Do not use.
// Deprecated: Marked as deprecated in cel/expr/explain.proto.
type Explain struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
unknownFields protoimpl.UnknownFields
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
sizeCache protoimpl.SizeCache
}
func (x *Explain) Reset() {
*x = Explain{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_explain_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_explain_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Explain) String() string {
@@ -47,7 +45,7 @@ func (*Explain) ProtoMessage() {}
func (x *Explain) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_explain_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -77,21 +75,18 @@ func (x *Explain) GetExprSteps() []*Explain_ExprStep {
}
type Explain_ExprStep struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
unknownFields protoimpl.UnknownFields
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
sizeCache protoimpl.SizeCache
}
func (x *Explain_ExprStep) Reset() {
*x = Explain_ExprStep{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_explain_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_explain_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Explain_ExprStep) String() string {
@@ -102,7 +97,7 @@ func (*Explain_ExprStep) ProtoMessage() {}
func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_explain_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -133,42 +128,33 @@ func (x *Explain_ExprStep) GetValueIndex() int32 {
var File_cel_expr_explain_proto protoreflect.FileDescriptor
var file_cel_expr_explain_proto_rawDesc = []byte{
0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
const file_cel_expr_explain_proto_rawDesc = "" +
"\n" +
"\x16cel/expr/explain.proto\x12\bcel.expr\x1a\x14cel/expr/value.proto\"\xae\x01\n" +
"\aExplain\x12'\n" +
"\x06values\x18\x01 \x03(\v2\x0f.cel.expr.ValueR\x06values\x129\n" +
"\n" +
"expr_steps\x18\x02 \x03(\v2\x1a.cel.expr.Explain.ExprStepR\texprSteps\x1a;\n" +
"\bExprStep\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x03R\x02id\x12\x1f\n" +
"\vvalue_index\x18\x02 \x01(\x05R\n" +
"valueIndex:\x02\x18\x01B/\n" +
"\fdev.cel.exprB\fExplainProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3"
var (
file_cel_expr_explain_proto_rawDescOnce sync.Once
file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
file_cel_expr_explain_proto_rawDescData []byte
)
func file_cel_expr_explain_proto_rawDescGZIP() []byte {
file_cel_expr_explain_proto_rawDescOnce.Do(func() {
file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_explain_proto_rawDesc), len(file_cel_expr_explain_proto_rawDesc)))
})
return file_cel_expr_explain_proto_rawDescData
}
var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_cel_expr_explain_proto_goTypes = []interface{}{
var file_cel_expr_explain_proto_goTypes = []any{
(*Explain)(nil), // 0: cel.expr.Explain
(*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
(*Value)(nil), // 2: cel.expr.Value
@@ -189,37 +175,11 @@ func file_cel_expr_explain_proto_init() {
return
}
file_cel_expr_value_proto_init()
if !protoimpl.UnsafeEnabled {
file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Explain); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Explain_ExprStep); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_cel_expr_explain_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_explain_proto_rawDesc), len(file_cel_expr_explain_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
@@ -230,7 +190,6 @@ func file_cel_expr_explain_proto_init() {
MessageInfos: file_cel_expr_explain_proto_msgTypes,
}.Build()
File_cel_expr_explain_proto = out.File
file_cel_expr_explain_proto_rawDesc = nil
file_cel_expr_explain_proto_goTypes = nil
file_cel_expr_explain_proto_depIdxs = nil
}

879
vendor/cel.dev/expr/syntax.pb.go generated vendored

File diff suppressed because it is too large Load Diff

348
vendor/cel.dev/expr/value.pb.go generated vendored
View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.5
// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: cel/expr/value.proto
package expr
@@ -13,6 +13,7 @@ import (
structpb "google.golang.org/protobuf/types/known/structpb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -23,11 +24,8 @@ const (
)
type Value struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Kind:
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Kind:
//
// *Value_NullValue
// *Value_BoolValue
@@ -41,16 +39,16 @@ type Value struct {
// *Value_MapValue
// *Value_ListValue
// *Value_TypeValue
Kind isValue_Kind `protobuf_oneof:"kind"`
Kind isValue_Kind `protobuf_oneof:"kind"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Value) Reset() {
*x = Value{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_value_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_value_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Value) String() string {
@@ -61,7 +59,7 @@ func (*Value) ProtoMessage() {}
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_value_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -76,93 +74,117 @@ func (*Value) Descriptor() ([]byte, []int) {
return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
}
func (m *Value) GetKind() isValue_Kind {
if m != nil {
return m.Kind
func (x *Value) GetKind() isValue_Kind {
if x != nil {
return x.Kind
}
return nil
}
func (x *Value) GetNullValue() structpb.NullValue {
if x, ok := x.GetKind().(*Value_NullValue); ok {
return x.NullValue
if x != nil {
if x, ok := x.Kind.(*Value_NullValue); ok {
return x.NullValue
}
}
return structpb.NullValue(0)
}
func (x *Value) GetBoolValue() bool {
if x, ok := x.GetKind().(*Value_BoolValue); ok {
return x.BoolValue
if x != nil {
if x, ok := x.Kind.(*Value_BoolValue); ok {
return x.BoolValue
}
}
return false
}
func (x *Value) GetInt64Value() int64 {
if x, ok := x.GetKind().(*Value_Int64Value); ok {
return x.Int64Value
if x != nil {
if x, ok := x.Kind.(*Value_Int64Value); ok {
return x.Int64Value
}
}
return 0
}
func (x *Value) GetUint64Value() uint64 {
if x, ok := x.GetKind().(*Value_Uint64Value); ok {
return x.Uint64Value
if x != nil {
if x, ok := x.Kind.(*Value_Uint64Value); ok {
return x.Uint64Value
}
}
return 0
}
func (x *Value) GetDoubleValue() float64 {
if x, ok := x.GetKind().(*Value_DoubleValue); ok {
return x.DoubleValue
if x != nil {
if x, ok := x.Kind.(*Value_DoubleValue); ok {
return x.DoubleValue
}
}
return 0
}
func (x *Value) GetStringValue() string {
if x, ok := x.GetKind().(*Value_StringValue); ok {
return x.StringValue
if x != nil {
if x, ok := x.Kind.(*Value_StringValue); ok {
return x.StringValue
}
}
return ""
}
func (x *Value) GetBytesValue() []byte {
if x, ok := x.GetKind().(*Value_BytesValue); ok {
return x.BytesValue
if x != nil {
if x, ok := x.Kind.(*Value_BytesValue); ok {
return x.BytesValue
}
}
return nil
}
func (x *Value) GetEnumValue() *EnumValue {
if x, ok := x.GetKind().(*Value_EnumValue); ok {
return x.EnumValue
if x != nil {
if x, ok := x.Kind.(*Value_EnumValue); ok {
return x.EnumValue
}
}
return nil
}
func (x *Value) GetObjectValue() *anypb.Any {
if x, ok := x.GetKind().(*Value_ObjectValue); ok {
return x.ObjectValue
if x != nil {
if x, ok := x.Kind.(*Value_ObjectValue); ok {
return x.ObjectValue
}
}
return nil
}
func (x *Value) GetMapValue() *MapValue {
if x, ok := x.GetKind().(*Value_MapValue); ok {
return x.MapValue
if x != nil {
if x, ok := x.Kind.(*Value_MapValue); ok {
return x.MapValue
}
}
return nil
}
func (x *Value) GetListValue() *ListValue {
if x, ok := x.GetKind().(*Value_ListValue); ok {
return x.ListValue
if x != nil {
if x, ok := x.Kind.(*Value_ListValue); ok {
return x.ListValue
}
}
return nil
}
func (x *Value) GetTypeValue() string {
if x, ok := x.GetKind().(*Value_TypeValue); ok {
return x.TypeValue
if x != nil {
if x, ok := x.Kind.(*Value_TypeValue); ok {
return x.TypeValue
}
}
return ""
}
@@ -244,21 +266,18 @@ func (*Value_ListValue) isValue_Kind() {}
func (*Value_TypeValue) isValue_Kind() {}
type EnumValue struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
sizeCache protoimpl.SizeCache
}
func (x *EnumValue) Reset() {
*x = EnumValue{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_value_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_value_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EnumValue) String() string {
@@ -269,7 +288,7 @@ func (*EnumValue) ProtoMessage() {}
func (x *EnumValue) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_value_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -299,20 +318,17 @@ func (x *EnumValue) GetValue() int32 {
}
type ListValue struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
unknownFields protoimpl.UnknownFields
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
sizeCache protoimpl.SizeCache
}
func (x *ListValue) Reset() {
*x = ListValue{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_value_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_value_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ListValue) String() string {
@@ -323,7 +339,7 @@ func (*ListValue) ProtoMessage() {}
func (x *ListValue) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_value_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -346,20 +362,17 @@ func (x *ListValue) GetValues() []*Value {
}
type MapValue struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
unknownFields protoimpl.UnknownFields
Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
sizeCache protoimpl.SizeCache
}
func (x *MapValue) Reset() {
*x = MapValue{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_value_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_value_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapValue) String() string {
@@ -370,7 +383,7 @@ func (*MapValue) ProtoMessage() {}
func (x *MapValue) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_value_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -393,21 +406,18 @@ func (x *MapValue) GetEntries() []*MapValue_Entry {
}
type MapValue_Entry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
sizeCache protoimpl.SizeCache
}
func (x *MapValue_Entry) Reset() {
*x = MapValue_Entry{}
if protoimpl.UnsafeEnabled {
mi := &file_cel_expr_value_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
mi := &file_cel_expr_value_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapValue_Entry) String() string {
@@ -418,7 +428,7 @@ func (*MapValue_Entry) ProtoMessage() {}
func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
mi := &file_cel_expr_value_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -449,83 +459,58 @@ func (x *MapValue_Entry) GetValue() *Value {
var File_cel_expr_value_proto protoreflect.FileDescriptor
var file_cel_expr_value_proto_rawDesc = []byte{
0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
const file_cel_expr_value_proto_rawDesc = "" +
"\n" +
"\x14cel/expr/value.proto\x12\bcel.expr\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x9d\x04\n" +
"\x05Value\x12;\n" +
"\n" +
"null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12\x1f\n" +
"\n" +
"bool_value\x18\x02 \x01(\bH\x00R\tboolValue\x12!\n" +
"\vint64_value\x18\x03 \x01(\x03H\x00R\n" +
"int64Value\x12#\n" +
"\fuint64_value\x18\x04 \x01(\x04H\x00R\vuint64Value\x12#\n" +
"\fdouble_value\x18\x05 \x01(\x01H\x00R\vdoubleValue\x12#\n" +
"\fstring_value\x18\x06 \x01(\tH\x00R\vstringValue\x12!\n" +
"\vbytes_value\x18\a \x01(\fH\x00R\n" +
"bytesValue\x124\n" +
"\n" +
"enum_value\x18\t \x01(\v2\x13.cel.expr.EnumValueH\x00R\tenumValue\x129\n" +
"\fobject_value\x18\n" +
" \x01(\v2\x14.google.protobuf.AnyH\x00R\vobjectValue\x121\n" +
"\tmap_value\x18\v \x01(\v2\x12.cel.expr.MapValueH\x00R\bmapValue\x124\n" +
"\n" +
"list_value\x18\f \x01(\v2\x13.cel.expr.ListValueH\x00R\tlistValue\x12\x1f\n" +
"\n" +
"type_value\x18\x0f \x01(\tH\x00R\ttypeValueB\x06\n" +
"\x04kind\"5\n" +
"\tEnumValue\x12\x12\n" +
"\x04type\x18\x01 \x01(\tR\x04type\x12\x14\n" +
"\x05value\x18\x02 \x01(\x05R\x05value\"4\n" +
"\tListValue\x12'\n" +
"\x06values\x18\x01 \x03(\v2\x0f.cel.expr.ValueR\x06values\"\x91\x01\n" +
"\bMapValue\x122\n" +
"\aentries\x18\x01 \x03(\v2\x18.cel.expr.MapValue.EntryR\aentries\x1aQ\n" +
"\x05Entry\x12!\n" +
"\x03key\x18\x01 \x01(\v2\x0f.cel.expr.ValueR\x03key\x12%\n" +
"\x05value\x18\x02 \x01(\v2\x0f.cel.expr.ValueR\x05valueB-\n" +
"\fdev.cel.exprB\n" +
"ValueProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3"
var (
file_cel_expr_value_proto_rawDescOnce sync.Once
file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
file_cel_expr_value_proto_rawDescData []byte
)
func file_cel_expr_value_proto_rawDescGZIP() []byte {
file_cel_expr_value_proto_rawDescOnce.Do(func() {
file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_value_proto_rawDesc), len(file_cel_expr_value_proto_rawDesc)))
})
return file_cel_expr_value_proto_rawDescData
}
var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_cel_expr_value_proto_goTypes = []interface{}{
var file_cel_expr_value_proto_goTypes = []any{
(*Value)(nil), // 0: cel.expr.Value
(*EnumValue)(nil), // 1: cel.expr.EnumValue
(*ListValue)(nil), // 2: cel.expr.ListValue
@@ -556,69 +541,7 @@ func file_cel_expr_value_proto_init() {
if File_cel_expr_value_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Value); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EnumValue); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListValue); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MapValue); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MapValue_Entry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_BoolValue)(nil),
(*Value_Int64Value)(nil),
@@ -636,7 +559,7 @@ func file_cel_expr_value_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_cel_expr_value_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_value_proto_rawDesc), len(file_cel_expr_value_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
@@ -647,7 +570,6 @@ func file_cel_expr_value_proto_init() {
MessageInfos: file_cel_expr_value_proto_msgTypes,
}.Build()
File_cel_expr_value_proto = out.File
file_cel_expr_value_proto_rawDesc = nil
file_cel_expr_value_proto_goTypes = nil
file_cel_expr_value_proto_depIdxs = nil
}

View File

@@ -1,3 +1,13 @@
# Changes
## [0.18.0](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.18.0) (2025-12-15)
### Features
* Support scopes field from impersonated credential json (#13308) ([e3f62e1](https://github.com/googleapis/google-cloud-go/commit/e3f62e102840127a0058f5cced4c9738f2bf45f2))
* add support for parsing EC private key (#13317) ([ea6bc62](https://github.com/googleapis/google-cloud-go/commit/ea6bc62ffe2cc0a6d607d698a181b37fa46c340d))
* deprecate unsafe credentials JSON loading options (#13397) ([0dd2a3b](https://github.com/googleapis/google-cloud-go/commit/0dd2a3bdece9a85ee7216a737559fa9f5a869545))
## [0.17.0](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.17.0) (2025-10-02)
### Features
@@ -5,8 +15,6 @@
* Add trust boundary support for service accounts and impersonation (HTTP/gRPC) (#11870) ([5c2b665](https://github.com/googleapis/google-cloud-go/commit/5c2b665f392e6dd90192f107188720aa1357e7da))
* add trust boundary support for external accounts (#12864) ([a67a146](https://github.com/googleapis/google-cloud-go/commit/a67a146a6a88a6f1ba10c409dfce8015ecd60a64))
# Changelog
## [0.16.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.4...auth/v0.16.5) (2025-08-14)

View File

@@ -52,6 +52,44 @@ var (
allowOnGCECheck = true
)
// CredType specifies the type of JSON credentials being provided
// to a loading function such as [NewCredentialsFromFile] or
// [NewCredentialsFromJSON].
type CredType string
const (
// ServiceAccount represents a service account file type.
ServiceAccount CredType = "service_account"
// AuthorizedUser represents a user credentials file type.
AuthorizedUser CredType = "authorized_user"
// ExternalAccount represents an external account file type.
//
// IMPORTANT:
// This credential type does not validate the credential configuration. A security
// risk occurs when a credential configuration configured with malicious urls
// is used.
// You should validate credential configurations provided by untrusted sources.
// See [Security requirements when using credential configurations from an external
// source] https://cloud.google.com/docs/authentication/external/externally-sourced-credentials
// for more details.
ExternalAccount CredType = "external_account"
// ImpersonatedServiceAccount represents an impersonated service account file type.
//
// IMPORTANT:
// This credential type does not validate the credential configuration. A security
// risk occurs when a credential configuration configured with malicious urls
// is used.
// You should validate credential configurations provided by untrusted sources.
// See [Security requirements when using credential configurations from an external
// source] https://cloud.google.com/docs/authentication/external/externally-sourced-credentials
// for more details.
ImpersonatedServiceAccount CredType = "impersonated_service_account"
// GDCHServiceAccount represents a GDCH service account credentials.
GDCHServiceAccount CredType = "gdch_service_account"
// ExternalAccountAuthorizedUser represents an external account authorized user credentials.
ExternalAccountAuthorizedUser CredType = "external_account_authorized_user"
)
// TokenBindingType specifies the type of binding used when requesting a token
// whether to request a hard-bound token using mTLS or an instance identity
// bound token using ALTS.
@@ -92,6 +130,14 @@ func OnGCE() bool {
// - On Google Compute Engine, Google App Engine standard second generation
// runtimes, and Google App Engine flexible environment, it fetches
// credentials from the metadata server.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
if err := opts.validate(); err != nil {
return nil, err
@@ -189,25 +235,61 @@ type DetectOptions struct {
// from the provided filepath. If provided, CredentialsJSON must not be.
// Optional.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
// Deprecated: This field is deprecated because of a potential security risk.
// It does not validate the credential configuration. The security risk occurs
// when a credential configuration is accepted from a source that is not
// under your control and used without validation on your side.
//
// If you know that you will be loading credential configurations of a
// specific type, it is recommended to use a credential-type-specific
// NewCredentialsFromFile method. This will ensure that an unexpected
// credential type with potential for malicious intent is not loaded
// unintentionally. You might still have to do validation for certain
// credential types. Please follow the recommendation for that method. For
// example, if you want to load only service accounts, you can use
//
// creds, err := credentials.NewCredentialsFromFile(ctx, credentials.ServiceAccount, filename, opts)
//
// If you are loading your credential configuration from an untrusted source
// and have not mitigated the risks (e.g. by validating the configuration
// yourself), make these changes as soon as possible to prevent security
// risks to your environment.
//
// Regardless of the method used, it is always your responsibility to
// validate configurations received from external sources.
//
// For more details see:
// https://cloud.google.com/docs/authentication/external/externally-sourced-credentials
CredentialsFile string
// CredentialsJSON overrides detection logic and uses the JSON bytes as the
// source for the credential. If provided, CredentialsFile must not be.
// Optional.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
// Deprecated: This field is deprecated because of a potential security risk.
// It does not validate the credential configuration. The security risk occurs
// when a credential configuration is accepted from a source that is not
// under your control and used without validation on your side.
//
// If you know that you will be loading credential configurations of a
// specific type, it is recommended to use a credential-type-specific
// NewCredentialsFromJSON method. This will ensure that an unexpected
// credential type with potential for malicious intent is not loaded
// unintentionally. You might still have to do validation for certain
// credential types. Please follow the recommendation for that method. For
// example, if you want to load only service accounts, you can use
//
// creds, err := credentials.NewCredentialsFromJSON(ctx, credentials.ServiceAccount, json, opts)
//
// If you are loading your credential configuration from an untrusted source
// and have not mitigated the risks (e.g. by validating the configuration
// yourself), make these changes as soon as possible to prevent security
// risks to your environment.
//
// Regardless of the method used, it is always your responsibility to
// validate configurations received from external sources.
//
// For more details see:
// https://cloud.google.com/docs/authentication/external/externally-sourced-credentials
CredentialsJSON []byte
// UseSelfSignedJWT directs service account based credentials to create a
// self-signed JWT with the private key found in the file, skipping any
@@ -227,6 +309,61 @@ type DetectOptions struct {
Logger *slog.Logger
}
// NewCredentialsFromFile creates a [cloud.google.com/go/auth.Credentials] from
// the provided file. The credType argument specifies the expected credential
// type. If the file content does not match the expected type, an error is
// returned.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func NewCredentialsFromFile(credType CredType, filename string, opts *DetectOptions) (*auth.Credentials, error) {
	b, err := os.ReadFile(filename)
	if err != nil {
		// Add package context to the read failure; %w keeps the underlying
		// *fs.PathError reachable via errors.Is / errors.As.
		return nil, fmt.Errorf("credentials: reading credentials file: %w", err)
	}
	return NewCredentialsFromJSON(credType, b, opts)
}
// NewCredentialsFromJSON creates a [cloud.google.com/go/auth.Credentials] from
// the provided JSON bytes. The credType argument specifies the expected
// credential type. If the JSON does not match the expected type, an error is
// returned.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func NewCredentialsFromJSON(credType CredType, b []byte, opts *DetectOptions) (*auth.Credentials, error) {
	err := checkCredentialType(b, credType)
	if err != nil {
		return nil, err
	}
	// readCredentialsFileJSON is bypassed on purpose: it auto-detects
	// client_credentials.json files (which carry no "type" field), a format
	// this typed entry point does not support. fileCredentials performs the
	// same parsing that readCredentialsFileJSON falls through to otherwise.
	return fileCredentials(b, opts)
}
// checkCredentialType verifies that the JSON credential bytes b declare the
// expected credential type, returning a descriptive error on mismatch or when
// the bytes cannot be parsed.
func checkCredentialType(b []byte, expected CredType) error {
	fileType, err := credsfile.ParseFileType(b)
	if err != nil {
		return err
	}
	if got := CredType(fileType); got != expected {
		return fmt.Errorf("credentials: expected type %q, found %q", expected, got)
	}
	return nil
}
func (o *DetectOptions) validate() error {
if o == nil {
return errors.New("credentials: options must be provided")

View File

@@ -28,18 +28,21 @@ import (
"cloud.google.com/go/auth/internal/trustboundary"
)
const cloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
fileType, err := credsfile.ParseFileType(b)
if err != nil {
return nil, err
}
if fileType == "" {
return nil, errors.New("credentials: unsupported unidentified file type")
}
var projectID, universeDomain string
var tp auth.TokenProvider
switch fileType {
case credsfile.UnknownCredType:
return nil, errors.New("credentials: unsupported unidentified file type")
case credsfile.ServiceAccountKey:
switch CredType(fileType) {
case ServiceAccount:
f, err := credsfile.ParseServiceAccount(b)
if err != nil {
return nil, err
@@ -50,7 +53,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
}
projectID = f.ProjectID
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.UserCredentialsKey:
case AuthorizedUser:
f, err := credsfile.ParseUserCredentials(b)
if err != nil {
return nil, err
@@ -60,7 +63,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
universeDomain = f.UniverseDomain
case credsfile.ExternalAccountKey:
case ExternalAccount:
f, err := credsfile.ParseExternalAccount(b)
if err != nil {
return nil, err
@@ -70,7 +73,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.ExternalAccountAuthorizedUserKey:
case ExternalAccountAuthorizedUser:
f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
if err != nil {
return nil, err
@@ -80,7 +83,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
universeDomain = f.UniverseDomain
case credsfile.ImpersonatedServiceAccountKey:
case ImpersonatedServiceAccount:
f, err := credsfile.ParseImpersonatedServiceAccount(b)
if err != nil {
return nil, err
@@ -90,7 +93,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.GDCHServiceAccountKey:
case GDCHServiceAccount:
f, err := credsfile.ParseGDCHServiceAccount(b)
if err != nil {
return nil, err
@@ -275,14 +278,24 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil
return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
}
sourceTP, err := fileCredentials(f.CredSource, opts)
sourceOpts := *opts
// Source credential needs IAM or Cloud Platform scope to call the
// iamcredentials endpoint. The scopes provided by the user are for the
// impersonated credentials.
sourceOpts.Scopes = []string{cloudPlatformScope}
sourceTP, err := fileCredentials(f.CredSource, &sourceOpts)
if err != nil {
return nil, err
}
ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
scopes := opts.scopes()
if len(scopes) == 0 {
scopes = f.Scopes
}
impOpts := &impersonate.Options{
URL: f.ServiceAccountImpersonationURL,
Scopes: opts.scopes(),
Scopes: scopes,
Tp: sourceTP,
Delegates: f.Delegates,
Client: opts.client(),

View File

@@ -32,50 +32,6 @@ const (
userCredsFilename = "application_default_credentials.json"
)
// CredentialType represents different credential filetypes Google credentials
// can be.
type CredentialType int
const (
// UnknownCredType is an unidentified file type.
UnknownCredType CredentialType = iota
// UserCredentialsKey represents a user creds file type.
UserCredentialsKey
// ServiceAccountKey represents a service account file type.
ServiceAccountKey
// ImpersonatedServiceAccountKey represents a impersonated service account
// file type.
ImpersonatedServiceAccountKey
// ExternalAccountKey represents a external account file type.
ExternalAccountKey
// GDCHServiceAccountKey represents a GDCH file type.
GDCHServiceAccountKey
// ExternalAccountAuthorizedUserKey represents a external account authorized
// user file type.
ExternalAccountAuthorizedUserKey
)
// parseCredentialType returns the associated filetype based on the parsed
// typeString provided.
func parseCredentialType(typeString string) CredentialType {
switch typeString {
case "service_account":
return ServiceAccountKey
case "authorized_user":
return UserCredentialsKey
case "impersonated_service_account":
return ImpersonatedServiceAccountKey
case "external_account":
return ExternalAccountKey
case "external_account_authorized_user":
return ExternalAccountAuthorizedUserKey
case "gdch_service_account":
return GDCHServiceAccountKey
default:
return UnknownCredType
}
}
// GetFileNameFromEnv returns the override if provided or detects a filename
// from the environment.
func GetFileNameFromEnv(override string) string {

View File

@@ -140,6 +140,7 @@ type ImpersonatedServiceAccountFile struct {
Type string `json:"type"`
ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
Delegates []string `json:"delegates"`
Scopes []string `json:"scopes"`
CredSource json.RawMessage `json:"source_credentials"`
UniverseDomain string `json:"universe_domain"`
}

View File

@@ -89,10 +89,11 @@ type fileTypeChecker struct {
}
// ParseFileType determines the [CredentialType] based on bytes provided.
func ParseFileType(b []byte) (CredentialType, error) {
// Only returns error for json.Unmarshal.
func ParseFileType(b []byte) (string, error) {
var f fileTypeChecker
if err := json.Unmarshal(b, &f); err != nil {
return 0, err
return "", err
}
return parseCredentialType(f.Type), nil
return f.Type, nil
}

View File

@@ -88,12 +88,13 @@ func ParseKey(key []byte) (crypto.Signer, error) {
key = block.Bytes
}
var parsedKey crypto.PrivateKey
var err error
parsedKey, err = x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
var errPKCS8, errPKCS1, errEC error
if parsedKey, errPKCS8 = x509.ParsePKCS8PrivateKey(key); errPKCS8 != nil {
if parsedKey, errPKCS1 = x509.ParsePKCS1PrivateKey(key); errPKCS1 != nil {
if parsedKey, errEC = x509.ParseECPrivateKey(key); errEC != nil {
return nil, fmt.Errorf("failed to parse private key. Tried PKCS8, PKCS1, and EC formats. Errors: [PKCS8: %v], [PKCS1: %v], [EC: %v]", errPKCS8, errPKCS1, errEC)
}
}
}
parsed, ok := parsedKey.(crypto.Signer)

View File

@@ -17,4 +17,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "0.17.0"
const Version = "0.18.0"

10
vendor/cloud.google.com/go/storage/.repo-metadata.json generated vendored Normal file
View File

@@ -0,0 +1,10 @@
{
"api_shortname": "storage",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest",
"client_library_type": "manual",
"description": "Cloud Storage (GCS)",
"distribution_name": "cloud.google.com/go/storage",
"language": "go",
"library_type": "GAPIC_MANUAL",
"release_level": "stable"
}

View File

@@ -1,6 +1,58 @@
# Changes
## [1.59.1](https://github.com/googleapis/google-cloud-go/releases/tag/storage%2Fv1.59.1) (2026-01-14)
### Bug Fixes
* close attrsReady channel when metadata is missing (#13574) ([712f562](https://github.com/googleapis/google-cloud-go/commit/712f56272ac5a219bac1b20894e4825f3682c920))
* don't update global object's readhandle in MRD (#13575) ([bc92500](https://github.com/googleapis/google-cloud-go/commit/bc925001a2f5b186c231c2885f9162713bb4b1bf))
* remove mandatory attrs response in MRD (#13585) ([6752a49](https://github.com/googleapis/google-cloud-go/commit/6752a496e756c214faf345c302b58ed7593c6017))
## [1.59.0](https://github.com/googleapis/google-cloud-go/releases/tag/storage%2Fv1.59.0) (2026-01-09)
### Features
* add default checksums for appendable writer (excludes appendable takeover writer) (#13379) ([647baf3](https://github.com/googleapis/google-cloud-go/commit/647baf3249b01e7d5eb5902197bb828706c4c08f))
### Bug Fixes
* refactor MultiRangeDownloader to resolve deadlock and race conditions (#13524) ([1cfd100](https://github.com/googleapis/google-cloud-go/commit/1cfd10089f206bca0bdcef1e873574b552ae6abb))
## [1.58.0](https://github.com/googleapis/google-cloud-go/releases/tag/storage%2Fv1.58.0) (2025-12-03)
### Features
* calculate crc32c by default and pass checksum in trailing and per-chunk request (#13205) ([2ab1c77](https://github.com/googleapis/google-cloud-go/commit/2ab1c77826f2d9c9d02b977296a78cf0ba3bd8bf))
* add support for partial success in ListBuckets (#13320) ([d91e47f](https://github.com/googleapis/google-cloud-go/commit/d91e47f2fc91a95ad4fd54e574b371e172a3889b))
* add object contexts in Go GCS SDK (#13390) ([079c4d9](https://github.com/googleapis/google-cloud-go/commit/079c4d960a2bafa5d170e2b1c97b00ea8b7917d9))
### Bug Fixes
* omit empty filter in http list object request (#13434) ([377eb13](https://github.com/googleapis/google-cloud-go/commit/377eb13bbadb4f455fac61c500f50ba9057890d4))
## [1.57.2](https://github.com/googleapis/google-cloud-go/releases/tag/storage%2Fv1.57.2) (2025-11-14)
### Bug Fixes
* Handle redirect on takeover. (#13354) ([b0f1362](https://github.com/googleapis/google-cloud-go/commit/b0f136268be1e4c629e288353bc277549ac5c663))
* add env var to allow disabling bound token (#13236) ([cdaf6a6](https://github.com/googleapis/google-cloud-go/commit/cdaf6a6da006a19db932a74885ea3722b4e42311))
### Documentation
* updates to docs and docs formatting ([93ca68d](https://github.com/googleapis/google-cloud-go/commit/93ca68d54b6d213f22e0d67ae01d135cf26d37c6))
## [1.57.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.57.0...storage/v1.57.1) (2025-10-28)
### Bug Fixes
* **storage:** Takeover idempotence ([#13230](https://github.com/googleapis/google-cloud-go/issues/13230)) ([cc5d2a1](https://github.com/googleapis/google-cloud-go/commit/cc5d2a12293a509a14da9bea8a86c8655eaf4a71))
* **storage:** Copy metadata when using Copier with grpc ([#12919](https://github.com/googleapis/google-cloud-go/issues/12919)) ([57a2e80](https://github.com/googleapis/google-cloud-go/commit/57a2e804f690ec8d4c55fd1c73b0dafd5cff46e5))
* **storage:** Fix takeover response handling ([#13239](https://github.com/googleapis/google-cloud-go/issues/13239)) ([26d75bc](https://github.com/googleapis/google-cloud-go/commit/26d75bc08e242348d26691877aba7fa68cf30f7f))
* **storage:** Remove default timeout for gRPC operations ([#13022](https://github.com/googleapis/google-cloud-go/issues/13022)) ([b94c3ba](https://github.com/googleapis/google-cloud-go/commit/b94c3ba69994d9c56ae8f302449dd8df6f287296))
* **storage:** Skip download of file outside of target dir ([#12945](https://github.com/googleapis/google-cloud-go/issues/12945)) ([6259aee](https://github.com/googleapis/google-cloud-go/commit/6259aeec393d0d996961cac38396daa57ad1a290))
* **storage:** Upgrade gRPC service registration func ([8fffca2](https://github.com/googleapis/google-cloud-go/commit/8fffca2819fa3dc858c213aa0c503e0df331b084))
## [1.57.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.56.1...storage/v1.57.0) (2025-09-23)

View File

@@ -2331,6 +2331,11 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
// whose names begin with the prefix. By default, all buckets in the project
// are returned.
//
// To receive a partial list of buckets when some are unavailable, set the
// iterator's ReturnPartialSuccess field to true. You can then call the
// iterator's Unreachable method to retrieve the names of the unreachable
// buckets.
//
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
o := makeStorageOpts(true, c.retry, "")
@@ -2343,12 +2348,24 @@ func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator
type BucketIterator struct {
// Prefix restricts the iterator to buckets whose names begin with it.
Prefix string
// If true, the iterator will return a partial result of buckets even if
// some buckets are unreachable. Call the Unreachable() method to retrieve the
// list of unreachable buckets. By default (false), the iterator will return
// an error if any buckets are unreachable.
ReturnPartialSuccess bool
ctx context.Context
projectID string
buckets []*BucketAttrs
pageInfo *iterator.PageInfo
nextFunc func() error
ctx context.Context
projectID string
buckets []*BucketAttrs
unreachable []string
pageInfo *iterator.PageInfo
nextFunc func() error
}
// Unreachable returns a list of bucket names that could not be reached
// during the iteration if ReturnPartialSuccess was set to true.
//
// NOTE(review): the gRPC client assigns this from the most recent page's
// response, so it appears to reflect only pages fetched so far — call this
// after the iteration completes; TODO confirm against ListBuckets paging.
func (it *BucketIterator) Unreachable() []string {
	return it.unreachable
}
// Next returns the next result. Its second return value is iterator.Done if

View File

@@ -265,6 +265,9 @@ type openWriterParams struct {
// sendCRC32C - see `Writer.SendCRC32C`.
// Optional.
sendCRC32C bool
// disableAutoChecksum - see `Writer.DisableAutoChecksum`.
// Optional.
disableAutoChecksum bool
// append - Write with appendable object semantics.
// Optional.
append bool

115
vendor/cloud.google.com/go/storage/contexts.go generated vendored Normal file
View File

@@ -0,0 +1,115 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"time"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
raw "google.golang.org/api/storage/v1"
)
// ObjectContexts is a container for custom object contexts.
type ObjectContexts struct {
	// Custom maps a user-defined context key to its payload.
	Custom map[string]ObjectCustomContextPayload
}
// ObjectCustomContextPayload holds the value of a user-defined object context and
// other metadata. To delete a key from Custom object contexts, set Delete as true.
type ObjectCustomContextPayload struct {
	// Value is the user-supplied context value for this key.
	Value string
	// Delete, when true, marks this key for removal on update.
	Delete bool
	// Read-only fields. Any updates to CreateTime and UpdateTime will be ignored.
	// These fields are handled by the server.
	CreateTime time.Time
	UpdateTime time.Time
}
// toObjectContexts converts the raw (JSON API) library's ObjectContexts type
// to the veneer's ObjectContexts. A nil input yields nil.
//
// The read-only CreateTime/UpdateTime values are converted via convertTime.
func toObjectContexts(c *raw.ObjectContexts) *ObjectContexts {
	if c == nil {
		return nil
	}
	customContexts := make(map[string]ObjectCustomContextPayload, len(c.Custom))
	for k, v := range c.Custom {
		customContexts[k] = ObjectCustomContextPayload{
			Value:      v.Value,
			CreateTime: convertTime(v.CreateTime),
			UpdateTime: convertTime(v.UpdateTime),
		}
	}
	return &ObjectContexts{
		Custom: customContexts,
	}
}
// toRawObjectContexts converts the object contexts to the raw library's
// ObjectContexts type. A nil input yields nil.
//
// Entries marked Delete are encoded with a NullFields entry so the JSON API
// serializes an explicit null; all other entries force-send their key so that
// empty string values are still transmitted.
func toRawObjectContexts(c *ObjectContexts) *raw.ObjectContexts {
	if c == nil {
		return nil
	}
	custom := make(map[string]raw.ObjectCustomContextPayload, len(c.Custom))
	for key, payload := range c.Custom {
		var entry raw.ObjectCustomContextPayload
		if payload.Delete {
			// A null field signals the server to remove this key.
			entry = raw.ObjectCustomContextPayload{NullFields: []string{key}}
		} else {
			entry = raw.ObjectCustomContextPayload{
				Value:           payload.Value,
				ForceSendFields: []string{key},
			}
		}
		custom[key] = entry
	}
	return &raw.ObjectContexts{Custom: custom}
}
// toObjectContextsFromProto converts the gRPC (protobuf) representation of
// object contexts into the veneer's ObjectContexts type. A nil input yields nil.
func toObjectContextsFromProto(c *storagepb.ObjectContexts) *ObjectContexts {
	if c == nil {
		return nil
	}
	out := &ObjectContexts{
		Custom: make(map[string]ObjectCustomContextPayload, len(c.GetCustom())),
	}
	for key, payload := range c.GetCustom() {
		out.Custom[key] = ObjectCustomContextPayload{
			Value:      payload.GetValue(),
			CreateTime: payload.GetCreateTime().AsTime(),
			UpdateTime: payload.GetUpdateTime().AsTime(),
		}
	}
	return out
}
// toProtoObjectContexts converts the object contexts to the protobuf
// ObjectContexts type used by the gRPC API. A nil input yields nil.
//
// Keys marked Delete are intentionally omitted from the map: deletion over
// gRPC is expressed by naming the key in the request's field mask while
// leaving it out of the message body.
// See grpcStorageClient.UpdateObject method for more details.
func toProtoObjectContexts(c *ObjectContexts) *storagepb.ObjectContexts {
	if c == nil {
		return nil
	}
	custom := make(map[string]*storagepb.ObjectCustomContextPayload, len(c.Custom))
	for key, payload := range c.Custom {
		if payload.Delete {
			continue
		}
		custom[key] = &storagepb.ObjectCustomContextPayload{Value: payload.Value}
	}
	return &storagepb.ObjectContexts{Custom: custom}
}

View File

@@ -18,8 +18,6 @@ import (
"context"
"errors"
"fmt"
"cloud.google.com/go/internal/trace"
)
// CopierFrom creates a Copier that can copy src to dst.
@@ -82,8 +80,8 @@ type Copier struct {
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
defer func() { trace.EndSpan(ctx, err) }()
ctx, _ = startSpan(ctx, "Copier.Run")
defer func() { endSpan(ctx, err) }()
if err := c.src.validate(); err != nil {
return nil, err
@@ -180,8 +178,8 @@ type Composer struct {
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
defer func() { trace.EndSpan(ctx, err) }()
ctx, _ = startSpan(ctx, "Composer.Run")
defer func() { endSpan(ctx, err) }()
if err := c.dst.validate(); err != nil {
return nil, err

View File

@@ -407,6 +407,10 @@ roles which must be enabled in order to do the export successfully. To
disable this export, you can use the [WithDisabledClientMetrics] client
option.
The gRPC client automatically computes and sends CRC32C checksums for uploads using [Writer],
which provides an additional layer of data integrity validation when compared to the HTTP client.
This behavior can optionally be disabled by using [Writer.DisableAutoChecksum].
# Storage Control API
Certain control plane and long-running operations for Cloud Storage (including Folder

View File

@@ -31,6 +31,7 @@ if [ "$minor_ver" -lt "$min_minor_ver" ]; then
exit 0
fi
export DOCKER_API_VERSION=1.39
export STORAGE_EMULATOR_HOST="http://localhost:9000"
export STORAGE_EMULATOR_HOST_GRPC="localhost:8888"
@@ -66,6 +67,7 @@ function cleanup() {
docker stop $CONTAINER_NAME
unset STORAGE_EMULATOR_HOST;
unset STORAGE_EMULATOR_HOST_GRPC;
unset DOCKER_API_VERSION
}
trap cleanup EXIT
@@ -89,5 +91,12 @@ then
fi
# Run tests
go test -v -timeout 17m ./ ./dataflux -run="^Test(RetryConformance|.*Emulated)$" -short -race 2>&1 | tee -a sponge_log.log
gotestsum --packages="./ ./dataflux" \
--junitfile sponge_log_emulator.xml \
--format standard-verbose \
-- \
-timeout 17m \
-run="^Test(RetryConformance|.*Emulated)$" \
-short \
-race \
2>&1 | tee -a sponge_log.log

View File

@@ -23,10 +23,9 @@ import (
"io"
"log"
"os"
"sync"
"strconv"
"cloud.google.com/go/iam/apiv1/iampb"
"cloud.google.com/go/internal/trace"
gapic "cloud.google.com/go/storage/internal/apiv2"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/googleapis/gax-go/v2"
@@ -102,8 +101,10 @@ func defaultGRPCOptions() []option.ClientOption {
defaults = append(defaults,
internaloption.AllowNonDefaultServiceAccount(true),
internaloption.EnableDirectPath(true),
internaloption.EnableDirectPathXds(),
internaloption.AllowHardBoundTokens("ALTS"))
internaloption.EnableDirectPathXds())
if disableBoundToken, _ := strconv.ParseBool(os.Getenv("STORAGE_DISABLE_DIRECTPATH_BOUND_TOKEN")); !disableBoundToken {
defaults = append(defaults, internaloption.AllowHardBoundTokens("ALTS"))
}
}
return defaults
@@ -143,7 +144,7 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (*grpcStor
s := initSettings(opts...)
s.clientOption = append(defaultGRPCOptions(), s.clientOption...)
// Disable all gax-level retries in favor of retry logic in the veneer client.
s.gax = append(s.gax, gax.WithRetry(nil))
s.gax = append(s.gax, gax.WithRetry(nil), gax.WithTimeout(0))
config := newStorageConfig(s.clientOption...)
if config.readAPIWasSet {
@@ -243,8 +244,9 @@ func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opt
// BucketIterator is returned to them from the veneer.
if pageToken == "" {
req := &storagepb.ListBucketsRequest{
Parent: toProjectResource(it.projectID),
Prefix: it.Prefix,
Parent: toProjectResource(it.projectID),
Prefix: it.Prefix,
ReturnPartialSuccess: it.ReturnPartialSuccess,
}
gitr = c.raw.ListBuckets(ctx, req, s.gax...)
}
@@ -260,6 +262,9 @@ func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opt
it.buckets = append(it.buckets, b)
}
if resp, ok := gitr.Response.(*storagepb.ListBucketsResponse); ok {
it.unreachable = resp.Unreachable
}
return next, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
@@ -454,6 +459,7 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
SoftDeleted: it.query.SoftDeleted,
IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes,
Filter: it.query.Filter,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
@@ -624,6 +630,18 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje
}
}
if uattrs.Contexts != nil && uattrs.Contexts.Custom != nil {
if len(uattrs.Contexts.Custom) == 0 {
// pass fieldMask with no key value and empty map to delete all keys
fieldMask.Paths = append(fieldMask.Paths, "contexts.custom")
} else {
for key := range uattrs.Contexts.Custom {
// pass fieldMask with key value with empty value in map to delete key
fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("contexts.custom.%s", key))
}
}
}
req.UpdateMask = fieldMask
if len(fieldMask.Paths) < 1 {
@@ -953,14 +971,24 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec
}
func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
s := callSettings(c.settings, opts...)
obj := req.dstObject.attrs.toProtoObject("")
var dst *storagepb.Object
// If the destination object attributes are not set, do not include them
// in the request. This indicates that the object attributes should be
// copied from the source object.
if req.dstObject.attrs.isZero() {
dst = nil
} else {
dst = req.dstObject.attrs.toProtoObject("")
}
call := &storagepb.RewriteObjectRequest{
SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket),
SourceObject: req.srcObject.name,
RewriteToken: req.token,
DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket),
DestinationName: req.dstObject.name,
Destination: obj,
Destination: dst,
DestinationKmsKey: req.dstObject.keyName,
DestinationPredefinedAcl: req.predefinedACL,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey),
@@ -1064,558 +1092,14 @@ func contextMetadataFromBidiReadObject(req *storagepb.BidiReadObjectRequest) []s
return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s", req.GetReadObjectSpec().GetBucket())}
}
func (c *grpcStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) {
if !c.config.grpcBidiReads {
return nil, errors.New("storage: MultiRangeDownloader requires the experimental.WithGRPCBidiReads option")
}
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewMultiRangeDownloader")
defer func() { trace.EndSpan(ctx, err) }()
s := callSettings(c.settings, opts...)
// Force the use of the custom codec to enable zero-copy reads.
s.gax = append(s.gax, gax.WithGRPCOptions(
grpc.ForceCodecV2(bytesCodecV2{}),
))
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
b := bucketResourceName(globalProjectAlias, params.bucket)
object := params.object
bidiObject := &storagepb.BidiReadObjectSpec{
Bucket: b,
Object: object,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
}
// The default is a negative value, which means latest.
if params.gen >= 0 {
bidiObject.Generation = params.gen
}
if params.handle != nil && len(*params.handle) != 0 {
bidiObject.ReadHandle = &storagepb.BidiReadHandle{
Handle: *params.handle,
}
}
req := &storagepb.BidiReadObjectRequest{
ReadObjectSpec: bidiObject,
}
openStream := func(readHandle ReadHandle) (*bidiReadStreamResponse, context.CancelFunc, error) {
if err := applyCondsProto("grpcStorageClient.BidiReadObject", params.gen, params.conds, bidiObject); err != nil {
return nil, nil, err
}
if len(readHandle) != 0 {
req.GetReadObjectSpec().ReadHandle = &storagepb.BidiReadHandle{
Handle: readHandle,
}
}
databufs := mem.BufferSlice{}
var stream storagepb.Storage_BidiReadObjectClient
var decoder *readResponseDecoder
cc, cancel := context.WithCancel(ctx)
err = run(cc, func(ctx context.Context) error {
openAndSendReq := func() error {
mdCtx := gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
stream, err = c.raw.BidiReadObject(mdCtx, s.gax...)
if err != nil {
return err
}
// If stream opened succesfully, send first message on the stream.
// First message to stream should contain read_object_spec
err = stream.Send(req)
if err != nil {
return err
}
// Use RecvMsg to get the raw buffer slice instead of Recv().
err = stream.RecvMsg(&databufs)
if err != nil {
return err
}
return nil
}
err := openAndSendReq()
// We might get a redirect error here for an out-of-region request.
// Add the routing token and read handle to the request and do one
// retry.
if st, ok := status.FromError(err); ok && st.Code() == codes.Aborted {
// BidiReadObjectRedirectedError error is only returned on initial open in case of a redirect.
// The routing token that should be used when reopening the read stream. Needs to be exported.
for _, detail := range st.Details() {
if bidiError, ok := detail.(*storagepb.BidiReadObjectRedirectedError); ok {
bidiObject.ReadHandle = bidiError.ReadHandle
bidiObject.RoutingToken = bidiError.RoutingToken
databufs = mem.BufferSlice{}
err = openAndSendReq()
break
}
}
}
if err != nil {
databufs.Free()
return err
}
// Use the custom decoder to parse the raw buffer without copying object data.
decoder = &readResponseDecoder{
databufs: databufs,
}
err = decoder.readFullObjectResponse()
return err
}, s.retry, s.idempotent)
if err != nil {
// Close the stream context we just created to ensure we don't leak
// resources.
cancel()
return nil, nil, err
}
return &bidiReadStreamResponse{stream: stream, decoder: decoder}, cancel, nil
}
// For the first time open stream without adding any range.
resp, cancel, err := openStream(nil)
if err != nil {
return nil, err
}
// The first message was Recv'd on stream open, use it to populate the
// object metadata.
msg := resp.decoder.msg
obj := msg.GetMetadata()
mrd := &gRPCBidiReader{
stream: resp.stream,
cancel: cancel,
settings: s,
readHandle: msg.GetReadHandle().GetHandle(),
readIDGenerator: &readIDGenerator{},
reopen: openStream,
readSpec: bidiObject,
rangesToRead: make(chan []mrdRange, 100),
ctx: ctx,
closeReceiver: make(chan bool, 10),
closeSender: make(chan bool, 10),
senderRetry: make(chan bool), // create unbuffered channel for closing the streamManager goroutine.
receiverRetry: make(chan bool), // create unbuffered channel for closing the streamReceiver goroutine.
activeRanges: make(map[int64]mrdRange),
done: false,
numActiveRanges: 0,
streamRecreation: false,
}
// sender receives ranges from user adds and requests these ranges from GCS.
sender := func() {
var currentSpec []mrdRange
for {
select {
case <-mrd.ctx.Done():
mrd.mu.Lock()
mrd.done = true
mrd.mu.Unlock()
return
case <-mrd.senderRetry:
return
case <-mrd.closeSender:
mrd.mu.Lock()
if len(mrd.activeRanges) != 0 {
for key := range mrd.activeRanges {
mrd.activeRanges[key].callback(mrd.activeRanges[key].offset, mrd.activeRanges[key].totalBytesWritten, fmt.Errorf("stream closed early"))
delete(mrd.activeRanges, key)
}
}
mrd.numActiveRanges = 0
mrd.mu.Unlock()
return
case currentSpec = <-mrd.rangesToRead:
var readRanges []*storagepb.ReadRange
var err error
mrd.mu.Lock()
for _, v := range currentSpec {
mrd.activeRanges[v.readID] = v
readRanges = append(readRanges, &storagepb.ReadRange{ReadOffset: v.offset, ReadLength: v.limit, ReadId: v.readID})
}
mrd.mu.Unlock()
// We can just send 100 request to gcs in one request.
// In case of Add we will send only one range request to gcs but in case of retry we can have more than 100 ranges.
// Hence be will divide the request in chunk of 100.
// For example with 457 ranges on stream we will have 5 request to gcs [0:99], [100:199], [200:299], [300:399], [400:456]
requestCount := len(readRanges) / 100
if len(readRanges)%100 != 0 {
requestCount++
}
for i := 0; i < requestCount; i++ {
start := i * 100
end := (i + 1) * 100
if end > len(readRanges) {
end = len(readRanges)
}
curReq := readRanges[start:end]
err = mrd.stream.Send(&storagepb.BidiReadObjectRequest{
ReadRanges: curReq,
})
if err != nil {
// cancel stream and reopen the stream again.
// Incase again an error is thrown close the streamManager goroutine.
mrd.retrier(err, "manager")
break
}
}
}
}
}
// receives ranges responses on the stream and executes the callback.
receiver := func() {
var err error
for {
select {
case <-mrd.ctx.Done():
mrd.done = true
return
case <-mrd.receiverRetry:
return
case <-mrd.closeReceiver:
return
default:
// This function reads the data sent for a particular range request and has a callback
// to indicate that output buffer is filled.
databufs := mem.BufferSlice{}
err = mrd.stream.RecvMsg(&databufs)
if err == io.EOF {
err = nil
} else {
// Cancel stream and reopen the stream again.
// In case again an error is thrown, close the streamManager goroutine.
// TODO: special handling for not found error.
mrd.retrier(err, "receiver")
}
if err == nil {
// Use the custom decoder to parse the message.
decoder := &readResponseDecoder{databufs: databufs}
if err := decoder.readFullObjectResponse(); err != nil {
mrd.retrier(err, "receiver")
continue // Move to next iteration after retry
}
msg := decoder.msg
if msg.GetReadHandle().GetHandle() != nil {
mrd.readHandle = msg.GetReadHandle().GetHandle()
}
mrd.mu.Lock()
if len(mrd.activeRanges) == 0 && mrd.numActiveRanges == 0 {
mrd.mu.Unlock()
mrd.closeReceiver <- true
mrd.closeSender <- true
return
}
mrd.mu.Unlock()
for _, val := range msg.GetObjectDataRanges() {
id := val.GetReadRange().GetReadId()
func() {
mrd.mu.Lock()
defer mrd.mu.Unlock()
currRange, ok := mrd.activeRanges[id]
if !ok {
// it's ok to ignore responses for read_id not in map as user would have been notified by callback.
return
}
// The decoder holds the object content. writeToAndUpdateCRC writes
// it to the user's buffer without an intermediate copy.
written, _, err := decoder.writeToAndUpdateCRC(currRange.writer, id, func(b []byte) {
// crc update logic can be added here if needed
})
if err != nil {
currRange.callback(currRange.offset, currRange.totalBytesWritten, err)
mrd.numActiveRanges--
delete(mrd.activeRanges, id)
} else {
currRange = mrdRange{
readID: currRange.readID,
writer: currRange.writer,
offset: currRange.offset,
limit: currRange.limit,
currentBytesWritten: currRange.currentBytesWritten + written,
totalBytesWritten: currRange.totalBytesWritten + written,
callback: currRange.callback,
}
mrd.activeRanges[id] = currRange
}
if val.GetRangeEnd() {
currRange.callback(currRange.offset, currRange.totalBytesWritten, nil)
mrd.numActiveRanges--
delete(mrd.activeRanges, id)
}
}()
}
// Free the buffers once the message has been processed.
decoder.databufs.Free()
}
}
}
}
mrd.retrier = func(err error, thread string) {
mrd.mu.Lock()
if !mrd.streamRecreation {
mrd.streamRecreation = true
} else {
mrd.mu.Unlock()
return
}
mrd.mu.Unlock()
// close both the go routines to make the stream recreation syncronous.
if thread == "receiver" {
mrd.senderRetry <- true
} else {
mrd.receiverRetry <- true
}
err = mrd.retryStream(err)
if err != nil {
mrd.mu.Lock()
for key := range mrd.activeRanges {
mrd.activeRanges[key].callback(mrd.activeRanges[key].offset, mrd.activeRanges[key].totalBytesWritten, err)
delete(mrd.activeRanges, key)
}
// In case we hit an permanent error, delete entries from map and remove active tasks.
mrd.numActiveRanges = 0
mrd.mu.Unlock()
mrd.close()
} else {
// If stream recreation happened successfully lets again start
// both the goroutine making the whole flow asynchronous again.
if thread == "receiver" {
go sender()
} else {
go receiver()
}
}
mrd.mu.Lock()
mrd.streamRecreation = false
mrd.mu.Unlock()
}
go sender()
go receiver()
return &MultiRangeDownloader{
Attrs: ReaderObjectAttrs{
Size: obj.GetSize(), // this is the size of the entire object, even if only a range was requested.
ContentType: obj.GetContentType(),
ContentEncoding: obj.GetContentEncoding(),
CacheControl: obj.GetCacheControl(),
LastModified: obj.GetUpdateTime().AsTime(),
Metageneration: obj.GetMetageneration(),
Generation: obj.GetGeneration(),
},
reader: mrd,
}, nil
}
type gRPCBidiReader struct {
ctx context.Context
stream storagepb.Storage_BidiReadObjectClient
cancel context.CancelFunc
settings *settings
readHandle ReadHandle
readIDGenerator *readIDGenerator
reopen func(ReadHandle) (*bidiReadStreamResponse, context.CancelFunc, error)
readSpec *storagepb.BidiReadObjectSpec
closeReceiver chan bool
closeSender chan bool
senderRetry chan bool
receiverRetry chan bool
// rangesToRead are ranges that have not yet been sent or have been sent but
// must be retried.
rangesToRead chan []mrdRange
// activeRanges are ranges that are currently being sent or are waiting for
// a response from GCS.
activeRanges map[int64]mrdRange // always use the mutex when accessing the map
numActiveRanges int64 // always use the mutex when accessing this variable
done bool // always use the mutex when accessing this variable, indicates whether stream is closed or not.
mu sync.Mutex // protects all vars in gRPCBidiReader from concurrent access
retrier func(error, string)
streamRecreation bool // This helps us identify if stream recreation is in progress or not. If stream recreation gets called from two goroutine then this will stop second one.
}
func (mrd *gRPCBidiReader) activeRange() []mrdRange {
mrd.mu.Lock()
defer mrd.mu.Unlock()
var activeRange []mrdRange
for k, v := range mrd.activeRanges {
activeRange = append(activeRange, mrdRange{
readID: k,
writer: v.writer,
offset: (v.offset + v.currentBytesWritten),
limit: v.limit - v.currentBytesWritten,
callback: v.callback,
currentBytesWritten: 0,
totalBytesWritten: v.totalBytesWritten,
})
mrd.activeRanges[k] = activeRange[len(activeRange)-1]
}
return activeRange
}
// retryStream cancel's stream and reopen the stream again.
func (mrd *gRPCBidiReader) retryStream(err error) error {
if mrd.settings.retry.runShouldRetry(err) {
// This will "close" the existing stream and immediately attempt to
// reopen the stream, but will backoff if further attempts are necessary.
// When Reopening the stream only failed readID will be added to stream.
return mrd.reopenStream(mrd.activeRange())
}
return err
}
// reopenStream "closes" the existing stream and attempts to reopen a stream and
// sets the Reader's stream and cancelStream properties in the process.
func (mrd *gRPCBidiReader) reopenStream(failSpec []mrdRange) error {
// Close existing stream and initialize new stream with updated offset.
if mrd.cancel != nil {
mrd.cancel()
}
res, cancel, err := mrd.reopen(mrd.readHandle)
if err != nil {
return err
}
mrd.stream = res.stream
mrd.cancel = cancel
msg := res.decoder.msg
if msg.GetReadHandle().GetHandle() != nil {
mrd.readHandle = msg.GetReadHandle().GetHandle()
}
// Process any data ranges that came back in the initial response.
// This prevents data loss from the first message on the new stream.
for _, val := range msg.GetObjectDataRanges() {
id := val.GetReadRange().GetReadId()
mrd.mu.Lock()
activeRange, ok := mrd.activeRanges[id]
if !ok {
mrd.mu.Unlock()
continue
}
// Use the decoder's zero-copy write method.
written, _, writeErr := res.decoder.writeToAndUpdateCRC(activeRange.writer, id, nil)
if writeErr != nil {
activeRange.callback(activeRange.offset, activeRange.totalBytesWritten, writeErr)
mrd.numActiveRanges--
delete(mrd.activeRanges, id)
} else {
activeRange.currentBytesWritten += written
activeRange.totalBytesWritten += written
mrd.activeRanges[id] = activeRange
}
if val.GetRangeEnd() {
activeRange.callback(activeRange.offset, activeRange.totalBytesWritten, nil)
mrd.numActiveRanges--
delete(mrd.activeRanges, id)
}
mrd.mu.Unlock()
}
// Once all data in the initial response has been read out, free buffers.
res.decoder.databufs.Free()
if failSpec != nil {
mrd.rangesToRead <- failSpec
}
return nil
}
// add will add current range to stream. The size of the range is not validated
// by add; if the client requests more bytes than are available in the object
// the server will return an error.
func (mrd *gRPCBidiReader) add(output io.Writer, offset, limit int64, callback func(int64, int64, error)) {
if limit < 0 {
callback(offset, 0, errors.New("storage: cannot add range because the limit cannot be negative"))
return
}
id := mrd.readIDGenerator.Next()
if !mrd.done {
spec := mrdRange{readID: id, writer: output, offset: offset, limit: limit, currentBytesWritten: 0, totalBytesWritten: 0, callback: callback}
mrd.mu.Lock()
mrd.numActiveRanges++
mrd.mu.Unlock()
mrd.rangesToRead <- []mrdRange{spec}
} else {
callback(offset, 0, errors.New("storage: cannot add range because the stream is closed"))
}
}
func (mrd *gRPCBidiReader) wait() {
mrd.mu.Lock()
// we should wait until there is active task or an entry in the map.
// there can be a scenario we have nothing in map for a moment or too but still have active task.
// hence in case we have permanent errors we reduce active task to 0 so that this does not block wait.
keepWaiting := len(mrd.activeRanges) != 0 || mrd.numActiveRanges != 0
mrd.mu.Unlock()
for keepWaiting {
mrd.mu.Lock()
keepWaiting = len(mrd.activeRanges) != 0 || mrd.numActiveRanges != 0
mrd.mu.Unlock()
}
}
// Close will notify stream manager goroutine that the reader has been closed, if it's still running.
func (mrd *gRPCBidiReader) close() error {
if mrd.cancel != nil {
mrd.cancel()
}
mrd.mu.Lock()
mrd.done = true
mrd.numActiveRanges = 0
mrd.mu.Unlock()
mrd.closeReceiver <- true
mrd.closeSender <- true
return nil
}
func (mrd *gRPCBidiReader) getHandle() []byte {
return mrd.readHandle
}
func (mrd *gRPCBidiReader) error() error {
mrd.mu.Lock()
defer mrd.mu.Unlock()
if mrd.done {
return errors.New("storage: stream is permanently closed")
}
return nil
}
type mrdRange struct {
readID int64
writer io.Writer
offset int64
limit int64
currentBytesWritten int64
totalBytesWritten int64
callback func(int64, int64, error)
}
func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
// If bidi reads was not selected, use the legacy read object API.
if !c.config.grpcBidiReads {
return c.NewRangeReaderReadObject(ctx, params, opts...)
}
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
ctx, _ = startSpan(ctx, "grpcStorageClient.NewRangeReader")
defer func() { endSpan(ctx, err) }()
s := callSettings(c.settings, opts...)

View File

@@ -22,7 +22,6 @@ import (
"hash/crc32"
"io"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/googleapis/gax-go/v2"
"google.golang.org/grpc"
@@ -83,8 +82,8 @@ func (bytesCodecReadObject) Name() string {
// NewRangeReaderReadObject is the legacy (non-bidi) implementation of reads.
func (c *grpcStorageClient) NewRangeReaderReadObject(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReaderReadObject")
defer func() { trace.EndSpan(ctx, err) }()
ctx, _ = startSpan(ctx, "grpcStorageClient.NewRangeReaderReadObject")
defer func() { endSpan(ctx, err) }()
s := callSettings(c.settings, opts...)

View File

@@ -14,27 +14,889 @@
package storage
import "sync"
import (
"context"
"errors"
"fmt"
"io"
"sync"
// readIDGenerator generates unique read IDs for multi-range reads.
// Call readIDGenerator.Next to get the next ID. Safe to be called concurrently.
type readIDGenerator struct {
initOnce sync.Once
nextID chan int64 // do not use this field directly
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
)
const (
mrdCommandChannelSize = 1
mrdResponseChannelSize = 100
)
// --- internalMultiRangeDownloader Interface ---
// This provides an internal wrapper for the gRPC methods to avoid polluting
// reader.go with gRPC implementation details. The only implementation
// currently is for the gRPC transport with bidi APIs enabled. Creating
// a MultiRangeDownloader with any other client type will fail.
type internalMultiRangeDownloader interface {
add(output io.Writer, offset, length int64, callback func(int64, int64, error))
close(err error) error
wait()
getHandle() []byte
getPermanentError() error
getSpanCtx() context.Context
}
func (g *readIDGenerator) init() {
g.nextID = make(chan int64, 1)
g.nextID <- 1
// --- grpcStorageClient method ---
// Top level entry point into the MultiRangeDownloader via the storageClient interface.
func (c *grpcStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (*MultiRangeDownloader, error) {
if !c.config.grpcBidiReads {
return nil, errors.New("storage: MultiRangeDownloader requires the experimental.WithGRPCBidiReads option")
}
s := callSettings(c.settings, opts...)
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
if s.retry == nil {
s.retry = defaultRetry
}
b := bucketResourceName(globalProjectAlias, params.bucket)
readSpec := &storagepb.BidiReadObjectSpec{
Bucket: b,
Object: params.object,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
}
if params.gen >= 0 {
readSpec.Generation = params.gen
}
if params.handle != nil && len(*params.handle) > 0 {
readSpec.ReadHandle = &storagepb.BidiReadHandle{
Handle: *params.handle,
}
}
mCtx, cancel := context.WithCancel(ctx)
// Create the manager
manager := &multiRangeDownloaderManager{
ctx: mCtx,
cancel: cancel,
client: c,
settings: s,
params: params,
cmds: make(chan mrdCommand, mrdCommandChannelSize),
sessionResps: make(chan mrdSessionResult, mrdResponseChannelSize),
pendingRanges: make(map[int64]*rangeRequest),
readIDCounter: 1,
readSpec: readSpec,
attrsReady: make(chan struct{}),
spanCtx: ctx,
}
mrd := &MultiRangeDownloader{
impl: manager,
}
manager.wg.Add(1)
go func() {
defer manager.wg.Done()
manager.eventLoop()
}()
// Wait for attributes to be ready
select {
case <-manager.attrsReady:
if manager.permanentErr != nil {
cancel()
manager.wg.Wait()
return nil, manager.permanentErr
}
if manager.attrs != nil {
mrd.Attrs = *manager.attrs
}
return mrd, nil
case <-ctx.Done():
cancel()
manager.wg.Wait()
return nil, ctx.Err()
}
}
// Next returns the Next read ID. It initializes the readIDGenerator if needed.
func (g *readIDGenerator) Next() int64 {
g.initOnce.Do(g.init)
id := <-g.nextID
n := id + 1
g.nextID <- n
return id
// --- mrdCommand Interface and Implementations ---
// Used to pass commands from the user-facing code to the MRD manager.
// mrdCommand handlers are applied sequentially in the event loop. Therefore, it's okay
// for them to read/modify the manager state without concern for thread safety.
type mrdCommand interface {
apply(ctx context.Context, m *multiRangeDownloaderManager)
}
type mrdAddCmd struct {
output io.Writer
offset int64
length int64
callback func(int64, int64, error)
}
func (c *mrdAddCmd) apply(ctx context.Context, m *multiRangeDownloaderManager) {
m.handleAddCmd(ctx, c)
}
type mrdCloseCmd struct {
err error
}
func (c *mrdCloseCmd) apply(ctx context.Context, m *multiRangeDownloaderManager) {
m.handleCloseCmd(ctx, c)
}
type mrdWaitCmd struct {
doneC chan struct{}
}
func (c *mrdWaitCmd) apply(ctx context.Context, m *multiRangeDownloaderManager) {
m.handleWaitCmd(ctx, c)
}
type mrdGetHandleCmd struct {
respC chan []byte
}
func (c *mrdGetHandleCmd) apply(ctx context.Context, m *multiRangeDownloaderManager) {
select {
case <-m.attrsReady:
select {
case c.respC <- m.lastReadHandle:
case <-m.ctx.Done():
close(c.respC)
}
case <-m.ctx.Done():
close(c.respC)
}
}
type mrdErrorCmd struct {
respC chan error
}
func (c *mrdErrorCmd) apply(ctx context.Context, m *multiRangeDownloaderManager) {
select {
case c.respC <- m.permanentErr:
case <-ctx.Done():
close(c.respC)
}
}
// --- mrdSessionResult ---
// This is used to pass the zero-copy decoded response from the recv stream
// back up to the multiRangeDownloadManager for processing, or to pass
// an error if the session failed.
type mrdSessionResult struct {
decoder *readResponseDecoder
err error
redirect *storagepb.BidiReadObjectRedirectedError
}
var errClosed = errors.New("downloader closed")
// --- multiRangeDownloaderManager ---
// Manages main event loop for MRD commands and processing responses.
// Spawns bidiStreamSession to deal with actual stream management, retries, etc.
type multiRangeDownloaderManager struct {
ctx context.Context
cancel context.CancelFunc
client *grpcStorageClient
settings *settings
params *newMultiRangeDownloaderParams
wg sync.WaitGroup // syncs completion of event loop.
cmds chan mrdCommand
sessionResps chan mrdSessionResult
// State
currentSession *bidiReadStreamSession
readIDCounter int64
pendingRanges map[int64]*rangeRequest
permanentErr error
waiters []chan struct{}
readSpec *storagepb.BidiReadObjectSpec
lastReadHandle []byte
attrs *ReaderObjectAttrs
attrsReady chan struct{}
attrsOnce sync.Once
spanCtx context.Context
callbackWg sync.WaitGroup
}
type rangeRequest struct {
output io.Writer
offset int64
length int64
callback func(int64, int64, error)
origOffset int64
origLength int64
readID int64
bytesWritten int64
completed bool
}
// Methods implementing internalMultiRangeDownloader
func (m *multiRangeDownloaderManager) add(output io.Writer, offset, length int64, callback func(int64, int64, error)) {
if err := m.ctx.Err(); err != nil {
if m.permanentErr != nil {
err = m.permanentErr
}
m.runCallback(offset, length, err, callback)
return
}
if length < 0 {
m.runCallback(offset, length, fmt.Errorf("storage: MultiRangeDownloader.Add limit cannot be negative"), callback)
return
}
cmd := &mrdAddCmd{output: output, offset: offset, length: length, callback: callback}
select {
case m.cmds <- cmd:
case <-m.ctx.Done():
err := m.ctx.Err()
if m.permanentErr != nil {
err = m.permanentErr
}
m.runCallback(offset, length, err, callback)
}
}
func (m *multiRangeDownloaderManager) close(err error) error {
cmd := &mrdCloseCmd{err: err}
select {
case m.cmds <- cmd:
<-m.ctx.Done()
m.wg.Wait()
if m.permanentErr != nil && !errors.Is(m.permanentErr, errClosed) {
return m.permanentErr
}
return nil
case <-m.ctx.Done():
m.wg.Wait()
return m.ctx.Err()
}
}
func (m *multiRangeDownloaderManager) wait() {
doneC := make(chan struct{})
cmd := &mrdWaitCmd{doneC: doneC}
select {
case m.cmds <- cmd:
select {
case <-doneC:
m.callbackWg.Wait()
return
case <-m.ctx.Done():
m.callbackWg.Wait()
return
}
case <-m.ctx.Done():
m.callbackWg.Wait()
return
}
}
func (m *multiRangeDownloaderManager) getHandle() []byte {
select {
case <-m.attrsReady:
case <-m.ctx.Done():
return nil
}
respC := make(chan []byte, 1)
cmd := &mrdGetHandleCmd{respC: respC}
select {
case m.cmds <- cmd:
select {
case h, ok := <-respC:
if !ok {
return nil
}
return h
case <-m.ctx.Done():
return nil
}
case <-m.ctx.Done():
return nil
}
}
func (m *multiRangeDownloaderManager) getPermanentError() error {
return m.permanentErr
}
func (m *multiRangeDownloaderManager) getSpanCtx() context.Context {
return m.spanCtx
}
func (m *multiRangeDownloaderManager) runCallback(origOffset, numBytes int64, err error, cb func(int64, int64, error)) {
m.callbackWg.Add(1)
go func() {
defer m.callbackWg.Done()
cb(origOffset, numBytes, err)
}()
}
func (m *multiRangeDownloaderManager) eventLoop() {
defer func() {
if m.currentSession != nil {
m.currentSession.Shutdown()
}
finalErr := m.permanentErr
if finalErr == nil {
if ctxErr := m.ctx.Err(); ctxErr != nil {
finalErr = ctxErr
}
}
if finalErr == nil {
finalErr = errClosed
}
m.failAllPending(finalErr)
for _, waiter := range m.waiters {
close(waiter)
}
m.attrsOnce.Do(func() { close(m.attrsReady) })
m.callbackWg.Wait()
}()
// Blocking call to establish the first session and get attributes.
if err := m.establishInitialSession(); err != nil {
// permanentErr is set within establishInitialSession if necessary.
return // Exit eventLoop if we can't start.
}
for {
select {
case <-m.ctx.Done():
return
case cmd := <-m.cmds:
cmd.apply(m.ctx, m)
if _, ok := cmd.(*mrdCloseCmd); ok {
return
}
case result := <-m.sessionResps:
m.processSessionResult(result)
}
if len(m.pendingRanges) == 0 {
for _, waiter := range m.waiters {
close(waiter)
}
m.waiters = nil
}
}
}
func (m *multiRangeDownloaderManager) establishInitialSession() error {
retry := m.settings.retry
var firstResult mrdSessionResult
openStreamAndReceiveFirst := func(ctx context.Context, spec *storagepb.BidiReadObjectSpec) (*bidiReadStreamSession, mrdSessionResult) {
session, err := newBidiReadStreamSession(m.ctx, m.sessionResps, m.client, m.settings, m.params, spec)
if err != nil {
return nil, mrdSessionResult{err: err}
}
select {
case result := <-m.sessionResps:
return session, result
case <-ctx.Done():
session.Shutdown()
return nil, mrdSessionResult{err: ctx.Err()}
}
}
err := run(m.ctx, func(ctx context.Context) error {
if m.currentSession != nil {
m.currentSession.Shutdown()
m.currentSession = nil
}
currentSpec := proto.Clone(m.readSpec).(*storagepb.BidiReadObjectSpec)
session, result := openStreamAndReceiveFirst(ctx, currentSpec)
if result.err != nil {
if result.redirect != nil {
m.readSpec.RoutingToken = result.redirect.RoutingToken
m.readSpec.ReadHandle = result.redirect.ReadHandle
if session != nil {
session.Shutdown()
}
// We might get a redirect error here for an out-of-region request.
// Add the routing token and read handle to the request and do one
// retry.
currentSpec = proto.Clone(m.readSpec).(*storagepb.BidiReadObjectSpec)
session, result = openStreamAndReceiveFirst(ctx, currentSpec)
if result.err != nil {
if session != nil {
session.Shutdown()
}
return result.err
}
} else {
// Not a redirect error, return to run()
if session != nil {
session.Shutdown()
}
return result.err
}
}
// Success
m.currentSession = session
firstResult = result
return nil
}, retry, true)
if err != nil {
m.setPermanentError(err)
return m.permanentErr
}
// Process the successful first result
m.processSessionResult(firstResult)
if m.permanentErr != nil {
return m.permanentErr
}
return nil
}
func (m *multiRangeDownloaderManager) handleAddCmd(ctx context.Context, cmd *mrdAddCmd) {
if m.permanentErr != nil {
m.runCallback(cmd.offset, cmd.length, m.permanentErr, cmd.callback)
return
}
req := &rangeRequest{
output: cmd.output,
offset: cmd.offset,
length: cmd.length,
origOffset: cmd.offset,
origLength: cmd.length,
callback: cmd.callback,
readID: m.readIDCounter,
}
m.readIDCounter++
// Convert to positive offset only if attributes are available.
if m.attrs != nil && req.offset < 0 {
err := m.convertToPositiveOffset(req)
if err != nil {
return
}
}
if m.currentSession == nil {
// This should not happen if establishInitialSession was successful
m.failRange(req, errors.New("storage: session not available"))
return
}
m.pendingRanges[req.readID] = req
protoReq := &storagepb.BidiReadObjectRequest{
ReadRanges: []*storagepb.ReadRange{{
ReadOffset: req.offset,
ReadLength: req.length,
ReadId: req.readID,
}},
}
m.currentSession.SendRequest(protoReq)
}
func (m *multiRangeDownloaderManager) convertToPositiveOffset(req *rangeRequest) error {
if req.offset >= 0 {
return nil
}
var objSize int64
if m.attrs != nil {
objSize = m.attrs.Size
}
if objSize <= 0 {
err := errors.New("storage: cannot resolve negative offset with object size as 0")
m.failRange(req, err)
return err
}
start := max(objSize+req.offset, 0)
req.offset = start
if req.length == 0 {
req.length = objSize - start
}
return nil
}
func (m *multiRangeDownloaderManager) handleCloseCmd(ctx context.Context, cmd *mrdCloseCmd) {
var err error
if cmd.err != nil {
err = cmd.err
} else {
err = errClosed
}
m.setPermanentError(err)
m.cancel()
}
func (m *multiRangeDownloaderManager) handleWaitCmd(ctx context.Context, cmd *mrdWaitCmd) {
if len(m.pendingRanges) == 0 {
close(cmd.doneC)
} else {
m.waiters = append(m.waiters, cmd.doneC)
}
}
// processSessionResult consumes one result produced by the current bidi read
// session: either a terminal stream error or a decoded response message.
//
// On error it delegates to handleStreamEnd (reconnect or permanent failure).
// Otherwise it:
//  1. captures any refreshed read handle for use on future reconnects,
//  2. on the first response carrying metadata, records the object attributes
//     and resolves any pending negative offsets now that the size is known,
//  3. writes each returned data range into the matching pending request,
//     completing ranges the server marks as finished,
//  4. frees the zero-copy receive buffers once all data has been drained.
func (m *multiRangeDownloaderManager) processSessionResult(result mrdSessionResult) {
	if result.err != nil {
		m.handleStreamEnd(result)
		return
	}
	resp := result.decoder.msg
	// Keep the most recent read handle the server sends; it is used when
	// re-establishing the stream.
	if handle := resp.GetReadHandle().GetHandle(); len(handle) > 0 {
		m.lastReadHandle = handle
	}
	m.attrsOnce.Do(func() {
		defer close(m.attrsReady)
		if meta := resp.GetMetadata(); meta != nil {
			obj := newObjectFromProto(meta)
			attrs := readerAttrsFromObject(obj)
			m.attrs = &attrs
			// The object size is now known: convert any queued negative
			// (from-end) offsets into absolute ones.
			for _, req := range m.pendingRanges {
				if req.offset < 0 {
					// A failure here is reported through the range's
					// callback inside convertToPositiveOffset.
					_ = m.convertToPositiveOffset(req)
				}
			}
		}
	})
	for _, dataRange := range resp.GetObjectDataRanges() {
		readID := dataRange.GetReadRange().GetReadId()
		req, exists := m.pendingRanges[readID]
		if !exists || req.completed {
			// Stale data for a range that already finished or failed.
			continue
		}
		written, _, err := result.decoder.writeToAndUpdateCRC(req.output, readID, nil)
		req.bytesWritten += written
		if err != nil {
			m.failRange(req, err)
			continue
		}
		if dataRange.GetRangeEnd() {
			// Server signals this range is complete; report success.
			req.completed = true
			delete(m.pendingRanges, req.readID)
			m.runCallback(req.origOffset, req.bytesWritten, nil, req.callback)
		}
	}
	// Once all data in the initial response has been read out, free buffers.
	result.decoder.databufs.Free()
}
// ensureSession is now only for reconnecting *after* the initial session is up.
//
// It is a no-op when a session already exists or a permanent error has been
// recorded. Otherwise it dials a fresh BidiReadObject stream under the
// configured retry policy and re-sends every pending range, adjusted for
// bytes already delivered to the caller.
func (m *multiRangeDownloaderManager) ensureSession(ctx context.Context) error {
	if m.currentSession != nil {
		return nil
	}
	if m.permanentErr != nil {
		return m.permanentErr
	}
	// Using run for retries
	return run(ctx, func(ctx context.Context) error {
		// Re-check inside the retry loop: a prior attempt may have
		// succeeded, or the downloader may have failed permanently.
		if m.currentSession != nil {
			return nil
		}
		if m.permanentErr != nil {
			return m.permanentErr
		}
		// Clone the read spec so the session cannot observe later mutations
		// (e.g. routing-token updates on redirect).
		session, err := newBidiReadStreamSession(m.ctx, m.sessionResps, m.client, m.settings, m.params, proto.Clone(m.readSpec).(*storagepb.BidiReadObjectSpec))
		if err != nil {
			redirectErr, isRedirect := isRedirectError(err)
			if isRedirect {
				// Adopt the redirect's routing info and surface a wrapped,
				// retryable error so run() dials again with the new target.
				m.readSpec.RoutingToken = redirectErr.RoutingToken
				m.readSpec.ReadHandle = redirectErr.ReadHandle
				return fmt.Errorf("%w: %v", errBidiReadRedirect, err)
			}
			return err
		}
		m.currentSession = session
		// Resend every incomplete range, offset past what was already
		// written out for it.
		var rangesToResend []*storagepb.ReadRange
		for _, req := range m.pendingRanges {
			if !req.completed {
				readLength := req.length
				if req.length > 0 {
					readLength -= req.bytesWritten
				}
				if readLength < 0 {
					readLength = 0
				}
				// length == 0 means read-to-EOF and is always resent;
				// bounded reads are resent only if bytes remain.
				if req.length == 0 || readLength > 0 {
					rangesToResend = append(rangesToResend, &storagepb.ReadRange{
						ReadOffset: req.offset + req.bytesWritten,
						ReadLength: readLength,
						ReadId:     req.readID,
					})
				}
			}
		}
		if len(rangesToResend) > 0 {
			m.currentSession.SendRequest(&storagepb.BidiReadObjectRequest{ReadRanges: rangesToResend})
		}
		return nil
	}, m.settings.retry, true)
}
var errBidiReadRedirect = errors.New("bidi read object redirected")
// handleStreamEnd reacts to a terminated read session. Redirects and
// retryable errors trigger a reconnect (resending pending ranges); anything
// else permanently fails the downloader and all outstanding ranges. A failed
// reconnect is itself treated as permanent.
func (m *multiRangeDownloaderManager) handleStreamEnd(result mrdSessionResult) {
	if sess := m.currentSession; sess != nil {
		sess.Shutdown()
		m.currentSession = nil
	}
	streamErr := result.err
	var reconnectErr error
	switch {
	case result.redirect != nil:
		// Server redirected us; adopt the new routing info and reconnect.
		m.readSpec.RoutingToken = result.redirect.RoutingToken
		m.readSpec.ReadHandle = result.redirect.ReadHandle
		reconnectErr = m.ensureSession(m.ctx)
	case m.settings.retry != nil && m.settings.retry.runShouldRetry(streamErr):
		reconnectErr = m.ensureSession(m.ctx)
	default:
		// Terminal error: normalize cancellation/closure to errClosed,
		// record the permanent error, and fail every outstanding range.
		if errors.Is(streamErr, context.Canceled) || errors.Is(streamErr, errClosed) {
			if m.permanentErr == nil {
				m.setPermanentError(errClosed)
			}
		} else {
			m.setPermanentError(streamErr)
		}
		m.failAllPending(m.permanentErr)
	}
	// Handle error from ensureSession.
	if reconnectErr != nil {
		m.setPermanentError(reconnectErr)
		m.failAllPending(m.permanentErr)
	}
}
// failRange finishes a single pending range with err: it is marked complete,
// removed from the pending set, and its user callback is invoked with the
// bytes written so far. A range that already completed is left untouched.
func (m *multiRangeDownloaderManager) failRange(req *rangeRequest, err error) {
	if !req.completed {
		req.completed = true
		delete(m.pendingRanges, req.readID)
		m.runCallback(req.origOffset, req.bytesWritten, err, req.callback)
	}
}
// failAllPending reports err through the callback of every range that has
// not yet completed, then replaces the pending set with a fresh empty map.
func (m *multiRangeDownloaderManager) failAllPending(err error) {
	for _, pending := range m.pendingRanges {
		if pending.completed {
			continue
		}
		pending.completed = true
		m.runCallback(pending.origOffset, pending.bytesWritten, err, pending.callback)
	}
	m.pendingRanges = make(map[int64]*rangeRequest)
}
// setPermanentError latches err as the downloader's terminal error; only the
// first error recorded wins, later calls are ignored.
func (m *multiRangeDownloaderManager) setPermanentError(err error) {
	if m.permanentErr != nil {
		return
	}
	m.permanentErr = err
}
// --- bidiReadStreamSession ---

// Controls lifespan of an individual bi-directional gRPC stream to the
// object in GCS. Spins up goroutines for the read and write sides of the
// stream.
type bidiReadStreamSession struct {
	ctx       context.Context    // session-scoped context; cancelled tears down both loops
	cancel    context.CancelFunc // cancels ctx; called on Shutdown and when both loops exit
	stream    storagepb.Storage_BidiReadObjectClient
	client    *grpcStorageClient
	settings  *settings
	params    *newMultiRangeDownloaderParams
	readSpec  *storagepb.BidiReadObjectSpec
	reqC      chan *storagepb.BidiReadObjectRequest // outgoing range requests, consumed by sendLoop
	respC     chan<- mrdSessionResult               // decoded responses / terminal errors, delivered to the manager
	wg        sync.WaitGroup                        // tracks the send and receive goroutines
	errOnce   sync.Once                             // ensures only the first stream error is recorded
	streamErr error                                 // first error observed on the stream (set via errOnce)
}
// newBidiReadStreamSession opens a new BidiReadObject stream, sends the
// initial request carrying readSpec, and starts the send/receive goroutines.
//
// The returned session owns the stream: Shutdown (or cancellation of ctx)
// stops both loops, and the session context is cancelled automatically once
// both loops have exited. Decoded responses and terminal errors are
// delivered on respC.
func newBidiReadStreamSession(ctx context.Context, respC chan<- mrdSessionResult, client *grpcStorageClient, settings *settings, params *newMultiRangeDownloaderParams, readSpec *storagepb.BidiReadObjectSpec) (*bidiReadStreamSession, error) {
	sCtx, cancel := context.WithCancel(ctx)
	s := &bidiReadStreamSession{
		ctx:      sCtx,
		cancel:   cancel,
		client:   client,
		settings: settings,
		params:   params,
		readSpec: readSpec,
		// Buffered so SendRequest rarely blocks the manager loop.
		reqC:  make(chan *storagepb.BidiReadObjectRequest, 100),
		respC: respC,
	}
	initialReq := &storagepb.BidiReadObjectRequest{
		ReadObjectSpec: s.readSpec,
	}
	reqCtx := gax.InsertMetadataIntoOutgoingContext(s.ctx, contextMetadataFromBidiReadObject(initialReq)...)
	// Force the use of the custom codec to enable zero-copy reads.
	//
	// Append to a copy of the call options rather than to settings.gax
	// itself: settings is shared across reconnects, so mutating it would
	// accumulate one duplicate ForceCodecV2 option per session created.
	gaxOpts := make([]gax.CallOption, 0, len(s.settings.gax)+1)
	gaxOpts = append(gaxOpts, s.settings.gax...)
	gaxOpts = append(gaxOpts, gax.WithGRPCOptions(
		grpc.ForceCodecV2(bytesCodecV2{}),
	))
	var err error
	s.stream, err = client.raw.BidiReadObject(reqCtx, gaxOpts...)
	if err != nil {
		cancel()
		return nil, err
	}
	if err := s.stream.Send(initialReq); err != nil {
		s.stream.CloseSend()
		cancel()
		return nil, err
	}
	s.wg.Add(2)
	go s.sendLoop()
	go s.receiveLoop()
	// Cancel the session context once both loops are done so callers blocked
	// in SendRequest are released.
	go func() {
		s.wg.Wait()
		s.cancel()
	}()
	return s, nil
}
// SendRequest queues req for transmission by sendLoop. It returns without
// sending when the session has been shut down or its context cancelled.
func (s *bidiReadStreamSession) SendRequest(req *storagepb.BidiReadObjectRequest) {
	select {
	case <-s.ctx.Done():
		// Session is gone; drop the request.
	case s.reqC <- req:
	}
}
// Shutdown cancels the session context and blocks until both the send and
// receive goroutines have exited.
func (s *bidiReadStreamSession) Shutdown() {
	s.cancel()
	s.wg.Wait()
}
// setError records err as the session's stream error; only the first call
// has any effect (subsequent errors are dropped).
func (s *bidiReadStreamSession) setError(err error) {
	s.errOnce.Do(func() {
		s.streamErr = err
	})
}
// sendLoop forwards queued requests from reqC to the gRPC stream until the
// channel closes, the session context is cancelled, or a Send fails.
// CloseSend is always issued on the way out so the server observes
// end-of-requests.
func (s *bidiReadStreamSession) sendLoop() {
	defer s.wg.Done()
	defer s.stream.CloseSend()
	for {
		var msg *storagepb.BidiReadObjectRequest
		select {
		case <-s.ctx.Done():
			return
		case queued, ok := <-s.reqC:
			if !ok {
				return
			}
			msg = queued
		}
		if err := s.stream.Send(msg); err != nil {
			// A send failure is terminal for the session; record it and
			// cancel so the receive side also winds down.
			s.setError(err)
			s.cancel()
			return
		}
	}
}
// receiveLoop receives and decodes messages from the gRPC stream until the
// stream ends or the session context is cancelled, forwarding each result
// (decoded message or terminal error) to respC. It cancels the session
// context on exit so the send side terminates as well.
func (s *bidiReadStreamSession) receiveLoop() {
	defer s.wg.Done()
	defer s.cancel()
	for {
		if err := s.ctx.Err(); err != nil {
			return
		}
		// Receive message without a copy.
		databufs := mem.BufferSlice{}
		err := s.stream.RecvMsg(&databufs)
		var decoder *readResponseDecoder
		if err == nil {
			// Use the custom decoder to parse the raw buffer without copying object data.
			decoder = &readResponseDecoder{
				databufs: databufs,
			}
			err = decoder.readFullObjectResponse()
		}
		if err != nil {
			// Nothing useful was decoded; release the buffers immediately.
			databufs.Free()
			redirectErr, isRedirect := isRedirectError(err)
			result := mrdSessionResult{err: err}
			if isRedirect {
				// Attach the redirect details and wrap so the manager treats
				// the error as retryable.
				result.redirect = redirectErr
				err = fmt.Errorf("%w: %v", errBidiReadRedirect, err)
				result.err = err
			}
			s.setError(err)
			// Deliver the terminal result unless the session is already gone.
			select {
			case s.respC <- result:
			case <-s.ctx.Done():
			}
			return
		}
		// Successful decode: hand the decoder (and buffer ownership) to the
		// manager; it frees the buffers after draining the data.
		select {
		case s.respC <- mrdSessionResult{decoder: decoder}:
		case <-s.ctx.Done():
			return
		}
	}
}
// isRedirectError reports whether err is an Aborted gRPC status carrying a
// BidiReadObjectRedirectedError detail with a routing token, returning the
// decoded detail when it is.
func isRedirectError(err error) (*storagepb.BidiReadObjectRedirectedError, bool) {
	st, ok := status.FromError(err)
	if !ok || st.Code() != codes.Aborted {
		return nil, false
	}
	for _, detail := range st.Details() {
		redirect, isBidi := detail.(*storagepb.BidiReadObjectRedirectedError)
		if isBidi && redirect.RoutingToken != nil {
			return redirect, true
		}
	}
	return nil, false
}
// readerAttrsFromObject projects the reader-visible subset of ObjectAttrs
// into a ReaderObjectAttrs. A nil input yields the zero value.
func readerAttrsFromObject(o *ObjectAttrs) ReaderObjectAttrs {
	var attrs ReaderObjectAttrs
	if o == nil {
		return attrs
	}
	attrs.Size = o.Size
	attrs.ContentType = o.ContentType
	attrs.ContentEncoding = o.ContentEncoding
	attrs.CacheControl = o.CacheControl
	attrs.LastModified = o.Updated
	attrs.Generation = o.Generation
	attrs.Metageneration = o.Metageneration
	attrs.CRC32C = o.CRC32C
	return attrs
}

View File

@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"net/http"
"net/url"
@@ -51,6 +52,10 @@ func (w *gRPCWriter) Write(p []byte) (n int, err error) {
case <-w.donec:
return 0, w.streamResult
case w.writesChan <- cmd:
// update fullObjectChecksum on every write and send it on finalWrite
if !w.disableAutoChecksum {
w.fullObjectChecksum = crc32.Update(w.fullObjectChecksum, crc32cTable, p)
}
// write command successfully delivered to sender. We no longer own cmd.
break
}
@@ -170,6 +175,7 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
flushSupported: params.append,
sendCRC32C: params.sendCRC32C,
disableAutoChecksum: params.disableAutoChecksum,
forceOneShot: params.chunkSize <= 0,
forceEmptyContentType: params.forceEmptyContentType,
append: params.append,
@@ -239,8 +245,11 @@ type gRPCWriter struct {
setSize func(int64)
setTakeoverOffset func(int64)
fullObjectChecksum uint32
flushSupported bool
sendCRC32C bool
disableAutoChecksum bool
forceOneShot bool
forceEmptyContentType bool
append bool
@@ -785,25 +794,64 @@ func completion(r *storagepb.BidiWriteObjectResponse) *gRPCBidiWriteCompletion {
}
}
func bidiWriteObjectRequest(buf []byte, offset int64, flush, finishWrite bool) *storagepb.BidiWriteObjectRequest {
// Server contract expects full object checksum to be sent only on first or last write.
// Checksums of full object are already being sent on first Write during initialization of sender.
// Send objectChecksums only on final request and nil in other cases.
func bidiWriteObjectRequest(r gRPCBidiWriteRequest, bufChecksum *uint32, objectChecksums *storagepb.ObjectChecksums) *storagepb.BidiWriteObjectRequest {
var data *storagepb.BidiWriteObjectRequest_ChecksummedData
if buf != nil {
if r.buf != nil {
data = &storagepb.BidiWriteObjectRequest_ChecksummedData{
ChecksummedData: &storagepb.ChecksummedData{
Content: buf,
Content: r.buf,
Crc32C: bufChecksum,
},
}
}
req := &storagepb.BidiWriteObjectRequest{
Data: data,
WriteOffset: offset,
FinishWrite: finishWrite,
Flush: flush,
StateLookup: flush,
Data: data,
WriteOffset: r.offset,
FinishWrite: r.finishWrite,
Flush: r.flush,
StateLookup: r.flush,
ObjectChecksums: objectChecksums,
}
return req
}
type getObjectChecksumsParams struct {
sendCRC32C bool
disableAutoChecksum bool
objectAttrs *ObjectAttrs
fullObjectChecksum func() uint32
finishWrite bool
takeoverWriter bool
}
// getObjectChecksums determines what checksum information to include in the final
// gRPC request
//
// function returns a populated ObjectChecksums only when finishWrite is true
// If CRC32C is disabled, it returns the user-provided checksum if available.
// If CRC32C is enabled, it returns the user-provided checksum if available,
// or the computed checksum of the entire object.
func getObjectChecksums(params *getObjectChecksumsParams) *storagepb.ObjectChecksums {
if !params.finishWrite {
return nil
}
// send user's checksum on last write op if available
if params.sendCRC32C {
return toProtoChecksums(params.sendCRC32C, params.objectAttrs)
}
// TODO(b/461982277): Enable checksum validation for appendable takeover writer gRPC
if params.disableAutoChecksum || params.takeoverWriter {
return nil
}
return &storagepb.ObjectChecksums{
Crc32C: proto.Uint32(params.fullObjectChecksum()),
}
}
type gRPCBidiWriteBufferSender interface {
// connect implementations may attempt to establish a connection for issuing
// writes.
@@ -832,6 +880,12 @@ type gRPCOneshotBidiWriteBufferSender struct {
bucket string
firstMessage *storagepb.BidiWriteObjectRequest
streamErr error
// Checksum related settings.
sendCRC32C bool
disableAutoChecksum bool
objectAttrs *ObjectAttrs
fullObjectChecksum func() uint32
}
func (w *gRPCWriter) newGRPCOneshotBidiWriteBufferSender() *gRPCOneshotBidiWriteBufferSender {
@@ -843,11 +897,13 @@ func (w *gRPCWriter) newGRPCOneshotBidiWriteBufferSender() *gRPCOneshotBidiWrite
WriteObjectSpec: w.spec,
},
CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
// For a non-resumable upload, checksums must be sent in this message.
// TODO: Currently the checksums are only sent on the first message
// of the stream, but in the future, we must also support sending it
// on the *last* message of the stream (instead of the first).
ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
},
sendCRC32C: w.sendCRC32C,
disableAutoChecksum: w.disableAutoChecksum,
objectAttrs: w.attrs,
fullObjectChecksum: func() uint32 {
return w.fullObjectChecksum
},
}
}
@@ -888,7 +944,19 @@ func (s *gRPCOneshotBidiWriteBufferSender) connect(ctx context.Context, cs gRPCB
continue
}
req := bidiWriteObjectRequest(r.buf, r.offset, r.flush, r.finishWrite)
var bufChecksum *uint32
if !s.disableAutoChecksum {
bufChecksum = proto.Uint32(crc32.Checksum(r.buf, crc32cTable))
}
objectChecksums := getObjectChecksums(&getObjectChecksumsParams{
sendCRC32C: s.sendCRC32C,
objectAttrs: s.objectAttrs,
fullObjectChecksum: s.fullObjectChecksum,
disableAutoChecksum: s.disableAutoChecksum,
finishWrite: r.finishWrite,
})
req := bidiWriteObjectRequest(r, bufChecksum, objectChecksums)
if firstSend {
proto.Merge(req, s.firstMessage)
firstSend = false
@@ -932,6 +1000,12 @@ type gRPCResumableBidiWriteBufferSender struct {
startWriteRequest *storagepb.StartResumableWriteRequest
upid string
// Checksum related settings.
sendCRC32C bool
disableAutoChecksum bool
objectAttrs *ObjectAttrs
fullObjectChecksum func() uint32
streamErr error
}
@@ -942,10 +1016,13 @@ func (w *gRPCWriter) newGRPCResumableBidiWriteBufferSender() *gRPCResumableBidiW
startWriteRequest: &storagepb.StartResumableWriteRequest{
WriteObjectSpec: w.spec,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
// TODO: Currently the checksums are only sent on the request to initialize
// the upload, but in the future, we must also support sending it
// on the *last* message of the stream.
ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
},
sendCRC32C: w.sendCRC32C,
disableAutoChecksum: w.disableAutoChecksum,
objectAttrs: w.attrs,
fullObjectChecksum: func() uint32 {
return w.fullObjectChecksum
},
}
}
@@ -1005,7 +1082,20 @@ func (s *gRPCResumableBidiWriteBufferSender) connect(ctx context.Context, cs gRP
cs.requestAcks <- struct{}{}
continue
}
req := bidiWriteObjectRequest(r.buf, r.offset, r.flush, r.finishWrite)
var bufChecksum *uint32
if !s.disableAutoChecksum {
bufChecksum = proto.Uint32(crc32.Checksum(r.buf, crc32cTable))
}
objectChecksums := getObjectChecksums(&getObjectChecksumsParams{
sendCRC32C: s.sendCRC32C,
objectAttrs: s.objectAttrs,
fullObjectChecksum: s.fullObjectChecksum,
disableAutoChecksum: s.disableAutoChecksum,
finishWrite: r.finishWrite,
})
req := bidiWriteObjectRequest(r, bufChecksum, objectChecksums)
if firstSend {
req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: s.upid}
firstSend = false
@@ -1058,12 +1148,18 @@ type gRPCAppendBidiWriteBufferSender struct {
bucket string
routingToken *string
firstMessage *storagepb.BidiWriteObjectRequest
objectChecksums *storagepb.ObjectChecksums
firstMessage *storagepb.BidiWriteObjectRequest
finalizeOnClose bool
objResource *storagepb.Object
// Checksum related settings.
sendCRC32C bool
disableAutoChecksum bool
objectAttrs *ObjectAttrs
fullObjectChecksum func() uint32
takeoverWriter bool
streamErr error
}
@@ -1080,8 +1176,13 @@ func (w *gRPCWriter) newGRPCAppendableObjectBufferSender() *gRPCAppendBidiWriteB
},
CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
},
objectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
finalizeOnClose: w.finalizeOnClose,
finalizeOnClose: w.finalizeOnClose,
sendCRC32C: w.sendCRC32C,
disableAutoChecksum: w.disableAutoChecksum,
objectAttrs: w.attrs,
fullObjectChecksum: func() uint32 {
return w.fullObjectChecksum
},
}
}
@@ -1169,8 +1270,8 @@ func (s *gRPCAppendBidiWriteBufferSender) handleStream(stream storagepb.Storage_
type gRPCAppendTakeoverBidiWriteBufferSender struct {
gRPCAppendBidiWriteBufferSender
takeoverReported bool
setTakeoverOffset func(int64)
takeoverReported bool
handleTakeoverCompletion func(gRPCBidiWriteCompletion)
}
func writeObjectSpecAsAppendObjectSpec(s *storagepb.WriteObjectSpec, gen int64) *storagepb.AppendObjectSpec {
@@ -1194,11 +1295,20 @@ func (w *gRPCWriter) newGRPCAppendTakeoverWriteBufferSender() *gRPCAppendTakeove
AppendObjectSpec: writeObjectSpecAsAppendObjectSpec(w.spec, w.appendGen),
},
},
objectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
finalizeOnClose: w.finalizeOnClose,
finalizeOnClose: w.finalizeOnClose,
takeoverWriter: true,
sendCRC32C: w.sendCRC32C,
disableAutoChecksum: w.disableAutoChecksum,
objectAttrs: w.attrs,
fullObjectChecksum: func() uint32 {
return w.fullObjectChecksum
},
},
takeoverReported: false,
handleTakeoverCompletion: func(c gRPCBidiWriteCompletion) {
w.handleCompletion(c)
w.setTakeoverOffset(c.flushOffset)
},
takeoverReported: false,
setTakeoverOffset: w.setTakeoverOffset,
}
}
@@ -1226,7 +1336,8 @@ func (s *gRPCAppendTakeoverBidiWriteBufferSender) connect(ctx context.Context, c
resp, err := stream.Recv()
if err != nil {
s.streamErr = err
// A Recv() error may be a redirect.
s.streamErr = s.maybeHandleRedirectionError(err)
close(cs.completions)
return
}
@@ -1238,9 +1349,9 @@ func (s *gRPCAppendTakeoverBidiWriteBufferSender) connect(ctx context.Context, c
return
}
s.setTakeoverOffset(c.flushOffset)
s.maybeUpdateFirstMessage(resp)
s.takeoverReported = true
cs.completions <- *c
s.handleTakeoverCompletion(*c)
}
go s.handleStream(stream, cs, firstSend)
@@ -1315,11 +1426,26 @@ func (s *gRPCAppendBidiWriteBufferSender) maybeHandleRedirectionError(err error)
func (s *gRPCAppendBidiWriteBufferSender) send(stream storagepb.Storage_BidiWriteObjectClient, buf []byte, offset int64, flush, finishWrite, sendFirstMessage bool) error {
finalizeObject := finishWrite && s.finalizeOnClose
flush = flush || finishWrite
req := bidiWriteObjectRequest(buf, offset, flush, finalizeObject)
if finalizeObject {
// appendable objects pass checksums on the finalize message only
req.ObjectChecksums = s.objectChecksums
r := gRPCBidiWriteRequest{
buf: buf,
offset: offset,
flush: flush,
finishWrite: finalizeObject,
}
var bufChecksum *uint32
if !s.disableAutoChecksum {
bufChecksum = proto.Uint32(crc32.Checksum(r.buf, crc32cTable))
}
objectChecksums := getObjectChecksums(&getObjectChecksumsParams{
sendCRC32C: s.sendCRC32C,
objectAttrs: s.objectAttrs,
fullObjectChecksum: s.fullObjectChecksum,
disableAutoChecksum: s.disableAutoChecksum,
finishWrite: finalizeObject,
takeoverWriter: s.takeoverWriter,
})
req := bidiWriteObjectRequest(r, bufChecksum, objectChecksums)
if sendFirstMessage {
proto.Merge(req, s.firstMessage)
}

View File

@@ -33,7 +33,6 @@ import (
"cloud.google.com/go/auth"
"cloud.google.com/go/iam/apiv1/iampb"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"github.com/google/uuid"
"github.com/googleapis/gax-go/v2/callctx"
"google.golang.org/api/googleapi"
@@ -225,6 +224,7 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
req.ReturnPartialSuccess(it.ReturnPartialSuccess)
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
@@ -243,6 +243,7 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
}
it.buckets = append(it.buckets, b)
}
it.unreachable = resp.Unreachable
return resp.NextPageToken, nil
}
@@ -364,6 +365,12 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
req.MatchGlob(it.query.MatchGlob)
req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes)
// Cannot pass empty filter
if it.query.Filter != "" {
req.Filter(it.query.Filter)
}
if selection := it.query.toFieldSelection(); selection != "" {
req.Fields("nextPageToken", googleapi.Field(selection))
}
@@ -517,6 +524,19 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje
forceSendFields = append(forceSendFields, "Retention")
}
}
if uattrs.Contexts != nil && uattrs.Contexts.Custom != nil {
if len(uattrs.Contexts.Custom) == 0 {
// To delete all contexts, "Contexts" must be added to nullFields.
// Sending empty Custom map in the request body is a no-op without this.
nullFields = append(nullFields, "Contexts")
} else {
attrs.Contexts = uattrs.Contexts
// This is to ensure any new values or deletions are updated
forceSendFields = append(forceSendFields, "Contexts")
}
}
rawObj := attrs.toRawObject(params.bucket)
rawObj.ForceSendFields = forceSendFields
rawObj.NullFields = nullFields
@@ -847,8 +867,8 @@ func (c *httpStorageClient) NewMultiRangeDownloader(ctx context.Context, params
}
func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
ctx, _ = startSpan(ctx, "httpStorageClient.NewRangeReader")
defer func() { endSpan(ctx, err) }()
s := callSettings(c.settings, opts...)

View File

@@ -0,0 +1,10 @@
{
"api_shortname": "storage",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2",
"client_library_type": "generated",
"description": "Cloud Storage API",
"distribution_name": "cloud.google.com/go/storage/internal/apiv2",
"language": "go",
"library_type": "GAPIC_AUTO",
"release_level": "stable"
}

View File

@@ -432,7 +432,8 @@ type internalClient interface {
//
// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through
// the abstractions of buckets and objects. For a description of these
// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs).
// abstractions please see Cloud Storage
// documentation (at https://cloud.google.com/storage/docs).
//
// Resources are named as follows:
//
@@ -440,18 +441,14 @@ type internalClient interface {
// using strings like projects/123456 or projects/my-string-id.
//
// Buckets are named using string names of the form:
// projects/{project}/buckets/{bucket}
// For globally unique buckets, _ may be substituted for the project.
// projects/{project}/buckets/{bucket}.
// For globally unique buckets, _ might be substituted for the project.
//
// Objects are uniquely identified by their name along with the name of the
// bucket they belong to, as separate strings in this API. For example:
//
// ReadObjectRequest {
// bucket: projects/_/buckets/my-bucket
// object: my-object
// }
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
type Client struct {
// The internal transport-dependent client.
internalClient internalClient
@@ -484,40 +481,121 @@ func (c *Client) Connection() *grpc.ClientConn {
}
// DeleteBucket permanently deletes an empty bucket.
// The request fails if there are any live or
// noncurrent objects in the bucket, but the request succeeds if the
// bucket only contains soft-deleted objects or incomplete uploads, such
// as ongoing XML API multipart uploads. Does not permanently delete
// soft-deleted objects.
//
// When this API is used to delete a bucket containing an object that has a
// soft delete policy
// enabled, the object becomes soft deleted, and the
// softDeleteTime and hardDeleteTime properties are set on the
// object.
//
// Objects and multipart uploads that were in the bucket at the time of
// deletion are also retained for the specified retention duration. When
// a soft-deleted bucket reaches the end of its retention duration, it
// is permanently deleted. The hardDeleteTime of the bucket always
// equals
// or exceeds the expiration time of the last soft-deleted object in the
// bucket.
//
// IAM Permissions:
//
// Requires storage.buckets.delete IAM permission on the bucket.
func (c *Client) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteBucket(ctx, req, opts...)
}
// GetBucket returns metadata for the specified bucket.
//
// IAM Permissions:
//
// Requires storage.buckets.get
// IAM permission on
// the bucket. Additionally, to return specific bucket metadata, the
// authenticated user must have the following permissions:
//
// To return the IAM policies: storage.buckets.getIamPolicy
//
// To return the bucket IP filtering rules: storage.buckets.getIpFilter
func (c *Client) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
return c.internalClient.GetBucket(ctx, req, opts...)
}
// CreateBucket creates a new bucket.
//
// IAM Permissions:
//
// Requires storage.buckets.create IAM permission on the bucket.
// Additionally, to enable specific bucket features, the authenticated user
// must have the following permissions:
//
// To enable object retention using the enableObjectRetention query
// parameter: storage.buckets.enableObjectRetention
//
// To set the bucket IP filtering rules: storage.buckets.setIpFilter
func (c *Client) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
return c.internalClient.CreateBucket(ctx, req, opts...)
}
// ListBuckets retrieves a list of buckets for a given project.
// ListBuckets retrieves a list of buckets for a given project, ordered
// lexicographically by name.
//
// IAM Permissions:
//
// Requires storage.buckets.list IAM permission on the bucket.
// Additionally, to enable specific bucket features, the authenticated
// user must have the following permissions:
//
// To list the IAM policies: storage.buckets.getIamPolicy
//
// To list the bucket IP filtering rules: storage.buckets.getIpFilter
func (c *Client) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator {
return c.internalClient.ListBuckets(ctx, req, opts...)
}
// LockBucketRetentionPolicy locks retention policy on a bucket.
// LockBucketRetentionPolicy permanently locks the retention
// policy that is
// currently applied to the specified bucket.
//
// Caution: Locking a bucket is an
// irreversible action. Once you lock a bucket:
//
// You cannot remove the retention policy from the bucket.
//
// You cannot decrease the retention period for the policy.
//
// Once locked, you must delete the entire bucket in order to remove the
// buckets retention policy. However, before you can delete the bucket, you
// must delete all the objects in the bucket, which is only
// possible if all the objects have reached the retention period set by the
// retention policy.
//
// IAM Permissions:
//
// Requires storage.buckets.update IAM permission on the bucket.
func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...)
}
// GetIamPolicy gets the IAM policy for a specified bucket.
// GetIamPolicy gets the IAM policy for a specified bucket or managed folder.
// The resource field in the request should be
// projects/_/buckets/{bucket} for a bucket, or
// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
// for a managed folder.
//
// IAM Permissions:
//
// Requires storage.buckets.getIamPolicy on the bucket or
// storage.managedFolders.getIamPolicy IAM permission on the
// managed folder.
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
return c.internalClient.GetIamPolicy(ctx, req, opts...)
}
// SetIamPolicy updates an IAM policy for the specified bucket.
// SetIamPolicy updates an IAM policy for the specified bucket or managed folder.
// The resource field in the request should be
// projects/_/buckets/{bucket} for a bucket, or
// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
@@ -527,9 +605,8 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques
}
// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder
// to see which, if any, are held by the caller.
// The resource field in the request should be
// projects/_/buckets/{bucket} for a bucket,
// to see which, if any, are held by the caller. The resource field in the
// request should be projects/_/buckets/{bucket} for a bucket,
// projects/_/buckets/{bucket}/objects/{object} for an object, or
// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
// for a managed folder.
@@ -537,20 +614,42 @@ func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermi
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
// UpdateBucket updates a bucket. Equivalent to JSON APIs storage.buckets.patch method.
// UpdateBucket updates a bucket. Changes to the bucket are readable immediately after
// writing, but configuration changes might take time to propagate. This
// method supports patch semantics.
//
// IAM Permissions:
//
// Requires storage.buckets.update IAM permission on the bucket.
// Additionally, to enable specific bucket features, the authenticated user
// must have the following permissions:
//
// To set bucket IP filtering rules: storage.buckets.setIpFilter
//
// To update public access prevention policies or access control lists
// (ACLs): storage.buckets.setIamPolicy
func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
return c.internalClient.UpdateBucket(ctx, req, opts...)
}
// ComposeObject concatenates a list of existing objects into a new object in the same
// bucket.
// bucket. The existing source objects are unaffected by this operation.
//
// IAM Permissions:
//
// Requires the storage.objects.create and storage.objects.get IAM
// permissions to use this method. If the new composite object
// overwrites an existing object, the authenticated user must also have
// the storage.objects.delete permission. If the request body includes
// the retention property, the authenticated user must also have the
// storage.objects.setRetention IAM permission.
func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
return c.internalClient.ComposeObject(ctx, req, opts...)
}
// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
// is not enabled for the bucket, or if the generation parameter is used, or
// if soft delete (at https://cloud.google.com/storage/docs/soft-delete) is not
// if soft delete is not
// enabled for the bucket.
// When this API is used to delete an object from a bucket that has soft
// delete policy enabled, the object becomes soft deleted, and the
@@ -565,14 +664,56 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject
//
// IAM Permissions:
//
// Requires storage.objects.delete
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
// the bucket.
// Requires storage.objects.delete IAM permission on the bucket.
func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteObject(ctx, req, opts...)
}
// RestoreObject restores a soft-deleted object.
// RestoreObject restores a
// soft-deleted object.
// When a soft-deleted object is restored, a new copy of that object is
// created in the same bucket and inherits the same metadata as the
// soft-deleted object. The inherited metadata is the metadata that existed
// when the original object became soft deleted, with the following
// exceptions:
//
// The createTime of the new object is set to the time at which the
// soft-deleted object was restored.
//
// The softDeleteTime and hardDeleteTime values are cleared.
//
// A new generation is assigned and the metageneration is reset to 1.
//
// If the soft-deleted object was in a bucket that had Autoclass enabled,
// the new object is
// restored to Standard storage.
//
// The restored object inherits the buckets default object ACL, unless
// copySourceAcl is true.
//
// If a live object using the same name already exists in the bucket and
// becomes overwritten, the live object becomes a noncurrent object if Object
// Versioning is enabled on the bucket. If Object Versioning is not enabled,
// the live object becomes soft deleted.
//
// IAM Permissions:
//
// Requires the following IAM permissions to use this method:
//
// storage.objects.restore
//
// storage.objects.create
//
// storage.objects.delete (only required if overwriting an existing
// object)
//
// storage.objects.getIamPolicy (only required if projection is full
// and the relevant bucket
// has uniform bucket-level access disabled)
//
// storage.objects.setIamPolicy (only required if copySourceAcl is
// true and the relevant
// bucket has uniform bucket-level access disabled)
// RestoreObject restores a soft-deleted object. A new copy of the object is
// created in the same bucket, inheriting the metadata that existed when the
// original became soft deleted (new createTime, new generation, cleared
// soft/hard delete times).
//
// Requires the storage.objects.restore and storage.objects.create IAM
// permissions (plus storage.objects.delete when overwriting an existing
// object).
func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
	return c.internalClient.RestoreObject(ctx, req, opts...)
}
@@ -580,9 +721,9 @@ func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObject
// CancelResumableWrite cancels an in-progress resumable upload.
//
// Any attempts to write to the resumable upload after cancelling the upload
// fail.
//
// The behavior for currently in progress write operations is not guaranteed -
// The behavior for any in-progress write operations is not guaranteed;
// they could either complete before the cancellation or fail if the
// cancellation completes first.
func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
@@ -593,9 +734,8 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel
//
// IAM Permissions:
//
// Requires storage.objects.get
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
// the bucket. To return object ACLs, the authenticated user must also have
// Requires storage.objects.get IAM permission on the bucket.
// To return object ACLs, the authenticated user must also have
// the storage.objects.getIamPolicy permission.
// GetObject retrieves an object's metadata.
//
// Requires the storage.objects.get IAM permission on the bucket. To return
// object ACLs, the authenticated user must also have the
// storage.objects.getIamPolicy permission.
func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
	return c.internalClient.GetObject(ctx, req, opts...)
}
//
// IAM Permissions:
//
// Requires storage.objects.get
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
// the bucket.
// Requires storage.objects.get IAM permission on the bucket.
// ReadObject reads an object's data via a server-streaming RPC.
//
// Requires the storage.objects.get IAM permission on the bucket.
func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
	return c.internalClient.ReadObject(ctx, req, opts...)
}
// BidiReadObject reads an objects data.
//
// This is a bi-directional API with the added support for reading multiple
// ranges within one stream both within and across multiple messages.
// If the server encountered an error for any of the inputs, the stream will
// be closed with the relevant error code.
// Because the API allows for multiple outstanding requests, when the stream
// is closed the error response will contain a BidiReadObjectRangesError proto
// in the error extension describing the error for each outstanding read_id.
// This bi-directional API reads data from an object, allowing you to
// request multiple data ranges within a single stream, even across
// several messages. If an error occurs with any request, the stream
// closes with a relevant error code. Since you can have multiple
// outstanding requests, the error response includes a
// BidiReadObjectRangesError field detailing the specific error for
// each pending read_id.
//
// IAM Permissions:
//
// # Requires storage.objects.get
//
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
// the bucket.
//
// This API is currently in preview and is not yet available for general
// use.
// Requires storage.objects.get IAM permission on the bucket.
// BidiReadObject reads an object's data over a bi-directional stream,
// allowing multiple data ranges to be requested within a single stream,
// even across several messages. If an error occurs with any request, the
// stream closes with a relevant error code; the error response includes a
// BidiReadObjectRangesError detailing the error for each pending read_id.
//
// Requires the storage.objects.get IAM permission on the bucket.
func (c *Client) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) {
	return c.internalClient.BidiReadObject(ctx, opts...)
}
// UpdateObject updates an objects metadata.
// Equivalent to JSON APIs storage.objects.patch.
// Equivalent to JSON APIs storage.objects.patch method.
//
// IAM Permissions:
//
// Requires storage.objects.update IAM permission on the bucket.
// UpdateObject updates an object's metadata. Equivalent to the JSON API's
// storage.objects.patch method.
//
// Requires the storage.objects.update IAM permission on the bucket.
func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
	return c.internalClient.UpdateObject(ctx, req, opts...)
}
@@ -661,10 +797,10 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
// Check the result Status of the stream, to determine if writing can be
// resumed on this stream or must be restarted from scratch (by calling
// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED,
// INTERNAL, and UNAVAILABLE. For each case, the client should use binary
// exponential backoff before retrying. Additionally, writes can be
// resumed after RESOURCE_EXHAUSTED errors, but only after taking
// appropriate measures, which may include reducing aggregate send rate
// INTERNAL, and UNAVAILABLE. For each case, the client should use
// binary exponential backoff before retrying. Additionally, writes can
// be resumed after RESOURCE_EXHAUSTED errors, but only after taking
// appropriate measures, which might include reducing aggregate send rate
// across clients and/or requesting a quota increase for your project.
//
// If the call to WriteObject returns ABORTED, that indicates
@@ -672,43 +808,43 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
// multiple racing clients or by a single client where the previous
// request was timed out on the client side but nonetheless reached the
// server. In this case the client should take steps to prevent further
// concurrent writes (e.g., increase the timeouts, stop using more than
// one process to perform the upload, etc.), and then should follow the
// steps below for resuming the upload.
// concurrent writes. For example, increase the timeouts and stop using
// more than one process to perform the upload. Follow the steps below for
// resuming the upload.
//
// For resumable errors, the client should call QueryWriteStatus() and
// then continue writing from the returned persisted_size. This may be
// then continue writing from the returned persisted_size. This might be
// less than the amount of data the client previously sent. Note also that
// it is acceptable to send data starting at an offset earlier than the
// returned persisted_size; in this case, the service will skip data at
// returned persisted_size; in this case, the service skips data at
// offsets that were already persisted (without checking that it matches
// the previously written data), and write only the data starting from the
// persisted offset. Even though the data isnt written, it may still
// persisted offset. Even though the data isnt written, it might still
// incur a performance cost over resuming at the correct write offset.
// This behavior can make client-side handling simpler in some cases.
//
// Clients must only send data that is a multiple of 256 KiB per message,
// unless the object is being finished with finish_write set to true.
//
// The service will not view the object as complete until the client has
// The service does not view the object as complete until the client has
// sent a WriteObjectRequest with finish_write set to true. Sending any
// requests on a stream after sending a request with finish_write set to
// true will cause an error. The client should check the response it
// receives to determine how much data the service was able to commit and
// true causes an error. The client must check the response it
// receives to determine how much data the service is able to commit and
// whether the service views the object as complete.
//
// Attempting to resume an already finalized object will result in an OK
// Attempting to resume an already finalized object results in an OK
// status, with a WriteObjectResponse containing the finalized objects
// metadata.
//
// Alternatively, the BidiWriteObject operation may be used to write an
// Alternatively, you can use the BidiWriteObject operation to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
//
// IAM Permissions:
//
// Requires storage.objects.create
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
// IAM permission on
// the bucket.
// WriteObject stores a new object via a client-streaming RPC. The object is
// not viewed as complete until a WriteObjectRequest with finish_write set to
// true has been sent; resumable errors (DEADLINE_EXCEEDED, INTERNAL,
// UNAVAILABLE) can be recovered by calling QueryWriteStatus and continuing
// from the returned persisted_size.
//
// Requires the storage.objects.create IAM permission on the bucket.
func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
	return c.internalClient.WriteObject(ctx, opts...)
}
// manual flushing of persisted state, and the ability to determine current
// persisted size without closing the stream.
//
// The client may specify one or both of the state_lookup and flush fields
// in each BidiWriteObjectRequest. If flush is specified, the data written
// so far will be persisted to storage. If state_lookup is specified, the
// service will respond with a BidiWriteObjectResponse that contains the
// The client might specify one or both of the state_lookup and flush
// fields in each BidiWriteObjectRequest. If flush is specified, the data
// written so far is persisted to storage. If state_lookup is specified, the
// service responds with a BidiWriteObjectResponse that contains the
// persisted size. If both flush and state_lookup are specified, the flush
// will always occur before a state_lookup, so that both may be set in the
// same request and the returned state will be the state of the object
// post-flush. When the stream is closed, a BidiWriteObjectResponse will
// always be sent to the client, regardless of the value of state_lookup.
// always occurs before a state_lookup, so that both might be set in the
// same request and the returned state is the state of the object
// post-flush. When the stream is closed, a BidiWriteObjectResponse
// is always sent to the client, regardless of the value of state_lookup.
// BidiWriteObject writes an object over a bi-directional stream, with
// support for manual flushing of persisted state (flush) and querying the
// current persisted size without closing the stream (state_lookup). When
// both are set, the flush always occurs before the state_lookup. A
// BidiWriteObjectResponse is always sent when the stream closes.
func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) {
	return c.internalClient.BidiWriteObject(ctx, opts...)
}
@@ -738,8 +874,8 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s
// IAM Permissions:
//
// The authenticated user requires storage.objects.list
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions)
// to use this method. To return object ACLs, the authenticated user must also
// IAM permission to use this method. To return object ACLs, the
// authenticated user must also
// have the storage.objects.getIamPolicy permission.
// ListObjects returns an iterator over the objects matching the request.
//
// Requires the storage.objects.list IAM permission. To return object ACLs,
// the authenticated user must also have the storage.objects.getIamPolicy
// permission.
func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
	return c.internalClient.ListObjects(ctx, req, opts...)
}
// StartResumableWrite starts a resumable write operation. This
// method is part of the Resumable
// upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature.
// upload feature.
// This allows you to upload large objects in multiple chunks, which is more
// resilient to network interruptions than a single upload. The validity
// duration of the write operation, and the consequences of it becoming
@@ -761,16 +897,14 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject
//
// IAM Permissions:
//
// Requires storage.objects.create
// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
// the bucket.
// Requires storage.objects.create IAM permission on the bucket.
// StartResumableWrite starts a resumable write operation (part of the
// resumable upload feature), allowing large objects to be uploaded in
// multiple chunks that are resilient to network interruptions.
//
// Requires the storage.objects.create IAM permission on the bucket.
func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
	return c.internalClient.StartResumableWrite(ctx, req, opts...)
}
// QueryWriteStatus determines the persisted_size of an object that is being written. This
// method is part of the resumable
// upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature.
// upload feature.
// The returned value is the size of the object that has been persisted so
// far. The value can be used as the write_offset for the next Write()
// call.
@@ -790,6 +924,21 @@ func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWrite
}
// MoveObject moves the source object to the destination object in the same bucket.
// This operation moves a source object to a destination object in the
// same bucket by renaming the object. The move itself is an atomic
// transaction, ensuring all steps either complete successfully or no
// changes are made.
//
// IAM Permissions:
//
// Requires the following IAM permissions to use this method:
//
// storage.objects.move
//
// storage.objects.create
//
// storage.objects.delete (only required if overwriting an existing
// object)
// MoveObject moves the source object to the destination object within the
// same bucket by renaming it. The move is an atomic transaction: all steps
// either complete successfully or no changes are made.
//
// Requires the storage.objects.move and storage.objects.create IAM
// permissions (plus storage.objects.delete when overwriting an existing
// object).
func (c *Client) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
	return c.internalClient.MoveObject(ctx, req, opts...)
}
@@ -818,7 +967,8 @@ type gRPCClient struct {
//
// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through
// the abstractions of buckets and objects. For a description of these
// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs).
// abstractions please see Cloud Storage
// documentation (at https://cloud.google.com/storage/docs).
//
// Resources are named as follows:
//
@@ -826,18 +976,14 @@ type gRPCClient struct {
// using strings like projects/123456 or projects/my-string-id.
//
// Buckets are named using string names of the form:
// projects/{project}/buckets/{bucket}
// For globally unique buckets, _ may be substituted for the project.
// projects/{project}/buckets/{bucket}.
// For globally unique buckets, _ might be substituted for the project.
//
// Objects are uniquely identified by their name along with the name of the
// bucket they belong to, as separate strings in this API. For example:
//
// ReadObjectRequest {
// bucket: projects/_/buckets/my-bucket
// object: my-object
// }
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
clientOpts := defaultGRPCClientOptions()
if newClientHook != nil {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
// Copyright 2022 Google LLC
// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapicgen. DO NOT EDIT.
package internal
// Version is the current tagged release of the library.
const Version = "1.57.0"
const Version = "1.59.1"

364
vendor/cloud.google.com/go/storage/pcu.go generated vendored Normal file
View File

@@ -0,0 +1,364 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"maps"
"math/rand"
"path"
"runtime"
"strconv"
"sync"
"time"
gax "github.com/googleapis/gax-go/v2"
)
// parallelUploadConfig holds configuration for Parallel Composite Uploads.
// Setting this config and EnableParallelUpload flag on Writer enables PCU.
//
// **Note:** This feature is currently experimental and its API surface may change
// in future releases. It is not yet recommended for production use.
type parallelUploadConfig struct {
	// minSize is the minimum size of an object in bytes to use PCU.
	// If an object's size is less than this value, a simple upload is performed.
	// If this is not set, a default of 64 MiB will be used.
	// To enable PCU for all uploads regardless of size, set this to 0.
	// A pointer is used so that an explicit 0 can be distinguished from unset.
	minSize *int64
	// partSize is the size of each part to be uploaded in parallel.
	// Defaults to 16MiB. Must be a multiple of 256KiB.
	partSize int
	// numWorkers is the number of goroutines to use for uploading parts in parallel.
	// Defaults to a dynamic value based on the number of CPUs (min(4 + NumCPU/2, 16)).
	numWorkers int
	// bufferPoolSize is the number of PartSize buffers to pool.
	// Defaults to NumWorkers + 1.
	bufferPoolSize int
	// tmpObjectPrefix is the prefix for temporary object names.
	// Defaults to "gcs-go-sdk-pcu-tmp/".
	tmpObjectPrefix string
	// retryOptions defines the retry behavior for uploading parts.
	// Defaults to a sensible policy for part uploads (e.g., max 3 retries).
	retryOptions []RetryOption
	// cleanupStrategy dictates how temporary parts are cleaned up.
	// Defaults to CleanupAlways.
	cleanupStrategy partCleanupStrategy
	// namingStrategy provides a strategy for naming temporary part objects.
	// Defaults to a strategy that includes a random element to avoid hotspotting.
	namingStrategy partNamingStrategy
	// metadataDecorator allows adding custom metadata to temporary part objects.
	// Optional; nil means no decoration.
	metadataDecorator partMetadataDecorator
}
// partCleanupStrategy defines when temporary objects are deleted.
type partCleanupStrategy int

const (
	// cleanupAlways cleans up temporary parts on both success and failure.
	cleanupAlways partCleanupStrategy = iota
	// cleanupOnSuccess cleans up temporary parts only on successful final composition.
	cleanupOnSuccess
	// cleanupNever means the application is responsible for cleaning up temporary parts.
	cleanupNever
)

// String returns a human-readable name for the strategy, or a
// "PartCleanupStrategy(n)" placeholder for unrecognized values.
func (s partCleanupStrategy) String() string {
	switch s {
	case cleanupNever:
		return "never"
	case cleanupOnSuccess:
		return "on_success"
	case cleanupAlways:
		return "always"
	}
	return fmt.Sprintf("PartCleanupStrategy(%d)", s)
}
// partNamingStrategy interface for generating temporary object names.
type partNamingStrategy interface {
	newPartName(bucket, prefix, finalName string, partNumber int) string
}

// defaultNamingStrategy provides a default implementation for naming temporary parts.
type defaultNamingStrategy struct{}

// newPartName creates a unique name for a temporary part. The random hex
// component comes first so part names do not share a long ordered prefix,
// which avoids hotspotting.
func (d *defaultNamingStrategy) newPartName(bucket, prefix, finalName string, partNumber int) string {
	leaf := fmt.Sprintf("%x-%s-part-%d", rand.Uint64(), finalName, partNumber)
	return path.Join(prefix, leaf)
}
// partMetadataDecorator interface for modifying temporary object metadata.
// Implementations receive the part's ObjectAttrs before the part is uploaded
// and may mutate it in place (e.g., to add custom metadata entries).
type partMetadataDecorator interface {
	Decorate(attrs *ObjectAttrs)
}
const (
	// defaultPartSize is the default size of each uploaded part.
	defaultPartSize = 16 * 1024 * 1024 // 16 MiB
	// defaultMinSize is the default object-size threshold below which a
	// simple (non-PCU) upload is performed.
	defaultMinSize = 64 * 1024 * 1024 // 64 MiB
	// baseWorkers and maxWorkers bound the CPU-based worker-count
	// heuristic applied in parallelUploadConfig.defaults.
	baseWorkers = 4
	maxWorkers = 16
	// defaultTmpObjectPrefix is the default name prefix for temporary part objects.
	defaultTmpObjectPrefix = "gcs-go-sdk-pcu-tmp/"
	// maxComposeComponents mirrors the GCS compose limit of 32 source objects.
	maxComposeComponents = 32
	// Default retry policy for part uploads.
	defaultMaxRetries = 3
	defaultBaseDelay = 100 * time.Millisecond
	defaultMaxDelay = 5 * time.Second
	// Metadata keys stamped on each temporary part object, recording its
	// part number and the final object it belongs to.
	pcuPartNumberMetadataKey = "x-goog-meta-gcs-pcu-part-number"
	pcuFinalObjectMetadataKey = "x-goog-meta-gcs-pcu-final-object"
)
// defaults fills in every zero-valued field of the config with its
// documented default. It leaves fields that are already set untouched, so
// it is safe to call on a partially populated config.
func (c *parallelUploadConfig) defaults() {
	if c.minSize == nil {
		v := int64(defaultMinSize)
		c.minSize = &v
	}
	if c.partSize == 0 {
		c.partSize = defaultPartSize
	}
	if c.numWorkers == 0 {
		// Heuristic: start with 4 workers, add 1 for every 2 CPUs, and cap
		// at 16, balancing parallelism against resource contention.
		c.numWorkers = min(baseWorkers+runtime.NumCPU()/2, maxWorkers)
	}
	if c.bufferPoolSize == 0 {
		c.bufferPoolSize = c.numWorkers + 1
	}
	if c.tmpObjectPrefix == "" {
		c.tmpObjectPrefix = defaultTmpObjectPrefix
	}
	if c.retryOptions == nil {
		backoff := gax.Backoff{
			Initial: defaultBaseDelay,
			Max:     defaultMaxDelay,
		}
		c.retryOptions = []RetryOption{
			WithMaxAttempts(defaultMaxRetries),
			WithBackoff(backoff),
		}
	}
	if c.cleanupStrategy == 0 {
		// cleanupAlways is the zero value, so this assignment is a no-op
		// kept for explicitness.
		c.cleanupStrategy = cleanupAlways
	}
	if c.namingStrategy == nil {
		c.namingStrategy = &defaultNamingStrategy{}
	}
}
// pcuState holds the per-upload state of a parallel composite upload: the
// PCU context, buffer pool, worker channels, accumulated part handles, and
// error tracking. Mutable maps and slices are guarded by mu.
type pcuState struct {
	ctx context.Context
	cancel context.CancelFunc
	w *Writer
	config *parallelUploadConfig
	// mu guards partMap, intermediateMap, failedDeletes, firstErr, and errors.
	mu sync.Mutex
	// Handles to the uploaded temporary parts, keyed by partNumber.
	partMap map[int]*ObjectHandle
	// Handles to intermediate composite objects, keyed by their object name.
	intermediateMap map[string]*ObjectHandle
	// failedDeletes presumably accumulates handles whose cleanup deletion
	// failed — not used in this section; confirm at call sites.
	failedDeletes []*ObjectHandle
	// errOnce ensures firstErr is set and cancel() is invoked exactly once.
	errOnce sync.Once
	firstErr error
	errors []error
	// partNum is presumably the counter for the part currently being
	// filled — not used in this section; confirm at call sites.
	partNum int
	// currentBuffer is the buffer being filled; bytesBuffered counts the
	// valid bytes written into it so far.
	currentBuffer []byte
	bytesBuffered int64
	// bufferCh is the pool of reusable part-sized buffers.
	bufferCh chan []byte
	// uploadCh carries pending part-upload tasks to the workers.
	uploadCh chan uploadTask
	// resultCh carries upload outcomes from workers to the result collector.
	resultCh chan uploadResult
	workerWG sync.WaitGroup
	collectorWG sync.WaitGroup
	started bool
	// Function to upload a part; can be overridden for testing.
	uploadPartFn func(s *pcuState, task uploadTask) (*ObjectHandle, *ObjectAttrs, error)
}
// uploadTask is one unit of work for a worker: a single part's number,
// the pool-owned buffer holding its bytes, and the count of valid bytes.
type uploadTask struct {
	partNumber int
	// buffer is borrowed from the buffer pool; only the first size bytes
	// are valid. Workers return it to the pool after the upload attempt.
	buffer []byte
	size int64
}

// uploadResult reports the outcome of one part upload to the collector.
// On success, obj and handle are set; on failure, err is set.
type uploadResult struct {
	partNumber int
	obj *ObjectAttrs
	handle *ObjectHandle
	err error
}
// initPCU initializes parallel-composite-upload state for the Writer: it
// applies config defaults, pre-fills the buffer pool, starts the upload
// workers and the result collector, and blocks until the first buffer is
// acquired or ctx is canceled.
func (w *Writer) initPCU(ctx context.Context) error {
	// TODO: Check if PCU is enabled on the Writer.
	// TODO: Get the config from the Writer.
	cfg := &parallelUploadConfig{}
	cfg.defaults()
	// Ensure PartSize is a multiple of googleapi.MinUploadChunkSize.
	cfg.partSize = gRPCChunkSize(cfg.partSize)
	pCtx, cancel := context.WithCancel(ctx)
	state := &pcuState{
		ctx: pCtx,
		cancel: cancel,
		w: w,
		config: cfg,
		bufferCh: make(chan []byte, cfg.bufferPoolSize),
		uploadCh: make(chan uploadTask),
		resultCh: make(chan uploadResult),
		partMap: make(map[int]*ObjectHandle),
		intermediateMap: make(map[string]*ObjectHandle),
		uploadPartFn: (*pcuState).uploadPart,
	}
	// TODO: Assign the state to the Writer
	// Pre-fill the pool so part buffers are allocated once up front rather
	// than per part.
	for i := 0; i < cfg.bufferPoolSize; i++ {
		state.bufferCh <- make([]byte, cfg.partSize)
	}
	state.workerWG.Add(cfg.numWorkers)
	for i := 0; i < cfg.numWorkers; i++ {
		go state.worker()
	}
	state.collectorWG.Add(1)
	go state.resultCollector()
	// Handle to get the first buffer.
	// NOTE(review): on the ctx.Done() path the started workers and the
	// collector keep running (resultCh is never closed here); confirm the
	// caller — not shown in this section — performs shutdown in that case.
	select {
	case <-state.ctx.Done():
		return state.ctx.Err()
	case state.currentBuffer = <-state.bufferCh:
		state.bytesBuffered = 0
	}
	state.started = true
	return nil
}
// worker processes upload tasks from upload channel, reporting results
// and returning buffers to the pool.
// It exits when the PCU context is canceled or uploadCh is closed.
func (s *pcuState) worker() {
	defer s.workerWG.Done()
	for {
		select {
		case <-s.ctx.Done():
			return
		case task, ok := <-s.uploadCh:
			if !ok {
				return
			}
			// Run each task inside a closure so the deferred buffer return
			// fires per task rather than at worker exit.
			func(t uploadTask) {
				// Ensure the buffer is returned to the pool.
				defer func() { s.bufferCh <- t.buffer }()
				// This handles the case where cancellation happens before we begin upload.
				select {
				case <-s.ctx.Done():
					s.resultCh <- uploadResult{partNumber: t.partNumber, err: s.ctx.Err()}
					return
				default:
				}
				handle, attrs, err := s.uploadPartFn(s, t)
				// Always send a result to the collector.
				s.resultCh <- uploadResult{partNumber: t.partNumber, obj: attrs, handle: handle, err: err}
			}(task)
		}
	}
}
// uploadPart uploads one part as its own temporary object under a generated
// name and returns the part's handle and resulting attributes. Parts are
// written in a single shot (ChunkSize 0).
// TODO: add retry logic.
func (s *pcuState) uploadPart(task uploadTask) (*ObjectHandle, *ObjectAttrs, error) {
	partName := s.config.namingStrategy.newPartName(s.w.o.bucket, s.config.tmpObjectPrefix, s.w.o.object, task.partNumber)
	partHandle := s.w.o.c.Bucket(s.w.o.bucket).Object(partName)
	pw := partHandle.NewWriter(s.ctx)
	pw.ObjectAttrs.Name = partName
	pw.ObjectAttrs.Size = task.size
	pw.SendCRC32C = s.w.SendCRC32C
	pw.ChunkSize = 0 // Force single-shot upload for parts.
	// Clear fields not applicable to parts or that are set by compose.
	pw.ObjectAttrs.CRC32C = 0
	pw.ObjectAttrs.MD5 = nil
	setPartMetadata(pw, s, task)
	// Only the first task.size bytes of the pooled buffer are valid.
	_, err := pw.Write(task.buffer[:task.size])
	if err != nil {
		// Abort the writer so the failed part is not committed.
		pw.CloseWithError(err)
		return nil, nil, fmt.Errorf("failed to write part %d: %w", task.partNumber, err)
	}
	if err := pw.Close(); err != nil {
		return nil, nil, fmt.Errorf("failed to close part %d: %w", task.partNumber, err)
	}
	return partHandle, pw.Attrs(), nil
}
// setPartMetadata copies the final object's user metadata onto the part
// writer, tags the part with its part number and the final object's name,
// and finally applies the optional user-supplied metadata decorator.
func setPartMetadata(pw *Writer, s *pcuState, task uploadTask) {
	// maps.Clone returns nil for a nil map, in which case a fresh map is
	// created; the final object's metadata itself is never mutated.
	md := maps.Clone(s.w.ObjectAttrs.Metadata)
	if md == nil {
		md = make(map[string]string)
	}
	md[pcuPartNumberMetadataKey] = strconv.Itoa(task.partNumber)
	md[pcuFinalObjectMetadataKey] = s.w.o.object
	pw.ObjectAttrs.Metadata = md
	if s.config.metadataDecorator != nil {
		s.config.metadataDecorator.Decorate(&pw.ObjectAttrs)
	}
}
// resultCollector drains resultCh until it is closed, recording failures
// via setError and storing successful part handles in partMap under mu.
func (s *pcuState) resultCollector() {
	defer s.collectorWG.Done()
	for res := range s.resultCh {
		if res.err != nil {
			s.setError(res.err)
			continue
		}
		if res.handle == nil {
			continue
		}
		s.mu.Lock()
		s.partMap[res.partNumber] = res.handle
		s.mu.Unlock()
	}
}
// setError records an upload error. The first recorded error also cancels
// the PCU context so outstanding work stops early. Context-cancellation
// errors are ignored, since they are a side effect of that cancel.
func (s *pcuState) setError(err error) {
	if err == nil {
		return
	}
	if errors.Is(err, context.Canceled) {
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.errors = append(s.errors, err)
	s.errOnce.Do(func() {
		s.firstErr = err
		// Cancel context on first error.
		s.cancel()
	})
}

View File

@@ -23,8 +23,6 @@ import (
"strings"
"sync"
"time"
"cloud.google.com/go/internal/trace"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
@@ -116,7 +114,8 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
// This span covers the life of the reader. It is closed via the context
// in Reader.Close.
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Reader")
ctx, _ = startSpan(ctx, "Object.Reader")
defer func() { endSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
@@ -150,8 +149,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
// span now if there is an error.
if err == nil {
r.ctx = ctx
} else {
trace.EndSpan(ctx, err)
}
return r, err
@@ -164,10 +161,19 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
// preview; please contact your account manager if interested. The option
// [experimental.WithGRPCBidiReads] or [experimental.WithZonalBucketAPIs]
// must be selected in order to use this API.
// NewMultiRangeDownloader creates a multi-range reader for an object.
// Must be called on a gRPC client created using [NewGRPCClient].
func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiRangeDownloader, err error) {
// This span covers the life of the reader. It is closed via the context
// in Reader.Close.
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.MultiRangeDownloader")
// This span covers the life of the MRD. It is closed via the context
// in MultiRangeDownloader.Close.
var spanCtx context.Context
spanCtx, _ = startSpan(ctx, "Object.MultiRangeDownloader")
defer func() {
if err != nil {
endSpan(spanCtx, err)
}
}()
if err := o.validate(); err != nil {
return nil, err
@@ -189,17 +195,8 @@ func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiR
handle: &o.readHandle,
}
r, err := o.c.tc.NewMultiRangeDownloader(ctx, params, opts...)
// Pass the context so that the span can be closed in MultiRangeDownloader.Close(), or close the
// span now if there is an error.
if err == nil {
r.ctx = ctx
} else {
trace.EndSpan(ctx, err)
}
return r, err
// This call will return the *MultiRangeDownloader with the .impl field set.
return o.c.tc.NewMultiRangeDownloader(spanCtx, params, opts...)
}
// decompressiveTranscoding returns true if the request was served decompressed
@@ -285,7 +282,7 @@ type Reader struct {
// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
err := r.reader.Close()
trace.EndSpan(r.ctx, err)
endSpan(r.ctx, err)
return err
}
@@ -391,17 +388,9 @@ func (r *Reader) ReadHandle() ReadHandle {
//
// This API is currently in preview and is not yet available for general use.
type MultiRangeDownloader struct {
Attrs ReaderObjectAttrs
reader multiRangeDownloader
ctx context.Context
}
type multiRangeDownloader interface {
add(output io.Writer, offset, limit int64, callback func(int64, int64, error))
wait()
close() error
getHandle() []byte
error() error
// Attrs is populated when NewMultiRangeDownloader returns.
Attrs ReaderObjectAttrs
impl internalMultiRangeDownloader
}
// Add adds a new range to MultiRangeDownloader.
@@ -411,8 +400,11 @@ type multiRangeDownloader interface {
//
// A negative offset value will be interpreted as the number of bytes from the
// end of the object to be returned. Requesting a negative offset with magnitude
// larger than the size of the object will return the entire object. An offset
// larger than the size of the object will result in an OutOfRange error.
// larger than the size of the object will return the entire object.
//
// An offset larger than the size of the object returns an OutOfRange error via
// the callback and enters a permanent error state. All subsequent calls to Close
// will return this same error.
//
// A limit of zero indicates that there is no limit, and a negative limit will
// cause an error.
@@ -425,7 +417,7 @@ type multiRangeDownloader interface {
// of the read. Note that the length of the data read may be less than the
// requested length if the end of the object is reached.
func (mrd *MultiRangeDownloader) Add(output io.Writer, offset, length int64, callback func(int64, int64, error)) {
mrd.reader.add(output, offset, length, callback)
mrd.impl.add(output, offset, length, callback)
}
// Close the MultiRangeDownloader. It must be called when done reading.
@@ -434,9 +426,11 @@ func (mrd *MultiRangeDownloader) Add(output io.Writer, offset, length int64, cal
// This will immediately close the stream and can result in a
// "stream closed early" error if a response for a range is still not processed.
// Call [MultiRangeDownloader.Wait] to avoid this error.
//
// If the downloader is in a permanent error state, this will return an error.
func (mrd *MultiRangeDownloader) Close() error {
err := mrd.reader.close()
trace.EndSpan(mrd.ctx, err)
err := mrd.impl.close(nil)
endSpan(mrd.impl.getSpanCtx(), err)
return err
}
@@ -444,18 +438,18 @@ func (mrd *MultiRangeDownloader) Close() error {
// Adding new ranges after this has been called will cause an error.
// Wait will wait for all callbacks to finish.
func (mrd *MultiRangeDownloader) Wait() {
mrd.reader.wait()
mrd.impl.wait()
}
// GetHandle returns the read handle. This can be used to further speed up the
// follow up read if the same object is read through a different stream.
func (mrd *MultiRangeDownloader) GetHandle() []byte {
return mrd.reader.getHandle()
return mrd.impl.getHandle() // TODO: Consider plumbing context from caller
}
// Error returns an error if the MultiRangeDownloader is in a permanent failure
// state. It returns a nil error if the MultiRangeDownloader is open and can be
// used.
func (mrd *MultiRangeDownloader) Error() error {
return mrd.reader.error()
return mrd.impl.getPermanentError()
}

View File

@@ -40,7 +40,6 @@ import (
"cloud.google.com/go/auth"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/storage/internal"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/googleapis/gax-go/v2"
@@ -1120,6 +1119,13 @@ type ObjectAttrsToUpdate struct {
// extending the RetainUntil time on the object retention must be done
// on an ObjectHandle with OverrideUnlockedRetention set to true.
Retention *ObjectRetention
// Contexts allows adding, modifying, or deleting individual object contexts.
// To add or modify a context, set the value field in ObjectCustomContextPayload.
// To delete a context, set the Delete field in ObjectCustomContextPayload to true.
// To remove all contexts, pass Custom as an empty map in Contexts. Passing a
// nil Custom map is a no-op.
Contexts *ObjectContexts
}
// Delete deletes the single specified object.
@@ -1248,7 +1254,7 @@ type MoveObjectDestination struct {
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer")
ctx, _ = startSpan(ctx, "Object.Writer")
return &Writer{
ctx: ctx,
o: o,
@@ -1284,7 +1290,7 @@ func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
// objects which were created append semantics and not finalized.
// This feature is in preview and is not yet available for general use.
func (o *ObjectHandle) NewWriterFromAppendableObject(ctx context.Context, opts *AppendableWriterOpts) (*Writer, int64, error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer")
ctx, _ = startSpan(ctx, "Object.WriterFromAppendableObject")
if o.gen < 0 {
return nil, 0, errors.New("storage: ObjectHandle.Generation must be set to use NewWriterFromAppendableObject")
}
@@ -1412,6 +1418,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
Metadata: o.Metadata,
CustomTime: ct,
Retention: o.Retention.toRawObjectRetention(),
Contexts: toRawObjectContexts(o.Contexts),
}
}
@@ -1446,6 +1453,7 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
KmsKey: o.KMSKeyName,
Generation: o.Generation,
Size: o.Size,
Contexts: toProtoObjectContexts(o.Contexts),
}
}
@@ -1489,6 +1497,10 @@ func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storage
o.Metadata = uattrs.Metadata
if uattrs.Contexts != nil {
o.Contexts = toProtoObjectContexts(uattrs.Contexts)
}
return o
}
@@ -1671,6 +1683,18 @@ type ObjectAttrs struct {
// ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
// This field is read-only.
HardDeleteTime time.Time
// Contexts store custom key-value metadata that the user could
// annotate object with. These key-value pairs can be used to filter objects
// during list calls. See https://cloud.google.com/storage/docs/object-contexts
// for more details.
Contexts *ObjectContexts
}
// isZero reports whether the ObjectAttrs struct is empty (i.e. all the
// fields are their zero value).
func (o *ObjectAttrs) isZero() bool {
return reflect.DeepEqual(o, &ObjectAttrs{})
}
// ObjectRetention contains the retention configuration for this object.
@@ -1778,6 +1802,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
Retention: toObjectRetention(o.Retention),
SoftDeleteTime: convertTime(o.SoftDeleteTime),
HardDeleteTime: convertTime(o.HardDeleteTime),
Contexts: toObjectContexts(o.Contexts),
}
}
@@ -1816,6 +1841,7 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
ComponentCount: int64(o.ComponentCount),
SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()),
HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()),
Contexts: toObjectContextsFromProto(o.GetContexts()),
}
}
@@ -1928,6 +1954,11 @@ type Query struct {
// If true, only objects that have been soft-deleted will be listed.
// By default, soft-deleted objects are not listed.
SoftDeleted bool
// Filters objects based on object attributes like custom contexts.
// See https://docs.cloud.google.com/storage/docs/listing-objects#filter-by-object-contexts
// for more details.
Filter string
}
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -1966,6 +1997,7 @@ var attrToFieldMap = map[string]string{
"Retention": "retention",
"HardDeleteTime": "hardDeleteTime",
"SoftDeleteTime": "softDeleteTime",
"Contexts": "contexts",
}
// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -2001,6 +2033,7 @@ var attrToProtoFieldMap = map[string]string{
"ComponentCount": "component_count",
"HardDeleteTime": "hard_delete_time",
"SoftDeleteTime": "soft_delete_time",
"Contexts": "contexts",
// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
// "MediaLink": "mediaLink",
// TODO: add object retention - b/308194853

View File

@@ -22,8 +22,6 @@ import (
"sync"
"time"
"unicode/utf8"
"cloud.google.com/go/internal/trace"
)
// Interface internalWriter wraps low-level implementations which may vary
@@ -43,17 +41,40 @@ type Writer struct {
// attributes are ignored.
ObjectAttrs
// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted.
// If a CRC32C is sent, and the data written does not match the checksum,
// the write will be rejected.
// SendCRC32C specifies whether to transmit a CRC32C checksum. When this is
// true and the Writer's CRC32C field is set, that checksum is sent to GCS.
// If the data written does not match the checksum, the write is rejected.
// It is necessary to set this field to true in addition to setting the
// Writer's CRC32C field because zero is a valid CRC.
//
// Note: SendCRC32C must be set to true BEFORE the first call to
// Writer.Write() in order to send the checksum. If it is set after that
// point, the checksum will be ignored.
// When using gRPC, the client automatically calculates and sends checksums
// per-chunk and for the full object. However, a user-provided checksum takes
// precedence over the auto-calculated checksum for the full object.
// To disable auto checksum behavior, see DisableAutoChecksum.
//
// Note: SendCRC32C must be set before the first call to Writer.Write().
SendCRC32C bool
// DisableAutoChecksum disables automatic CRC32C checksum calculation and
// validation in gRPC Writer. By default when using gRPC, the Writer
// automatically performs checksum validation for both individual chunks and
// the entire object. Setting this to true disables this behavior. This flag
// is ignored when not using gRPC.
//
// Disabling automatic checksumming does not prevent a user-provided checksum
// from being sent. If SendCRC32C is true and the Writer's CRC32C field is
// populated, that checksum will still be sent to GCS for validation.
//
// Automatic CRC32C checksum calculation introduces increased CPU overhead
// because of checksum computation in gRPC writes. Use this field to disable
// it if needed.
//
// Note: DisableAutoChecksum must be set before the first call to
// Writer.Write(). Automatic checksumming is not enabled for writes
// using the HTTP client or for full object checksums for unfinalized writes to
// appendable objects in gRPC.
DisableAutoChecksum bool
// ChunkSize controls the maximum number of bytes of the object that the
// Writer will attempt to send to the server in a single request. Objects
// smaller than the size will be sent in a single request, while larger
@@ -261,7 +282,7 @@ func (w *Writer) Close() error {
w.closed = true
w.mu.Lock()
defer w.mu.Unlock()
trace.EndSpan(w.ctx, w.err)
endSpan(w.ctx, w.err)
return w.err
}
@@ -274,6 +295,8 @@ func (w *Writer) openWriter() (err error) {
}
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist)
// Append operations that takeover a specific generation are idempotent.
isIdempotent = isIdempotent || w.Append && w.o.gen > 0
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
params := &openWriterParams{
ctx: w.ctx,
@@ -286,6 +309,7 @@ func (w *Writer) openWriter() (err error) {
appendGen: w.o.gen,
encryptionKey: w.o.encryptionKey,
sendCRC32C: w.SendCRC32C,
disableAutoChecksum: w.DisableAutoChecksum,
append: w.Append,
finalizeOnClose: w.FinalizeOnClose,
donec: w.donec,

View File

@@ -1,5 +1,27 @@
# Release History
## 1.21.0 (2026-01-12)
### Features Added
* Added `runtime/datetime` package which provides specialized time type wrappers for serializing and deserializing
time values in various formats used by Azure services.
### Other Changes
* Aligned `cloud.AzureGovernment` and `cloud.AzureChina` audience values with Azure CLI
## 1.20.0 (2025-11-06)
### Features Added
* Added `runtime.FetcherForNextLinkOptions.HTTPVerb` to specify the HTTP verb when fetching the next page via next link. Defaults to `http.MethodGet`.
### Bugs Fixed
* Fixed potential panic when decoding base64 strings.
* Fixed an issue in resource identifier parsing which prevented it from returning an error for malformed resource IDs.
## 1.19.1 (2025-09-11)
### Bugs Fixed

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -217,6 +214,7 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err
func splitStringAndOmitEmpty(v, sep string) []string {
r := make([]string, 0)
for _, s := range strings.Split(v, sep) {
s = strings.TrimSpace(s)
if len(s) == 0 {
continue
}

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -10,11 +7,11 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
func init() {
cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
Audience: "https://management.core.chinacloudapi.cn",
Audience: "https://management.core.chinacloudapi.cn/",
Endpoint: "https://management.chinacloudapi.cn",
}
cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
Audience: "https://management.core.usgovcloudapi.net",
Audience: "https://management.core.usgovcloudapi.net/",
Endpoint: "https://management.usgovcloudapi.net",
}
cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -92,7 +89,7 @@ func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
return nil
}
payload := string(s)
if payload[0] == '"' {
if len(payload) >= 2 && payload[0] == '"' && payload[len(payload)-1] == '"' {
// remove surrounding quotes
payload = payload[1 : len(payload)-1]
}

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -115,7 +112,7 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Reque
if req.URL.Host == "" {
return nil, errors.New("no Host in request URL")
}
if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
}
// populate values so that the same instance is propagated across policies

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -128,10 +125,11 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
}
var req *exported.Request
var err error
if p.Method == http.MethodPatch || p.Method == http.MethodPut {
switch p.Method {
case http.MethodPatch, http.MethodPut:
// for PATCH and PUT, the final GET is on the original resource URL
req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
} else if p.Method == http.MethodPost {
case http.MethodPost:
if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
// no final GET required
} else if p.FinalState == pollers.FinalStateViaOriginalURI {

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -165,7 +162,10 @@ func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out
return nil
}
defer resp.Body.Close()
defer func() {
_ = resp.Body.Close()
}()
if !poller.StatusCodeValid(resp) || failed {
// the LRO failed. unmarshall the error and update state
return azexported.NewResponseError(resp)

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -40,5 +37,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.19.1"
Version = "v1.21.0"
)

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -99,6 +96,11 @@ type FetcherForNextLinkOptions struct {
// StatusCodes contains additional HTTP status codes indicating success.
// The default value is http.StatusOK.
StatusCodes []int
// HTTPVerb specifies the HTTP verb to use when fetching the next page.
// The default value is http.MethodGet.
// This field is only used when NextReq is not specified.
HTTPVerb string
}
// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL.
@@ -119,7 +121,11 @@ func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, first
if options.NextReq != nil {
req, err = options.NextReq(ctx, nextLink)
} else {
req, err = NewRequest(ctx, http.MethodGet, nextLink)
verb := http.MethodGet
if options.HTTPVerb != "" {
verb = options.HTTPVerb
}
req, err = NewRequest(ctx, verb, nextLink)
}
}
if err != nil {

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -34,6 +31,7 @@ func httpHeaderPolicy(req *policy.Request) (*http.Response, error) {
// WithHTTPHeader adds the specified http.Header to the parent context.
// Use this to specify custom HTTP headers at the API-call level.
// Any overlapping headers will have their values replaced with the values specified here.
//
// Deprecated: use [policy.WithHTTPHeader] instead.
func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
return policy.WithHTTPHeader(parent, header)

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -29,6 +26,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
// The resp parameter will contain the HTTP response after the request has completed.
//
// Deprecated: use [policy.WithCaptureResponse] instead.
func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
return policy.WithCaptureResponse(parent, resp)

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -117,7 +114,10 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// wrap the body so we control when it's actually closed.
// do this outside the for loop so defers don't accumulate.
rwbody = &retryableRequestBody{body: req.Body()}
defer rwbody.realClose()
defer func() {
// TODO: https://github.com/Azure/azure-sdk-for-go/issues/25649
_ = rwbody.realClose()
}()
}
try := int32(1)
for {
@@ -222,6 +222,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// WithRetryOptions adds the specified RetryOptions to the parent context.
// Use this to specify custom RetryOptions at the API-call level.
//
// Deprecated: use [policy.WithRetryOptions] instead.
func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context {
return policy.WithRetryOptions(parent, options)

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -87,7 +84,10 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
}, nil
}
defer resp.Body.Close()
defer func() {
_ = resp.Body.Close()
}()
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
if !poller.StatusCodeValid(resp) {

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -84,8 +81,9 @@ func UnmarshalAsXML(resp *http.Response, v any) error {
// Drain reads the response body to completion then closes it. The bytes read are discarded.
func Drain(resp *http.Response) {
if resp != nil && resp.Body != nil {
// TODO: this might not be necessary when the bodyDownloadPolicy is in play
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
_ = resp.Body.Close()
}
}

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,6 +1,3 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

View File

@@ -1,5 +1,11 @@
# Release History
## 1.13.1 (2025-11-10)
### Bugs Fixed
- `AzureCLICredential` quoted arguments incorrectly on Windows
## 1.13.0 (2025-10-07)
### Features Added

View File

@@ -12,7 +12,6 @@ import (
"errors"
"os"
"os/exec"
"runtime"
"strings"
"time"
)
@@ -30,17 +29,9 @@ var shellExec = func(ctx context.Context, credName, command string) ([]byte, err
ctx, cancel = context.WithTimeout(ctx, cliTimeout)
defer cancel()
}
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
dir := os.Getenv("SYSTEMROOT")
if dir == "" {
return nil, newCredentialUnavailableError(credName, `environment variable "SYSTEMROOT" has no value`)
}
cmd = exec.CommandContext(ctx, "cmd.exe", "/c", command)
cmd.Dir = dir
} else {
cmd = exec.CommandContext(ctx, "/bin/sh", "-c", command)
cmd.Dir = "/bin"
cmd, err := buildCmd(ctx, credName, command)
if err != nil {
return nil, err
}
cmd.Env = os.Environ()
stderr := bytes.Buffer{}

View File

@@ -0,0 +1,17 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//go:build !windows
package azidentity
import (
"context"
"os/exec"
)
func buildCmd(ctx context.Context, _, command string) (*exec.Cmd, error) {
cmd := exec.CommandContext(ctx, "/bin/sh", "-c", command)
cmd.Dir = "/bin"
return cmd, nil
}

Some files were not shown because too many files have changed in this diff Show More