mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 00:26:36 +03:00
go.mod: unpin cloud.google.com/go/storage
Add build tag `disable_grpc_modules` for vmbackup, vmrestore and vmbackupmanager. Binary size increases only for 3MB with it. It's acceptable trade-off for security and feature updates. Related issue: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
This commit is contained in:
9
Makefile
9
Makefile
@@ -12,6 +12,7 @@ PKG_TAG := $(BUILDINFO_TAG)
|
||||
endif
|
||||
|
||||
EXTRA_DOCKER_TAG_SUFFIX ?=
|
||||
EXTRA_GO_BUILD_TAGS ?=
|
||||
|
||||
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
|
||||
TAR_OWNERSHIP ?= --owner=1000 --group=1000
|
||||
@@ -470,16 +471,16 @@ vendor-update:
|
||||
go mod vendor
|
||||
|
||||
app-local:
|
||||
CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-pure:
|
||||
CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-goos-goarch:
|
||||
CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-local-windows-goarch:
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
quicktemplate-gen: install-qtc
|
||||
qtc
|
||||
|
||||
@@ -1,106 +1,110 @@
|
||||
# All these commands must run from repository root.
|
||||
|
||||
# special tag to reduce resulting binary size
|
||||
# See this issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
VMBACKUP_GO_BUILD_TAGS=disable_grpc_modules
|
||||
|
||||
vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) app-local
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local
|
||||
|
||||
vmbackup-race:
|
||||
APP_NAME=vmbackup RACE=-race $(MAKE) app-local
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) RACE=-race $(MAKE) app-local
|
||||
|
||||
vmbackup-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker
|
||||
|
||||
vmbackup-pure-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-pure
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-pure
|
||||
|
||||
vmbackup-linux-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-amd64
|
||||
|
||||
vmbackup-linux-arm-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm
|
||||
|
||||
vmbackup-linux-arm64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm64
|
||||
|
||||
vmbackup-linux-ppc64le-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-ppc64le
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-ppc64le
|
||||
|
||||
vmbackup-linux-386-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-linux-386
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386
|
||||
|
||||
vmbackup-darwin-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64
|
||||
|
||||
vmbackup-darwin-arm64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-arm64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-arm64
|
||||
|
||||
vmbackup-freebsd-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-freebsd-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-freebsd-amd64
|
||||
|
||||
vmbackup-openbsd-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-openbsd-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-openbsd-amd64
|
||||
|
||||
vmbackup-windows-amd64-prod:
|
||||
APP_NAME=vmbackup $(MAKE) app-via-docker-windows-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-windows-amd64
|
||||
|
||||
package-vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker
|
||||
|
||||
package-vmbackup-pure:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-pure
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-pure
|
||||
|
||||
package-vmbackup-amd64:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-amd64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-vmbackup-arm:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-arm
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm
|
||||
|
||||
package-vmbackup-arm64:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-arm64
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-vmbackup-ppc64le:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-ppc64le
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-vmbackup-386:
|
||||
APP_NAME=vmbackup $(MAKE) package-via-docker-386
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-386
|
||||
|
||||
publish-vmbackup:
|
||||
APP_NAME=vmbackup $(MAKE) publish-via-docker
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) publish-via-docker
|
||||
|
||||
vmbackup-linux-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-arm:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-arm64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-ppc64le:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-s390x:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-loong64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-linux-386:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-darwin-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-darwin-arm64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-freebsd-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-openbsd-amd64:
|
||||
APP_NAME=vmbackup CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmbackup-windows-amd64:
|
||||
GOARCH=amd64 APP_NAME=vmbackup $(MAKE) app-local-windows-goarch
|
||||
GOARCH=amd64 APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local-windows-goarch
|
||||
|
||||
vmbackup-pure:
|
||||
APP_NAME=vmbackup $(MAKE) app-local-pure
|
||||
APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local-pure
|
||||
|
||||
@@ -1,106 +1,110 @@
|
||||
# All these commands must run from repository root.
|
||||
|
||||
# special tag to reduce resulting binary size
|
||||
# See this issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
VMRESTORE_GO_BUILD_TAGS=disable_grpc_modules
|
||||
|
||||
vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) app-local
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local
|
||||
|
||||
vmrestore-race:
|
||||
APP_NAME=vmrestore RACE=-race $(MAKE) app-local
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) RACE=-race $(MAKE) app-local
|
||||
|
||||
vmrestore-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker
|
||||
|
||||
vmrestore-pure-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-pure
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-pure
|
||||
|
||||
vmrestore-linux-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-amd64
|
||||
|
||||
vmrestore-linux-arm-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm
|
||||
|
||||
vmrestore-linux-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm64
|
||||
|
||||
vmrestore-linux-ppc64le-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-ppc64le
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-ppc64le
|
||||
|
||||
vmrestore-linux-386-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-linux-386
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386
|
||||
|
||||
vmrestore-darwin-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64
|
||||
|
||||
vmrestore-darwin-arm64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-arm64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-arm64
|
||||
|
||||
vmrestore-freebsd-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-freebsd-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-freebsd-amd64
|
||||
|
||||
vmrestore-openbsd-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-openbsd-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-openbsd-amd64
|
||||
|
||||
vmrestore-windows-amd64-prod:
|
||||
APP_NAME=vmrestore $(MAKE) app-via-docker-windows-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-windows-amd64
|
||||
|
||||
package-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker
|
||||
|
||||
package-vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-pure
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-pure
|
||||
|
||||
package-vmrestore-amd64:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-amd64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-amd64
|
||||
|
||||
package-vmrestore-arm:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-arm
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm
|
||||
|
||||
package-vmrestore-arm64:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-arm64
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm64
|
||||
|
||||
package-vmrestore-ppc64le:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-ppc64le
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-ppc64le
|
||||
|
||||
package-vmrestore-386:
|
||||
APP_NAME=vmrestore $(MAKE) package-via-docker-386
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-386
|
||||
|
||||
publish-vmrestore:
|
||||
APP_NAME=vmrestore $(MAKE) publish-via-docker
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) publish-via-docker
|
||||
|
||||
vmrestore-linux-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-arm:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-arm64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-ppc64le:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-s390x:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-loong64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-linux-386:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-darwin-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-darwin-arm64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-freebsd-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-openbsd-amd64:
|
||||
APP_NAME=vmrestore CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
|
||||
|
||||
vmrestore-windows-amd64:
|
||||
GOARCH=amd64 APP_NAME=vmrestore $(MAKE) app-local-windows-goarch
|
||||
GOARCH=amd64 APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local-windows-goarch
|
||||
|
||||
vmrestore-pure:
|
||||
APP_NAME=vmrestore $(MAKE) app-local-pure
|
||||
APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local-pure
|
||||
|
||||
@@ -43,7 +43,7 @@ app-via-docker: package-builder
|
||||
$(BUILDER_IMAGE) \
|
||||
go build $(RACE) -trimpath -buildvcs=false \
|
||||
-ldflags "-extldflags '-static' $(GO_BUILDINFO)" \
|
||||
-tags 'netgo osusergo musl' \
|
||||
-tags 'netgo osusergo musl $(EXTRA_GO_BUILD_TAGS)' \
|
||||
-o bin/$(APP_NAME)$(APP_SUFFIX)-prod $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
app-via-docker-windows: package-builder
|
||||
@@ -58,7 +58,7 @@ app-via-docker-windows: package-builder
|
||||
$(BUILDER_IMAGE) \
|
||||
go build $(RACE) -trimpath -buildvcs=false \
|
||||
-ldflags "-s -w -extldflags '-static' $(GO_BUILDINFO)" \
|
||||
-tags 'netgo osusergo' \
|
||||
-tags 'netgo osusergo $(EXTRA_GO_BUILD_TAGS)' \
|
||||
-o bin/$(APP_NAME)-windows$(APP_SUFFIX)-prod.exe $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
package-via-docker: package-base
|
||||
|
||||
20
go.mod
20
go.mod
@@ -2,12 +2,6 @@ module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
go 1.25.0
|
||||
|
||||
// This is needed in order to avoid vmbackup and vmrestore binary size increase by 20MB
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
//
|
||||
// TODO: remove this entry after https://github.com/googleapis/google-cloud-go/issues/11448 is fixed
|
||||
replace cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0
|
||||
|
||||
// Pin AWS libraries to version before 2025-01-15
|
||||
// Release notes: https://github.com/aws/aws-sdk-go-v2/releases/tag/release-2025-01-15
|
||||
// This version enabled request and response checksum verification by default which
|
||||
@@ -66,13 +60,18 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.4 // indirect
|
||||
cloud.google.com/go/auth v0.16.3 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
|
||||
@@ -92,11 +91,15 @@ require (
|
||||
github.com/aws/smithy-go v1.22.5 // indirect
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dennwc/varint v1.0.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
@@ -127,6 +130,7 @@ require (
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.131.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.131.0 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
@@ -136,9 +140,11 @@ require (
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/component v1.37.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.37.0 // indirect
|
||||
@@ -151,6 +157,7 @@ require (
|
||||
go.opentelemetry.io/collector/processor v1.37.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
|
||||
@@ -158,6 +165,7 @@ require (
|
||||
go.opentelemetry.io/otel/log v0.13.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/goleak v1.3.0 // indirect
|
||||
|
||||
37
go.sum
37
go.sum
@@ -1,3 +1,5 @@
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.121.4 h1:cVvUiY0sX0xwyxPwdSU2KsF9knOVmtRyAMt8xou0iTs=
|
||||
cloud.google.com/go v0.121.4/go.mod h1:XEBchUiHFJbz4lKBZwYBDHV/rSyfFktk737TLDU089s=
|
||||
cloud.google.com/go/auth v0.16.3 h1:kabzoQ9/bobUmnseYnBO6qQG7q4a/CffFRlJSxv2wCc=
|
||||
@@ -8,10 +10,16 @@ cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeO
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
|
||||
cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
|
||||
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
|
||||
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
|
||||
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
|
||||
cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI=
|
||||
cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 h1:Hr5FTipp7SL07o2FvoVOX9HRiRH3CR3Mj8pxqCcdD5A=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
@@ -34,8 +42,16 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJ
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20250728123024-98593029b5aa h1:qTB0QsUpBe/WzXQKcALj3Ossizb2daUHXmaVoWFdVlE=
|
||||
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20250728123024-98593029b5aa/go.mod h1:jeov7Un2x4Dpxw2Qn2MWa0kbwNn1Gc2Iw+8gvPqGsZk=
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
|
||||
@@ -131,8 +147,11 @@ github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
||||
@@ -145,6 +164,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -340,6 +361,8 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBw
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -376,6 +399,8 @@ github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAz
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/component v1.37.0 h1:yc5X0WhZwlpJ+W8Sg1fpRRjiUu3nByLe1wVOKWWRWRQ=
|
||||
@@ -416,6 +441,8 @@ go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCu
|
||||
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 h1:FGre0nZh5BSw7G73VpT3xs38HchsfPsa2aZtMp0NPOs=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0/go.mod h1:X2PYPViI2wTPIMIOBjG17KNybTzsrATnvPJ02kkz7LM=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 h1:rbRJ8BBoVMsQShESYZ0FkvcITu8X8QNwJogcLUmDNNw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0/go.mod h1:ru6KHrNtNHxM4nD/vd6QrLVWgKhxPYgblq4VAtNawTQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0 h1:wCeciVlAfb5DC8MQl/DlmAv/FVPNpQgFvI/71+hatuc=
|
||||
@@ -424,6 +451,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/X
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls=
|
||||
go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E=
|
||||
go.opentelemetry.io/otel/log/logtest v0.13.0 h1:xxaIcgoEEtnwdgj6D6Uo9K/Dynz9jqIxSDu2YObJ69Q=
|
||||
|
||||
2
vendor/cel.dev/expr/.bazelversion
vendored
Normal file
2
vendor/cel.dev/expr/.bazelversion
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
7.3.2
|
||||
# Keep this pinned version in parity with cel-go
|
||||
2
vendor/cel.dev/expr/.gitattributes
vendored
Normal file
2
vendor/cel.dev/expr/.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*.pb.go linguist-generated=true
|
||||
*.pb.go -diff -merge
|
||||
2
vendor/cel.dev/expr/.gitignore
vendored
Normal file
2
vendor/cel.dev/expr/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
bazel-*
|
||||
MODULE.bazel.lock
|
||||
34
vendor/cel.dev/expr/BUILD.bazel
vendored
Normal file
34
vendor/cel.dev/expr/BUILD.bazel
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
go_library(
|
||||
name = "expr",
|
||||
srcs = [
|
||||
"checked.pb.go",
|
||||
"eval.pb.go",
|
||||
"explain.pb.go",
|
||||
"syntax.pb.go",
|
||||
"value.pb.go",
|
||||
],
|
||||
importpath = "cel.dev/expr",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//reflect/protoreflect",
|
||||
"@org_golang_google_protobuf//runtime/protoimpl",
|
||||
"@org_golang_google_protobuf//types/known/anypb",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/emptypb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":expr",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
25
vendor/cel.dev/expr/CODE_OF_CONDUCT.md
vendored
Normal file
25
vendor/cel.dev/expr/CODE_OF_CONDUCT.md
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# Contributor Code of Conduct
|
||||
## Version 0.1.1 (adapted from 0.3b-angular)
|
||||
|
||||
As contributors and maintainers of the Common Expression Language
|
||||
(CEL) project, we pledge to respect everyone who contributes by
|
||||
posting issues, updating documentation, submitting pull requests,
|
||||
providing feedback in comments, and any other activities.
|
||||
|
||||
Communication through any of CEL's channels (GitHub, Gitter, IRC,
|
||||
mailing lists, Google+, Twitter, etc.) must be constructive and never
|
||||
resort to personal attacks, trolling, public or private harassment,
|
||||
insults, or other unprofessional conduct.
|
||||
|
||||
We promise to extend courtesy and respect to everyone involved in this
|
||||
project regardless of gender, gender identity, sexual orientation,
|
||||
disability, age, race, ethnicity, religion, or level of experience. We
|
||||
expect anyone contributing to the project to do the same.
|
||||
|
||||
If any member of the community violates this code of conduct, the
|
||||
maintainers of the CEL project may take action, removing issues,
|
||||
comments, and PRs or blocking accounts as deemed appropriate.
|
||||
|
||||
If you are subject to or witness unacceptable behavior, or have any
|
||||
other concerns, please email us at
|
||||
[cel-conduct@google.com](mailto:cel-conduct@google.com).
|
||||
32
vendor/cel.dev/expr/CONTRIBUTING.md
vendored
Normal file
32
vendor/cel.dev/expr/CONTRIBUTING.md
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# How to Contribute
|
||||
|
||||
We'd love to accept your patches and contributions to this project. There are a
|
||||
few guidelines you need to follow.
|
||||
|
||||
## Contributor License Agreement
|
||||
|
||||
Contributions to this project must be accompanied by a Contributor License
|
||||
Agreement. You (or your employer) retain the copyright to your contribution,
|
||||
this simply gives us permission to use and redistribute your contributions as
|
||||
part of the project. Head over to <https://cla.developers.google.com/> to see
|
||||
your current agreements on file or to sign a new one.
|
||||
|
||||
You generally only need to submit a CLA once, so if you've already submitted one
|
||||
(even if it was for a different project), you probably don't need to do it
|
||||
again.
|
||||
|
||||
## Code reviews
|
||||
|
||||
All submissions, including submissions by project members, require review. We
|
||||
use GitHub pull requests for this purpose. Consult
|
||||
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
|
||||
information on using pull requests.
|
||||
|
||||
## What to expect from maintainers
|
||||
|
||||
Expect maintainers to respond to new issues or pull requests within a week.
|
||||
For outstanding and ongoing issues and particularly for long-running
|
||||
pull requests, expect the maintainers to review within a week of a
|
||||
contributor asking for a new review. There is no commitment to resolution --
|
||||
merging or closing a pull request, or fixing or closing an issue -- because some
|
||||
issues will require more discussion than others.
|
||||
43
vendor/cel.dev/expr/GOVERNANCE.md
vendored
Normal file
43
vendor/cel.dev/expr/GOVERNANCE.md
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
# Project Governance
|
||||
|
||||
This document defines the governance process for the CEL language. CEL is
|
||||
Google-developed, but openly governed. Major contributors to the CEL
|
||||
specification and its corresponding implementations constitute the CEL
|
||||
Language Council. New members may be added by a unanimous vote of the
|
||||
Council.
|
||||
|
||||
The MAINTAINERS.md file lists the members of the CEL Language Council, and
|
||||
unofficially indicates the "areas of expertise" of each member with respect
|
||||
to the publicly available CEL repos.
|
||||
|
||||
## Code Changes
|
||||
|
||||
Code changes must follow the standard pull request (PR) model documented in the
|
||||
CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
|
||||
maintainer. The maintainer reserves the right to request that any feature
|
||||
request (FR) or PR be reviewed by the language council.
|
||||
|
||||
## Syntax and Semantic Changes
|
||||
|
||||
Syntactic and semantic changes must be reviewed by the CEL Language Council.
|
||||
Maintainers may also request language council review at their discretion.
|
||||
|
||||
The review process is as follows:
|
||||
|
||||
- Create a Feature Request in the CEL-Spec repo. The feature description will
|
||||
serve as an abstract for the detailed design document.
|
||||
- Co-develop a design document with the Language Council.
|
||||
- Once the proposer gives the design document approval, the document will be
|
||||
linked to the FR in the CEL-Spec repo and opened for comments to members of
|
||||
the cel-lang-discuss@googlegroups.com.
|
||||
- The Language Council will review the design doc at the next council meeting
|
||||
(once every three weeks) and the council decision included in the document.
|
||||
|
||||
If the proposal is approved, the spec will be updated by a maintainer (if
|
||||
applicable) and a rationale will be included in the CEL-Spec wiki to ensure
|
||||
future developers may follow CEL's growth and direction over time.
|
||||
|
||||
Approved proposals may be implemented by the proposer or by the maintainers as
|
||||
the parties see fit. At the discretion of the maintainer, changes from the
|
||||
approved design are permitted during implementation if they improve the user
|
||||
experience and clarity of the feature.
|
||||
202
vendor/cel.dev/expr/LICENSE
vendored
Normal file
202
vendor/cel.dev/expr/LICENSE
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
13
vendor/cel.dev/expr/MAINTAINERS.md
vendored
Normal file
13
vendor/cel.dev/expr/MAINTAINERS.md
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# CEL Language Council
|
||||
|
||||
| Name | Company | Area of Expertise |
|
||||
|-----------------|--------------|-------------------|
|
||||
| Alfred Fuller | Facebook | cel-cpp, cel-spec |
|
||||
| Jim Larson | Google | cel-go, cel-spec |
|
||||
| Matthais Blume | Google | cel-spec |
|
||||
| Tristan Swadell | Google | cel-go, cel-spec |
|
||||
|
||||
## Emeritus
|
||||
|
||||
* Sanjay Ghemawat (Google)
|
||||
* Wolfgang Grieskamp (Facebook)
|
||||
74
vendor/cel.dev/expr/MODULE.bazel
vendored
Normal file
74
vendor/cel.dev/expr/MODULE.bazel
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
module(
|
||||
name = "cel-spec",
|
||||
)
|
||||
|
||||
bazel_dep(
|
||||
name = "bazel_skylib",
|
||||
version = "1.7.1",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "gazelle",
|
||||
version = "0.39.1",
|
||||
repo_name = "bazel_gazelle",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis",
|
||||
version = "0.0.0-20241220-5e258e33.bcr.1",
|
||||
repo_name = "com_google_googleapis",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis-cc",
|
||||
version = "1.0.0",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis-java",
|
||||
version = "1.0.0",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis-go",
|
||||
version = "1.0.0",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "protobuf",
|
||||
version = "27.0",
|
||||
repo_name = "com_google_protobuf",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_cc",
|
||||
version = "0.0.17",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_go",
|
||||
version = "0.53.0",
|
||||
repo_name = "io_bazel_rules_go",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_java",
|
||||
version = "7.6.5",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_proto",
|
||||
version = "7.0.2",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_python",
|
||||
version = "0.35.0",
|
||||
)
|
||||
|
||||
### PYTHON ###
|
||||
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
|
||||
python.toolchain(
|
||||
ignore_root_user_error = True,
|
||||
python_version = "3.11",
|
||||
)
|
||||
|
||||
go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
|
||||
go_sdk.download(version = "1.22.0")
|
||||
|
||||
go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
|
||||
go_deps.from_file(go_mod = "//:go.mod")
|
||||
use_repo(
|
||||
go_deps,
|
||||
"org_golang_google_genproto_googleapis_rpc",
|
||||
"org_golang_google_protobuf",
|
||||
)
|
||||
71
vendor/cel.dev/expr/README.md
vendored
Normal file
71
vendor/cel.dev/expr/README.md
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
# Common Expression Language
|
||||
|
||||
The Common Expression Language (CEL) implements common semantics for expression
|
||||
evaluation, enabling different applications to more easily interoperate.
|
||||
|
||||
Key Applications
|
||||
|
||||
* Security policy: organizations have complex infrastructure and need common
|
||||
tooling to reason about the system as a whole
|
||||
* Protocols: expressions are a useful data type and require interoperability
|
||||
across programming languages and platforms.
|
||||
|
||||
|
||||
Guiding philosophy:
|
||||
|
||||
1. Keep it small & fast.
|
||||
* CEL evaluates in linear time, is mutation free, and not Turing-complete.
|
||||
This limitation is a feature of the language design, which allows the
|
||||
implementation to evaluate orders of magnitude faster than equivalently
|
||||
sandboxed JavaScript.
|
||||
2. Make it extensible.
|
||||
* CEL is designed to be embedded in applications, and allows for
|
||||
extensibility via its context which allows for functions and data to be
|
||||
provided by the software that embeds it.
|
||||
3. Developer-friendly.
|
||||
* The language is approachable to developers. The initial spec was based
|
||||
on the experience of developing Firebase Rules and usability testing
|
||||
many prior iterations.
|
||||
* The library itself and accompanying toolings should be easy to adopt by
|
||||
teams that seek to integrate CEL into their platforms.
|
||||
|
||||
The required components of a system that supports CEL are:
|
||||
|
||||
* The textual representation of an expression as written by a developer. It is
|
||||
of similar syntax to expressions in C/C++/Java/JavaScript
|
||||
* A representation of the program's abstract syntax tree (AST).
|
||||
* A compiler library that converts the textual representation to the binary
|
||||
representation. This can be done ahead of time (in the control plane) or
|
||||
just before evaluation (in the data plane).
|
||||
* A context containing one or more typed variables, often protobuf messages.
|
||||
Most use-cases will use `attribute_context.proto`
|
||||
* An evaluator library that takes the binary format in the context and
|
||||
produces a result, usually a Boolean.
|
||||
|
||||
For use cases which require persistence or cross-process communcation, it is
|
||||
highly recommended to serialize the type-checked expression as a protocol
|
||||
buffer. The CEL team will maintains canonical protocol buffers for ASTs and
|
||||
will keep these versions identical and wire-compatible in perpetuity:
|
||||
|
||||
* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
|
||||
* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
|
||||
|
||||
|
||||
Example of boolean conditions and object construction:
|
||||
|
||||
``` c
|
||||
// Condition
|
||||
account.balance >= transaction.withdrawal
|
||||
|| (account.overdraftProtection
|
||||
&& account.overdraftLimit >= transaction.withdrawal - account.balance)
|
||||
|
||||
// Object construction
|
||||
common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
|
||||
```
|
||||
|
||||
For more detail, see:
|
||||
|
||||
* [Introduction](doc/intro.md)
|
||||
* [Language Definition](doc/langdef.md)
|
||||
|
||||
Released under the [Apache License](LICENSE).
|
||||
145
vendor/cel.dev/expr/WORKSPACE
vendored
Normal file
145
vendor/cel.dev/expr/WORKSPACE
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "rules_proto",
|
||||
sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
|
||||
strip_prefix = "rules_proto-4.0.0-3.20.0",
|
||||
urls = [
|
||||
"https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# googleapis as of 09/16/2024
|
||||
http_archive(
|
||||
name = "com_google_googleapis",
|
||||
strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
|
||||
sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
|
||||
urls = [
|
||||
"https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# protobuf
|
||||
http_archive(
|
||||
name = "com_google_protobuf",
|
||||
sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
|
||||
strip_prefix = "protobuf-3.21.5",
|
||||
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
|
||||
)
|
||||
|
||||
# googletest
|
||||
http_archive(
|
||||
name = "com_google_googletest",
|
||||
urls = ["https://github.com/google/googletest/archive/master.zip"],
|
||||
strip_prefix = "googletest-master",
|
||||
)
|
||||
|
||||
# gflags
|
||||
http_archive(
|
||||
name = "com_github_gflags_gflags",
|
||||
sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
|
||||
strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
|
||||
"https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# glog
|
||||
http_archive(
|
||||
name = "com_google_glog",
|
||||
sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
|
||||
strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
|
||||
"https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# absl
|
||||
http_archive(
|
||||
name = "com_google_absl",
|
||||
strip_prefix = "abseil-cpp-master",
|
||||
urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
|
||||
)
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
|
||||
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
|
||||
load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
|
||||
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
|
||||
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
|
||||
|
||||
switched_rules_by_language(
|
||||
name = "com_google_googleapis_imports",
|
||||
cc = True,
|
||||
)
|
||||
|
||||
# Do *not* call *_dependencies(), etc, yet. See comment at the end.
|
||||
|
||||
# Generated Google APIs protos for Golang
|
||||
# Generated Google APIs protos for Golang 08/26/2024
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto_googleapis_api",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/genproto/googleapis/api",
|
||||
sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
|
||||
version = "v0.0.0-20240826202546-f6391c0de4c7",
|
||||
)
|
||||
|
||||
# Generated Google APIs protos for Golang 08/26/2024
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto_googleapis_rpc",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/genproto/googleapis/rpc",
|
||||
sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
|
||||
version = "v0.0.0-20240826202546-f6391c0de4c7",
|
||||
)
|
||||
|
||||
# gRPC deps
|
||||
go_repository(
|
||||
name = "org_golang_google_grpc",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/grpc",
|
||||
tag = "v1.49.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_net",
|
||||
importpath = "golang.org/x/net",
|
||||
sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
|
||||
version = "v0.0.0-20190311183353-d8887717615a",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_text",
|
||||
importpath = "golang.org/x/text",
|
||||
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
|
||||
version = "v0.3.2",
|
||||
)
|
||||
|
||||
# Run the dependencies at the end. These will silently try to import some
|
||||
# of the above repositories but at different versions, so ours must come first.
|
||||
go_rules_dependencies()
|
||||
go_register_toolchains(version = "1.19.1")
|
||||
gazelle_dependencies()
|
||||
rules_proto_dependencies()
|
||||
rules_proto_toolchains()
|
||||
protobuf_deps()
|
||||
0
vendor/cel.dev/expr/WORKSPACE.bzlmod
vendored
Normal file
0
vendor/cel.dev/expr/WORKSPACE.bzlmod
vendored
Normal file
1432
vendor/cel.dev/expr/checked.pb.go
generated
vendored
Normal file
1432
vendor/cel.dev/expr/checked.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
9
vendor/cel.dev/expr/cloudbuild.yaml
vendored
Normal file
9
vendor/cel.dev/expr/cloudbuild.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
steps:
|
||||
- name: 'gcr.io/cloud-builders/bazel:7.3.2'
|
||||
entrypoint: bazel
|
||||
args: ['build', '...']
|
||||
id: bazel-build
|
||||
waitFor: ['-']
|
||||
timeout: 15m
|
||||
options:
|
||||
machineType: 'N1_HIGHCPU_32'
|
||||
487
vendor/cel.dev/expr/eval.pb.go
generated
vendored
Normal file
487
vendor/cel.dev/expr/eval.pb.go
generated
vendored
Normal file
@@ -0,0 +1,487 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.3
|
||||
// protoc v5.27.1
|
||||
// source: cel/expr/eval.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type EvalState struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *EvalState) Reset() {
|
||||
*x = EvalState{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EvalState) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvalState) ProtoMessage() {}
|
||||
|
||||
func (x *EvalState) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
|
||||
func (*EvalState) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *EvalState) GetValues() []*ExprValue {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *EvalState) GetResults() []*EvalState_Result {
|
||||
if x != nil {
|
||||
return x.Results
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExprValue struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// Types that are valid to be assigned to Kind:
|
||||
//
|
||||
// *ExprValue_Value
|
||||
// *ExprValue_Error
|
||||
// *ExprValue_Unknown
|
||||
Kind isExprValue_Kind `protobuf_oneof:"kind"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExprValue) Reset() {
|
||||
*x = ExprValue{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExprValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExprValue) ProtoMessage() {}
|
||||
|
||||
func (x *ExprValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
|
||||
func (*ExprValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetKind() isExprValue_Kind {
|
||||
if x != nil {
|
||||
return x.Kind
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetValue() *Value {
|
||||
if x != nil {
|
||||
if x, ok := x.Kind.(*ExprValue_Value); ok {
|
||||
return x.Value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetError() *ErrorSet {
|
||||
if x != nil {
|
||||
if x, ok := x.Kind.(*ExprValue_Error); ok {
|
||||
return x.Error
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetUnknown() *UnknownSet {
|
||||
if x != nil {
|
||||
if x, ok := x.Kind.(*ExprValue_Unknown); ok {
|
||||
return x.Unknown
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isExprValue_Kind interface {
|
||||
isExprValue_Kind()
|
||||
}
|
||||
|
||||
type ExprValue_Value struct {
|
||||
Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ExprValue_Error struct {
|
||||
Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ExprValue_Unknown struct {
|
||||
Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ExprValue_Value) isExprValue_Kind() {}
|
||||
|
||||
func (*ExprValue_Error) isExprValue_Kind() {}
|
||||
|
||||
func (*ExprValue_Unknown) isExprValue_Kind() {}
|
||||
|
||||
type ErrorSet struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ErrorSet) Reset() {
|
||||
*x = ErrorSet{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ErrorSet) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ErrorSet) ProtoMessage() {}
|
||||
|
||||
func (x *ErrorSet) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
|
||||
func (*ErrorSet) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ErrorSet) GetErrors() []*Status {
|
||||
if x != nil {
|
||||
return x.Errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Status struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Status) Reset() {
|
||||
*x = Status{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Status) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Status) ProtoMessage() {}
|
||||
|
||||
func (x *Status) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Status.ProtoReflect.Descriptor instead.
|
||||
func (*Status) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *Status) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Status) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Status) GetDetails() []*anypb.Any {
|
||||
if x != nil {
|
||||
return x.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type UnknownSet struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *UnknownSet) Reset() {
|
||||
*x = UnknownSet{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *UnknownSet) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UnknownSet) ProtoMessage() {}
|
||||
|
||||
func (x *UnknownSet) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
|
||||
func (*UnknownSet) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *UnknownSet) GetExprs() []int64 {
|
||||
if x != nil {
|
||||
return x.Exprs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EvalState_Result struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
|
||||
Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) Reset() {
|
||||
*x = EvalState_Result{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvalState_Result) ProtoMessage() {}
|
||||
|
||||
func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
|
||||
func (*EvalState_Result) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) GetExpr() int64 {
|
||||
if x != nil {
|
||||
return x.Expr
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) GetValue() int64 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_cel_expr_eval_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_eval_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
|
||||
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
|
||||
0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
|
||||
0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
|
||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
|
||||
0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
|
||||
0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
|
||||
0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
|
||||
0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
|
||||
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
|
||||
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
|
||||
0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
|
||||
0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
|
||||
0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
|
||||
0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
|
||||
0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
|
||||
0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
|
||||
0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
|
||||
0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
|
||||
0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
|
||||
0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
|
||||
0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
|
||||
0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_eval_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_eval_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_eval_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_eval_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_cel_expr_eval_proto_goTypes = []any{
|
||||
(*EvalState)(nil), // 0: cel.expr.EvalState
|
||||
(*ExprValue)(nil), // 1: cel.expr.ExprValue
|
||||
(*ErrorSet)(nil), // 2: cel.expr.ErrorSet
|
||||
(*Status)(nil), // 3: cel.expr.Status
|
||||
(*UnknownSet)(nil), // 4: cel.expr.UnknownSet
|
||||
(*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result
|
||||
(*Value)(nil), // 6: cel.expr.Value
|
||||
(*anypb.Any)(nil), // 7: google.protobuf.Any
|
||||
}
|
||||
var file_cel_expr_eval_proto_depIdxs = []int32{
|
||||
1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
|
||||
5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
|
||||
6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
|
||||
2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
|
||||
4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
|
||||
3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status
|
||||
7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any
|
||||
7, // [7:7] is the sub-list for method output_type
|
||||
7, // [7:7] is the sub-list for method input_type
|
||||
7, // [7:7] is the sub-list for extension type_name
|
||||
7, // [7:7] is the sub-list for extension extendee
|
||||
0, // [0:7] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_eval_proto_init() }
|
||||
func file_cel_expr_eval_proto_init() {
|
||||
if File_cel_expr_eval_proto != nil {
|
||||
return
|
||||
}
|
||||
file_cel_expr_value_proto_init()
|
||||
file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{
|
||||
(*ExprValue_Value)(nil),
|
||||
(*ExprValue_Error)(nil),
|
||||
(*ExprValue_Unknown)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_eval_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_eval_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_eval_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_eval_proto = out.File
|
||||
file_cel_expr_eval_proto_rawDesc = nil
|
||||
file_cel_expr_eval_proto_goTypes = nil
|
||||
file_cel_expr_eval_proto_depIdxs = nil
|
||||
}
|
||||
236
vendor/cel.dev/expr/explain.pb.go
generated
vendored
Normal file
236
vendor/cel.dev/expr/explain.pb.go
generated
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/explain.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Explain struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Explain) Reset() {
|
||||
*x = Explain{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Explain) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Explain) ProtoMessage() {}
|
||||
|
||||
func (x *Explain) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
|
||||
func (*Explain) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Explain) GetValues() []*Value {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Explain) GetExprSteps() []*Explain_ExprStep {
|
||||
if x != nil {
|
||||
return x.ExprSteps
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Explain_ExprStep struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) Reset() {
|
||||
*x = Explain_ExprStep{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Explain_ExprStep) ProtoMessage() {}
|
||||
|
||||
func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
|
||||
func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) GetId() int64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) GetValueIndex() int32 {
|
||||
if x != nil {
|
||||
return x.ValueIndex
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_cel_expr_explain_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_explain_proto_rawDesc = []byte{
|
||||
0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
|
||||
0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
|
||||
0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
|
||||
0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
|
||||
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
|
||||
0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
|
||||
0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
|
||||
0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
|
||||
0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_explain_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_explain_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_explain_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_explain_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_cel_expr_explain_proto_goTypes = []interface{}{
|
||||
(*Explain)(nil), // 0: cel.expr.Explain
|
||||
(*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
|
||||
(*Value)(nil), // 2: cel.expr.Value
|
||||
}
|
||||
var file_cel_expr_explain_proto_depIdxs = []int32{
|
||||
2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
|
||||
1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_explain_proto_init() }
|
||||
func file_cel_expr_explain_proto_init() {
|
||||
if File_cel_expr_explain_proto != nil {
|
||||
return
|
||||
}
|
||||
file_cel_expr_value_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Explain); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Explain_ExprStep); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_explain_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_explain_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_explain_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_explain_proto = out.File
|
||||
file_cel_expr_explain_proto_rawDesc = nil
|
||||
file_cel_expr_explain_proto_goTypes = nil
|
||||
file_cel_expr_explain_proto_depIdxs = nil
|
||||
}
|
||||
9
vendor/cel.dev/expr/regen_go_proto.sh
vendored
Normal file
9
vendor/cel.dev/expr/regen_go_proto.sh
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/sh
|
||||
bazel build //proto/cel/expr/conformance/...
|
||||
files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
|
||||
for src in ${files[@]};
|
||||
do
|
||||
dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
|
||||
echo "copying $dst"
|
||||
$(cp $src $dst)
|
||||
done
|
||||
10
vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
vendored
Normal file
10
vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
bazel build //proto/cel/expr:all
|
||||
|
||||
rm -vf ./*.pb.go
|
||||
|
||||
files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
|
||||
for src in "${files[@]}";
|
||||
do
|
||||
cp -v "${src}" ./
|
||||
done
|
||||
1633
vendor/cel.dev/expr/syntax.pb.go
generated
vendored
Normal file
1633
vendor/cel.dev/expr/syntax.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
653
vendor/cel.dev/expr/value.pb.go
generated
vendored
Normal file
653
vendor/cel.dev/expr/value.pb.go
generated
vendored
Normal file
@@ -0,0 +1,653 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/value.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
structpb "google.golang.org/protobuf/types/known/structpb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Value is the generated Go representation of the cel.expr.Value protobuf
// message. Exactly one of the Value_* wrapper types below is stored in Kind
// (a proto3 oneof named "kind").
type Value struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Types that are assignable to Kind:
	//
	// *Value_NullValue
	// *Value_BoolValue
	// *Value_Int64Value
	// *Value_Uint64Value
	// *Value_DoubleValue
	// *Value_StringValue
	// *Value_BytesValue
	// *Value_EnumValue
	// *Value_ObjectValue
	// *Value_MapValue
	// *Value_ListValue
	// *Value_TypeValue
	Kind isValue_Kind `protobuf_oneof:"kind"`
}

// Reset restores x to its zero value. On the unsafe fast path it also stores
// the message info into the message state for later reflection use.
func (x *Value) Reset() {
	*x = Value{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cel_expr_value_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the message formatted by the protobuf runtime.
func (x *Value) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Value as a protobuf message.
func (*Value) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info in the message state when the unsafe fast path is enabled.
func (x *Value) ProtoReflect() protoreflect.Message {
	mi := &file_cel_expr_value_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Value.ProtoReflect.Descriptor instead.
func (*Value) Descriptor() ([]byte, []int) {
	return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
}

// GetKind returns the populated oneof wrapper, or nil for a nil receiver.
func (m *Value) GetKind() isValue_Kind {
	if m != nil {
		return m.Kind
	}
	return nil
}

// GetNullValue returns the null_value variant, or NullValue(0) if a
// different variant (or none) is set.
func (x *Value) GetNullValue() structpb.NullValue {
	if x, ok := x.GetKind().(*Value_NullValue); ok {
		return x.NullValue
	}
	return structpb.NullValue(0)
}

// GetBoolValue returns the bool_value variant, or false if unset.
func (x *Value) GetBoolValue() bool {
	if x, ok := x.GetKind().(*Value_BoolValue); ok {
		return x.BoolValue
	}
	return false
}

// GetInt64Value returns the int64_value variant, or 0 if unset.
func (x *Value) GetInt64Value() int64 {
	if x, ok := x.GetKind().(*Value_Int64Value); ok {
		return x.Int64Value
	}
	return 0
}

// GetUint64Value returns the uint64_value variant, or 0 if unset.
func (x *Value) GetUint64Value() uint64 {
	if x, ok := x.GetKind().(*Value_Uint64Value); ok {
		return x.Uint64Value
	}
	return 0
}

// GetDoubleValue returns the double_value variant, or 0 if unset.
func (x *Value) GetDoubleValue() float64 {
	if x, ok := x.GetKind().(*Value_DoubleValue); ok {
		return x.DoubleValue
	}
	return 0
}

// GetStringValue returns the string_value variant, or "" if unset.
func (x *Value) GetStringValue() string {
	if x, ok := x.GetKind().(*Value_StringValue); ok {
		return x.StringValue
	}
	return ""
}

// GetBytesValue returns the bytes_value variant, or nil if unset.
func (x *Value) GetBytesValue() []byte {
	if x, ok := x.GetKind().(*Value_BytesValue); ok {
		return x.BytesValue
	}
	return nil
}

// GetEnumValue returns the enum_value variant, or nil if unset.
func (x *Value) GetEnumValue() *EnumValue {
	if x, ok := x.GetKind().(*Value_EnumValue); ok {
		return x.EnumValue
	}
	return nil
}

// GetObjectValue returns the object_value variant, or nil if unset.
func (x *Value) GetObjectValue() *anypb.Any {
	if x, ok := x.GetKind().(*Value_ObjectValue); ok {
		return x.ObjectValue
	}
	return nil
}

// GetMapValue returns the map_value variant, or nil if unset.
func (x *Value) GetMapValue() *MapValue {
	if x, ok := x.GetKind().(*Value_MapValue); ok {
		return x.MapValue
	}
	return nil
}

// GetListValue returns the list_value variant, or nil if unset.
func (x *Value) GetListValue() *ListValue {
	if x, ok := x.GetKind().(*Value_ListValue); ok {
		return x.ListValue
	}
	return nil
}

// GetTypeValue returns the type_value variant, or "" if unset.
func (x *Value) GetTypeValue() string {
	if x, ok := x.GetKind().(*Value_TypeValue); ok {
		return x.TypeValue
	}
	return ""
}

// isValue_Kind is the sealed marker interface implemented by every oneof
// wrapper type assignable to Value.Kind.
type isValue_Kind interface {
	isValue_Kind()
}
|
||||
|
||||
// Each Value_* type below wraps exactly one variant of the Value.Kind oneof;
// the field numbers in the struct tags match cel/expr/value.proto.

// Value_NullValue wraps the null_value variant (field 1).
type Value_NullValue struct {
	NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
}

// Value_BoolValue wraps the bool_value variant (field 2).
type Value_BoolValue struct {
	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
}

// Value_Int64Value wraps the int64_value variant (field 3).
type Value_Int64Value struct {
	Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
}

// Value_Uint64Value wraps the uint64_value variant (field 4).
type Value_Uint64Value struct {
	Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
}

// Value_DoubleValue wraps the double_value variant (field 5).
type Value_DoubleValue struct {
	DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
}

// Value_StringValue wraps the string_value variant (field 6).
type Value_StringValue struct {
	StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
}

// Value_BytesValue wraps the bytes_value variant (field 7).
type Value_BytesValue struct {
	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
}

// Value_EnumValue wraps the enum_value variant (field 9).
type Value_EnumValue struct {
	EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
}

// Value_ObjectValue wraps the object_value variant (field 10).
type Value_ObjectValue struct {
	ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
}

// Value_MapValue wraps the map_value variant (field 11).
type Value_MapValue struct {
	MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
}

// Value_ListValue wraps the list_value variant (field 12).
type Value_ListValue struct {
	ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
}

// Value_TypeValue wraps the type_value variant (field 15).
type Value_TypeValue struct {
	TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
}

// Marker-method implementations of the sealed isValue_Kind interface.

func (*Value_NullValue) isValue_Kind() {}

func (*Value_BoolValue) isValue_Kind() {}

func (*Value_Int64Value) isValue_Kind() {}

func (*Value_Uint64Value) isValue_Kind() {}

func (*Value_DoubleValue) isValue_Kind() {}

func (*Value_StringValue) isValue_Kind() {}

func (*Value_BytesValue) isValue_Kind() {}

func (*Value_EnumValue) isValue_Kind() {}

func (*Value_ObjectValue) isValue_Kind() {}

func (*Value_MapValue) isValue_Kind() {}

func (*Value_ListValue) isValue_Kind() {}

func (*Value_TypeValue) isValue_Kind() {}
|
||||
|
||||
// EnumValue is the generated Go representation of the cel.expr.EnumValue
// protobuf message: a type string plus an int32 value.
type EnumValue struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Type  string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	Value int32  `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
}

// Reset restores x to its zero value, caching message info on the unsafe
// fast path.
func (x *EnumValue) Reset() {
	*x = EnumValue{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cel_expr_value_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the message formatted by the protobuf runtime.
func (x *EnumValue) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *EnumValue as a protobuf message.
func (*EnumValue) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *EnumValue) ProtoReflect() protoreflect.Message {
	mi := &file_cel_expr_value_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
func (*EnumValue) Descriptor() ([]byte, []int) {
	return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
}

// GetType returns the type field, or "" for a nil receiver.
func (x *EnumValue) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

// GetValue returns the value field, or 0 for a nil receiver.
func (x *EnumValue) GetValue() int32 {
	if x != nil {
		return x.Value
	}
	return 0
}
|
||||
|
||||
// ListValue is the generated Go representation of the cel.expr.ListValue
// protobuf message: an ordered, repeated collection of Value elements.
type ListValue struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}

// Reset restores x to its zero value, caching message info on the unsafe
// fast path.
func (x *ListValue) Reset() {
	*x = ListValue{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cel_expr_value_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the message formatted by the protobuf runtime.
func (x *ListValue) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ListValue as a protobuf message.
func (*ListValue) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *ListValue) ProtoReflect() protoreflect.Message {
	mi := &file_cel_expr_value_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
func (*ListValue) Descriptor() ([]byte, []int) {
	return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
}

// GetValues returns the values field, or nil for a nil receiver.
func (x *ListValue) GetValues() []*Value {
	if x != nil {
		return x.Values
	}
	return nil
}
|
||||
|
||||
// MapValue is the generated Go representation of the cel.expr.MapValue
// protobuf message: a list of key/value Entry pairs (rather than a proto
// map field, which would restrict key types).
type MapValue struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
}

// Reset restores x to its zero value, caching message info on the unsafe
// fast path.
func (x *MapValue) Reset() {
	*x = MapValue{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cel_expr_value_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the message formatted by the protobuf runtime.
func (x *MapValue) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *MapValue as a protobuf message.
func (*MapValue) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *MapValue) ProtoReflect() protoreflect.Message {
	mi := &file_cel_expr_value_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
func (*MapValue) Descriptor() ([]byte, []int) {
	return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
}

// GetEntries returns the entries field, or nil for a nil receiver.
func (x *MapValue) GetEntries() []*MapValue_Entry {
	if x != nil {
		return x.Entries
	}
	return nil
}
|
||||
|
||||
// MapValue_Entry is the generated Go representation of the nested
// cel.expr.MapValue.Entry protobuf message: one key/value pair where both
// sides are arbitrary Values.
type MapValue_Entry struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Key   *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

// Reset restores x to its zero value, caching message info on the unsafe
// fast path.
func (x *MapValue_Entry) Reset() {
	*x = MapValue_Entry{}
	if protoimpl.UnsafeEnabled {
		mi := &file_cel_expr_value_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the message formatted by the protobuf runtime.
func (x *MapValue_Entry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *MapValue_Entry as a protobuf message.
func (*MapValue_Entry) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
	mi := &file_cel_expr_value_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
func (*MapValue_Entry) Descriptor() ([]byte, []int) {
	return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
}

// GetKey returns the key field, or nil for a nil receiver.
func (x *MapValue_Entry) GetKey() *Value {
	if x != nil {
		return x.Key
	}
	return nil
}

// GetValue returns the value field, or nil for a nil receiver.
func (x *MapValue_Entry) GetValue() *Value {
	if x != nil {
		return x.Value
	}
	return nil
}
|
||||
|
||||
// File_cel_expr_value_proto holds the protoreflect file descriptor for
// cel/expr/value.proto; it is populated by file_cel_expr_value_proto_init.
var File_cel_expr_value_proto protoreflect.FileDescriptor
||||
|
||||
var file_cel_expr_value_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
|
||||
0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
|
||||
0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
|
||||
0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
|
||||
0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
|
||||
0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
|
||||
0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
|
||||
0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
|
||||
0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
|
||||
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
|
||||
0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
|
||||
0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
|
||||
0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
|
||||
0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
|
||||
0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
|
||||
0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
|
||||
0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
|
||||
0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
|
||||
0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
	// rawDescOnce guards the one-time gzip compression below.
	file_cel_expr_value_proto_rawDescOnce sync.Once
	// rawDescData starts as the raw descriptor bytes and is replaced by the
	// gzip-compressed form on first use.
	file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
)

// file_cel_expr_value_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once and returns the compressed bytes thereafter.
// It exists to serve the deprecated Descriptor() methods above.
func file_cel_expr_value_proto_rawDescGZIP() []byte {
	file_cel_expr_value_proto_rawDescOnce.Do(func() {
		file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
	})
	return file_cel_expr_value_proto_rawDescData
}
|
||||
|
||||
// file_cel_expr_value_proto_msgTypes holds runtime message info for the five
// messages declared in cel/expr/value.proto, indexed in declaration order.
var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)

// file_cel_expr_value_proto_goTypes maps descriptor indexes to the Go types
// that implement them (messages first, then cross-file enum/message deps).
var file_cel_expr_value_proto_goTypes = []interface{}{
	(*Value)(nil),           // 0: cel.expr.Value
	(*EnumValue)(nil),       // 1: cel.expr.EnumValue
	(*ListValue)(nil),       // 2: cel.expr.ListValue
	(*MapValue)(nil),        // 3: cel.expr.MapValue
	(*MapValue_Entry)(nil),  // 4: cel.expr.MapValue.Entry
	(structpb.NullValue)(0), // 5: google.protobuf.NullValue
	(*anypb.Any)(nil),       // 6: google.protobuf.Any
}

// file_cel_expr_value_proto_depIdxs records, for every field with a message
// or enum type, the goTypes index of that dependency; the trailing entries
// mark the sub-list boundaries consumed by the TypeBuilder.
var file_cel_expr_value_proto_depIdxs = []int32{
	5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
	1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
	6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
	3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
	2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
	0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
	4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
	0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
	0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
	9, // [9:9] is the sub-list for method output_type
	9, // [9:9] is the sub-list for method input_type
	9, // [9:9] is the sub-list for extension type_name
	9, // [9:9] is the sub-list for extension extendee
	0, // [0:9] is the sub-list for field type_name
}
|
||||
|
||||
func init() { file_cel_expr_value_proto_init() }

// file_cel_expr_value_proto_init registers this file's messages with the
// protobuf runtime and populates File_cel_expr_value_proto. It is
// idempotent: once the file descriptor is built, further calls return
// immediately.
func file_cel_expr_value_proto_init() {
	if File_cel_expr_value_proto != nil {
		return
	}
	// Without the unsafe fast path, the runtime reaches each message's
	// unexported bookkeeping fields through these exporter closures.
	if !protoimpl.UnsafeEnabled {
		file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Value); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*EnumValue); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListValue); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*MapValue); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*MapValue_Entry); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Register the wrapper types that implement the Value.Kind oneof.
	file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
		(*Value_NullValue)(nil),
		(*Value_BoolValue)(nil),
		(*Value_Int64Value)(nil),
		(*Value_Uint64Value)(nil),
		(*Value_DoubleValue)(nil),
		(*Value_StringValue)(nil),
		(*Value_BytesValue)(nil),
		(*Value_EnumValue)(nil),
		(*Value_ObjectValue)(nil),
		(*Value_MapValue)(nil),
		(*Value_ListValue)(nil),
		(*Value_TypeValue)(nil),
	}
	// The local type x only supplies this package's import path to the
	// builder via reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_cel_expr_value_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   5,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cel_expr_value_proto_goTypes,
		DependencyIndexes: file_cel_expr_value_proto_depIdxs,
		MessageInfos:      file_cel_expr_value_proto_msgTypes,
	}.Build()
	File_cel_expr_value_proto = out.File
	// Drop the build-time tables so they can be garbage collected.
	file_cel_expr_value_proto_rawDesc = nil
	file_cel_expr_value_proto_goTypes = nil
	file_cel_expr_value_proto_depIdxs = nil
}
|
||||
202
vendor/cloud.google.com/go/monitoring/LICENSE
generated
vendored
Normal file
202
vendor/cloud.google.com/go/monitoring/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
403
vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go
generated
vendored
Normal file
403
vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newAlertPolicyClientHook clientHook
|
||||
|
||||
// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient.
|
||||
type AlertPolicyCallOptions struct {
|
||||
ListAlertPolicies []gax.CallOption
|
||||
GetAlertPolicy []gax.CallOption
|
||||
CreateAlertPolicy []gax.CallOption
|
||||
DeleteAlertPolicy []gax.CallOption
|
||||
UpdateAlertPolicy []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultAlertPolicyGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions {
|
||||
return &AlertPolicyCallOptions{
|
||||
ListAlertPolicies: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetAlertPolicy: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateAlertPolicy: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
DeleteAlertPolicy: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
UpdateAlertPolicy: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// internalAlertPolicyClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalAlertPolicyClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
ListAlertPolicies(context.Context, *monitoringpb.ListAlertPoliciesRequest, ...gax.CallOption) *AlertPolicyIterator
|
||||
GetAlertPolicy(context.Context, *monitoringpb.GetAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
|
||||
CreateAlertPolicy(context.Context, *monitoringpb.CreateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
|
||||
DeleteAlertPolicy(context.Context, *monitoringpb.DeleteAlertPolicyRequest, ...gax.CallOption) error
|
||||
UpdateAlertPolicy(context.Context, *monitoringpb.UpdateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
|
||||
}
|
||||
|
||||
// AlertPolicyClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The AlertPolicyService API is used to manage (list, create, delete,
|
||||
// edit) alert policies in Cloud Monitoring. An alerting policy is
|
||||
// a description of the conditions under which some aspect of your
|
||||
// system is considered to be “unhealthy” and the ways to notify
|
||||
// people or services about this state. In addition to using this API, alert
|
||||
// policies can also be managed through
|
||||
// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/),
|
||||
// which can be reached by clicking the “Monitoring” tab in
|
||||
// Cloud console (at https://console.cloud.google.com/).
|
||||
type AlertPolicyClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalAlertPolicyClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *AlertPolicyCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *AlertPolicyClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *AlertPolicyClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// ListAlertPolicies lists the existing alerting policies for the workspace.
|
||||
func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
|
||||
return c.internalClient.ListAlertPolicies(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetAlertPolicy gets a single alerting policy.
|
||||
func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
|
||||
return c.internalClient.GetAlertPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateAlertPolicy creates a new alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
|
||||
return c.internalClient.CreateAlertPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteAlertPolicy deletes an alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteAlertPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with
|
||||
// a new one or replace only certain fields in the current alerting policy by
|
||||
// specifying the fields to be updated via updateMask. Returns the
|
||||
// updated alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
|
||||
return c.internalClient.UpdateAlertPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// alertPolicyGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type alertPolicyGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing AlertPolicyClient
|
||||
CallOptions **AlertPolicyCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
alertPolicyClient monitoringpb.AlertPolicyServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewAlertPolicyClient creates a new alert policy service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The AlertPolicyService API is used to manage (list, create, delete,
|
||||
// edit) alert policies in Cloud Monitoring. An alerting policy is
|
||||
// a description of the conditions under which some aspect of your
|
||||
// system is considered to be “unhealthy” and the ways to notify
|
||||
// people or services about this state. In addition to using this API, alert
|
||||
// policies can also be managed through
|
||||
// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/),
|
||||
// which can be reached by clicking the “Monitoring” tab in
|
||||
// Cloud console (at https://console.cloud.google.com/).
|
||||
func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
|
||||
clientOpts := defaultAlertPolicyGRPCClientOptions()
|
||||
if newAlertPolicyClientHook != nil {
|
||||
hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := AlertPolicyClient{CallOptions: defaultAlertPolicyCallOptions()}
|
||||
|
||||
c := &alertPolicyGRPCClient{
|
||||
connPool: connPool,
|
||||
alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *alertPolicyGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...)
|
||||
it := &AlertPolicyIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) {
|
||||
resp := &monitoringpb.ListAlertPoliciesResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.alertPolicyClient.ListAlertPolicies, req, settings.GRPC, c.logger, "ListAlertPolicies")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetAlertPolicies(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...)
|
||||
var resp *monitoringpb.AlertPolicy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.alertPolicyClient.GetAlertPolicy, req, settings.GRPC, c.logger, "GetAlertPolicy")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...)
|
||||
var resp *monitoringpb.AlertPolicy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.alertPolicyClient.CreateAlertPolicy, req, settings.GRPC, c.logger, "CreateAlertPolicy")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.alertPolicyClient.DeleteAlertPolicy, req, settings.GRPC, c.logger, "DeleteAlertPolicy")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...)
|
||||
var resp *monitoringpb.AlertPolicy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.alertPolicyClient.UpdateAlertPolicy, req, settings.GRPC, c.logger, "UpdateAlertPolicy")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
682
vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go
generated
vendored
Normal file
682
vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go
generated
vendored
Normal file
@@ -0,0 +1,682 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
"google.golang.org/api/iterator"
|
||||
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
)
|
||||
|
||||
// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy.
|
||||
type AlertPolicyIterator struct {
|
||||
items []*monitoringpb.AlertPolicy
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) {
|
||||
var item *monitoringpb.AlertPolicy
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *AlertPolicyIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *AlertPolicyIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// GroupIterator manages a stream of *monitoringpb.Group.
|
||||
type GroupIterator struct {
|
||||
items []*monitoringpb.Group
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *GroupIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *GroupIterator) Next() (*monitoringpb.Group, error) {
|
||||
var item *monitoringpb.Group
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *GroupIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *GroupIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor.
|
||||
type MetricDescriptorIterator struct {
|
||||
items []*metricpb.MetricDescriptor
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) {
|
||||
var item *metricpb.MetricDescriptor
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *MetricDescriptorIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *MetricDescriptorIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
|
||||
type MonitoredResourceDescriptorIterator struct {
|
||||
items []*monitoredrespb.MonitoredResourceDescriptor
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
|
||||
var item *monitoredrespb.MonitoredResourceDescriptor
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *MonitoredResourceDescriptorIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource.
|
||||
type MonitoredResourceIterator struct {
|
||||
items []*monitoredrespb.MonitoredResource
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) {
|
||||
var item *monitoredrespb.MonitoredResource
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *MonitoredResourceIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *MonitoredResourceIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
|
||||
type NotificationChannelDescriptorIterator struct {
|
||||
items []*monitoringpb.NotificationChannelDescriptor
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) {
|
||||
var item *monitoringpb.NotificationChannelDescriptor
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *NotificationChannelDescriptorIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel.
|
||||
type NotificationChannelIterator struct {
|
||||
items []*monitoringpb.NotificationChannel
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) {
|
||||
var item *monitoringpb.NotificationChannel
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *NotificationChannelIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *NotificationChannelIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// ServiceIterator manages a stream of *monitoringpb.Service.
|
||||
type ServiceIterator struct {
|
||||
items []*monitoringpb.Service
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *ServiceIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *ServiceIterator) Next() (*monitoringpb.Service, error) {
|
||||
var item *monitoringpb.Service
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ServiceIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *ServiceIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective.
|
||||
type ServiceLevelObjectiveIterator struct {
|
||||
items []*monitoringpb.ServiceLevelObjective
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) {
|
||||
var item *monitoringpb.ServiceLevelObjective
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ServiceLevelObjectiveIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// SnoozeIterator manages a stream of *monitoringpb.Snooze.
|
||||
type SnoozeIterator struct {
|
||||
items []*monitoringpb.Snooze
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *SnoozeIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) {
|
||||
var item *monitoringpb.Snooze
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *SnoozeIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *SnoozeIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// TimeSeriesDataIterator manages a stream of *monitoringpb.TimeSeriesData.
|
||||
type TimeSeriesDataIterator struct {
|
||||
items []*monitoringpb.TimeSeriesData
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeriesData, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *TimeSeriesDataIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *TimeSeriesDataIterator) Next() (*monitoringpb.TimeSeriesData, error) {
|
||||
var item *monitoringpb.TimeSeriesData
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *TimeSeriesDataIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *TimeSeriesDataIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries.
|
||||
type TimeSeriesIterator struct {
|
||||
items []*monitoringpb.TimeSeries
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) {
|
||||
var item *monitoringpb.TimeSeries
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *TimeSeriesIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *TimeSeriesIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig.
|
||||
type UptimeCheckConfigIterator struct {
|
||||
items []*monitoringpb.UptimeCheckConfig
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) {
|
||||
var item *monitoringpb.UptimeCheckConfig
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *UptimeCheckConfigIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *UptimeCheckConfigIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp.
|
||||
type UptimeCheckIpIterator struct {
|
||||
items []*monitoringpb.UptimeCheckIp
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) {
|
||||
var item *monitoringpb.UptimeCheckIp
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *UptimeCheckIpIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *UptimeCheckIpIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
112
vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go
generated
vendored
Normal file
112
vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
//go:build go1.23
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"iter"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
"github.com/googleapis/gax-go/v2/iterator"
|
||||
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
)
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *AlertPolicyIterator) All() iter.Seq2[*monitoringpb.AlertPolicy, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *GroupIterator) All() iter.Seq2[*monitoringpb.Group, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *MetricDescriptorIterator) All() iter.Seq2[*metricpb.MetricDescriptor, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *MonitoredResourceDescriptorIterator) All() iter.Seq2[*monitoredrespb.MonitoredResourceDescriptor, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *MonitoredResourceIterator) All() iter.Seq2[*monitoredrespb.MonitoredResource, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *NotificationChannelDescriptorIterator) All() iter.Seq2[*monitoringpb.NotificationChannelDescriptor, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *NotificationChannelIterator) All() iter.Seq2[*monitoringpb.NotificationChannel, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *ServiceIterator) All() iter.Seq2[*monitoringpb.Service, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *ServiceLevelObjectiveIterator) All() iter.Seq2[*monitoringpb.ServiceLevelObjective, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *SnoozeIterator) All() iter.Seq2[*monitoringpb.Snooze, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *TimeSeriesDataIterator) All() iter.Seq2[*monitoringpb.TimeSeriesData, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *TimeSeriesIterator) All() iter.Seq2[*monitoringpb.TimeSeries, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *UptimeCheckConfigIterator) All() iter.Seq2[*monitoringpb.UptimeCheckConfig, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *UptimeCheckIpIterator) All() iter.Seq2[*monitoringpb.UptimeCheckIp, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
85
vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go
generated
vendored
Normal file
85
vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
// Package monitoring is an auto-generated package for the
|
||||
// Cloud Monitoring API.
|
||||
//
|
||||
// Manages your Cloud Monitoring data and configurations.
|
||||
//
|
||||
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
|
||||
//
|
||||
// # General documentation
|
||||
//
|
||||
// For information that is relevant for all client libraries please reference
|
||||
// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
|
||||
// page includes:
|
||||
//
|
||||
// - [Authentication and Authorization]
|
||||
// - [Timeouts and Cancellation]
|
||||
// - [Testing against Client Libraries]
|
||||
// - [Debugging Client Libraries]
|
||||
// - [Inspecting errors]
|
||||
//
|
||||
// # Example usage
|
||||
//
|
||||
// To get started with this package, create a client.
|
||||
//
|
||||
// // go get cloud.google.com/go/monitoring/apiv3/v2@latest
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
// // - It may require correct/in-range values for request initialization.
|
||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
||||
// c, err := monitoring.NewAlertPolicyClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
||||
// The returned client must be Closed when it is done being used.
|
||||
//
|
||||
// # Using the Client
|
||||
//
|
||||
// The following is an example of making an API call with the newly created client, mentioned above.
|
||||
//
|
||||
// req := &monitoringpb.CreateAlertPolicyRequest{
|
||||
// // TODO: Fill request struct fields.
|
||||
// // See https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb#CreateAlertPolicyRequest.
|
||||
// }
|
||||
// resp, err := c.CreateAlertPolicy(ctx, req)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// // TODO: Use resp.
|
||||
// _ = resp
|
||||
//
|
||||
// # Use of Context
|
||||
//
|
||||
// The ctx passed to NewAlertPolicyClient is used for authentication requests and
|
||||
// for creating the underlying connection, but is not used for subsequent calls.
|
||||
// Individual methods on the client use the ctx given to them.
|
||||
//
|
||||
// To close the open connection, use the Close() method.
|
||||
//
|
||||
// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
|
||||
// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
|
||||
// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
|
||||
// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
|
||||
// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
|
||||
package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2"
|
||||
336
vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json
generated
vendored
Normal file
336
vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json
generated
vendored
Normal file
@@ -0,0 +1,336 @@
|
||||
{
|
||||
"schema": "1.0",
|
||||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
|
||||
"language": "go",
|
||||
"protoPackage": "google.monitoring.v3",
|
||||
"libraryPackage": "cloud.google.com/go/monitoring/apiv3/v2",
|
||||
"services": {
|
||||
"AlertPolicyService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "AlertPolicyClient",
|
||||
"rpcs": {
|
||||
"CreateAlertPolicy": {
|
||||
"methods": [
|
||||
"CreateAlertPolicy"
|
||||
]
|
||||
},
|
||||
"DeleteAlertPolicy": {
|
||||
"methods": [
|
||||
"DeleteAlertPolicy"
|
||||
]
|
||||
},
|
||||
"GetAlertPolicy": {
|
||||
"methods": [
|
||||
"GetAlertPolicy"
|
||||
]
|
||||
},
|
||||
"ListAlertPolicies": {
|
||||
"methods": [
|
||||
"ListAlertPolicies"
|
||||
]
|
||||
},
|
||||
"UpdateAlertPolicy": {
|
||||
"methods": [
|
||||
"UpdateAlertPolicy"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"GroupService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "GroupClient",
|
||||
"rpcs": {
|
||||
"CreateGroup": {
|
||||
"methods": [
|
||||
"CreateGroup"
|
||||
]
|
||||
},
|
||||
"DeleteGroup": {
|
||||
"methods": [
|
||||
"DeleteGroup"
|
||||
]
|
||||
},
|
||||
"GetGroup": {
|
||||
"methods": [
|
||||
"GetGroup"
|
||||
]
|
||||
},
|
||||
"ListGroupMembers": {
|
||||
"methods": [
|
||||
"ListGroupMembers"
|
||||
]
|
||||
},
|
||||
"ListGroups": {
|
||||
"methods": [
|
||||
"ListGroups"
|
||||
]
|
||||
},
|
||||
"UpdateGroup": {
|
||||
"methods": [
|
||||
"UpdateGroup"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"MetricService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "MetricClient",
|
||||
"rpcs": {
|
||||
"CreateMetricDescriptor": {
|
||||
"methods": [
|
||||
"CreateMetricDescriptor"
|
||||
]
|
||||
},
|
||||
"CreateServiceTimeSeries": {
|
||||
"methods": [
|
||||
"CreateServiceTimeSeries"
|
||||
]
|
||||
},
|
||||
"CreateTimeSeries": {
|
||||
"methods": [
|
||||
"CreateTimeSeries"
|
||||
]
|
||||
},
|
||||
"DeleteMetricDescriptor": {
|
||||
"methods": [
|
||||
"DeleteMetricDescriptor"
|
||||
]
|
||||
},
|
||||
"GetMetricDescriptor": {
|
||||
"methods": [
|
||||
"GetMetricDescriptor"
|
||||
]
|
||||
},
|
||||
"GetMonitoredResourceDescriptor": {
|
||||
"methods": [
|
||||
"GetMonitoredResourceDescriptor"
|
||||
]
|
||||
},
|
||||
"ListMetricDescriptors": {
|
||||
"methods": [
|
||||
"ListMetricDescriptors"
|
||||
]
|
||||
},
|
||||
"ListMonitoredResourceDescriptors": {
|
||||
"methods": [
|
||||
"ListMonitoredResourceDescriptors"
|
||||
]
|
||||
},
|
||||
"ListTimeSeries": {
|
||||
"methods": [
|
||||
"ListTimeSeries"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"NotificationChannelService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "NotificationChannelClient",
|
||||
"rpcs": {
|
||||
"CreateNotificationChannel": {
|
||||
"methods": [
|
||||
"CreateNotificationChannel"
|
||||
]
|
||||
},
|
||||
"DeleteNotificationChannel": {
|
||||
"methods": [
|
||||
"DeleteNotificationChannel"
|
||||
]
|
||||
},
|
||||
"GetNotificationChannel": {
|
||||
"methods": [
|
||||
"GetNotificationChannel"
|
||||
]
|
||||
},
|
||||
"GetNotificationChannelDescriptor": {
|
||||
"methods": [
|
||||
"GetNotificationChannelDescriptor"
|
||||
]
|
||||
},
|
||||
"GetNotificationChannelVerificationCode": {
|
||||
"methods": [
|
||||
"GetNotificationChannelVerificationCode"
|
||||
]
|
||||
},
|
||||
"ListNotificationChannelDescriptors": {
|
||||
"methods": [
|
||||
"ListNotificationChannelDescriptors"
|
||||
]
|
||||
},
|
||||
"ListNotificationChannels": {
|
||||
"methods": [
|
||||
"ListNotificationChannels"
|
||||
]
|
||||
},
|
||||
"SendNotificationChannelVerificationCode": {
|
||||
"methods": [
|
||||
"SendNotificationChannelVerificationCode"
|
||||
]
|
||||
},
|
||||
"UpdateNotificationChannel": {
|
||||
"methods": [
|
||||
"UpdateNotificationChannel"
|
||||
]
|
||||
},
|
||||
"VerifyNotificationChannel": {
|
||||
"methods": [
|
||||
"VerifyNotificationChannel"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"QueryService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "QueryClient",
|
||||
"rpcs": {
|
||||
"QueryTimeSeries": {
|
||||
"methods": [
|
||||
"QueryTimeSeries"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ServiceMonitoringService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "ServiceMonitoringClient",
|
||||
"rpcs": {
|
||||
"CreateService": {
|
||||
"methods": [
|
||||
"CreateService"
|
||||
]
|
||||
},
|
||||
"CreateServiceLevelObjective": {
|
||||
"methods": [
|
||||
"CreateServiceLevelObjective"
|
||||
]
|
||||
},
|
||||
"DeleteService": {
|
||||
"methods": [
|
||||
"DeleteService"
|
||||
]
|
||||
},
|
||||
"DeleteServiceLevelObjective": {
|
||||
"methods": [
|
||||
"DeleteServiceLevelObjective"
|
||||
]
|
||||
},
|
||||
"GetService": {
|
||||
"methods": [
|
||||
"GetService"
|
||||
]
|
||||
},
|
||||
"GetServiceLevelObjective": {
|
||||
"methods": [
|
||||
"GetServiceLevelObjective"
|
||||
]
|
||||
},
|
||||
"ListServiceLevelObjectives": {
|
||||
"methods": [
|
||||
"ListServiceLevelObjectives"
|
||||
]
|
||||
},
|
||||
"ListServices": {
|
||||
"methods": [
|
||||
"ListServices"
|
||||
]
|
||||
},
|
||||
"UpdateService": {
|
||||
"methods": [
|
||||
"UpdateService"
|
||||
]
|
||||
},
|
||||
"UpdateServiceLevelObjective": {
|
||||
"methods": [
|
||||
"UpdateServiceLevelObjective"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"SnoozeService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "SnoozeClient",
|
||||
"rpcs": {
|
||||
"CreateSnooze": {
|
||||
"methods": [
|
||||
"CreateSnooze"
|
||||
]
|
||||
},
|
||||
"GetSnooze": {
|
||||
"methods": [
|
||||
"GetSnooze"
|
||||
]
|
||||
},
|
||||
"ListSnoozes": {
|
||||
"methods": [
|
||||
"ListSnoozes"
|
||||
]
|
||||
},
|
||||
"UpdateSnooze": {
|
||||
"methods": [
|
||||
"UpdateSnooze"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"UptimeCheckService": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "UptimeCheckClient",
|
||||
"rpcs": {
|
||||
"CreateUptimeCheckConfig": {
|
||||
"methods": [
|
||||
"CreateUptimeCheckConfig"
|
||||
]
|
||||
},
|
||||
"DeleteUptimeCheckConfig": {
|
||||
"methods": [
|
||||
"DeleteUptimeCheckConfig"
|
||||
]
|
||||
},
|
||||
"GetUptimeCheckConfig": {
|
||||
"methods": [
|
||||
"GetUptimeCheckConfig"
|
||||
]
|
||||
},
|
||||
"ListUptimeCheckConfigs": {
|
||||
"methods": [
|
||||
"ListUptimeCheckConfigs"
|
||||
]
|
||||
},
|
||||
"ListUptimeCheckIps": {
|
||||
"methods": [
|
||||
"ListUptimeCheckIps"
|
||||
]
|
||||
},
|
||||
"UpdateUptimeCheckConfig": {
|
||||
"methods": [
|
||||
"UpdateUptimeCheckConfig"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
470
vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go
generated
vendored
Normal file
470
vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go
generated
vendored
Normal file
@@ -0,0 +1,470 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newGroupClientHook clientHook
|
||||
|
||||
// GroupCallOptions contains the retry settings for each method of GroupClient.
|
||||
type GroupCallOptions struct {
|
||||
ListGroups []gax.CallOption
|
||||
GetGroup []gax.CallOption
|
||||
CreateGroup []gax.CallOption
|
||||
UpdateGroup []gax.CallOption
|
||||
DeleteGroup []gax.CallOption
|
||||
ListGroupMembers []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultGroupGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultGroupCallOptions() *GroupCallOptions {
|
||||
return &GroupCallOptions{
|
||||
ListGroups: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetGroup: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateGroup: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
UpdateGroup: []gax.CallOption{
|
||||
gax.WithTimeout(180000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
DeleteGroup: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListGroupMembers: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// internalGroupClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalGroupClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
ListGroups(context.Context, *monitoringpb.ListGroupsRequest, ...gax.CallOption) *GroupIterator
|
||||
GetGroup(context.Context, *monitoringpb.GetGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
|
||||
CreateGroup(context.Context, *monitoringpb.CreateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
|
||||
UpdateGroup(context.Context, *monitoringpb.UpdateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
|
||||
DeleteGroup(context.Context, *monitoringpb.DeleteGroupRequest, ...gax.CallOption) error
|
||||
ListGroupMembers(context.Context, *monitoringpb.ListGroupMembersRequest, ...gax.CallOption) *MonitoredResourceIterator
|
||||
}
|
||||
|
||||
// GroupClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The Group API lets you inspect and manage your
|
||||
// groups (at #google.monitoring.v3.Group).
|
||||
//
|
||||
// A group is a named filter that is used to identify
|
||||
// a collection of monitored resources. Groups are typically used to
|
||||
// mirror the physical and/or logical topology of the environment.
|
||||
// Because group membership is computed dynamically, monitored
|
||||
// resources that are started in the future are automatically placed
|
||||
// in matching groups. By using a group to name monitored resources in,
|
||||
// for example, an alert policy, the target of that alert policy is
|
||||
// updated automatically as monitored resources are added and removed
|
||||
// from the infrastructure.
|
||||
type GroupClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalGroupClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *GroupCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *GroupClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *GroupClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// ListGroups lists the existing groups.
|
||||
func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
|
||||
return c.internalClient.ListGroups(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetGroup gets a single group.
|
||||
func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
|
||||
return c.internalClient.GetGroup(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateGroup creates a new group.
|
||||
func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
|
||||
return c.internalClient.CreateGroup(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateGroup updates an existing group.
|
||||
// You can change any group attributes except name.
|
||||
func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
|
||||
return c.internalClient.UpdateGroup(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteGroup deletes an existing group.
|
||||
func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteGroup(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListGroupMembers lists the monitored resources that are members of a group.
|
||||
func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
|
||||
return c.internalClient.ListGroupMembers(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// groupGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type groupGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing GroupClient
|
||||
CallOptions **GroupCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
groupClient monitoringpb.GroupServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewGroupClient creates a new group service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The Group API lets you inspect and manage your
|
||||
// groups (at #google.monitoring.v3.Group).
|
||||
//
|
||||
// A group is a named filter that is used to identify
|
||||
// a collection of monitored resources. Groups are typically used to
|
||||
// mirror the physical and/or logical topology of the environment.
|
||||
// Because group membership is computed dynamically, monitored
|
||||
// resources that are started in the future are automatically placed
|
||||
// in matching groups. By using a group to name monitored resources in,
|
||||
// for example, an alert policy, the target of that alert policy is
|
||||
// updated automatically as monitored resources are added and removed
|
||||
// from the infrastructure.
|
||||
func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) {
|
||||
clientOpts := defaultGroupGRPCClientOptions()
|
||||
if newGroupClientHook != nil {
|
||||
hookOpts, err := newGroupClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := GroupClient{CallOptions: defaultGroupCallOptions()}
|
||||
|
||||
c := &groupGRPCClient{
|
||||
connPool: connPool,
|
||||
groupClient: monitoringpb.NewGroupServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *groupGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *groupGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...)
|
||||
it := &GroupIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListGroupsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) {
|
||||
resp := &monitoringpb.ListGroupsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.groupClient.ListGroups, req, settings.GRPC, c.logger, "ListGroups")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetGroup(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...)
|
||||
var resp *monitoringpb.Group
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.groupClient.GetGroup, req, settings.GRPC, c.logger, "GetGroup")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...)
|
||||
var resp *monitoringpb.Group
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.groupClient.CreateGroup, req, settings.GRPC, c.logger, "CreateGroup")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...)
|
||||
var resp *monitoringpb.Group
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.groupClient.UpdateGroup, req, settings.GRPC, c.logger, "UpdateGroup")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.groupClient.DeleteGroup, req, settings.GRPC, c.logger, "DeleteGroup")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...)
|
||||
it := &MonitoredResourceIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) {
|
||||
resp := &monitoringpb.ListGroupMembersResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.groupClient.ListGroupMembers, req, settings.GRPC, c.logger, "ListGroupMembers")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetMembers(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
64
vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go
generated
vendored
Normal file
64
vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/googleapis/gax-go/v2/internallog/grpclog"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const serviceName = "monitoring.googleapis.com"
|
||||
|
||||
// For more information on implementing a client constructor hook, see
|
||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
||||
type clientHookParams struct{}
|
||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
||||
|
||||
var versionClient string
|
||||
|
||||
func getVersionClient() string {
|
||||
if versionClient == "" {
|
||||
return "UNKNOWN"
|
||||
}
|
||||
return versionClient
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/monitoring",
|
||||
"https://www.googleapis.com/auth/monitoring.read",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
}
|
||||
}
|
||||
|
||||
func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
|
||||
var zero O
|
||||
logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
|
||||
resp, err := fn(ctx, req, opts...)
|
||||
if err != nil {
|
||||
return zero, err
|
||||
}
|
||||
logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
|
||||
return resp, err
|
||||
}
|
||||
582
vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go
generated
vendored
Normal file
582
vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go
generated
vendored
Normal file
@@ -0,0 +1,582 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newMetricClientHook clientHook
|
||||
|
||||
// MetricCallOptions contains the retry settings for each method of MetricClient.
|
||||
type MetricCallOptions struct {
|
||||
ListMonitoredResourceDescriptors []gax.CallOption
|
||||
GetMonitoredResourceDescriptor []gax.CallOption
|
||||
ListMetricDescriptors []gax.CallOption
|
||||
GetMetricDescriptor []gax.CallOption
|
||||
CreateMetricDescriptor []gax.CallOption
|
||||
DeleteMetricDescriptor []gax.CallOption
|
||||
ListTimeSeries []gax.CallOption
|
||||
CreateTimeSeries []gax.CallOption
|
||||
CreateServiceTimeSeries []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultMetricGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMetricCallOptions() *MetricCallOptions {
|
||||
return &MetricCallOptions{
|
||||
ListMonitoredResourceDescriptors: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetMonitoredResourceDescriptor: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListMetricDescriptors: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetMetricDescriptor: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateMetricDescriptor: []gax.CallOption{
|
||||
gax.WithTimeout(12000 * time.Millisecond),
|
||||
},
|
||||
DeleteMetricDescriptor: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListTimeSeries: []gax.CallOption{
|
||||
gax.WithTimeout(90000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateTimeSeries: []gax.CallOption{
|
||||
gax.WithTimeout(12000 * time.Millisecond),
|
||||
},
|
||||
CreateServiceTimeSeries: []gax.CallOption{},
|
||||
}
|
||||
}
|
||||
|
||||
// internalMetricClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalMetricClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
ListMonitoredResourceDescriptors(context.Context, *monitoringpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator
|
||||
GetMonitoredResourceDescriptor(context.Context, *monitoringpb.GetMonitoredResourceDescriptorRequest, ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error)
|
||||
ListMetricDescriptors(context.Context, *monitoringpb.ListMetricDescriptorsRequest, ...gax.CallOption) *MetricDescriptorIterator
|
||||
GetMetricDescriptor(context.Context, *monitoringpb.GetMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error)
|
||||
CreateMetricDescriptor(context.Context, *monitoringpb.CreateMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error)
|
||||
DeleteMetricDescriptor(context.Context, *monitoringpb.DeleteMetricDescriptorRequest, ...gax.CallOption) error
|
||||
ListTimeSeries(context.Context, *monitoringpb.ListTimeSeriesRequest, ...gax.CallOption) *TimeSeriesIterator
|
||||
CreateTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error
|
||||
CreateServiceTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error
|
||||
}
|
||||
|
||||
// MetricClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// Manages metric descriptors, monitored resource descriptors, and
|
||||
// time series data.
|
||||
type MetricClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalMetricClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *MetricCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *MetricClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *MetricClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter.
|
||||
func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
|
||||
return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetMonitoredResourceDescriptor gets a single monitored resource descriptor.
|
||||
func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
|
||||
return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListMetricDescriptors lists metric descriptors that match a filter.
|
||||
func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
|
||||
return c.internalClient.ListMetricDescriptors(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetMetricDescriptor gets a single metric descriptor.
|
||||
func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
|
||||
return c.internalClient.GetMetricDescriptor(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateMetricDescriptor creates a new metric descriptor.
|
||||
// The creation is executed asynchronously.
|
||||
// User-created metric descriptors define
|
||||
// custom metrics (at https://cloud.google.com/monitoring/custom-metrics).
|
||||
// The metric descriptor is updated if it already exists,
|
||||
// except that metric labels are never removed.
|
||||
func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
|
||||
return c.internalClient.CreateMetricDescriptor(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteMetricDescriptor deletes a metric descriptor. Only user-created
|
||||
// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be
|
||||
// deleted.
|
||||
func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListTimeSeries lists time series that match a filter.
|
||||
func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
|
||||
return c.internalClient.ListTimeSeries(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateTimeSeries creates or adds data to one or more time series.
|
||||
// The response is empty if all time series in the request were written.
|
||||
// If any time series could not be written, a corresponding failure message is
|
||||
// included in the error response.
|
||||
// This method does not support
|
||||
// resource locations constraint of an organization
|
||||
// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
|
||||
func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.CreateTimeSeries(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateServiceTimeSeries creates or adds data to one or more service time series. A service time
|
||||
// series is a time series for a metric from a Google Cloud service. The
|
||||
// response is empty if all time series in the request were written. If any
|
||||
// time series could not be written, a corresponding failure message is
|
||||
// included in the error response. This endpoint rejects writes to
|
||||
// user-defined metrics.
|
||||
// This method is only for use by Google Cloud services. Use
|
||||
// projects.timeSeries.create
|
||||
// instead.
|
||||
func (c *MetricClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.CreateServiceTimeSeries(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// metricGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type metricGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing MetricClient
|
||||
CallOptions **MetricCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
metricClient monitoringpb.MetricServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewMetricClient creates a new metric service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// Manages metric descriptors, monitored resource descriptors, and
|
||||
// time series data.
|
||||
func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) {
|
||||
clientOpts := defaultMetricGRPCClientOptions()
|
||||
if newMetricClientHook != nil {
|
||||
hookOpts, err := newMetricClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := MetricClient{CallOptions: defaultMetricCallOptions()}
|
||||
|
||||
c := &metricGRPCClient{
|
||||
connPool: connPool,
|
||||
metricClient: monitoringpb.NewMetricServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *metricGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *metricGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...)
|
||||
it := &MonitoredResourceDescriptorIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
|
||||
resp := &monitoringpb.ListMonitoredResourceDescriptorsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.metricClient.ListMonitoredResourceDescriptors, req, settings.GRPC, c.logger, "ListMonitoredResourceDescriptors")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...)
|
||||
var resp *monitoredrespb.MonitoredResourceDescriptor
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.metricClient.GetMonitoredResourceDescriptor, req, settings.GRPC, c.logger, "GetMonitoredResourceDescriptor")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...)
|
||||
it := &MetricDescriptorIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) {
|
||||
resp := &monitoringpb.ListMetricDescriptorsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.metricClient.ListMetricDescriptors, req, settings.GRPC, c.logger, "ListMetricDescriptors")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetMetricDescriptors(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...)
|
||||
var resp *metricpb.MetricDescriptor
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.metricClient.GetMetricDescriptor, req, settings.GRPC, c.logger, "GetMetricDescriptor")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...)
|
||||
var resp *metricpb.MetricDescriptor
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.metricClient.CreateMetricDescriptor, req, settings.GRPC, c.logger, "CreateMetricDescriptor")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.metricClient.DeleteMetricDescriptor, req, settings.GRPC, c.logger, "DeleteMetricDescriptor")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...)
|
||||
it := &TimeSeriesIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) {
|
||||
resp := &monitoringpb.ListTimeSeriesResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.metricClient.ListTimeSeries, req, settings.GRPC, c.logger, "ListTimeSeries")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetTimeSeries(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.metricClient.CreateTimeSeries, req, settings.GRPC, c.logger, "CreateTimeSeries")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.metricClient.CreateServiceTimeSeries, req, settings.GRPC, c.logger, "CreateServiceTimeSeries")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
2894
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
generated
vendored
Normal file
2894
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
961
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go
generated
vendored
Normal file
961
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go
generated
vendored
Normal file
@@ -0,0 +1,961 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/alert_service.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// The protocol for the `CreateAlertPolicy` request.
|
||||
type CreateAlertPolicyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The
|
||||
// [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
|
||||
// to create the alerting policy. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]
|
||||
//
|
||||
// Note that this field names the parent container in which the alerting
|
||||
// policy will be written, not the name of the created policy. |name| must be
|
||||
// a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will
|
||||
// return. The alerting policy that is returned will have a name that contains
|
||||
// a normalized representation of this name as a prefix but adds a suffix of
|
||||
// the form `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the
|
||||
// container.
|
||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Required. The requested alerting policy. You should omit the `name` field
|
||||
// in this policy. The name will be returned in the new policy, including a
|
||||
// new `[ALERT_POLICY_ID]` value.
|
||||
AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateAlertPolicyRequest) Reset() {
|
||||
*x = CreateAlertPolicyRequest{}
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *CreateAlertPolicyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateAlertPolicyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *CreateAlertPolicyRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
|
||||
if x != nil {
|
||||
return x.AlertPolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The protocol for the `GetAlertPolicy` request.
|
||||
type GetAlertPolicyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The alerting policy to retrieve. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
|
||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetAlertPolicyRequest) Reset() {
|
||||
*x = GetAlertPolicyRequest{}
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetAlertPolicyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetAlertPolicyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetAlertPolicyRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `ListAlertPolicies` request.
|
||||
type ListAlertPoliciesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The
|
||||
// [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
|
||||
// alert policies are to be listed. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]
|
||||
//
|
||||
// Note that this field names the parent container in which the alerting
|
||||
// policies to be listed are stored. To retrieve a single alerting policy
|
||||
// by name, use the
|
||||
// [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
|
||||
// operation, instead.
|
||||
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Optional. If provided, this field specifies the criteria that must be met
|
||||
// by alert policies to be included in the response.
|
||||
//
|
||||
// For more details, see [sorting and
|
||||
// filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
|
||||
Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
// Optional. A comma-separated list of fields by which to sort the result.
|
||||
// Supports the same set of field references as the `filter` field. Entries
|
||||
// can be prefixed with a minus sign to sort by the field in descending order.
|
||||
//
|
||||
// For more details, see [sorting and
|
||||
// filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
|
||||
OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
|
||||
// Optional. The maximum number of results to return in a single response.
|
||||
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
// Optional. If this field is not empty then it must contain the
|
||||
// `nextPageToken` value returned by a previous call to this method. Using
|
||||
// this field causes the method to return more results from the previous
|
||||
// method call.
|
||||
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) Reset() {
|
||||
*x = ListAlertPoliciesRequest{}
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListAlertPoliciesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) GetFilter() string {
|
||||
if x != nil {
|
||||
return x.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) GetOrderBy() string {
|
||||
if x != nil {
|
||||
return x.OrderBy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) GetPageSize() int32 {
|
||||
if x != nil {
|
||||
return x.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesRequest) GetPageToken() string {
|
||||
if x != nil {
|
||||
return x.PageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `ListAlertPolicies` response.
|
||||
type ListAlertPoliciesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The returned alert policies.
|
||||
AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"`
|
||||
// If there might be more results than were returned, then this field is set
|
||||
// to a non-empty value. To see the additional results,
|
||||
// use that value as `page_token` in the next call to this method.
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
// The total number of alert policies in all pages. This number is only an
|
||||
// estimate, and may change in subsequent pages. https://aip.dev/158
|
||||
TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesResponse) Reset() {
|
||||
*x = ListAlertPoliciesResponse{}
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListAlertPoliciesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy {
|
||||
if x != nil {
|
||||
return x.AlertPolicies
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesResponse) GetNextPageToken() string {
|
||||
if x != nil {
|
||||
return x.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ListAlertPoliciesResponse) GetTotalSize() int32 {
|
||||
if x != nil {
|
||||
return x.TotalSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// The protocol for the `UpdateAlertPolicy` request.
|
||||
type UpdateAlertPolicyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Optional. A list of alerting policy field names. If this field is not
|
||||
// empty, each listed field in the existing alerting policy is set to the
|
||||
// value of the corresponding field in the supplied policy (`alert_policy`),
|
||||
// or to the field's default value if the field is not in the supplied
|
||||
// alerting policy. Fields not listed retain their previous value.
|
||||
//
|
||||
// Examples of valid field masks include `display_name`, `documentation`,
|
||||
// `documentation.content`, `documentation.mime_type`, `user_labels`,
|
||||
// `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc.
|
||||
//
|
||||
// If this field is empty, then the supplied alerting policy replaces the
|
||||
// existing policy. It is the same as deleting the existing policy and
|
||||
// adding the supplied policy, except for the following:
|
||||
//
|
||||
// - The new policy will have the same `[ALERT_POLICY_ID]` as the former
|
||||
// policy. This gives you continuity with the former policy in your
|
||||
// notifications and incidents.
|
||||
// - Conditions in the new policy will keep their former `[CONDITION_ID]` if
|
||||
// the supplied condition includes the `name` field with that
|
||||
// `[CONDITION_ID]`. If the supplied condition omits the `name` field,
|
||||
// then a new `[CONDITION_ID]` is created.
|
||||
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
|
||||
// Required. The updated alerting policy or the updated values for the
|
||||
// fields listed in `update_mask`.
|
||||
// If `update_mask` is not empty, any fields in this policy that are
|
||||
// not in `update_mask` are ignored.
|
||||
AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UpdateAlertPolicyRequest) Reset() {
|
||||
*x = UpdateAlertPolicyRequest{}
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *UpdateAlertPolicyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UpdateAlertPolicyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
|
||||
if x != nil {
|
||||
return x.UpdateMask
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
|
||||
if x != nil {
|
||||
return x.AlertPolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The protocol for the `DeleteAlertPolicy` request.
|
||||
type DeleteAlertPolicyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The alerting policy to delete. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
|
||||
//
|
||||
// For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy].
|
||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteAlertPolicyRequest) Reset() {
|
||||
*x = DeleteAlertPolicyRequest{}
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DeleteAlertPolicyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteAlertPolicyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *DeleteAlertPolicyRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{
|
||||
0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72,
|
||||
0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
|
||||
0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
|
||||
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
|
||||
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
|
||||
0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f,
|
||||
0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
|
||||
0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0,
|
||||
0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22,
|
||||
0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x18,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
|
||||
0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65,
|
||||
0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
|
||||
0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
|
||||
0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01,
|
||||
0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61,
|
||||
0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
|
||||
0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xac,
|
||||
0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69,
|
||||
0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e,
|
||||
0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
|
||||
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
|
||||
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
|
||||
0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
|
||||
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d,
|
||||
0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01,
|
||||
0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa7, 0x01,
|
||||
0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
|
||||
0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01,
|
||||
0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c,
|
||||
0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72,
|
||||
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x5d, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74,
|
||||
0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
||||
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, 0x08, 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01,
|
||||
0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
|
||||
0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41,
|
||||
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41,
|
||||
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
|
||||
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74,
|
||||
0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
|
||||
0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
|
||||
0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f,
|
||||
0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
|
||||
0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a,
|
||||
0x7d, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72,
|
||||
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
|
||||
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4d, 0xda, 0x41, 0x11, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f,
|
||||
0x6c, 0x69, 0x63, 0x79, 0x22, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
|
||||
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72,
|
||||
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65,
|
||||
0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65,
|
||||
0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01,
|
||||
0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x63, 0xda, 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74,
|
||||
0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c,
|
||||
0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
|
||||
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41,
|
||||
0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74,
|
||||
0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
|
||||
0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74,
|
||||
0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
|
||||
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
|
||||
0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
|
||||
0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f,
|
||||
0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
|
||||
0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_alert_service_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_google_monitoring_v3_alert_service_proto_goTypes = []any{
|
||||
(*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest
|
||||
(*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest
|
||||
(*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest
|
||||
(*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse
|
||||
(*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest
|
||||
(*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest
|
||||
(*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy
|
||||
(*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask
|
||||
(*emptypb.Empty)(nil), // 8: google.protobuf.Empty
|
||||
}
|
||||
var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{
|
||||
6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy
|
||||
6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy
|
||||
7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask
|
||||
6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy
|
||||
2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest
|
||||
1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest
|
||||
0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest
|
||||
5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest
|
||||
4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest
|
||||
3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse
|
||||
6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
|
||||
6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
|
||||
8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty
|
||||
6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
|
||||
9, // [9:14] is the sub-list for method output_type
|
||||
4, // [4:9] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_alert_service_proto_init() }
|
||||
func file_google_monitoring_v3_alert_service_proto_init() {
|
||||
if File_google_monitoring_v3_alert_service_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_monitoring_v3_alert_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_alert_service_proto = out.File
|
||||
file_google_monitoring_v3_alert_service_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_alert_service_proto_goTypes = nil
|
||||
file_google_monitoring_v3_alert_service_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// AlertPolicyServiceClient is the client API for AlertPolicyService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type AlertPolicyServiceClient interface {
|
||||
// Lists the existing alerting policies for the workspace.
|
||||
ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error)
|
||||
// Gets a single alerting policy.
|
||||
GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
|
||||
// Creates a new alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
|
||||
// Deletes an alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
|
||||
// Updates an alerting policy. You can either replace the entire policy with
|
||||
// a new one or replace only certain fields in the current alerting policy by
|
||||
// specifying the fields to be updated via `updateMask`. Returns the
|
||||
// updated alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
|
||||
}
|
||||
|
||||
type alertPolicyServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient {
|
||||
return &alertPolicyServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) {
|
||||
out := new(ListAlertPoliciesResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
|
||||
out := new(AlertPolicy)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
|
||||
out := new(AlertPolicy)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
|
||||
out := new(emptypb.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
|
||||
out := new(AlertPolicy)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// AlertPolicyServiceServer is the server API for AlertPolicyService service.
|
||||
type AlertPolicyServiceServer interface {
|
||||
// Lists the existing alerting policies for the workspace.
|
||||
ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error)
|
||||
// Gets a single alerting policy.
|
||||
GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error)
|
||||
// Creates a new alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error)
|
||||
// Deletes an alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error)
|
||||
// Updates an alerting policy. You can either replace the entire policy with
|
||||
// a new one or replace only certain fields in the current alerting policy by
|
||||
// specifying the fields to be updated via `updateMask`. Returns the
|
||||
// updated alerting policy.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// alerting policies in a single project. This includes calls to
|
||||
// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
|
||||
UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error)
|
||||
}
|
||||
|
||||
// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedAlertPolicyServiceServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented")
|
||||
}
|
||||
func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented")
|
||||
}
|
||||
func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented")
|
||||
}
|
||||
func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented")
|
||||
}
|
||||
func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented")
|
||||
}
|
||||
|
||||
func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) {
|
||||
s.RegisterService(&_AlertPolicyService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListAlertPoliciesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "google.monitoring.v3.AlertPolicyService",
|
||||
HandlerType: (*AlertPolicyServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ListAlertPolicies",
|
||||
Handler: _AlertPolicyService_ListAlertPolicies_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetAlertPolicy",
|
||||
Handler: _AlertPolicyService_GetAlertPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateAlertPolicy",
|
||||
Handler: _AlertPolicyService_CreateAlertPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteAlertPolicy",
|
||||
Handler: _AlertPolicyService_DeleteAlertPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateAlertPolicy",
|
||||
Handler: _AlertPolicyService_UpdateAlertPolicy_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/monitoring/v3/alert_service.proto",
|
||||
}
|
||||
1121
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go
generated
vendored
Normal file
1121
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
181
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go
generated
vendored
Normal file
181
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/dropped_labels.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// A set of (label, value) pairs that were removed from a Distribution
|
||||
// time series during aggregation and then added as an attachment to a
|
||||
// Distribution.Exemplar.
|
||||
//
|
||||
// The full label set for the exemplars is constructed by using the dropped
|
||||
// pairs in combination with the label values that remain on the aggregated
|
||||
// Distribution time series. The constructed full label set can be used to
|
||||
// identify the specific entity, such as the instance or job, which might be
|
||||
// contributing to a long-tail. However, with dropped labels, the storage
|
||||
// requirements are reduced because only the aggregated distribution values for
|
||||
// a large group of time series are stored.
|
||||
//
|
||||
// Note that there are no guarantees on ordering of the labels from
|
||||
// exemplar-to-exemplar and from distribution-to-distribution in the same
|
||||
// stream, and there may be duplicates. It is up to clients to resolve any
|
||||
// ambiguities.
|
||||
type DroppedLabels struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Map from label to its value, for all labels dropped in any aggregation.
|
||||
Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (x *DroppedLabels) Reset() {
|
||||
*x = DroppedLabels{}
|
||||
mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *DroppedLabels) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DroppedLabels) ProtoMessage() {}
|
||||
|
||||
func (x *DroppedLabels) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead.
|
||||
func (*DroppedLabels) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *DroppedLabels) GetLabel() map[string]string {
|
||||
if x != nil {
|
||||
return x.Label
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{
|
||||
0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c,
|
||||
0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
|
||||
0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62,
|
||||
0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65,
|
||||
0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74,
|
||||
0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62,
|
||||
0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
|
||||
0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
|
||||
0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
|
||||
0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
|
||||
0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
|
||||
0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_dropped_labels_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{
|
||||
(*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels
|
||||
nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry
|
||||
}
|
||||
var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{
|
||||
1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_dropped_labels_proto_init() }
|
||||
func file_google_monitoring_v3_dropped_labels_proto_init() {
|
||||
if File_google_monitoring_v3_dropped_labels_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_dropped_labels_proto = out.File
|
||||
file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_dropped_labels_proto_goTypes = nil
|
||||
file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil
|
||||
}
|
||||
249
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go
generated
vendored
Normal file
249
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go
generated
vendored
Normal file
@@ -0,0 +1,249 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/group.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// The description of a dynamic collection of monitored resources. Each group
|
||||
// has a filter that is matched against monitored resources and their associated
|
||||
// metadata. If a group's filter matches an available monitored resource, then
|
||||
// that resource is a member of that group. Groups can contain any number of
|
||||
// monitored resources, and each monitored resource can be a member of any
|
||||
// number of groups.
|
||||
//
|
||||
// Groups can be nested in parent-child hierarchies. The `parentName` field
|
||||
// identifies an optional parent for each group. If a group has a parent, then
|
||||
// the only monitored resources available to be matched by the group's filter
|
||||
// are the resources contained in the parent group. In other words, a group
|
||||
// contains the monitored resources that match its filter and the filters of all
|
||||
// the group's ancestors. A group without a parent can contain any monitored
|
||||
// resource.
|
||||
//
|
||||
// For example, consider an infrastructure running a set of instances with two
|
||||
// user-defined tags: `"environment"` and `"role"`. A parent group has a filter,
|
||||
// `environment="production"`. A child of that parent group has a filter,
|
||||
// `role="transcoder"`. The parent group contains all instances in the
|
||||
// production environment, regardless of their roles. The child group contains
|
||||
// instances that have the transcoder role *and* are in the production
|
||||
// environment.
|
||||
//
|
||||
// The monitored resources contained in a group can change at any moment,
|
||||
// depending on what resources exist and what filters are associated with the
|
||||
// group and its ancestors.
|
||||
type Group struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Output only. The name of this group. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
|
||||
//
|
||||
// When creating a group, this field is ignored and a new name is created
|
||||
// consisting of the project specified in the call to `CreateGroup`
|
||||
// and a unique `[GROUP_ID]` that is generated automatically.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// A user-assigned name for this group, used only for display purposes.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// The name of the group's parent, if it has one. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
|
||||
//
|
||||
// For groups with no parent, `parent_name` is the empty string, `""`.
|
||||
ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"`
|
||||
// The filter used to determine which monitored resources belong to this
|
||||
// group.
|
||||
Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
// If true, the members of this group are considered to be a cluster.
|
||||
// The system can perform additional analysis on groups that are clusters.
|
||||
IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Group) Reset() {
|
||||
*x = Group{}
|
||||
mi := &file_google_monitoring_v3_group_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Group) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Group) ProtoMessage() {}
|
||||
|
||||
func (x *Group) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_group_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Group.ProtoReflect.Descriptor instead.
|
||||
func (*Group) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Group) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Group) GetDisplayName() string {
|
||||
if x != nil {
|
||||
return x.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Group) GetParentName() string {
|
||||
if x != nil {
|
||||
return x.ParentName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Group) GetFilter() string {
|
||||
if x != nil {
|
||||
return x.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Group) GetIsCluster() bool {
|
||||
if x != nil {
|
||||
return x.IsCluster
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_group_proto_rawDesc = []byte{
|
||||
0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e,
|
||||
0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
|
||||
0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
|
||||
0x08, 0x52, 0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea,
|
||||
0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47,
|
||||
0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
|
||||
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f,
|
||||
0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72,
|
||||
0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66,
|
||||
0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67,
|
||||
0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
|
||||
0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
|
||||
0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33,
|
||||
0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
|
||||
0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33,
|
||||
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_group_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_group_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_monitoring_v3_group_proto_goTypes = []any{
|
||||
(*Group)(nil), // 0: google.monitoring.v3.Group
|
||||
}
|
||||
var file_google_monitoring_v3_group_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_group_proto_init() }
|
||||
func file_google_monitoring_v3_group_proto_init() {
|
||||
if File_google_monitoring_v3_group_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_group_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_group_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_group_proto = out.File
|
||||
file_google_monitoring_v3_group_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_group_proto_goTypes = nil
|
||||
file_google_monitoring_v3_group_proto_depIdxs = nil
|
||||
}
|
||||
1205
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go
generated
vendored
Normal file
1205
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1067
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go
generated
vendored
Normal file
1067
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2293
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go
generated
vendored
Normal file
2293
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
176
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go
generated
vendored
Normal file
176
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/mutation_record.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Describes a change made to a configuration.
|
||||
type MutationRecord struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// When the change occurred.
|
||||
MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"`
|
||||
// The email address of the user making the change.
|
||||
MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MutationRecord) Reset() {
|
||||
*x = MutationRecord{}
|
||||
mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *MutationRecord) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MutationRecord) ProtoMessage() {}
|
||||
|
||||
func (x *MutationRecord) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead.
|
||||
func (*MutationRecord) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.MutateTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MutationRecord) GetMutatedBy() string {
|
||||
if x != nil {
|
||||
return x.MutatedBy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{
|
||||
0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
||||
0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
|
||||
0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
|
||||
0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f,
|
||||
0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42,
|
||||
0x79, 0x42, 0xce, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13,
|
||||
0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
|
||||
0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
|
||||
0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
|
||||
0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
|
||||
0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_mutation_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_mutation_record_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{
|
||||
(*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord
|
||||
(*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
|
||||
}
|
||||
var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{
|
||||
1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_mutation_record_proto_init() }
|
||||
func file_google_monitoring_v3_mutation_record_proto_init() {
|
||||
if File_google_monitoring_v3_mutation_record_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_mutation_record_proto = out.File
|
||||
file_google_monitoring_v3_mutation_record_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_mutation_record_proto_goTypes = nil
|
||||
file_google_monitoring_v3_mutation_record_proto_depIdxs = nil
|
||||
}
|
||||
619
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go
generated
vendored
Normal file
619
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go
generated
vendored
Normal file
@@ -0,0 +1,619 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/notification.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
api "google.golang.org/genproto/googleapis/api"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
label "google.golang.org/genproto/googleapis/api/label"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Indicates whether the channel has been verified or not. It is illegal
|
||||
// to specify this field in a
|
||||
// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel]
|
||||
// or an
|
||||
// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
|
||||
// operation.
|
||||
type NotificationChannel_VerificationStatus int32
|
||||
|
||||
const (
|
||||
// Sentinel value used to indicate that the state is unknown, omitted, or
|
||||
// is not applicable (as in the case of channels that neither support
|
||||
// nor require verification in order to function).
|
||||
NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0
|
||||
// The channel has yet to be verified and requires verification to function.
|
||||
// Note that this state also applies to the case where the verification
|
||||
// process has been initiated by sending a verification code but where
|
||||
// the verification code has not been submitted to complete the process.
|
||||
NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1
|
||||
// It has been proven that notifications can be received on this
|
||||
// notification channel and that someone on the project has access
|
||||
// to messages that are delivered to that channel.
|
||||
NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2
|
||||
)
|
||||
|
||||
// Enum value maps for NotificationChannel_VerificationStatus.
|
||||
var (
|
||||
NotificationChannel_VerificationStatus_name = map[int32]string{
|
||||
0: "VERIFICATION_STATUS_UNSPECIFIED",
|
||||
1: "UNVERIFIED",
|
||||
2: "VERIFIED",
|
||||
}
|
||||
NotificationChannel_VerificationStatus_value = map[string]int32{
|
||||
"VERIFICATION_STATUS_UNSPECIFIED": 0,
|
||||
"UNVERIFIED": 1,
|
||||
"VERIFIED": 2,
|
||||
}
|
||||
)
|
||||
|
||||
func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus {
|
||||
p := new(NotificationChannel_VerificationStatus)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x NotificationChannel_VerificationStatus) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType {
|
||||
return &file_google_monitoring_v3_notification_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead.
|
||||
func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0}
|
||||
}
|
||||
|
||||
// A description of a notification channel. The descriptor includes
|
||||
// the properties of the channel and the set of labels or fields that
|
||||
// must be specified to configure channels of a given type.
|
||||
type NotificationChannelDescriptor struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The full REST resource name for this descriptor. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE]
|
||||
//
|
||||
// In the above, `[TYPE]` is the value of the `type` field.
|
||||
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// The type of notification channel, such as "email" and "sms". To view the
|
||||
// full list of channels, see
|
||||
// [Channel
|
||||
// descriptors](https://cloud.google.com/monitoring/alerts/using-channels-api#ncd).
|
||||
// Notification channel types are globally unique.
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
// A human-readable name for the notification channel type. This
|
||||
// form of the name is suitable for a user interface.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// A human-readable description of the notification channel
|
||||
// type. The description may include a description of the properties
|
||||
// of the channel and pointers to external documentation.
|
||||
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
|
||||
// The set of labels that must be defined to identify a particular
|
||||
// channel of the corresponding type. Each label includes a
|
||||
// description for how that field should be populated.
|
||||
Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
|
||||
// The tiers that support this notification channel; the project service tier
|
||||
// must be one of the supported_tiers.
|
||||
//
|
||||
// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto.
|
||||
SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"`
|
||||
// The product launch stage for channels of this type.
|
||||
LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"`
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) Reset() {
|
||||
*x = NotificationChannelDescriptor{}
|
||||
mi := &file_google_monitoring_v3_notification_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*NotificationChannelDescriptor) ProtoMessage() {}
|
||||
|
||||
func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_notification_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead.
|
||||
func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) GetType() string {
|
||||
if x != nil {
|
||||
return x.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) GetDisplayName() string {
|
||||
if x != nil {
|
||||
return x.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) GetDescription() string {
|
||||
if x != nil {
|
||||
return x.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor {
|
||||
if x != nil {
|
||||
return x.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto.
|
||||
func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier {
|
||||
if x != nil {
|
||||
return x.SupportedTiers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage {
|
||||
if x != nil {
|
||||
return x.LaunchStage
|
||||
}
|
||||
return api.LaunchStage(0)
|
||||
}
|
||||
|
||||
// A `NotificationChannel` is a medium through which an alert is
|
||||
// delivered when a policy violation is detected. Examples of channels
|
||||
// include email, SMS, and third-party messaging applications. Fields
|
||||
// containing sensitive information like authentication tokens or
|
||||
// contact info are only partially populated on retrieval.
|
||||
type NotificationChannel struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The type of the notification channel. This field matches the
|
||||
// value of the
|
||||
// [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type]
|
||||
// field.
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
// Identifier. The full REST resource name for this channel. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
|
||||
//
|
||||
// The `[CHANNEL_ID]` is automatically assigned by the server on creation.
|
||||
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// An optional human-readable name for this notification channel. It is
|
||||
// recommended that you specify a non-empty and unique name in order to
|
||||
// make it easier to identify the channels in your project, though this is
|
||||
// not enforced. The display name is limited to 512 Unicode characters.
|
||||
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// An optional human-readable description of this notification channel. This
|
||||
// description may provide additional details, beyond the display
|
||||
// name, for the channel. This may not exceed 1024 Unicode characters.
|
||||
Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
|
||||
// Configuration fields that define the channel and its behavior. The
|
||||
// permissible and required labels are specified in the
|
||||
// [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels]
|
||||
// of the `NotificationChannelDescriptor` corresponding to the `type` field.
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// User-supplied key/value data that does not need to conform to
|
||||
// the corresponding `NotificationChannelDescriptor`'s schema, unlike
|
||||
// the `labels` field. This field is intended to be used for organizing
|
||||
// and identifying the `NotificationChannel` objects.
|
||||
//
|
||||
// The field can contain up to 64 entries. Each key and value is limited to
|
||||
// 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
|
||||
// values can contain only lowercase letters, numerals, underscores, and
|
||||
// dashes. Keys must begin with a letter.
|
||||
UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Indicates whether this channel has been verified or not. On a
|
||||
// [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
|
||||
// or
|
||||
// [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
|
||||
// operation, this field is expected to be populated.
|
||||
//
|
||||
// If the value is `UNVERIFIED`, then it indicates that the channel is
|
||||
// non-functioning (it both requires verification and lacks verification);
|
||||
// otherwise, it is assumed that the channel works.
|
||||
//
|
||||
// If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
|
||||
// the channel is of a type that does not require verification or that
|
||||
// this specific channel has been exempted from verification because it was
|
||||
// created prior to verification being required for channels of this type.
|
||||
//
|
||||
// This field cannot be modified using a standard
|
||||
// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
|
||||
// operation. To change the value of this field, you must call
|
||||
// [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
|
||||
VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
|
||||
// Whether notifications are forwarded to the described channel. This makes
|
||||
// it possible to disable delivery of notifications to a particular channel
|
||||
// without removing the channel from all alerting policies that reference
|
||||
// the channel. This is a more convenient approach when the change is
|
||||
// temporary and you want to receive notifications from the same set
|
||||
// of alerting policies on the channel at some point in the future.
|
||||
Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"`
|
||||
// Record of the creation of this channel.
|
||||
CreationRecord *MutationRecord `protobuf:"bytes,12,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
|
||||
// Records of the modification of this channel.
|
||||
MutationRecords []*MutationRecord `protobuf:"bytes,13,rep,name=mutation_records,json=mutationRecords,proto3" json:"mutation_records,omitempty"`
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) Reset() {
|
||||
*x = NotificationChannel{}
|
||||
mi := &file_google_monitoring_v3_notification_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*NotificationChannel) ProtoMessage() {}
|
||||
|
||||
func (x *NotificationChannel) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_notification_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead.
|
||||
func (*NotificationChannel) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetType() string {
|
||||
if x != nil {
|
||||
return x.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetDisplayName() string {
|
||||
if x != nil {
|
||||
return x.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetDescription() string {
|
||||
if x != nil {
|
||||
return x.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetLabels() map[string]string {
|
||||
if x != nil {
|
||||
return x.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetUserLabels() map[string]string {
|
||||
if x != nil {
|
||||
return x.UserLabels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus {
|
||||
if x != nil {
|
||||
return x.VerificationStatus
|
||||
}
|
||||
return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue {
|
||||
if x != nil {
|
||||
return x.Enabled
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetCreationRecord() *MutationRecord {
|
||||
if x != nil {
|
||||
return x.CreationRecord
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetMutationRecords() []*MutationRecord {
|
||||
if x != nil {
|
||||
return x.MutationRecords
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_notification_proto_rawDesc = []byte{
|
||||
0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a,
|
||||
0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
|
||||
0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62,
|
||||
0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
|
||||
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
|
||||
0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64,
|
||||
0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20,
|
||||
0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61,
|
||||
0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c,
|
||||
0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
|
||||
0x65, 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x65,
|
||||
0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64,
|
||||
0x54, 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
|
||||
0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
|
||||
0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
|
||||
0x65, 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
|
||||
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
|
||||
0x72, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f,
|
||||
0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
|
||||
0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65,
|
||||
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e,
|
||||
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
|
||||
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72,
|
||||
0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f,
|
||||
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c,
|
||||
0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f,
|
||||
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
|
||||
0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68,
|
||||
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
|
||||
0x7d, 0x12, 0x01, 0x2a, 0x22, 0xcb, 0x08, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04,
|
||||
0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
|
||||
0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
|
||||
0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73,
|
||||
0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b,
|
||||
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d,
|
||||
0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
|
||||
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a,
|
||||
0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
|
||||
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73,
|
||||
0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75,
|
||||
0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72,
|
||||
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f,
|
||||
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
|
||||
0x6c, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
|
||||
0x61, 0x74, 0x75, 0x73, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
|
||||
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4d,
|
||||
0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72,
|
||||
0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
|
||||
0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63,
|
||||
0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4f, 0x0a,
|
||||
0x10, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
|
||||
0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
|
||||
0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0f, 0x6d,
|
||||
0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x39,
|
||||
0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
|
||||
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
|
||||
0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69,
|
||||
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23,
|
||||
0x0a, 0x1f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53,
|
||||
0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
|
||||
0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45,
|
||||
0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
|
||||
0x02, 0x3a, 0xfe, 0x01, 0xea, 0x41, 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
|
||||
0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
|
||||
0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69,
|
||||
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73,
|
||||
0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
|
||||
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69,
|
||||
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
|
||||
0x7d, 0x12, 0x3c, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64,
|
||||
0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
|
||||
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12,
|
||||
0x01, 0x2a, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
|
||||
0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
|
||||
0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f,
|
||||
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
|
||||
0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c,
|
||||
0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56,
|
||||
0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75,
|
||||
0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56,
|
||||
0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_notification_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_google_monitoring_v3_notification_proto_goTypes = []any{
|
||||
(NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus
|
||||
(*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor
|
||||
(*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel
|
||||
nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry
|
||||
nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry
|
||||
(*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor
|
||||
(ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier
|
||||
(api.LaunchStage)(0), // 7: google.api.LaunchStage
|
||||
(*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue
|
||||
(*MutationRecord)(nil), // 9: google.monitoring.v3.MutationRecord
|
||||
}
|
||||
var file_google_monitoring_v3_notification_proto_depIdxs = []int32{
|
||||
5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor
|
||||
6, // 1: google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier
|
||||
7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage
|
||||
3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry
|
||||
4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry
|
||||
0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus
|
||||
8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue
|
||||
9, // 7: google.monitoring.v3.NotificationChannel.creation_record:type_name -> google.monitoring.v3.MutationRecord
|
||||
9, // 8: google.monitoring.v3.NotificationChannel.mutation_records:type_name -> google.monitoring.v3.MutationRecord
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_notification_proto_init() }
|
||||
func file_google_monitoring_v3_notification_proto_init() {
|
||||
if File_google_monitoring_v3_notification_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_monitoring_v3_common_proto_init()
|
||||
file_google_monitoring_v3_mutation_record_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_notification_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs,
|
||||
EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes,
|
||||
MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_notification_proto = out.File
|
||||
file_google_monitoring_v3_notification_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_notification_proto_goTypes = nil
|
||||
file_google_monitoring_v3_notification_proto_depIdxs = nil
|
||||
}
|
||||
1819
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go
generated
vendored
Normal file
1819
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
221
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go
generated
vendored
Normal file
221
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/query_service.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
var File_google_monitoring_v3_query_service_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{
|
||||
0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72,
|
||||
0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
|
||||
0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65,
|
||||
0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x32, 0xe1, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51,
|
||||
0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65,
|
||||
0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22,
|
||||
0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
|
||||
0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x88, 0x02, 0x01, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74,
|
||||
0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
|
||||
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f,
|
||||
0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70,
|
||||
0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
|
||||
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
|
||||
0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
|
||||
0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
|
||||
0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x2e, 0x76, 0x33, 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f,
|
||||
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76,
|
||||
0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a,
|
||||
0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
|
||||
0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_query_service_proto_goTypes = []any{
|
||||
(*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest
|
||||
(*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse
|
||||
}
|
||||
var file_google_monitoring_v3_query_service_proto_depIdxs = []int32{
|
||||
0, // 0: google.monitoring.v3.QueryService.QueryTimeSeries:input_type -> google.monitoring.v3.QueryTimeSeriesRequest
|
||||
1, // 1: google.monitoring.v3.QueryService.QueryTimeSeries:output_type -> google.monitoring.v3.QueryTimeSeriesResponse
|
||||
1, // [1:2] is the sub-list for method output_type
|
||||
0, // [0:1] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_query_service_proto_init() }
|
||||
func file_google_monitoring_v3_query_service_proto_init() {
|
||||
if File_google_monitoring_v3_query_service_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_monitoring_v3_metric_service_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_query_service_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_query_service_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_query_service_proto_depIdxs,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_query_service_proto = out.File
|
||||
file_google_monitoring_v3_query_service_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_query_service_proto_goTypes = nil
|
||||
file_google_monitoring_v3_query_service_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// QueryServiceClient is the client API for QueryService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type QueryServiceClient interface {
|
||||
// Deprecated: Do not use.
|
||||
// Queries time series by using Monitoring Query Language (MQL). We recommend
|
||||
// using PromQL instead of MQL. For more information about the status of MQL,
|
||||
// see the [MQL deprecation
|
||||
// notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
|
||||
QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error)
|
||||
}
|
||||
|
||||
type queryServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient {
|
||||
return &queryServiceClient{cc}
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) {
|
||||
out := new(QueryTimeSeriesResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.QueryService/QueryTimeSeries", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// QueryServiceServer is the server API for QueryService service.
|
||||
type QueryServiceServer interface {
|
||||
// Deprecated: Do not use.
|
||||
// Queries time series by using Monitoring Query Language (MQL). We recommend
|
||||
// using PromQL instead of MQL. For more information about the status of MQL,
|
||||
// see the [MQL deprecation
|
||||
// notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
|
||||
QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedQueryServiceServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedQueryServiceServer) QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method QueryTimeSeries not implemented")
|
||||
}
|
||||
|
||||
func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) {
|
||||
s.RegisterService(&_QueryService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _QueryService_QueryTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(QueryTimeSeriesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(QueryServiceServer).QueryTimeSeries(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.QueryService/QueryTimeSeries",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(QueryServiceServer).QueryTimeSeries(ctx, req.(*QueryTimeSeriesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _QueryService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "google.monitoring.v3.QueryService",
|
||||
HandlerType: (*QueryServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "QueryTimeSeries",
|
||||
Handler: _QueryService_QueryTimeSeries_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/monitoring/v3/query_service.proto",
|
||||
}
|
||||
2755
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go
generated
vendored
Normal file
2755
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1626
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go
generated
vendored
Normal file
1626
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
310
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go
generated
vendored
Normal file
310
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go
generated
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/snooze.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// A `Snooze` will prevent any alerts from being opened, and close any that
|
||||
// are already open. The `Snooze` will work on alerts that match the
|
||||
// criteria defined in the `Snooze`. The `Snooze` will be active from
|
||||
// `interval.start_time` through `interval.end_time`.
|
||||
type Snooze struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. Identifier. The name of the `Snooze`. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
|
||||
//
|
||||
// The ID of the `Snooze` will be generated by the system.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Required. This defines the criteria for applying the `Snooze`. See
|
||||
// `Criteria` for more information.
|
||||
Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"`
|
||||
// Required. The `Snooze` will be active from `interval.start_time` through
|
||||
// `interval.end_time`.
|
||||
// `interval.start_time` cannot be in the past. There is a 15 second clock
|
||||
// skew to account for the time it takes for a request to reach the API from
|
||||
// the UI.
|
||||
Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
|
||||
// Required. A display name for the `Snooze`. This can be, at most, 512
|
||||
// unicode characters.
|
||||
DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Snooze) Reset() {
|
||||
*x = Snooze{}
|
||||
mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Snooze) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Snooze) ProtoMessage() {}
|
||||
|
||||
func (x *Snooze) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Snooze.ProtoReflect.Descriptor instead.
|
||||
func (*Snooze) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Snooze) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Snooze) GetCriteria() *Snooze_Criteria {
|
||||
if x != nil {
|
||||
return x.Criteria
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Snooze) GetInterval() *TimeInterval {
|
||||
if x != nil {
|
||||
return x.Interval
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Snooze) GetDisplayName() string {
|
||||
if x != nil {
|
||||
return x.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The
|
||||
// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s
|
||||
// whose names are supplied.
|
||||
type Snooze_Criteria struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The specific `AlertPolicy` names for the alert that should be snoozed.
|
||||
// The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]
|
||||
//
|
||||
// There is a limit of 16 policies per snooze. This limit is checked during
|
||||
// snooze creation.
|
||||
// Exactly 1 alert policy is required if `filter` is specified at the same
|
||||
// time.
|
||||
Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"`
|
||||
// Optional. The filter string to match on Alert fields when silencing the
|
||||
// alerts. It follows the standard https://google.aip.dev/160 syntax.
|
||||
// A filter string used to apply the snooze to specific incidents
|
||||
// that have matching filter values.
|
||||
// Filters can be defined for snoozes that apply to one alerting
|
||||
// policy.
|
||||
// Filters must be a string formatted as one or more resource labels with
|
||||
// specific label values. If multiple resource labels are used, then they
|
||||
// must be connected with an AND operator. For example, the following filter
|
||||
// applies the snooze to incidents that have an instance ID of
|
||||
// `1234567890` and a zone of `us-central1-a`:
|
||||
//
|
||||
// resource.labels.instance_id="1234567890" AND
|
||||
// resource.labels.zone="us-central1-a"
|
||||
Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Snooze_Criteria) Reset() {
|
||||
*x = Snooze_Criteria{}
|
||||
mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Snooze_Criteria) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Snooze_Criteria) ProtoMessage() {}
|
||||
|
||||
func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead.
|
||||
func (*Snooze_Criteria) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *Snooze_Criteria) GetPolicies() []string {
|
||||
if x != nil {
|
||||
return x.Policies
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Snooze_Criteria) GetFilter() string {
|
||||
if x != nil {
|
||||
return x.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{
|
||||
0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
|
||||
0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
|
||||
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d,
|
||||
0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x03, 0x0a, 0x06, 0x53, 0x6e, 0x6f,
|
||||
0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08,
|
||||
0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69,
|
||||
0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74,
|
||||
0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
|
||||
0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73,
|
||||
0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42,
|
||||
0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x1a, 0x6a, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a,
|
||||
0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42,
|
||||
0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x3a, 0x4a, 0xea,
|
||||
0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e,
|
||||
0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
|
||||
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73,
|
||||
0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x7d, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f,
|
||||
0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
|
||||
0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
|
||||
0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
|
||||
0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
|
||||
0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
|
||||
0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_snooze_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_google_monitoring_v3_snooze_proto_goTypes = []any{
|
||||
(*Snooze)(nil), // 0: google.monitoring.v3.Snooze
|
||||
(*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria
|
||||
(*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval
|
||||
}
|
||||
var file_google_monitoring_v3_snooze_proto_depIdxs = []int32{
|
||||
1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria
|
||||
2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_snooze_proto_init() }
|
||||
func file_google_monitoring_v3_snooze_proto_init() {
|
||||
if File_google_monitoring_v3_snooze_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_monitoring_v3_common_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_snooze_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_snooze_proto = out.File
|
||||
file_google_monitoring_v3_snooze_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_snooze_proto_goTypes = nil
|
||||
file_google_monitoring_v3_snooze_proto_depIdxs = nil
|
||||
}
|
||||
793
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go
generated
vendored
Normal file
793
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go
generated
vendored
Normal file
@@ -0,0 +1,793 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/snooze_service.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// The message definition for creating a `Snooze`. Users must provide the body
|
||||
// of the `Snooze` to be created but must omit the `Snooze` field, `name`.
|
||||
type CreateSnoozeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The
|
||||
// [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
|
||||
// a `Snooze` should be created. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]
|
||||
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
|
||||
// Required. The `Snooze` to create. Omit the `name` field, as it will be
|
||||
// filled in by the API.
|
||||
Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateSnoozeRequest) Reset() {
|
||||
*x = CreateSnoozeRequest{}
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *CreateSnoozeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateSnoozeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *CreateSnoozeRequest) GetParent() string {
|
||||
if x != nil {
|
||||
return x.Parent
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreateSnoozeRequest) GetSnooze() *Snooze {
|
||||
if x != nil {
|
||||
return x.Snooze
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The message definition for listing `Snooze`s associated with the given
|
||||
// `parent`, satisfying the optional `filter`.
|
||||
type ListSnoozesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The
|
||||
// [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
|
||||
// `Snooze`s should be listed. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]
|
||||
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
|
||||
// Optional. Optional filter to restrict results to the given criteria. The
|
||||
// following fields are supported.
|
||||
//
|
||||
// - `interval.start_time`
|
||||
// - `interval.end_time`
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// interval.start_time > "2022-03-11T00:00:00-08:00" AND
|
||||
// interval.end_time < "2022-03-12T00:00:00-08:00"
|
||||
Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
// Optional. The maximum number of results to return for a single query. The
|
||||
// server may further constrain the maximum number of results returned in a
|
||||
// single page. The value should be in the range [1, 1000]. If the value given
|
||||
// is outside this range, the server will decide the number of results to be
|
||||
// returned.
|
||||
PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
// Optional. The `next_page_token` from a previous call to
|
||||
// `ListSnoozesRequest` to get the next page of results.
|
||||
PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListSnoozesRequest) Reset() {
|
||||
*x = ListSnoozesRequest{}
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ListSnoozesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListSnoozesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListSnoozesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ListSnoozesRequest) GetParent() string {
|
||||
if x != nil {
|
||||
return x.Parent
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ListSnoozesRequest) GetFilter() string {
|
||||
if x != nil {
|
||||
return x.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ListSnoozesRequest) GetPageSize() int32 {
|
||||
if x != nil {
|
||||
return x.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ListSnoozesRequest) GetPageToken() string {
|
||||
if x != nil {
|
||||
return x.PageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The results of a successful `ListSnoozes` call, containing the matching
|
||||
// `Snooze`s.
|
||||
type ListSnoozesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// `Snooze`s matching this list call.
|
||||
Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"`
|
||||
// Page token for repeated calls to `ListSnoozes`, to fetch additional pages
|
||||
// of results. If this is empty or missing, there are no more pages.
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListSnoozesResponse) Reset() {
|
||||
*x = ListSnoozesResponse{}
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ListSnoozesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListSnoozesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListSnoozesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ListSnoozesResponse) GetSnoozes() []*Snooze {
|
||||
if x != nil {
|
||||
return x.Snoozes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ListSnoozesResponse) GetNextPageToken() string {
|
||||
if x != nil {
|
||||
return x.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The message definition for retrieving a `Snooze`. Users must specify the
|
||||
// field, `name`, which identifies the `Snooze`.
|
||||
type GetSnoozeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The ID of the `Snooze` to retrieve. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetSnoozeRequest) Reset() {
|
||||
*x = GetSnoozeRequest{}
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetSnoozeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetSnoozeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetSnoozeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *GetSnoozeRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The message definition for updating a `Snooze`. The field, `snooze.name`
|
||||
// identifies the `Snooze` to be updated. The remainder of `snooze` gives the
|
||||
// content the `Snooze` in question will be assigned.
|
||||
//
|
||||
// What fields can be updated depends on the start time and end time of the
|
||||
// `Snooze`.
|
||||
//
|
||||
// - end time is in the past: These `Snooze`s are considered
|
||||
// read-only and cannot be updated.
|
||||
// - start time is in the past and end time is in the future: `display_name`
|
||||
// and `interval.end_time` can be updated.
|
||||
// - start time is in the future: `display_name`, `interval.start_time` and
|
||||
// `interval.end_time` can be updated.
|
||||
type UpdateSnoozeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Required. The `Snooze` to update. Must have the name field present.
|
||||
Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"`
|
||||
// Required. The fields to update.
|
||||
//
|
||||
// For each field listed in `update_mask`:
|
||||
//
|
||||
// - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a
|
||||
// value for that field, the value of the field in the existing `Snooze`
|
||||
// will be set to the value of the field in the supplied `Snooze`.
|
||||
// - If the field does not have a value in the supplied `Snooze`, the field
|
||||
// in the existing `Snooze` is set to its default value.
|
||||
//
|
||||
// Fields not listed retain their existing value.
|
||||
//
|
||||
// The following are the field names that are accepted in `update_mask`:
|
||||
//
|
||||
// - `display_name`
|
||||
// - `interval.start_time`
|
||||
// - `interval.end_time`
|
||||
//
|
||||
// That said, the start time and end time of the `Snooze` determines which
|
||||
// fields can legally be updated. Before attempting an update, users should
|
||||
// consult the documentation for `UpdateSnoozeRequest`, which talks about
|
||||
// which fields can be updated.
|
||||
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UpdateSnoozeRequest) Reset() {
|
||||
*x = UpdateSnoozeRequest{}
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *UpdateSnoozeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UpdateSnoozeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *UpdateSnoozeRequest) GetSnooze() *Snooze {
|
||||
if x != nil {
|
||||
return x.Snooze
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
|
||||
if x != nil {
|
||||
return x.UpdateMask
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{
|
||||
0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
|
||||
0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
|
||||
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
||||
0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
|
||||
0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d,
|
||||
0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
|
||||
0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
|
||||
0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9,
|
||||
0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52,
|
||||
0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
|
||||
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69,
|
||||
0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
|
||||
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61,
|
||||
0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
|
||||
0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
|
||||
0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65,
|
||||
0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
|
||||
0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
|
||||
0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
|
||||
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e,
|
||||
0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73,
|
||||
0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
|
||||
0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
|
||||
0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
|
||||
0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
|
||||
0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70,
|
||||
0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f,
|
||||
0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
|
||||
0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e,
|
||||
0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
|
||||
0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x6e,
|
||||
0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e,
|
||||
0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e,
|
||||
0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e,
|
||||
0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
|
||||
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70,
|
||||
0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33,
|
||||
0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
|
||||
0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x81, 0x01, 0x0a,
|
||||
0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
|
||||
0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65,
|
||||
0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12,
|
||||
0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
|
||||
0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
|
||||
0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53,
|
||||
0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73,
|
||||
0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
|
||||
0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x32,
|
||||
0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f,
|
||||
0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
|
||||
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
|
||||
0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d,
|
||||
0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
|
||||
0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
|
||||
0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72,
|
||||
0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
|
||||
0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
|
||||
0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
|
||||
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
|
||||
0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
|
||||
0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
|
||||
0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_snooze_service_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{
|
||||
(*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest
|
||||
(*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest
|
||||
(*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse
|
||||
(*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest
|
||||
(*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest
|
||||
(*Snooze)(nil), // 5: google.monitoring.v3.Snooze
|
||||
(*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask
|
||||
}
|
||||
var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{
|
||||
5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze
|
||||
5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze
|
||||
5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze
|
||||
6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask
|
||||
0, // 4: google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest
|
||||
1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest
|
||||
3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest
|
||||
4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest
|
||||
5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze
|
||||
2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse
|
||||
5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze
|
||||
5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze
|
||||
8, // [8:12] is the sub-list for method output_type
|
||||
4, // [4:8] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_snooze_service_proto_init() }
|
||||
func file_google_monitoring_v3_snooze_service_proto_init() {
|
||||
if File_google_monitoring_v3_snooze_service_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_monitoring_v3_snooze_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_snooze_service_proto = out.File
|
||||
file_google_monitoring_v3_snooze_service_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_snooze_service_proto_goTypes = nil
|
||||
file_google_monitoring_v3_snooze_service_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// SnoozeServiceClient is the client API for SnoozeService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type SnoozeServiceClient interface {
|
||||
// Creates a `Snooze` that will prevent alerts, which match the provided
|
||||
// criteria, from being opened. The `Snooze` applies for a specific time
|
||||
// interval.
|
||||
CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
|
||||
// Lists the `Snooze`s associated with a project. Can optionally pass in
|
||||
// `filter`, which specifies predicates to match `Snooze`s.
|
||||
ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error)
|
||||
// Retrieves a `Snooze` by `name`.
|
||||
GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
|
||||
// Updates a `Snooze`, identified by its `name`, with the parameters in the
|
||||
// given `Snooze` object.
|
||||
UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
|
||||
}
|
||||
|
||||
type snoozeServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient {
|
||||
return &snoozeServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
|
||||
out := new(Snooze)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) {
|
||||
out := new(ListSnoozesResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
|
||||
out := new(Snooze)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
|
||||
out := new(Snooze)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SnoozeServiceServer is the server API for SnoozeService service.
|
||||
type SnoozeServiceServer interface {
|
||||
// Creates a `Snooze` that will prevent alerts, which match the provided
|
||||
// criteria, from being opened. The `Snooze` applies for a specific time
|
||||
// interval.
|
||||
CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error)
|
||||
// Lists the `Snooze`s associated with a project. Can optionally pass in
|
||||
// `filter`, which specifies predicates to match `Snooze`s.
|
||||
ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error)
|
||||
// Retrieves a `Snooze` by `name`.
|
||||
GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error)
|
||||
// Updates a `Snooze`, identified by its `name`, with the parameters in the
|
||||
// given `Snooze` object.
|
||||
UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error)
|
||||
}
|
||||
|
||||
// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedSnoozeServiceServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented")
|
||||
}
|
||||
func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented")
|
||||
}
|
||||
func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented")
|
||||
}
|
||||
func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented")
|
||||
}
|
||||
|
||||
func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) {
|
||||
s.RegisterService(&_SnoozeService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateSnoozeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnoozeServiceServer).CreateSnooze(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListSnoozesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnoozeServiceServer).ListSnoozes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetSnoozeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnoozeServiceServer).GetSnooze(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateSnoozeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _SnoozeService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "google.monitoring.v3.SnoozeService",
|
||||
HandlerType: (*SnoozeServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "CreateSnooze",
|
||||
Handler: _SnoozeService_CreateSnooze_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListSnoozes",
|
||||
Handler: _SnoozeService_ListSnoozes_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetSnooze",
|
||||
Handler: _SnoozeService_GetSnooze_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateSnooze",
|
||||
Handler: _SnoozeService_UpdateSnooze_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/monitoring/v3/snooze_service.proto",
|
||||
}
|
||||
172
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go
generated
vendored
Normal file
172
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go
generated
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.2
|
||||
// protoc v4.25.3
|
||||
// source: google/monitoring/v3/span_context.proto
|
||||
|
||||
package monitoringpb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// The context of a span. This is attached to an
|
||||
// [Exemplar][google.api.Distribution.Exemplar]
|
||||
// in [Distribution][google.api.Distribution] values during aggregation.
|
||||
//
|
||||
// It contains the name of a span with format:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
|
||||
type SpanContext struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The resource name of the span. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
|
||||
//
|
||||
// `[TRACE_ID]` is a unique identifier for a trace within a project;
|
||||
// it is a 32-character hexadecimal encoding of a 16-byte array.
|
||||
//
|
||||
// `[SPAN_ID]` is a unique identifier for a span within a trace; it
|
||||
// is a 16-character hexadecimal encoding of an 8-byte array.
|
||||
SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SpanContext) Reset() {
|
||||
*x = SpanContext{}
|
||||
mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SpanContext) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SpanContext) ProtoMessage() {}
|
||||
|
||||
func (x *SpanContext) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead.
|
||||
func (*SpanContext) Descriptor() ([]byte, []int) {
|
||||
return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *SpanContext) GetSpanName() string {
|
||||
if x != nil {
|
||||
return x.SpanName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{
|
||||
0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
|
||||
0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
|
||||
0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22,
|
||||
0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b,
|
||||
0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xcb, 0x01, 0x0a, 0x18,
|
||||
0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c,
|
||||
0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
|
||||
0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
|
||||
0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa,
|
||||
0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d,
|
||||
0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69,
|
||||
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
|
||||
0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once
|
||||
file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte {
|
||||
file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() {
|
||||
file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData)
|
||||
})
|
||||
return file_google_monitoring_v3_span_context_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_monitoring_v3_span_context_proto_goTypes = []any{
|
||||
(*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext
|
||||
}
|
||||
var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_monitoring_v3_span_context_proto_init() }
|
||||
func file_google_monitoring_v3_span_context_proto_init() {
|
||||
if File_google_monitoring_v3_span_context_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_monitoring_v3_span_context_proto_goTypes,
|
||||
DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs,
|
||||
MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_monitoring_v3_span_context_proto = out.File
|
||||
file_google_monitoring_v3_span_context_proto_rawDesc = nil
|
||||
file_google_monitoring_v3_span_context_proto_goTypes = nil
|
||||
file_google_monitoring_v3_span_context_proto_depIdxs = nil
|
||||
}
|
||||
2531
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go
generated
vendored
Normal file
2531
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1112
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go
generated
vendored
Normal file
1112
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
622
vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go
generated
vendored
Normal file
622
vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go
generated
vendored
Normal file
@@ -0,0 +1,622 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newNotificationChannelClientHook clientHook
|
||||
|
||||
// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
|
||||
type NotificationChannelCallOptions struct {
|
||||
ListNotificationChannelDescriptors []gax.CallOption
|
||||
GetNotificationChannelDescriptor []gax.CallOption
|
||||
ListNotificationChannels []gax.CallOption
|
||||
GetNotificationChannel []gax.CallOption
|
||||
CreateNotificationChannel []gax.CallOption
|
||||
UpdateNotificationChannel []gax.CallOption
|
||||
DeleteNotificationChannel []gax.CallOption
|
||||
SendNotificationChannelVerificationCode []gax.CallOption
|
||||
GetNotificationChannelVerificationCode []gax.CallOption
|
||||
VerifyNotificationChannel []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultNotificationChannelGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
|
||||
return &NotificationChannelCallOptions{
|
||||
ListNotificationChannelDescriptors: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetNotificationChannelDescriptor: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListNotificationChannels: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetNotificationChannel: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateNotificationChannel: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
UpdateNotificationChannel: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
DeleteNotificationChannel: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
SendNotificationChannelVerificationCode: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
GetNotificationChannelVerificationCode: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
VerifyNotificationChannel: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// internalNotificationChannelClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalNotificationChannelClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
ListNotificationChannelDescriptors(context.Context, *monitoringpb.ListNotificationChannelDescriptorsRequest, ...gax.CallOption) *NotificationChannelDescriptorIterator
|
||||
GetNotificationChannelDescriptor(context.Context, *monitoringpb.GetNotificationChannelDescriptorRequest, ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error)
|
||||
ListNotificationChannels(context.Context, *monitoringpb.ListNotificationChannelsRequest, ...gax.CallOption) *NotificationChannelIterator
|
||||
GetNotificationChannel(context.Context, *monitoringpb.GetNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
|
||||
CreateNotificationChannel(context.Context, *monitoringpb.CreateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
|
||||
UpdateNotificationChannel(context.Context, *monitoringpb.UpdateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
|
||||
DeleteNotificationChannel(context.Context, *monitoringpb.DeleteNotificationChannelRequest, ...gax.CallOption) error
|
||||
SendNotificationChannelVerificationCode(context.Context, *monitoringpb.SendNotificationChannelVerificationCodeRequest, ...gax.CallOption) error
|
||||
GetNotificationChannelVerificationCode(context.Context, *monitoringpb.GetNotificationChannelVerificationCodeRequest, ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error)
|
||||
VerifyNotificationChannel(context.Context, *monitoringpb.VerifyNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
|
||||
}
|
||||
|
||||
// NotificationChannelClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The Notification Channel API provides access to configuration that
|
||||
// controls how messages related to incidents are sent.
|
||||
type NotificationChannelClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalNotificationChannelClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *NotificationChannelCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *NotificationChannelClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *NotificationChannelClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors
|
||||
// makes it possible for new channel types to be dynamically added.
|
||||
func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
|
||||
return c.internalClient.ListNotificationChannelDescriptors(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields
|
||||
// are expected / permitted for a notification channel of the given type.
|
||||
func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
|
||||
return c.internalClient.GetNotificationChannelDescriptor(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListNotificationChannels lists the notification channels that have been created for the project.
|
||||
// To list the types of notification channels that are supported, use
|
||||
// the ListNotificationChannelDescriptors method.
|
||||
func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
|
||||
return c.internalClient.ListNotificationChannels(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetNotificationChannel gets a single notification channel. The channel includes the relevant
|
||||
// configuration details with which the channel was created. However, the
|
||||
// response may truncate or omit passwords, API keys, or other private key
|
||||
// matter and thus the response may not be 100% identical to the information
|
||||
// that was supplied in the call to the create method.
|
||||
func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
return c.internalClient.GetNotificationChannel(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateNotificationChannel creates a new notification channel, representing a single notification
|
||||
// endpoint such as an email address, SMS number, or PagerDuty service.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// notification channels in a single project. This includes calls to
|
||||
// CreateNotificationChannel, DeleteNotificationChannel and
|
||||
// UpdateNotificationChannel.
|
||||
func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
return c.internalClient.CreateNotificationChannel(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask
|
||||
// remain unchanged.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// notification channels in a single project. This includes calls to
|
||||
// CreateNotificationChannel, DeleteNotificationChannel and
|
||||
// UpdateNotificationChannel.
|
||||
func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
return c.internalClient.UpdateNotificationChannel(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteNotificationChannel deletes a notification channel.
|
||||
//
|
||||
// Design your application to single-thread API calls that modify the state of
|
||||
// notification channels in a single project. This includes calls to
|
||||
// CreateNotificationChannel, DeleteNotificationChannel and
|
||||
// UpdateNotificationChannel.
|
||||
func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteNotificationChannel(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code
|
||||
// can then be supplied in VerifyNotificationChannel to verify the channel.
|
||||
func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.SendNotificationChannelVerificationCode(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then
|
||||
// be used in a call to VerifyNotificationChannel() on a different channel
|
||||
// with an equivalent identity in the same or in a different project. This
|
||||
// makes it possible to copy a channel between projects without requiring
|
||||
// manual reverification of the channel. If the channel is not in the
|
||||
// verified state, this method will fail (in other words, this may only be
|
||||
// used if the SendNotificationChannelVerificationCode and
|
||||
// VerifyNotificationChannel paths have already been used to put the given
|
||||
// channel into the verified state).
|
||||
//
|
||||
// There is no guarantee that the verification codes returned by this method
|
||||
// will be of a similar structure or form as the ones that are delivered
|
||||
// to the channel via SendNotificationChannelVerificationCode; while
|
||||
// VerifyNotificationChannel() will recognize both the codes delivered via
|
||||
// SendNotificationChannelVerificationCode() and returned from
|
||||
// GetNotificationChannelVerificationCode(), it is typically the case that
|
||||
// the verification codes delivered via
|
||||
// SendNotificationChannelVerificationCode() will be shorter and also
|
||||
// have a shorter expiration (e.g. codes such as “G-123456”) whereas
|
||||
// GetVerificationCode() will typically return a much longer, websafe base
|
||||
// 64 encoded string that has a longer expiration time.
|
||||
func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
|
||||
return c.internalClient.GetNotificationChannelVerificationCode(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code
|
||||
// delivered to the channel as a result of calling
|
||||
// SendNotificationChannelVerificationCode.
|
||||
func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
return c.internalClient.VerifyNotificationChannel(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// notificationChannelGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type notificationChannelGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing NotificationChannelClient
|
||||
CallOptions **NotificationChannelCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
notificationChannelClient monitoringpb.NotificationChannelServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewNotificationChannelClient creates a new notification channel service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The Notification Channel API provides access to configuration that
|
||||
// controls how messages related to incidents are sent.
|
||||
func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
|
||||
clientOpts := defaultNotificationChannelGRPCClientOptions()
|
||||
if newNotificationChannelClientHook != nil {
|
||||
hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := NotificationChannelClient{CallOptions: defaultNotificationChannelCallOptions()}
|
||||
|
||||
c := ¬ificationChannelGRPCClient{
|
||||
connPool: connPool,
|
||||
notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *notificationChannelGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
|
||||
it := &NotificationChannelDescriptorIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) {
|
||||
resp := &monitoringpb.ListNotificationChannelDescriptorsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.ListNotificationChannelDescriptors, req, settings.GRPC, c.logger, "ListNotificationChannelDescriptors")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetChannelDescriptors(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...)
|
||||
var resp *monitoringpb.NotificationChannelDescriptor
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannelDescriptor, req, settings.GRPC, c.logger, "GetNotificationChannelDescriptor")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...)
|
||||
it := &NotificationChannelIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) {
|
||||
resp := &monitoringpb.ListNotificationChannelsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.ListNotificationChannels, req, settings.GRPC, c.logger, "ListNotificationChannels")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetNotificationChannels(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...)
|
||||
var resp *monitoringpb.NotificationChannel
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannel, req, settings.GRPC, c.logger, "GetNotificationChannel")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...)
|
||||
var resp *monitoringpb.NotificationChannel
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.CreateNotificationChannel, req, settings.GRPC, c.logger, "CreateNotificationChannel")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...)
|
||||
var resp *monitoringpb.NotificationChannel
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.UpdateNotificationChannel, req, settings.GRPC, c.logger, "UpdateNotificationChannel")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.notificationChannelClient.DeleteNotificationChannel, req, settings.GRPC, c.logger, "DeleteNotificationChannel")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.notificationChannelClient.SendNotificationChannelVerificationCode, req, settings.GRPC, c.logger, "SendNotificationChannelVerificationCode")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...)
|
||||
var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannelVerificationCode, req, settings.GRPC, c.logger, "GetNotificationChannelVerificationCode")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...)
|
||||
var resp *monitoringpb.NotificationChannel
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.notificationChannelClient.VerifyNotificationChannel, req, settings.GRPC, c.logger, "VerifyNotificationChannel")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
242
vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go
generated
vendored
Normal file
242
vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newQueryClientHook clientHook
|
||||
|
||||
// QueryCallOptions contains the retry settings for each method of QueryClient.
|
||||
type QueryCallOptions struct {
|
||||
QueryTimeSeries []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultQueryGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultQueryCallOptions() *QueryCallOptions {
|
||||
return &QueryCallOptions{
|
||||
QueryTimeSeries: []gax.CallOption{},
|
||||
}
|
||||
}
|
||||
|
||||
// internalQueryClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalQueryClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
QueryTimeSeries(context.Context, *monitoringpb.QueryTimeSeriesRequest, ...gax.CallOption) *TimeSeriesDataIterator
|
||||
}
|
||||
|
||||
// QueryClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The QueryService API is used to manage time series data in Cloud
|
||||
// Monitoring. Time series data is a collection of data points that describes
|
||||
// the time-varying values of a metric.
|
||||
type QueryClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalQueryClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *QueryCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *QueryClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *QueryClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *QueryClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// QueryTimeSeries queries time series by using Monitoring Query Language (MQL). We recommend
|
||||
// using PromQL instead of MQL. For more information about the status of MQL,
|
||||
// see the MQL deprecation
|
||||
// notice (at https://cloud.google.com/stackdriver/docs/deprecations/mql).
|
||||
//
|
||||
// Deprecated: QueryTimeSeries may be removed in a future version.
|
||||
func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator {
|
||||
return c.internalClient.QueryTimeSeries(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// queryGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type queryGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing QueryClient
|
||||
CallOptions **QueryCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
queryClient monitoringpb.QueryServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewQueryClient creates a new query service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The QueryService API is used to manage time series data in Cloud
|
||||
// Monitoring. Time series data is a collection of data points that describes
|
||||
// the time-varying values of a metric.
|
||||
func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) {
|
||||
clientOpts := defaultQueryGRPCClientOptions()
|
||||
if newQueryClientHook != nil {
|
||||
hookOpts, err := newQueryClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := QueryClient{CallOptions: defaultQueryCallOptions()}
|
||||
|
||||
c := &queryGRPCClient{
|
||||
connPool: connPool,
|
||||
queryClient: monitoringpb.NewQueryServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *queryGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *queryGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...)
|
||||
it := &TimeSeriesDataIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeriesData, string, error) {
|
||||
resp := &monitoringpb.QueryTimeSeriesResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.queryClient.QueryTimeSeries, req, settings.GRPC, c.logger, "QueryTimeSeries")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetTimeSeriesData(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
569
vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go
generated
vendored
Normal file
569
vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go
generated
vendored
Normal file
@@ -0,0 +1,569 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newServiceMonitoringClientHook clientHook
|
||||
|
||||
// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient.
|
||||
type ServiceMonitoringCallOptions struct {
|
||||
CreateService []gax.CallOption
|
||||
GetService []gax.CallOption
|
||||
ListServices []gax.CallOption
|
||||
UpdateService []gax.CallOption
|
||||
DeleteService []gax.CallOption
|
||||
CreateServiceLevelObjective []gax.CallOption
|
||||
GetServiceLevelObjective []gax.CallOption
|
||||
ListServiceLevelObjectives []gax.CallOption
|
||||
UpdateServiceLevelObjective []gax.CallOption
|
||||
DeleteServiceLevelObjective []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions {
|
||||
return &ServiceMonitoringCallOptions{
|
||||
CreateService: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
GetService: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListServices: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
UpdateService: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
DeleteService: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateServiceLevelObjective: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
GetServiceLevelObjective: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListServiceLevelObjectives: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
UpdateServiceLevelObjective: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
DeleteServiceLevelObjective: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// internalServiceMonitoringClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalServiceMonitoringClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
CreateService(context.Context, *monitoringpb.CreateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
|
||||
GetService(context.Context, *monitoringpb.GetServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
|
||||
ListServices(context.Context, *monitoringpb.ListServicesRequest, ...gax.CallOption) *ServiceIterator
|
||||
UpdateService(context.Context, *monitoringpb.UpdateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
|
||||
DeleteService(context.Context, *monitoringpb.DeleteServiceRequest, ...gax.CallOption) error
|
||||
CreateServiceLevelObjective(context.Context, *monitoringpb.CreateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
|
||||
GetServiceLevelObjective(context.Context, *monitoringpb.GetServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
|
||||
ListServiceLevelObjectives(context.Context, *monitoringpb.ListServiceLevelObjectivesRequest, ...gax.CallOption) *ServiceLevelObjectiveIterator
|
||||
UpdateServiceLevelObjective(context.Context, *monitoringpb.UpdateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
|
||||
DeleteServiceLevelObjective(context.Context, *monitoringpb.DeleteServiceLevelObjectiveRequest, ...gax.CallOption) error
|
||||
}
|
||||
|
||||
// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for
|
||||
// managing and querying aspects of a Metrics Scope’s services. These include
|
||||
// the Service's monitored resources, its Service-Level Objectives, and a
|
||||
// taxonomy of categorized Health Metrics.
|
||||
type ServiceMonitoringClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalServiceMonitoringClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *ServiceMonitoringCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *ServiceMonitoringClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// CreateService create a Service.
|
||||
func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
|
||||
return c.internalClient.CreateService(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetService get the named Service.
|
||||
func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
|
||||
return c.internalClient.GetService(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListServices list Services for this Metrics Scope.
|
||||
func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {
|
||||
return c.internalClient.ListServices(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateService update this Service.
|
||||
func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
|
||||
return c.internalClient.UpdateService(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteService soft delete this Service.
|
||||
func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteService(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service.
|
||||
func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
|
||||
return c.internalClient.CreateServiceLevelObjective(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetServiceLevelObjective get a ServiceLevelObjective by name.
|
||||
func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
|
||||
return c.internalClient.GetServiceLevelObjective(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service.
|
||||
func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator {
|
||||
return c.internalClient.ListServiceLevelObjectives(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateServiceLevelObjective update the given ServiceLevelObjective.
|
||||
func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
|
||||
return c.internalClient.UpdateServiceLevelObjective(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteServiceLevelObjective delete the given ServiceLevelObjective.
|
||||
func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteServiceLevelObjective(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// serviceMonitoringGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type serviceMonitoringGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing ServiceMonitoringClient
|
||||
CallOptions **ServiceMonitoringCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewServiceMonitoringClient creates a new service monitoring service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for
|
||||
// managing and querying aspects of a Metrics Scope’s services. These include
|
||||
// the Service's monitored resources, its Service-Level Objectives, and a
|
||||
// taxonomy of categorized Health Metrics.
|
||||
func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) {
|
||||
clientOpts := defaultServiceMonitoringGRPCClientOptions()
|
||||
if newServiceMonitoringClientHook != nil {
|
||||
hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := ServiceMonitoringClient{CallOptions: defaultServiceMonitoringCallOptions()}
|
||||
|
||||
c := &serviceMonitoringGRPCClient{
|
||||
connPool: connPool,
|
||||
serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *serviceMonitoringGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...)
|
||||
var resp *monitoringpb.Service
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.CreateService, req, settings.GRPC, c.logger, "CreateService")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...)
|
||||
var resp *monitoringpb.Service
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.GetService, req, settings.GRPC, c.logger, "GetService")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...)
|
||||
it := &ServiceIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListServicesRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) {
|
||||
resp := &monitoringpb.ListServicesResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.ListServices, req, settings.GRPC, c.logger, "ListServices")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetServices(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...)
|
||||
var resp *monitoringpb.Service
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.UpdateService, req, settings.GRPC, c.logger, "UpdateService")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.serviceMonitoringClient.DeleteService, req, settings.GRPC, c.logger, "DeleteService")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...)
|
||||
var resp *monitoringpb.ServiceLevelObjective
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.CreateServiceLevelObjective, req, settings.GRPC, c.logger, "CreateServiceLevelObjective")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...)
|
||||
var resp *monitoringpb.ServiceLevelObjective
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.GetServiceLevelObjective, req, settings.GRPC, c.logger, "GetServiceLevelObjective")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...)
|
||||
it := &ServiceLevelObjectiveIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) {
|
||||
resp := &monitoringpb.ListServiceLevelObjectivesResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.ListServiceLevelObjectives, req, settings.GRPC, c.logger, "ListServiceLevelObjectives")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetServiceLevelObjectives(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...)
|
||||
var resp *monitoringpb.ServiceLevelObjective
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.serviceMonitoringClient.UpdateServiceLevelObjective, req, settings.GRPC, c.logger, "UpdateServiceLevelObjective")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.serviceMonitoringClient.DeleteServiceLevelObjective, req, settings.GRPC, c.logger, "DeleteServiceLevelObjective")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
347
vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go
generated
vendored
Normal file
347
vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go
generated
vendored
Normal file
@@ -0,0 +1,347 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newSnoozeClientHook clientHook
|
||||
|
||||
// SnoozeCallOptions contains the retry settings for each method of SnoozeClient.
|
||||
type SnoozeCallOptions struct {
|
||||
CreateSnooze []gax.CallOption
|
||||
ListSnoozes []gax.CallOption
|
||||
GetSnooze []gax.CallOption
|
||||
UpdateSnooze []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultSnoozeGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultSnoozeCallOptions() *SnoozeCallOptions {
|
||||
return &SnoozeCallOptions{
|
||||
CreateSnooze: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
ListSnoozes: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetSnooze: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
UpdateSnooze: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalSnoozeClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
|
||||
ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator
|
||||
GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
|
||||
UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
|
||||
}
|
||||
|
||||
// SnoozeClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The SnoozeService API is used to temporarily prevent an alert policy from
|
||||
// generating alerts. A Snooze is a description of the criteria under which one
|
||||
// or more alert policies should not fire alerts for the specified duration.
|
||||
type SnoozeClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalSnoozeClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *SnoozeCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *SnoozeClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *SnoozeClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// CreateSnooze creates a Snooze that will prevent alerts, which match the provided
|
||||
// criteria, from being opened. The Snooze applies for a specific time
|
||||
// interval.
|
||||
func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
|
||||
return c.internalClient.CreateSnooze(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in
|
||||
// filter, which specifies predicates to match Snoozes.
|
||||
func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator {
|
||||
return c.internalClient.ListSnoozes(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetSnooze retrieves a Snooze by name.
|
||||
func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
|
||||
return c.internalClient.GetSnooze(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the
|
||||
// given Snooze object.
|
||||
func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
|
||||
return c.internalClient.UpdateSnooze(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type snoozeGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing SnoozeClient
|
||||
CallOptions **SnoozeCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
snoozeClient monitoringpb.SnoozeServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewSnoozeClient creates a new snooze service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The SnoozeService API is used to temporarily prevent an alert policy from
|
||||
// generating alerts. A Snooze is a description of the criteria under which one
|
||||
// or more alert policies should not fire alerts for the specified duration.
|
||||
func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) {
|
||||
clientOpts := defaultSnoozeGRPCClientOptions()
|
||||
if newSnoozeClientHook != nil {
|
||||
hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()}
|
||||
|
||||
c := &snoozeGRPCClient{
|
||||
connPool: connPool,
|
||||
snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *snoozeGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *snoozeGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...)
|
||||
var resp *monitoringpb.Snooze
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.snoozeClient.CreateSnooze, req, settings.GRPC, c.logger, "CreateSnooze")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...)
|
||||
it := &SnoozeIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) {
|
||||
resp := &monitoringpb.ListSnoozesResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.snoozeClient.ListSnoozes, req, settings.GRPC, c.logger, "ListSnoozes")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetSnoozes(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...)
|
||||
var resp *monitoringpb.Snooze
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.snoozeClient.GetSnooze, req, settings.GRPC, c.logger, "GetSnooze")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...)
|
||||
var resp *monitoringpb.Snooze
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.snoozeClient.UpdateSnooze, req, settings.GRPC, c.logger, "UpdateSnooze")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
454
vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go
generated
vendored
Normal file
454
vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go
generated
vendored
Normal file
@@ -0,0 +1,454 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var newUptimeCheckClientHook clientHook
|
||||
|
||||
// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient.
|
||||
type UptimeCheckCallOptions struct {
|
||||
ListUptimeCheckConfigs []gax.CallOption
|
||||
GetUptimeCheckConfig []gax.CallOption
|
||||
CreateUptimeCheckConfig []gax.CallOption
|
||||
UpdateUptimeCheckConfig []gax.CallOption
|
||||
DeleteUptimeCheckConfig []gax.CallOption
|
||||
ListUptimeCheckIps []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultUptimeCheckGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
|
||||
internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions {
|
||||
return &UptimeCheckCallOptions{
|
||||
ListUptimeCheckConfigs: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetUptimeCheckConfig: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateUptimeCheckConfig: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
UpdateUptimeCheckConfig: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
},
|
||||
DeleteUptimeCheckConfig: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListUptimeCheckIps: []gax.CallOption{
|
||||
gax.WithTimeout(30000 * time.Millisecond),
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 30000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// internalUptimeCheckClient is an interface that defines the methods available from Cloud Monitoring API.
|
||||
type internalUptimeCheckClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
ListUptimeCheckConfigs(context.Context, *monitoringpb.ListUptimeCheckConfigsRequest, ...gax.CallOption) *UptimeCheckConfigIterator
|
||||
GetUptimeCheckConfig(context.Context, *monitoringpb.GetUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
|
||||
CreateUptimeCheckConfig(context.Context, *monitoringpb.CreateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
|
||||
UpdateUptimeCheckConfig(context.Context, *monitoringpb.UpdateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
|
||||
DeleteUptimeCheckConfig(context.Context, *monitoringpb.DeleteUptimeCheckConfigRequest, ...gax.CallOption) error
|
||||
ListUptimeCheckIps(context.Context, *monitoringpb.ListUptimeCheckIpsRequest, ...gax.CallOption) *UptimeCheckIpIterator
|
||||
}
|
||||
|
||||
// UptimeCheckClient is a client for interacting with Cloud Monitoring API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// The UptimeCheckService API is used to manage (list, create, delete, edit)
|
||||
// Uptime check configurations in the Cloud Monitoring product. An Uptime
|
||||
// check is a piece of configuration that determines which resources and
|
||||
// services to monitor for availability. These configurations can also be
|
||||
// configured interactively by navigating to the [Cloud console]
|
||||
// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project,
|
||||
// clicking on “Monitoring” on the left-hand side to navigate to Cloud
|
||||
// Monitoring, and then clicking on “Uptime”.
|
||||
type UptimeCheckClient struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalUptimeCheckClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *UptimeCheckCallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *UptimeCheckClient) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *UptimeCheckClient) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project
|
||||
// (leaving out any invalid configurations).
|
||||
func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
|
||||
return c.internalClient.ListUptimeCheckConfigs(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetUptimeCheckConfig gets a single Uptime check configuration.
|
||||
func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
|
||||
return c.internalClient.GetUptimeCheckConfig(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CreateUptimeCheckConfig creates a new Uptime check configuration.
|
||||
func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
|
||||
return c.internalClient.CreateUptimeCheckConfig(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire
|
||||
// configuration with a new one or replace only certain fields in the current
|
||||
// configuration by specifying the fields to be updated via updateMask.
|
||||
// Returns the updated configuration.
|
||||
func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
|
||||
return c.internalClient.UpdateUptimeCheckConfig(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail
|
||||
// if the Uptime check configuration is referenced by an alert policy or
|
||||
// other dependent configs that would be rendered invalid by the deletion.
|
||||
func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteUptimeCheckConfig(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// ListUptimeCheckIps returns the list of IP addresses that checkers run from.
|
||||
func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
|
||||
return c.internalClient.ListUptimeCheckIps(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// uptimeCheckGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type uptimeCheckGRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// Points back to the CallOptions field of the containing UptimeCheckClient
|
||||
CallOptions **UptimeCheckCallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
uptimeCheckClient monitoringpb.UptimeCheckServiceClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogHeaders []string
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
// NewUptimeCheckClient creates a new uptime check service client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// The UptimeCheckService API is used to manage (list, create, delete, edit)
|
||||
// Uptime check configurations in the Cloud Monitoring product. An Uptime
|
||||
// check is a piece of configuration that determines which resources and
|
||||
// services to monitor for availability. These configurations can also be
|
||||
// configured interactively by navigating to the [Cloud console]
|
||||
// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project,
|
||||
// clicking on “Monitoring” on the left-hand side to navigate to Cloud
|
||||
// Monitoring, and then clicking on “Uptime”.
|
||||
func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) {
|
||||
clientOpts := defaultUptimeCheckGRPCClientOptions()
|
||||
if newUptimeCheckClientHook != nil {
|
||||
hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := UptimeCheckClient{CallOptions: defaultUptimeCheckCallOptions()}
|
||||
|
||||
c := &uptimeCheckGRPCClient{
|
||||
connPool: connPool,
|
||||
uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
logger: internaloption.GetLogger(opts),
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
|
||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeaders = []string{
|
||||
"x-goog-api-client", gax.XGoogHeader(kv...),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *uptimeCheckGRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...)
|
||||
it := &UptimeCheckConfigIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) {
|
||||
resp := &monitoringpb.ListUptimeCheckConfigsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.uptimeCheckClient.ListUptimeCheckConfigs, req, settings.GRPC, c.logger, "ListUptimeCheckConfigs")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetUptimeCheckConfigs(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
|
||||
func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...)
|
||||
var resp *monitoringpb.UptimeCheckConfig
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.uptimeCheckClient.GetUptimeCheckConfig, req, settings.GRPC, c.logger, "GetUptimeCheckConfig")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...)
|
||||
var resp *monitoringpb.UptimeCheckConfig
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.uptimeCheckClient.CreateUptimeCheckConfig, req, settings.GRPC, c.logger, "CreateUptimeCheckConfig")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...)
|
||||
var resp *monitoringpb.UptimeCheckConfig
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.uptimeCheckClient.UpdateUptimeCheckConfig, req, settings.GRPC, c.logger, "UpdateUptimeCheckConfig")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
|
||||
hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
|
||||
|
||||
hds = append(c.xGoogHeaders, hds...)
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
|
||||
opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = executeRPC(ctx, c.uptimeCheckClient.DeleteUptimeCheckConfig, req, settings.GRPC, c.logger, "DeleteUptimeCheckConfig")
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
|
||||
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
|
||||
opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...)
|
||||
it := &UptimeCheckIpIterator{}
|
||||
req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) {
|
||||
resp := &monitoringpb.ListUptimeCheckIpsResponse{}
|
||||
if pageToken != "" {
|
||||
req.PageToken = pageToken
|
||||
}
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else if pageSize != 0 {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = executeRPC(ctx, c.uptimeCheckClient.ListUptimeCheckIps, req, settings.GRPC, c.logger, "ListUptimeCheckIps")
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetUptimeCheckIps(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
|
||||
return it
|
||||
}
|
||||
23
vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go
generated
vendored
Normal file
23
vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// Copyright 2023 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by gapicgen. DO NOT EDIT.
|
||||
|
||||
package monitoring
|
||||
|
||||
import "cloud.google.com/go/monitoring/internal"
|
||||
|
||||
// init publishes this module's release version so the gapic client can
// report it in the x-goog-api-client header.
func init() {
|
||||
	versionClient = internal.Version
|
||||
}
|
||||
18
vendor/cloud.google.com/go/monitoring/internal/version.go
generated
vendored
Normal file
18
vendor/cloud.google.com/go/monitoring/internal/version.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
// Version is the current tagged release of the library.
|
||||
// NOTE(review): presumably kept in sync with the monitoring module's release tag by tooling — confirm before editing by hand.
const Version = "1.24.2"
|
||||
282
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
282
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
@@ -1,6 +1,288 @@
|
||||
# Changes
|
||||
|
||||
|
||||
## [1.56.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.55.0...storage/v1.56.0) (2025-07-24)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/control:** Update BUILD configs to support rest transportation for all client ([116a33a](https://github.com/googleapis/google-cloud-go/commit/116a33ab13c9fac6f6830dded55c24d38504707b))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Avoid integration test segfaults. ([#12419](https://github.com/googleapis/google-cloud-go/issues/12419)) ([a9dec07](https://github.com/googleapis/google-cloud-go/commit/a9dec0763f85f083cc1da451249caae7ad97d904))
|
||||
* **storage:** Current object generation takeover. ([#12383](https://github.com/googleapis/google-cloud-go/issues/12383)) ([9ca8e01](https://github.com/googleapis/google-cloud-go/commit/9ca8e015405a523bbe68cbff2defbdff3dac0a61))
|
||||
* **storage:** Fix MultiRangeDownloader deadlock ([#12548](https://github.com/googleapis/google-cloud-go/issues/12548)) ([2eb23bb](https://github.com/googleapis/google-cloud-go/commit/2eb23bb01ffe92c967e772ef66c846357fbf5419))
|
||||
* **storage:** Remove object length limit for unfinalized reads ([#12489](https://github.com/googleapis/google-cloud-go/issues/12489)) ([5566d7d](https://github.com/googleapis/google-cloud-go/commit/5566d7dd5cc83afce38821961c447f5945e48456))
|
||||
|
||||
|
||||
### Performance Improvements
|
||||
|
||||
* **storage:** Zero copy for MultiRangeDownloader ([#12542](https://github.com/googleapis/google-cloud-go/issues/12542)) ([a5e6a68](https://github.com/googleapis/google-cloud-go/commit/a5e6a681164d5be62270cde16891685a9f03bb12))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage/internal:** Fix broken link for message `CustomPlacementConfig` ([9614487](https://github.com/googleapis/google-cloud-go/commit/96144875e01bfc8a59c2671c6eae87233710cef7))
|
||||
* **storage:** Fix typo in storage/doc.go ([#12391](https://github.com/googleapis/google-cloud-go/issues/12391)) ([bf74408](https://github.com/googleapis/google-cloud-go/commit/bf744088f0ed23ea510b914c994e1754ca1fc7c4))
|
||||
* **storage:** Improve error inspection documentation ([#12301](https://github.com/googleapis/google-cloud-go/issues/12301)) ([420da1a](https://github.com/googleapis/google-cloud-go/commit/420da1a64ac4040c8b2e6d6f0d66e7633426ac25))
|
||||
|
||||
## [1.55.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.54.0...storage/v1.55.0) (2025-05-29)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/control:** Add Client Libraries Storage IntelligenceConfig ([2aaada3](https://github.com/googleapis/google-cloud-go/commit/2aaada3fb7a9d3eaacec3351019e225c4038646b))
|
||||
* **storage/internal:** Add IpFilter to Bucket ([#12309](https://github.com/googleapis/google-cloud-go/issues/12309)) ([d8ae687](https://github.com/googleapis/google-cloud-go/commit/d8ae6874a54b48fce49968664f14db63c055c6e2))
|
||||
* **storage/internal:** Add Object.Retention message ([d8ae687](https://github.com/googleapis/google-cloud-go/commit/d8ae6874a54b48fce49968664f14db63c055c6e2))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Add EnableNewAuthLibrary internalOption to HTTP newClient ([#12320](https://github.com/googleapis/google-cloud-go/issues/12320)) ([0036073](https://github.com/googleapis/google-cloud-go/commit/0036073affee5451894654a983fba6b2638433cb))
|
||||
* **storage:** Migrate oauth2/google usages to cloud.google.com/go/auth ([#11191](https://github.com/googleapis/google-cloud-go/issues/11191)) ([3a22349](https://github.com/googleapis/google-cloud-go/commit/3a22349c1ba6a192d70269f77e5804a9957aa572))
|
||||
* **storage:** Omit check on MultiRangeDownloader ([#12342](https://github.com/googleapis/google-cloud-go/issues/12342)) ([774621c](https://github.com/googleapis/google-cloud-go/commit/774621c5baa5110f57fe79d817143416bd671d1e))
|
||||
* **storage:** Retry url.Error and net.OpErrors when they wrap an io.EOF ([#12289](https://github.com/googleapis/google-cloud-go/issues/12289)) ([080f6b0](https://github.com/googleapis/google-cloud-go/commit/080f6b05c5e8bd5baaef71ed47f8d54c695f63d3))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage/internal:** Add explicit Optional annotations to fields that have always been treated as optional ([d8ae687](https://github.com/googleapis/google-cloud-go/commit/d8ae6874a54b48fce49968664f14db63c055c6e2))
|
||||
* **storage/internal:** Add note that Bucket.project output format is always project number format ([d8ae687](https://github.com/googleapis/google-cloud-go/commit/d8ae6874a54b48fce49968664f14db63c055c6e2))
|
||||
* **storage/internal:** Add note that managedFolders are supported for GetIamPolicy and SetIamPolicy ([d8ae687](https://github.com/googleapis/google-cloud-go/commit/d8ae6874a54b48fce49968664f14db63c055c6e2))
|
||||
|
||||
## [1.54.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.53.0...storage/v1.54.0) (2025-05-12)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Add experimental ZB API option ([#12214](https://github.com/googleapis/google-cloud-go/issues/12214)) ([f669982](https://github.com/googleapis/google-cloud-go/commit/f669982de2abf64759eccf5c38bd669488b9cf6a))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Fix append writer hang ([#12201](https://github.com/googleapis/google-cloud-go/issues/12201)) ([7ce2a2a](https://github.com/googleapis/google-cloud-go/commit/7ce2a2ad3ae9deff28c73c1bcc2e7001770464eb))
|
||||
* **storage:** Retry unwrapped EOFs ([#12202](https://github.com/googleapis/google-cloud-go/issues/12202)) ([b2d42bd](https://github.com/googleapis/google-cloud-go/commit/b2d42bda6a398f3aa00030b6e99bbcb40f132ff7))
|
||||
|
||||
## [1.53.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.52.0...storage/v1.53.0) (2025-05-02)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/control:** Add Anywhere cache control APIs ([83ae06c](https://github.com/googleapis/google-cloud-go/commit/83ae06c3ec7d190e38856ba4cfd8a13f08356b4d))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Fix append edge cases ([#12074](https://github.com/googleapis/google-cloud-go/issues/12074)) ([0eee1f9](https://github.com/googleapis/google-cloud-go/commit/0eee1f99a7dc0d1bfc36fa43d78933cae47962ee))
|
||||
* **storage:** Fix retries for redirection errors. ([#12093](https://github.com/googleapis/google-cloud-go/issues/12093)) ([3e177e7](https://github.com/googleapis/google-cloud-go/commit/3e177e755f5bf6aa96e8712cc4adcba7eb6f04f6))
|
||||
* **storage:** Handle gRPC deadlines in tests. ([#12092](https://github.com/googleapis/google-cloud-go/issues/12092)) ([30b7cd2](https://github.com/googleapis/google-cloud-go/commit/30b7cd27771ccbd49b70ee106da36362ba8f1e87))
|
||||
* **storage:** Update offset on resumable upload retry ([#12086](https://github.com/googleapis/google-cloud-go/issues/12086)) ([6ce8fe5](https://github.com/googleapis/google-cloud-go/commit/6ce8fe5aec0ec7916eda4d1405cab5f5f65a5de8))
|
||||
* **storage:** Validate Bidi option for MRD ([#12033](https://github.com/googleapis/google-cloud-go/issues/12033)) ([d9018cf](https://github.com/googleapis/google-cloud-go/commit/d9018cf640a9ac25e2b23b75b3bcfa734379ab09))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage/control:** Added comments for Anywhere cache messages ([83ae06c](https://github.com/googleapis/google-cloud-go/commit/83ae06c3ec7d190e38856ba4cfd8a13f08356b4d))
|
||||
|
||||
## [1.52.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.51.0...storage/v1.52.0) (2025-04-22)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/control:** Add Anywhere cache control APIs ([#11807](https://github.com/googleapis/google-cloud-go/issues/11807)) ([12bfa98](https://github.com/googleapis/google-cloud-go/commit/12bfa984f87099dbfbd5abf3436e440e62b04bad))
|
||||
* **storage:** Add CurrentState function to determine state of stream in MRD ([#11688](https://github.com/googleapis/google-cloud-go/issues/11688)) ([14e8e13](https://github.com/googleapis/google-cloud-go/commit/14e8e132d9d5808d1ca789792e7e39f0857991da))
|
||||
* **storage:** Add OwnerEntity to bucketAttrs ([#11857](https://github.com/googleapis/google-cloud-go/issues/11857)) ([4cd4a0c](https://github.com/googleapis/google-cloud-go/commit/4cd4a0ca1f6132ea6ed9df7b27310a3238a9c3fd))
|
||||
* **storage:** Takeover appendable object ([#11977](https://github.com/googleapis/google-cloud-go/issues/11977)) ([513b937](https://github.com/googleapis/google-cloud-go/commit/513b937420b945c4a76e20711f305c6ad8a77812))
|
||||
* **storage:** Unfinalized appendable objects. ([#11647](https://github.com/googleapis/google-cloud-go/issues/11647)) ([52c0218](https://github.com/googleapis/google-cloud-go/commit/52c02183fabf43fcba3893f493140ac28a7836d1))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Fix Attrs for append takeover ([#11989](https://github.com/googleapis/google-cloud-go/issues/11989)) ([6db35b1](https://github.com/googleapis/google-cloud-go/commit/6db35b10567b7f1463bfef722b0fd72257190ee7))
|
||||
* **storage:** Fix panic when Flush called early ([#11934](https://github.com/googleapis/google-cloud-go/issues/11934)) ([7d0b8a7](https://github.com/googleapis/google-cloud-go/commit/7d0b8a75ae55731ae765c01f24920f9f11038f44))
|
||||
* **storage:** Fix unfinalized write size ([#12016](https://github.com/googleapis/google-cloud-go/issues/12016)) ([6217f8f](https://github.com/googleapis/google-cloud-go/commit/6217f8fd3cd8680a7e6b7b46fc9b7bda6ee6292e))
|
||||
* **storage:** Force first message on next sendBuffer when nothing sent on current ([#11871](https://github.com/googleapis/google-cloud-go/issues/11871)) ([a1a2292](https://github.com/googleapis/google-cloud-go/commit/a1a22927d6a4399e7392787bccb9707bc9e8f149))
|
||||
* **storage:** Populate Writer.Attrs after Flush() ([#12021](https://github.com/googleapis/google-cloud-go/issues/12021)) ([8e56f74](https://github.com/googleapis/google-cloud-go/commit/8e56f745e7f2175660838f96c1a12a46841cac40))
|
||||
* **storage:** Remove check for FinalizeOnClose ([#11992](https://github.com/googleapis/google-cloud-go/issues/11992)) ([2664b8c](https://github.com/googleapis/google-cloud-go/commit/2664b8cec00a606001184cb17c074fd0e79e66b8))
|
||||
* **storage:** Wrap read response parsing errors ([#11951](https://github.com/googleapis/google-cloud-go/issues/11951)) ([d2e6583](https://github.com/googleapis/google-cloud-go/commit/d2e658387b80ec8a3e41e048a9d520b8dd13dd00))
|
||||
|
||||
|
||||
## [1.51.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.50.0...storage/v1.51.0) (2025-03-07)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/append:** Support appends in w1r3. ([#11483](https://github.com/googleapis/google-cloud-go/issues/11483)) ([48bb391](https://github.com/googleapis/google-cloud-go/commit/48bb39154479a2cf2d379316e0915f39d7b7a518))
|
||||
* **storage:** Benchmark with experimental MRD. ([#11501](https://github.com/googleapis/google-cloud-go/issues/11501)) ([7b49152](https://github.com/googleapis/google-cloud-go/commit/7b491520a693d258d3370a19c43c9dff6c8558c7))
|
||||
* **storage:** Implement RetryChunkDeadline for grpc writes ([#11476](https://github.com/googleapis/google-cloud-go/issues/11476)) ([03575d7](https://github.com/googleapis/google-cloud-go/commit/03575d74f5241cc714e4d3ac63635569a34f5633))
|
||||
* **storage:** Specify benchmark integrity check. ([#11465](https://github.com/googleapis/google-cloud-go/issues/11465)) ([da18845](https://github.com/googleapis/google-cloud-go/commit/da188453e0254c49a01d28788d0849a2d0e98e0c))
|
||||
* **storage:** Use ReadHandle for faster re-connect ([#11510](https://github.com/googleapis/google-cloud-go/issues/11510)) ([cac52f7](https://github.com/googleapis/google-cloud-go/commit/cac52f79a73d46774d33d76e3075c0a5b3e0b9f3))
|
||||
* **storage:** Wrap NotFound errors for buckets and objects ([#11519](https://github.com/googleapis/google-cloud-go/issues/11519)) ([0dd7d3d](https://github.com/googleapis/google-cloud-go/commit/0dd7d3d62e54c6c3bca395fcca8450ad3347a5a0))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage/append:** Report progress for appends. ([#11503](https://github.com/googleapis/google-cloud-go/issues/11503)) ([96dbb6c](https://github.com/googleapis/google-cloud-go/commit/96dbb6c12398fb3cbffab2bf61836bef2f704f66))
|
||||
* **storage:** Add a safety check for readhandle ([#11549](https://github.com/googleapis/google-cloud-go/issues/11549)) ([c9edb37](https://github.com/googleapis/google-cloud-go/commit/c9edb379ece70f065650702c9240ee540ca2f610))
|
||||
* **storage:** Add universe domain to defaultSignBytesFunc ([#11521](https://github.com/googleapis/google-cloud-go/issues/11521)) ([511608b](https://github.com/googleapis/google-cloud-go/commit/511608b8e8554aa06f9fe2e2e4f51ead0f484031))
|
||||
* **storage:** Clone the defaultRetry to avoid modifying it directly ([#11533](https://github.com/googleapis/google-cloud-go/issues/11533)) ([7f8d69d](https://github.com/googleapis/google-cloud-go/commit/7f8d69dcd6a7b1ad6c1df8d9fe8dfb5fe0947479))
|
||||
* **storage:** Fix adding multiple range on stream with same read id ([#11584](https://github.com/googleapis/google-cloud-go/issues/11584)) ([0bb3434](https://github.com/googleapis/google-cloud-go/commit/0bb3434e0e12563ff21ef72ad2e52ad7eb61d66e))
|
||||
* **storage:** Modify the callback of mrd to return length of data read instead of limit. ([#11687](https://github.com/googleapis/google-cloud-go/issues/11687)) ([9e359f0](https://github.com/googleapis/google-cloud-go/commit/9e359f0089f744c32d12bf77889d69a4db155357))
|
||||
* **storage:** Propagate ctx from invoke to grpc upload reqs ([#11475](https://github.com/googleapis/google-cloud-go/issues/11475)) ([9ad9d76](https://github.com/googleapis/google-cloud-go/commit/9ad9d7665ca2f4cfdcee75f5e683084ac49536a6))
|
||||
* **storage:** Remove duplicate routing header ([#11534](https://github.com/googleapis/google-cloud-go/issues/11534)) ([8eeb59c](https://github.com/googleapis/google-cloud-go/commit/8eeb59cbfb16d8f379f7aa4c6f11e53cebbd38a6))
|
||||
* **storage:** Return sentinel ErrObjectNotExist for copy and compose ([#11369](https://github.com/googleapis/google-cloud-go/issues/11369)) ([74d0c10](https://github.com/googleapis/google-cloud-go/commit/74d0c1096f897ca3c15646f3049ea540bed0a6a0)), refs [#10760](https://github.com/googleapis/google-cloud-go/issues/10760)
|
||||
* **storage:** Wait for XML read req to finish to avoid data races ([#11527](https://github.com/googleapis/google-cloud-go/issues/11527)) ([782e12a](https://github.com/googleapis/google-cloud-go/commit/782e12a11c1dfe6d831f5d0b9b5f4409993e4d9e))
|
||||
|
||||
## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.49.0...storage/v1.50.0) (2025-01-09)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add new appendable Object to BidiWrite API ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
|
||||
* **storage/internal:** Add new preview BidiReadObject API ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
|
||||
* **storage:** Add support for gRPC bi-directional multi-range reads. This API is in private preview and not generally and is not yet available for general use. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
|
||||
* **storage:** Add support for ReadHandle, a gRPC feature that allows for accelerated resumption of streams when one is interrupted. ReadHandle requires the bi-directional read API, which is in private preview and is not yet available for general use. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
|
||||
* **storage:** Support appendable semantics for writes in gRPC. This API is in preview. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
|
||||
* **storage:** Refactor gRPC writer flow ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Add mutex around uses of mrd variables ([#11405](https://github.com/googleapis/google-cloud-go/issues/11405)) ([54bfc32](https://github.com/googleapis/google-cloud-go/commit/54bfc32db7a0ff40a493de4d466f21ad624de04e))
|
||||
* **storage:** Return the appropriate error for method not supported ([#11416](https://github.com/googleapis/google-cloud-go/issues/11416)) ([56d704e](https://github.com/googleapis/google-cloud-go/commit/56d704e3037840aeb87b22cc83f2b6088c79bcee))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage/internal:** Add IAM information to RPC comments for reference documentation ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
|
||||
* **storage:** Add preview comment to NewMultiRangeDownloader ([#11420](https://github.com/googleapis/google-cloud-go/issues/11420)) ([4ec1d66](https://github.com/googleapis/google-cloud-go/commit/4ec1d66ee180e800606568e8693a282645ec7369))
|
||||
|
||||
## [1.49.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.48.0...storage/v1.49.0) (2024-12-21)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add finalize_time field in Object metadata ([46fc993](https://github.com/googleapis/google-cloud-go/commit/46fc993a3195203a230e2831bee456baaa9f7b1c))
|
||||
* **storage/internal:** Add MoveObject RPC ([46fc993](https://github.com/googleapis/google-cloud-go/commit/46fc993a3195203a230e2831bee456baaa9f7b1c))
|
||||
* **storage:** Add ObjectHandle.Move method ([#11302](https://github.com/googleapis/google-cloud-go/issues/11302)) ([a3cb8c4](https://github.com/googleapis/google-cloud-go/commit/a3cb8c4fc48883b54d4e830ae5f5ef4f1a3b8ca3))
|
||||
* **storage:** Return file metadata on read ([#11212](https://github.com/googleapis/google-cloud-go/issues/11212)) ([d49263b](https://github.com/googleapis/google-cloud-go/commit/d49263b2ab614cad801e26b4a169eafe08d4a2a0))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage/dataflux:** Address deadlock when reading from ranges ([#11303](https://github.com/googleapis/google-cloud-go/issues/11303)) ([32cbf56](https://github.com/googleapis/google-cloud-go/commit/32cbf561590541eb0387787bf729be6ddf68e4ee))
|
||||
* **storage:** Disable allow non-default credentials flag ([#11337](https://github.com/googleapis/google-cloud-go/issues/11337)) ([145ddf4](https://github.com/googleapis/google-cloud-go/commit/145ddf4f6123d9561856d2b6adeefdfae462b3f7))
|
||||
* **storage:** Monitored resource detection ([#11197](https://github.com/googleapis/google-cloud-go/issues/11197)) ([911bcd8](https://github.com/googleapis/google-cloud-go/commit/911bcd8b1816256482bd52e85da7eaf00c315293))
|
||||
* **storage:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
|
||||
|
||||
## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.47.0...storage/v1.48.0) (2024-12-05)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/dataflux:** Run worksteal listing parallel to sequential listing ([#10966](https://github.com/googleapis/google-cloud-go/issues/10966)) ([3005f5a](https://github.com/googleapis/google-cloud-go/commit/3005f5a86c18254e569b8b1782bf014aa62f33cc))
|
||||
* **storage:** Add Writer.ChunkTransferTimeout ([#11111](https://github.com/googleapis/google-cloud-go/issues/11111)) ([fd1db20](https://github.com/googleapis/google-cloud-go/commit/fd1db203d0de898891b9920aacb141ea39228609))
|
||||
* **storage:** Allow non default service account ([#11137](https://github.com/googleapis/google-cloud-go/issues/11137)) ([19f01c3](https://github.com/googleapis/google-cloud-go/commit/19f01c3c48ed1272c8fc0af9e5f69646cb662808))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Add backoff to gRPC write retries ([#11200](https://github.com/googleapis/google-cloud-go/issues/11200)) ([a7db927](https://github.com/googleapis/google-cloud-go/commit/a7db927da9cf4c6cf242a5db83e44a16d75a8291))
|
||||
* **storage:** Correct direct connectivity check ([#11152](https://github.com/googleapis/google-cloud-go/issues/11152)) ([a75c8b0](https://github.com/googleapis/google-cloud-go/commit/a75c8b0f72c38d9a85c908715c3e37eb5cffb131))
|
||||
* **storage:** Disable soft delete policy using 0 retentionDurationSeconds ([#11226](https://github.com/googleapis/google-cloud-go/issues/11226)) ([f087721](https://github.com/googleapis/google-cloud-go/commit/f087721b7b20ad28ded1d0a84756a8bbaa2bb95a))
|
||||
* **storage:** Retry SignBlob call for URL signing ([#11154](https://github.com/googleapis/google-cloud-go/issues/11154)) ([f198452](https://github.com/googleapis/google-cloud-go/commit/f198452fd2b29e779e9080ba79d7e873eb0c32ef))
|
||||
|
||||
## [1.47.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.46.0...storage/v1.47.0) (2024-11-14)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Introduce dp detector based on grpc metrics ([#11100](https://github.com/googleapis/google-cloud-go/issues/11100)) ([60c2323](https://github.com/googleapis/google-cloud-go/commit/60c2323102b623e042fc508e2b1bb830a03f9577))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Bump auth dep ([#11135](https://github.com/googleapis/google-cloud-go/issues/11135)) ([9620a51](https://github.com/googleapis/google-cloud-go/commit/9620a51b2c6904d8d93e124494bc297fb98553d2))
|
||||
|
||||
## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.45.0...storage/v1.46.0) (2024-10-31)
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Add grpc metrics experimental options ([#10984](https://github.com/googleapis/google-cloud-go/issues/10984)) ([5b7397b](https://github.com/googleapis/google-cloud-go/commit/5b7397b169176f030049e1511859a883422c774e))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Skip only specific transport tests. ([#11016](https://github.com/googleapis/google-cloud-go/issues/11016)) ([d40fbff](https://github.com/googleapis/google-cloud-go/commit/d40fbff9c1984aeed0224a4ac93eb95c5af17126))
|
||||
* **storage:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
|
||||
* **storage:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([2b8ca4b](https://github.com/googleapis/google-cloud-go/commit/2b8ca4b4127ce3025c7a21cc7247510e07cc5625))
|
||||
|
||||
|
||||
### Miscellaneous Chores
|
||||
|
||||
* **storage/internal:** Remove notification, service account, and hmac RPCS. These API have been migrated to Storage Control and are available via the JSON API. ([#11008](https://github.com/googleapis/google-cloud-go/issues/11008)) ([e0759f4](https://github.com/googleapis/google-cloud-go/commit/e0759f46639b4c542e5b49e4dc81340d8e123370))
|
||||
|
||||
## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.44.0...storage/v1.45.0) (2024-10-17)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Adds support for restore token ([70d82fe](https://github.com/googleapis/google-cloud-go/commit/70d82fe93f60f1075298a077ce1616f9ae7e13fe))
|
||||
* **storage:** Adding bucket-specific dynamicDelay ([#10987](https://github.com/googleapis/google-cloud-go/issues/10987)) ([a807a7e](https://github.com/googleapis/google-cloud-go/commit/a807a7e7f9fb002374407622c126102c5e61af82))
|
||||
* **storage:** Dynamic read request stall timeout ([#10958](https://github.com/googleapis/google-cloud-go/issues/10958)) ([a09f00e](https://github.com/googleapis/google-cloud-go/commit/a09f00eeecac82af98ae769bab284ee58a3a66cb))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage:** Remove preview wording from NewGRPCClient ([#11002](https://github.com/googleapis/google-cloud-go/issues/11002)) ([40c3a5b](https://github.com/googleapis/google-cloud-go/commit/40c3a5b9c4cd4db2f1695e180419197b6a03ed7f))
|
||||
|
||||
## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f))
|
||||
* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) ([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2))
|
||||
* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f))
|
||||
* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
|
||||
* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545))
|
||||
* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8))
|
||||
* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb))
|
||||
* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88))
|
||||
* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b))
|
||||
* **storage:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
|
||||
* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361)
|
||||
* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34))
|
||||
* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8))
|
||||
* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c))
|
||||
* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567)
|
||||
* **storage:** Add unknown host to retriable errors ([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978))
|
||||
* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
|
||||
* **storage:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
|
||||
* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d))
|
||||
* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0))
|
||||
* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2))
|
||||
* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
|
||||
* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
|
||||
|
||||
|
||||
### Performance Improvements
|
||||
|
||||
* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage/internal:** Clarify possible objectAccessControl roles ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
|
||||
* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
|
||||
|
||||
## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03)
|
||||
|
||||
|
||||
|
||||
53
vendor/cloud.google.com/go/storage/TESTING.md
generated
vendored
Normal file
53
vendor/cloud.google.com/go/storage/TESTING.md
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
# Testing
|
||||
|
||||
Package storage has unit, emulated integration tests, and integration tests
|
||||
against the real GCS service.
|
||||
|
||||
## Setup
|
||||
|
||||
Assume that you're running from a directory which contains the `google-cloud-go`
|
||||
git repository.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/googleapis/google-cloud-go
|
||||
git clone https://github.com/googleapis/storage-testbench # emulator
|
||||
```
|
||||
|
||||
## Running unit tests
|
||||
|
||||
```bash
|
||||
go test ./google-cloud-go/storage -short
|
||||
```
|
||||
|
||||
## Running emulated integration tests
|
||||
|
||||
See
|
||||
https://github.com/googleapis/storage-testbench?tab=readme-ov-file#how-to-use-this-testbench
|
||||
for testbench setup instructions. After following those instructions, you should
|
||||
have an emulator running an HTTP server on port 9000 and a gRPC server on port
|
||||
8888.
|
||||
|
||||
```bash
|
||||
STORAGE_EMULATOR_HOST_GRPC="localhost:8888" STORAGE_EMULATOR_HOST="http://localhost:9000" go test ./google-cloud-go/storage -short -run="^Test(RetryConformance|.*Emulated)"
|
||||
```
|
||||
|
||||
If you don't specify the `-run` filter, this will also run unit tests.
|
||||
|
||||
## Running live service integration tests
|
||||
|
||||
See the [general setup instructions](../CONTRIBUTING.md#local-setup) for more
|
||||
details. The GCS integration tests require:
|
||||
|
||||
- A project configured such that all bucket types can be created (e.g. with and
|
||||
without UBLA, with and without HNS). A dedicated project which only stores
|
||||
test data is recommended.
|
||||
- A JSON key file for a service account with most GCS privileges in that
|
||||
project.
|
||||
- A VM in that project.
|
||||
|
||||
Run with:
|
||||
|
||||
```bash
|
||||
GCLOUD_TESTS_GOLANG_PROJECT_ID="${PROJECT_ID?}" GCLOUD_TESTS_GOLANG_KEY="${KEYFILE?}" \
|
||||
go test ./google-cloud-go/storage -run="^Test.*Integration"
|
||||
```
|
||||
13
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
13
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
@@ -17,7 +17,6 @@ package storage
|
||||
import (
|
||||
"context"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
@@ -77,8 +76,8 @@ type ACLHandle struct {
|
||||
|
||||
// Delete permanently deletes the ACL entry for the given entity.
|
||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "ACL.Delete")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectDelete(ctx, entity)
|
||||
@@ -91,8 +90,8 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
|
||||
|
||||
// Set sets the role for the given entity.
|
||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "ACL.Set")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectSet(ctx, entity, role, false)
|
||||
@@ -105,8 +104,8 @@ func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (er
|
||||
|
||||
// List retrieves ACL entries.
|
||||
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "ACL.List")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectList(ctx)
|
||||
|
||||
85
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
85
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
@@ -26,7 +26,6 @@ import (
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iamcredentials/v1"
|
||||
@@ -82,8 +81,8 @@ func (c *Client) Bucket(name string) *BucketHandle {
|
||||
// Create creates the Bucket in the project.
|
||||
// If attrs is nil the API defaults will be used.
|
||||
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.Create")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
|
||||
@@ -95,8 +94,8 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
|
||||
|
||||
// Delete deletes the Bucket.
|
||||
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.Delete")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
|
||||
@@ -150,8 +149,8 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
|
||||
|
||||
// Attrs returns the metadata for the bucket.
|
||||
func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.Attrs")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
|
||||
@@ -159,8 +158,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
|
||||
|
||||
// Update updates a bucket's attributes.
|
||||
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.Update")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
|
||||
o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
|
||||
@@ -200,11 +199,11 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
|
||||
newopts.GoogleAccessID = id
|
||||
}
|
||||
if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 {
|
||||
if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
|
||||
if j, ok := b.c.credsJSON(); ok {
|
||||
var sa struct {
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
err := json.Unmarshal(j, &sa)
|
||||
if err == nil && sa.PrivateKey != "" {
|
||||
newopts.PrivateKey = []byte(sa.PrivateKey)
|
||||
}
|
||||
@@ -248,11 +247,11 @@ func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolic
|
||||
newopts.GoogleAccessID = id
|
||||
}
|
||||
if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 {
|
||||
if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
|
||||
if j, ok := b.c.credsJSON(); ok {
|
||||
var sa struct {
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
err := json.Unmarshal(j, &sa)
|
||||
if err == nil && sa.PrivateKey != "" {
|
||||
newopts.PrivateKey = []byte(sa.PrivateKey)
|
||||
}
|
||||
@@ -270,14 +269,14 @@ func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolic
|
||||
func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
|
||||
returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)")
|
||||
|
||||
if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
|
||||
if j, ok := b.c.credsJSON(); ok {
|
||||
var sa struct {
|
||||
ClientEmail string `json:"client_email"`
|
||||
SAImpersonationURL string `json:"service_account_impersonation_url"`
|
||||
CredType string `json:"type"`
|
||||
}
|
||||
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
err := json.Unmarshal(j, &sa)
|
||||
if err != nil {
|
||||
returnErr = err
|
||||
} else {
|
||||
@@ -320,17 +319,30 @@ func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte,
|
||||
return func(in []byte) ([]byte, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
opts := []option.ClientOption{option.WithHTTPClient(b.c.hc)}
|
||||
|
||||
if b.c.creds != nil {
|
||||
universeDomain, err := b.c.creds.UniverseDomain(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, option.WithUniverseDomain(universeDomain))
|
||||
}
|
||||
|
||||
// It's ok to recreate this service per call since we pass in the http client,
|
||||
// circumventing the cost of recreating the auth/transport layer
|
||||
svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
|
||||
svc, err := iamcredentials.NewService(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
|
||||
}
|
||||
|
||||
resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
|
||||
Payload: base64.StdEncoding.EncodeToString(in),
|
||||
}).Do()
|
||||
if err != nil {
|
||||
// Do the SignBlob call with a retry for transient errors.
|
||||
var resp *iamcredentials.SignBlobResponse
|
||||
if err := run(ctx, func(ctx context.Context) error {
|
||||
resp, err = svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
|
||||
Payload: base64.StdEncoding.EncodeToString(in),
|
||||
}).Do()
|
||||
return err
|
||||
}, b.retry, true); err != nil {
|
||||
return nil, fmt.Errorf("unable to sign bytes: %w", err)
|
||||
}
|
||||
out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
|
||||
@@ -416,6 +428,10 @@ type BucketAttrs struct {
|
||||
// This field is read-only.
|
||||
Created time.Time
|
||||
|
||||
// Updated is the time at which the bucket was last modified.
|
||||
// This field is read-only.
|
||||
Updated time.Time
|
||||
|
||||
// VersioningEnabled reports whether this bucket has versioning enabled.
|
||||
VersioningEnabled bool
|
||||
|
||||
@@ -498,6 +514,9 @@ type BucketAttrs struct {
|
||||
// It cannot be modified after bucket creation time.
|
||||
// UniformBucketLevelAccess must also also be enabled on the bucket.
|
||||
HierarchicalNamespace *HierarchicalNamespace
|
||||
|
||||
// OwnerEntity contains entity information in the form "project-owner-projectId".
|
||||
OwnerEntity string
|
||||
}
|
||||
|
||||
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
|
||||
@@ -824,6 +843,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
||||
DefaultEventBasedHold: b.DefaultEventBasedHold,
|
||||
StorageClass: b.StorageClass,
|
||||
Created: convertTime(b.TimeCreated),
|
||||
Updated: convertTime(b.Updated),
|
||||
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
|
||||
ACL: toBucketACLRules(b.Acl),
|
||||
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
|
||||
@@ -847,6 +867,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
||||
Autoclass: toAutoclassFromRaw(b.Autoclass),
|
||||
SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy),
|
||||
HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace),
|
||||
OwnerEntity: ownerEntityFromRaw(b.Owner),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -861,6 +882,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
|
||||
DefaultEventBasedHold: b.GetDefaultEventBasedHold(),
|
||||
StorageClass: b.GetStorageClass(),
|
||||
Created: b.GetCreateTime().AsTime(),
|
||||
Updated: b.GetUpdateTime().AsTime(),
|
||||
VersioningEnabled: b.GetVersioning().GetEnabled(),
|
||||
ACL: toBucketACLRulesFromProto(b.GetAcl()),
|
||||
DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
|
||||
@@ -882,6 +904,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
|
||||
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
|
||||
SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy),
|
||||
HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace),
|
||||
OwnerEntity: ownerEntityFromProto(b.GetOwner()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1332,8 +1355,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
|
||||
}
|
||||
if ua.SoftDeletePolicy != nil {
|
||||
if ua.SoftDeletePolicy.RetentionDuration == 0 {
|
||||
rb.NullFields = append(rb.NullFields, "SoftDeletePolicy")
|
||||
rb.SoftDeletePolicy = nil
|
||||
rb.SoftDeletePolicy = &raw.BucketSoftDeletePolicy{
|
||||
RetentionDurationSeconds: 0,
|
||||
ForceSendFields: []string{"RetentionDurationSeconds"},
|
||||
}
|
||||
} else {
|
||||
rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy()
|
||||
}
|
||||
@@ -2204,6 +2229,20 @@ func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *Hierarc
|
||||
}
|
||||
}
|
||||
|
||||
func ownerEntityFromRaw(r *raw.BucketOwner) string {
|
||||
if r == nil {
|
||||
return ""
|
||||
}
|
||||
return r.Entity
|
||||
}
|
||||
|
||||
func ownerEntityFromProto(p *storagepb.Owner) string {
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return p.GetEntity()
|
||||
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the
|
||||
// Query q. If q is nil, no filtering is done. Objects will be iterated over
|
||||
// lexicographically by name.
|
||||
|
||||
44
vendor/cloud.google.com/go/storage/client.go
generated
vendored
44
vendor/cloud.google.com/go/storage/client.go
generated
vendored
@@ -16,7 +16,6 @@ package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/iam/apiv1/iampb"
|
||||
@@ -62,6 +61,7 @@ type storageClient interface {
|
||||
GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error)
|
||||
UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
|
||||
RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error)
|
||||
MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error)
|
||||
|
||||
// Default Object ACL methods.
|
||||
|
||||
@@ -87,7 +87,7 @@ type storageClient interface {
|
||||
RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error)
|
||||
|
||||
NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error)
|
||||
OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error)
|
||||
OpenWriter(params *openWriterParams, opts ...storageOption) (internalWriter, error)
|
||||
|
||||
// IAM methods.
|
||||
|
||||
@@ -107,6 +107,8 @@ type storageClient interface {
|
||||
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
|
||||
CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
|
||||
DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
|
||||
|
||||
NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (*MultiRangeDownloader, error)
|
||||
}
|
||||
|
||||
// settings contains transport-agnostic configuration for API calls made via
|
||||
@@ -122,7 +124,7 @@ type settings struct {
|
||||
gax []gax.CallOption
|
||||
|
||||
// idempotent indicates if the call is idempotent or not when considering
|
||||
// if the call should be retired or not.
|
||||
// if the call should be retried or not.
|
||||
idempotent bool
|
||||
|
||||
// clientOption is a set of option.ClientOption to be used during client
|
||||
@@ -132,6 +134,8 @@ type settings struct {
|
||||
|
||||
// userProject is the user project that should be billed for the request.
|
||||
userProject string
|
||||
|
||||
metricsContext *metricsContext
|
||||
}
|
||||
|
||||
func initSettings(opts ...storageOption) *settings {
|
||||
@@ -235,7 +239,8 @@ type openWriterParams struct {
|
||||
chunkSize int
|
||||
// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
|
||||
// Optional.
|
||||
chunkRetryDeadline time.Duration
|
||||
chunkRetryDeadline time.Duration
|
||||
chunkTransferTimeout time.Duration
|
||||
|
||||
// Object/request properties
|
||||
|
||||
@@ -251,12 +256,22 @@ type openWriterParams struct {
|
||||
// conds - see `Writer.o.conds`.
|
||||
// Optional.
|
||||
conds *Conditions
|
||||
// appendGen -- object generation to write to.
|
||||
// Optional; required for taking over appendable objects only
|
||||
appendGen int64
|
||||
// encryptionKey - see `Writer.o.encryptionKey`
|
||||
// Optional.
|
||||
encryptionKey []byte
|
||||
// sendCRC32C - see `Writer.SendCRC32C`.
|
||||
// Optional.
|
||||
sendCRC32C bool
|
||||
// append - Write with appendable object semantics.
|
||||
// Optional.
|
||||
append bool
|
||||
// finalizeOnClose - Finalize the object when the storage.Writer is closed
|
||||
// successfully.
|
||||
// Optional.
|
||||
finalizeOnClose bool
|
||||
|
||||
// Writer callbacks
|
||||
|
||||
@@ -272,6 +287,19 @@ type openWriterParams struct {
|
||||
// setObj callback for reporting the resulting object - see `Writer.obj`.
|
||||
// Required.
|
||||
setObj func(*ObjectAttrs)
|
||||
// setSize callback for updated the persisted size in Writer.obj.
|
||||
setSize func(int64)
|
||||
// setTakeoverOffset callback for returning offset to start writing from to Writer.
|
||||
setTakeoverOffset func(int64)
|
||||
}
|
||||
|
||||
type newMultiRangeDownloaderParams struct {
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
object string
|
||||
handle *ReadHandle
|
||||
}
|
||||
|
||||
type newRangeReaderParams struct {
|
||||
@@ -283,6 +311,7 @@ type newRangeReaderParams struct {
|
||||
object string
|
||||
offset int64
|
||||
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
|
||||
handle *ReadHandle
|
||||
}
|
||||
|
||||
type getObjectParams struct {
|
||||
@@ -310,6 +339,13 @@ type restoreObjectParams struct {
|
||||
copySourceACL bool
|
||||
}
|
||||
|
||||
type moveObjectParams struct {
|
||||
bucket, srcObject, dstObject string
|
||||
srcConds *Conditions
|
||||
dstConds *Conditions
|
||||
encryptionKey []byte
|
||||
}
|
||||
|
||||
type composeObjectRequest struct {
|
||||
dstBucket string
|
||||
dstObject destinationObject
|
||||
|
||||
88
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
88
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
@@ -274,15 +274,43 @@ To generate the signature, you must have:
|
||||
|
||||
# Errors
|
||||
|
||||
Errors returned by this client are often of the type [googleapi.Error].
|
||||
These errors can be introspected for more information by using [errors.As]
|
||||
with the richer [googleapi.Error] type. For example:
|
||||
Errors returned by this client are often of the type [github.com/googleapis/gax-go/v2/apierror].
|
||||
The [apierror.APIError] type can wrap a [google.golang.org/grpc/status.Status]
|
||||
if gRPC was used, or a [google.golang.org/api/googleapi.Error] if HTTP/REST was used.
|
||||
You might also encounter [googleapi.Error] directly from HTTP operations.
|
||||
These types of errors can be inspected for more information by using [errors.As]
|
||||
to access the specific underlying error types and retrieve detailed information,
|
||||
including HTTP or gRPC status codes. For example:
|
||||
|
||||
// APIErrors often wrap a googleapi.Error (for JSON and XML calls) or a status.Status (for gRPC calls)
|
||||
var ae *apierror.APIError
|
||||
if ok := errors.As(err, &ae); ok {
|
||||
// ae.HTTPCode() is the HTTP status code.
|
||||
// ae.GRPCStatus().Code() is the gRPC status code
|
||||
log.Printf("APIError: HTTPCode: %d, GRPCStatusCode: %s", ae.HTTPCode(), ae.GRPCStatus().Code())
|
||||
|
||||
if ae.GRPCStatus().Code() == codes.Unavailable {
|
||||
// ... handle gRPC unavailable ...
|
||||
}
|
||||
}
|
||||
|
||||
// This allows a user to get more information directly from googleapi.Errors (for JSON/XML calls)
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok {
|
||||
if e.Code == 409 { ... }
|
||||
// e.Code is the HTTP status code.
|
||||
// e.Message is the error message.
|
||||
// e.Body is the raw response body.
|
||||
// e.Header contains the HTTP response headers.
|
||||
log.Printf("HTTP Code: %d, Message: %s", e.Code, e.Message)
|
||||
|
||||
if e.Code == 409 {
|
||||
// ... handle conflict ...
|
||||
}
|
||||
}
|
||||
|
||||
This library may also return other errors that are not wrapped as [apierror.APIError]. For
|
||||
example, errors with authentication may return [cloud.google.com/go/auth.Error].
|
||||
|
||||
# Retrying failed requests
|
||||
|
||||
Methods in this package may retry calls that fail with transient errors.
|
||||
@@ -331,14 +359,15 @@ to add a [custom audit logging] header:
|
||||
// Use client as usual with the context and the additional headers will be sent.
|
||||
client.Bucket("my-bucket").Attrs(ctx)
|
||||
|
||||
# Experimental gRPC API
|
||||
# gRPC API
|
||||
|
||||
This package includes support for the Cloud Storage gRPC API, which is currently
|
||||
in preview. This implementation uses gRPC rather than the current JSON & XML
|
||||
APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC
|
||||
team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to
|
||||
allowlist to access this API. The Go Storage gRPC library is not yet generally
|
||||
available, so it may be subject to breaking changes.
|
||||
This package includes support for the [Cloud Storage gRPC API]. This
|
||||
implementation uses gRPC rather than the default JSON & XML APIs
|
||||
to make requests to Cloud Storage. All methods on the [Client] support
|
||||
the gRPC API, with the exception of [GetServiceAccount], [Notification],
|
||||
and [HMACKey] methods.
|
||||
|
||||
The Cloud Storage gRPC API is generally available.
|
||||
|
||||
To create a client which will use gRPC, use the alternate constructor:
|
||||
|
||||
@@ -349,15 +378,31 @@ To create a client which will use gRPC, use the alternate constructor:
|
||||
}
|
||||
// Use client as usual.
|
||||
|
||||
If the application is running within GCP, users may get better performance by
|
||||
enabling Direct Google Access (enabling requests to skip some proxy steps). To enable,
|
||||
set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
|
||||
the following side-effect imports to your application:
|
||||
One major advantage of the gRPC API is that it can use [Direct Connectivity],
|
||||
enabling requests to skip some proxy steps and reducing response latency.
|
||||
Requirements to use Direct Connectivity include:
|
||||
|
||||
import (
|
||||
_ "google.golang.org/grpc/balancer/rls"
|
||||
_ "google.golang.org/grpc/xds/googledirectpath"
|
||||
)
|
||||
- Your application must be running inside Google Cloud.
|
||||
- Your Cloud Storage [bucket location] must overlap with your VM or compute
|
||||
environment zone. For example, if your VM is in us-east1a, your bucket
|
||||
must be located in either us-east1 (single region), nam4 (dual region),
|
||||
or us (multi-region).
|
||||
- Your client must use service account authentication.
|
||||
|
||||
Additional requirements for Direct Connectivity are documented in the
|
||||
[Cloud Storage gRPC docs].
|
||||
|
||||
Dependencies for the gRPC API may slightly increase the size of binaries for
|
||||
applications depending on this package. If you are not using gRPC, you can use
|
||||
the build tag `disable_grpc_modules` to opt out of these dependencies and
|
||||
reduce the binary size.
|
||||
|
||||
The gRPC client is instrumented with Open Telemetry metrics which export to
|
||||
Cloud Monitoring by default. More information is available in the
|
||||
[gRPC client-side metrics] documentation, including information about
|
||||
roles which must be enabled in order to do the export successfully. To
|
||||
disable this export, you can use the [WithDisabledClientMetrics] client
|
||||
option.
|
||||
|
||||
# Storage Control API
|
||||
|
||||
@@ -375,5 +420,10 @@ client, which is available as a subpackage in this module. See package docs at
|
||||
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
|
||||
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
|
||||
[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2
|
||||
[Cloud Storage gRPC API]: https://cloud.google.com/storage/docs/enable-grpc-api
|
||||
[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity
|
||||
[bucket location]: https://cloud.google.com/storage/docs/locations
|
||||
[Cloud Storage gRPC docs]: https://cloud.google.com/storage/docs/enable-grpc-api#limitations
|
||||
[gRPC client-side metrics]: https://cloud.google.com/storage/docs/client-side-metrics
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
||||
|
||||
237
vendor/cloud.google.com/go/storage/dynamic_delay.go
generated
vendored
Normal file
237
vendor/cloud.google.com/go/storage/dynamic_delay.go
generated
vendored
Normal file
@@ -0,0 +1,237 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// dynamicDelay dynamically calculates the delay at a fixed percentile, based on
|
||||
// delay samples.
|
||||
//
|
||||
// dynamicDelay is goroutine-safe.
|
||||
type dynamicDelay struct {
|
||||
increaseFactor float64
|
||||
decreaseFactor float64
|
||||
minDelay time.Duration
|
||||
maxDelay time.Duration
|
||||
value time.Duration
|
||||
|
||||
// Guards the value
|
||||
mu *sync.RWMutex
|
||||
}
|
||||
|
||||
// validateDynamicDelayParams ensures,
|
||||
// targetPercentile is a valid fraction (between 0 and 1).
|
||||
// increaseRate is a positive number.
|
||||
// minDelay is less than maxDelay.
|
||||
func validateDynamicDelayParams(targetPercentile, increaseRate float64, minDelay, maxDelay time.Duration) error {
|
||||
if targetPercentile < 0 || targetPercentile > 1 {
|
||||
return fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile)
|
||||
}
|
||||
if increaseRate <= 0 {
|
||||
return fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate)
|
||||
}
|
||||
if minDelay >= maxDelay {
|
||||
return fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewDynamicDelay returns a dynamicDelay.
|
||||
//
|
||||
// targetPercentile is the desired percentile to be computed. For example, a
|
||||
// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be
|
||||
// in the range [0, 1].
|
||||
//
|
||||
// increaseRate (must be > 0) determines how many increase calls it takes for
|
||||
// Value to double.
|
||||
//
|
||||
// initialDelay is the start value of the delay.
|
||||
//
|
||||
// decrease can never lower the delay past minDelay, increase can never raise
|
||||
// the delay past maxDelay.
|
||||
func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) *dynamicDelay {
|
||||
if initialDelay < minDelay {
|
||||
initialDelay = minDelay
|
||||
}
|
||||
if initialDelay > maxDelay {
|
||||
initialDelay = maxDelay
|
||||
}
|
||||
|
||||
// Compute increaseFactor and decreaseFactor such that:
|
||||
// (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1
|
||||
increaseFactor := math.Exp(math.Log(2) / increaseRate)
|
||||
if increaseFactor < 1.001 {
|
||||
increaseFactor = 1.001
|
||||
}
|
||||
decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile)
|
||||
if decreaseFactor > 0.9999 {
|
||||
decreaseFactor = 0.9999
|
||||
}
|
||||
|
||||
return &dynamicDelay{
|
||||
increaseFactor: increaseFactor,
|
||||
decreaseFactor: decreaseFactor,
|
||||
minDelay: minDelay,
|
||||
maxDelay: maxDelay,
|
||||
value: initialDelay,
|
||||
mu: &sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dynamicDelay) unsafeIncrease() {
|
||||
v := time.Duration(float64(d.value) * d.increaseFactor)
|
||||
if v > d.maxDelay {
|
||||
d.value = d.maxDelay
|
||||
} else {
|
||||
d.value = v
|
||||
}
|
||||
}
|
||||
|
||||
// increase notes that the operation took longer than the delay returned by Value.
|
||||
func (d *dynamicDelay) increase() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
d.unsafeIncrease()
|
||||
}
|
||||
|
||||
func (d *dynamicDelay) unsafeDecrease() {
|
||||
v := time.Duration(float64(d.value) * d.decreaseFactor)
|
||||
if v < d.minDelay {
|
||||
d.value = d.minDelay
|
||||
} else {
|
||||
d.value = v
|
||||
}
|
||||
}
|
||||
|
||||
// decrease notes that the operation completed before the delay returned by getValue.
|
||||
func (d *dynamicDelay) decrease() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
d.unsafeDecrease()
|
||||
}
|
||||
|
||||
// update updates the delay value depending on the specified latency.
|
||||
func (d *dynamicDelay) update(latency time.Duration) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if latency > d.value {
|
||||
d.unsafeIncrease()
|
||||
} else {
|
||||
d.unsafeDecrease()
|
||||
}
|
||||
}
|
||||
|
||||
// getValue returns the desired delay to wait before retry the operation.
|
||||
func (d *dynamicDelay) getValue() time.Duration {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
return d.value
|
||||
}
|
||||
|
||||
// printDelay prints the state of delay, helpful in debugging.
|
||||
func (d *dynamicDelay) printDelay() {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
fmt.Println("IncreaseFactor: ", d.increaseFactor)
|
||||
fmt.Println("DecreaseFactor: ", d.decreaseFactor)
|
||||
fmt.Println("MinDelay: ", d.minDelay)
|
||||
fmt.Println("MaxDelay: ", d.maxDelay)
|
||||
fmt.Println("Value: ", d.value)
|
||||
}
|
||||
|
||||
// bucketDelayManager wraps dynamicDelay to provide bucket-specific delays.
|
||||
type bucketDelayManager struct {
|
||||
targetPercentile float64
|
||||
increaseRate float64
|
||||
initialDelay time.Duration
|
||||
minDelay time.Duration
|
||||
maxDelay time.Duration
|
||||
|
||||
// delays maps bucket names to their dynamic delay instance.
|
||||
delays map[string]*dynamicDelay
|
||||
|
||||
// mu guards delays.
|
||||
mu *sync.RWMutex
|
||||
}
|
||||
|
||||
// newBucketDelayManager returns a new bucketDelayManager instance.
|
||||
func newBucketDelayManager(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*bucketDelayManager, error) {
|
||||
err := validateDynamicDelayParams(targetPercentile, increaseRate, minDelay, maxDelay)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &bucketDelayManager{
|
||||
targetPercentile: targetPercentile,
|
||||
increaseRate: increaseRate,
|
||||
initialDelay: initialDelay,
|
||||
minDelay: minDelay,
|
||||
maxDelay: maxDelay,
|
||||
delays: make(map[string]*dynamicDelay),
|
||||
mu: &sync.RWMutex{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getDelay retrieves the dynamicDelay instance for the given bucket name. If no delay
|
||||
// exists for the bucket, a new one is created with the configured parameters.
|
||||
func (b *bucketDelayManager) getDelay(bucketName string) *dynamicDelay {
|
||||
b.mu.RLock()
|
||||
delay, ok := b.delays[bucketName]
|
||||
b.mu.RUnlock()
|
||||
|
||||
if !ok {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
// Check again, as someone might create b/w the execution of mu.RUnlock() and mu.Lock().
|
||||
delay, ok = b.delays[bucketName]
|
||||
if !ok {
|
||||
// Create a new dynamicDelay for the bucket if it doesn't exist
|
||||
delay = newDynamicDelay(b.targetPercentile, b.increaseRate, b.initialDelay, b.minDelay, b.maxDelay)
|
||||
b.delays[bucketName] = delay
|
||||
}
|
||||
}
|
||||
return delay
|
||||
}
|
||||
|
||||
// increase notes that the operation took longer than the delay for the given bucket.
|
||||
func (b *bucketDelayManager) increase(bucketName string) {
|
||||
b.getDelay(bucketName).increase()
|
||||
}
|
||||
|
||||
// decrease notes that the operation completed before the delay for the given bucket.
|
||||
func (b *bucketDelayManager) decrease(bucketName string) {
|
||||
b.getDelay(bucketName).decrease()
|
||||
}
|
||||
|
||||
// update updates the delay value for the bucket depending on the specified latency.
|
||||
func (b *bucketDelayManager) update(bucketName string, latency time.Duration) {
|
||||
b.getDelay(bucketName).update(latency)
|
||||
}
|
||||
|
||||
// getValue returns the desired delay to wait before retrying the operation for the given bucket.
|
||||
func (b *bucketDelayManager) getValue(bucketName string) time.Duration {
|
||||
return b.getDelay(bucketName).getValue()
|
||||
}
|
||||
3
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
3
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
@@ -89,4 +89,5 @@ then
|
||||
fi
|
||||
|
||||
# Run tests
|
||||
go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
|
||||
|
||||
go test -v -timeout 17m ./ ./dataflux -run="^Test(RetryConformance|.*Emulated)$" -short -race 2>&1 | tee -a sponge_log.log
|
||||
|
||||
101
vendor/cloud.google.com/go/storage/experimental/experimental.go
generated
vendored
Normal file
101
vendor/cloud.google.com/go/storage/experimental/experimental.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package experimental is a collection of experimental features that might
|
||||
// have some rough edges to them. Housing experimental features in this package
|
||||
// results in a user accessing these APIs as `experimental.Foo`, thereby making
|
||||
// it explicit that the feature is experimental and using them in production
|
||||
// code is at their own risk.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package experimental
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/storage/internal"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// WithMetricInterval provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient].
|
||||
// It sets how often to emit metrics [metric.WithInterval] when using
|
||||
// [metric.NewPeriodicReader]
|
||||
// When using Cloud Monitoring interval must be at minimum 1 [time.Minute].
|
||||
func WithMetricInterval(metricInterval time.Duration) option.ClientOption {
|
||||
return internal.WithMetricInterval.(func(time.Duration) option.ClientOption)(metricInterval)
|
||||
}
|
||||
|
||||
// WithMetricExporter provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient].
|
||||
// Set an alternate client-side metric Exporter to emit metrics through.
|
||||
// Must implement [metric.Exporter]
|
||||
func WithMetricExporter(ex *metric.Exporter) option.ClientOption {
|
||||
return internal.WithMetricExporter.(func(*metric.Exporter) option.ClientOption)(ex)
|
||||
}
|
||||
|
||||
// WithReadStallTimeout provides a [option.ClientOption] that may be passed to [storage.NewClient].
|
||||
// It enables the client to retry stalled requests when starting a download from
|
||||
// Cloud Storage. If the timeout elapses with no response from the server, the request
|
||||
// is automatically retried.
|
||||
// The timeout is initially set to ReadStallTimeoutConfig.Min. The client tracks
|
||||
// latency across all read requests from the client for each bucket accessed, and can
|
||||
// adjust the timeout higher to the target percentile when latency for request to that
|
||||
// bucket is high.
|
||||
// Currently, this is supported only for downloads ([storage.NewReader] and
|
||||
// [storage.NewRangeReader] calls) and only for the XML API. Other read APIs (gRPC & JSON)
|
||||
// will be supported soon.
|
||||
func WithReadStallTimeout(rstc *ReadStallTimeoutConfig) option.ClientOption {
|
||||
return internal.WithReadStallTimeout.(func(config *ReadStallTimeoutConfig) option.ClientOption)(rstc)
|
||||
}
|
||||
|
||||
// ReadStallTimeoutConfig defines the timeout which is adjusted dynamically based on
|
||||
// past observed latencies.
|
||||
type ReadStallTimeoutConfig struct {
|
||||
// Min is the minimum duration of the timeout. The default value is 500ms. Requests
|
||||
// taking shorter than this value to return response headers will never time out.
|
||||
// In general, you should choose a Min value that is greater than the typical value
|
||||
// for the target percentile.
|
||||
Min time.Duration
|
||||
|
||||
// TargetPercentile is the percentile to target for the dynamic timeout. The default
|
||||
// value is 0.99. At the default percentile, at most 1% of requests will be timed out
|
||||
// and retried.
|
||||
TargetPercentile float64
|
||||
}
|
||||
|
||||
// WithGRPCBidiReads provides an [option.ClientOption] that may be passed to
|
||||
// [cloud.google.com/go/storage.NewGRPCClient].
|
||||
// It enables the client to use bi-directional gRPC APIs for downloads rather than the
|
||||
// server streaming API. In particular, it allows users to use the
|
||||
// [cloud.google.com/go/storage.MultiRangeDownloader]
|
||||
// surface, which requires bi-directional streaming.
|
||||
//
|
||||
// The bi-directional API is in private preview; please contact your account manager if
|
||||
// interested.
|
||||
func WithGRPCBidiReads() option.ClientOption {
|
||||
return internal.WithGRPCBidiReads.(func() option.ClientOption)()
|
||||
}
|
||||
|
||||
// WithZonalBucketAPIs provides an [option.ClientOption] that may be passed to
|
||||
// [cloud.google.com/go/storage.NewGRPCClient].
|
||||
// It enables the client to use bi-directional gRPC APIs for downloads rather than the
|
||||
// server streaming API (same as [WithGRPCBidiReads]) as well as appendable
|
||||
// object semantics for uploads. By setting this option, both upload and download
|
||||
// paths will use zonal bucket compatible APIs by default.
|
||||
//
|
||||
// Zonal buckets and rapid storage is in private preview; please contact your
|
||||
// account manager if interested.
|
||||
func WithZonalBucketAPIs() option.ClientOption {
|
||||
return internal.WithZonalBucketAPIs.(func() option.ClientOption)()
|
||||
}
|
||||
2289
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
2289
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
File diff suppressed because it is too large
Load Diff
22
vendor/cloud.google.com/go/storage/grpc_dp.go
generated
vendored
Normal file
22
vendor/cloud.google.com/go/storage/grpc_dp.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
//go:build !disable_grpc_modules
|
||||
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
_ "google.golang.org/grpc/balancer/rls"
|
||||
_ "google.golang.org/grpc/xds/googledirectpath"
|
||||
)
|
||||
283
vendor/cloud.google.com/go/storage/grpc_metrics.go
generated
vendored
Normal file
283
vendor/cloud.google.com/go/storage/grpc_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,283 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric"
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/contrib/detectors/gcp"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/experimental/stats"
|
||||
"google.golang.org/grpc/stats/opentelemetry"
|
||||
)
|
||||
|
||||
const (
|
||||
monitoredResourceName = "storage.googleapis.com/Client"
|
||||
metricPrefix = "storage.googleapis.com/client/"
|
||||
)
|
||||
|
||||
// Added to help with tests
|
||||
type storageMonitoredResource struct {
|
||||
project string
|
||||
api string
|
||||
location string
|
||||
instance string
|
||||
cloudPlatform string
|
||||
host string
|
||||
resource *resource.Resource
|
||||
}
|
||||
|
||||
func (smr *storageMonitoredResource) exporter() (metric.Exporter, error) {
|
||||
exporter, err := mexporter.New(
|
||||
mexporter.WithProjectID(smr.project),
|
||||
mexporter.WithMetricDescriptorTypeFormatter(metricFormatter),
|
||||
mexporter.WithCreateServiceTimeSeries(),
|
||||
mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"}),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: creating metrics exporter: %w", err)
|
||||
}
|
||||
return exporter, nil
|
||||
}
|
||||
|
||||
func newStorageMonitoredResource(ctx context.Context, project, api string, opts ...resource.Option) (*storageMonitoredResource, error) {
|
||||
detectedAttrs, err := resource.New(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
smr := &storageMonitoredResource{
|
||||
instance: uuid.New().String(),
|
||||
api: api,
|
||||
project: project,
|
||||
}
|
||||
s := detectedAttrs.Set()
|
||||
// Attempt to use resource detector project id if project id wasn't
|
||||
// identified using ADC as a last resort. Otherwise metrics cannot be started.
|
||||
if p, present := s.Value("cloud.account.id"); present && smr.project == "" {
|
||||
smr.project = p.AsString()
|
||||
} else if !present && smr.project == "" {
|
||||
return nil, errors.New("google cloud project is required to start client-side metrics")
|
||||
}
|
||||
if v, ok := s.Value("cloud.region"); ok {
|
||||
smr.location = v.AsString()
|
||||
} else {
|
||||
smr.location = "global"
|
||||
}
|
||||
if v, ok := s.Value("cloud.platform"); ok {
|
||||
smr.cloudPlatform = v.AsString()
|
||||
} else {
|
||||
smr.cloudPlatform = "unknown"
|
||||
}
|
||||
if v, ok := s.Value("host.id"); ok {
|
||||
smr.host = v.AsString()
|
||||
} else if v, ok := s.Value("faas.id"); ok {
|
||||
smr.host = v.AsString()
|
||||
} else {
|
||||
smr.host = "unknown"
|
||||
}
|
||||
smr.resource, err = resource.New(ctx, resource.WithAttributes([]attribute.KeyValue{
|
||||
{Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)},
|
||||
{Key: "project_id", Value: attribute.StringValue(smr.project)},
|
||||
{Key: "api", Value: attribute.StringValue(smr.api)},
|
||||
{Key: "instance_id", Value: attribute.StringValue(smr.instance)},
|
||||
{Key: "location", Value: attribute.StringValue(smr.location)},
|
||||
{Key: "cloud_platform", Value: attribute.StringValue(smr.cloudPlatform)},
|
||||
{Key: "host_id", Value: attribute.StringValue(smr.host)},
|
||||
}...))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return smr, nil
|
||||
}
|
||||
|
||||
type metricsContext struct {
|
||||
// client options passed to gRPC channels
|
||||
clientOpts []option.ClientOption
|
||||
// instance of metric reader used by gRPC client-side metrics
|
||||
provider *metric.MeterProvider
|
||||
// clean func to call when closing gRPC client
|
||||
close func()
|
||||
}
|
||||
|
||||
type metricsConfig struct {
|
||||
project string
|
||||
interval time.Duration
|
||||
customExporter *metric.Exporter
|
||||
manualReader *metric.ManualReader // used by tests
|
||||
disableExporter bool // used by tests disables exports
|
||||
resourceOpts []resource.Option // used by tests
|
||||
}
|
||||
|
||||
func newGRPCMetricContext(ctx context.Context, cfg metricsConfig) (*metricsContext, error) {
|
||||
var exporter metric.Exporter
|
||||
meterOpts := []metric.Option{}
|
||||
if cfg.customExporter == nil {
|
||||
var ropts []resource.Option
|
||||
if cfg.resourceOpts != nil {
|
||||
ropts = cfg.resourceOpts
|
||||
} else {
|
||||
ropts = []resource.Option{resource.WithDetectors(gcp.NewDetector())}
|
||||
}
|
||||
smr, err := newStorageMonitoredResource(ctx, cfg.project, "grpc", ropts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exporter, err = smr.exporter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
meterOpts = append(meterOpts, metric.WithResource(smr.resource))
|
||||
} else {
|
||||
exporter = *cfg.customExporter
|
||||
}
|
||||
interval := time.Minute
|
||||
if cfg.interval > 0 {
|
||||
interval = cfg.interval
|
||||
}
|
||||
meterOpts = append(meterOpts,
|
||||
// Metric views update histogram boundaries to be relevant to GCS
|
||||
// otherwise default OTel histogram boundaries are used.
|
||||
metric.WithView(
|
||||
createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()),
|
||||
createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()),
|
||||
createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries())),
|
||||
)
|
||||
if cfg.manualReader != nil {
|
||||
meterOpts = append(meterOpts, metric.WithReader(cfg.manualReader))
|
||||
}
|
||||
if !cfg.disableExporter {
|
||||
meterOpts = append(meterOpts, metric.WithReader(
|
||||
metric.NewPeriodicReader(&exporterLogSuppressor{Exporter: exporter}, metric.WithInterval(interval))))
|
||||
}
|
||||
provider := metric.NewMeterProvider(meterOpts...)
|
||||
mo := opentelemetry.MetricsOptions{
|
||||
MeterProvider: provider,
|
||||
Metrics: stats.NewMetrics(
|
||||
"grpc.client.attempt.started",
|
||||
"grpc.client.attempt.duration",
|
||||
"grpc.client.attempt.sent_total_compressed_message_size",
|
||||
"grpc.client.attempt.rcvd_total_compressed_message_size",
|
||||
"grpc.client.call.duration",
|
||||
"grpc.lb.wrr.rr_fallback",
|
||||
"grpc.lb.wrr.endpoint_weight_not_yet_usable",
|
||||
"grpc.lb.wrr.endpoint_weight_stale",
|
||||
"grpc.lb.wrr.endpoint_weights",
|
||||
"grpc.lb.rls.cache_entries",
|
||||
"grpc.lb.rls.cache_size",
|
||||
"grpc.lb.rls.default_target_picks",
|
||||
"grpc.lb.rls.target_picks",
|
||||
"grpc.lb.rls.failed_picks",
|
||||
),
|
||||
OptionalLabels: []string{"grpc.lb.locality"},
|
||||
}
|
||||
opts := []option.ClientOption{
|
||||
option.WithGRPCDialOption(
|
||||
opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})),
|
||||
option.WithGRPCDialOption(
|
||||
grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})),
|
||||
}
|
||||
return &metricsContext{
|
||||
clientOpts: opts,
|
||||
provider: provider,
|
||||
close: func() {
|
||||
provider.Shutdown(ctx)
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Silences permission errors after initial error is emitted to prevent
|
||||
// chatty logs.
|
||||
type exporterLogSuppressor struct {
|
||||
metric.Exporter
|
||||
emittedFailure bool
|
||||
}
|
||||
|
||||
// Implements OTel SDK metric.Exporter interface to prevent noisy logs from
|
||||
// lack of credentials after initial failure.
|
||||
// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter
|
||||
func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
|
||||
if err := e.Exporter.Export(ctx, rm); err != nil && !e.emittedFailure {
|
||||
if strings.Contains(err.Error(), "PermissionDenied") {
|
||||
e.emittedFailure = true
|
||||
return fmt.Errorf("gRPC metrics failed due permission issue: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func latencyHistogramBoundaries() []float64 {
|
||||
boundaries := []float64{}
|
||||
boundary := 0.0
|
||||
increment := 0.002
|
||||
// 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range
|
||||
for i := 0; i < 50; i++ {
|
||||
boundaries = append(boundaries, boundary)
|
||||
// increment by 2ms
|
||||
boundary += increment
|
||||
}
|
||||
// For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes
|
||||
for i := 0; i < 150 && boundary < 300; i++ {
|
||||
boundaries = append(boundaries, boundary)
|
||||
if i != 0 && i%10 == 0 {
|
||||
increment *= 2
|
||||
}
|
||||
boundary += increment
|
||||
}
|
||||
return boundaries
|
||||
}
|
||||
|
||||
func sizeHistogramBoundaries() []float64 {
|
||||
kb := 1024.0
|
||||
mb := 1024.0 * kb
|
||||
gb := 1024.0 * mb
|
||||
boundaries := []float64{}
|
||||
boundary := 0.0
|
||||
increment := 128 * kb
|
||||
// 128 KiB increments up to 4MiB, then exponential growth
|
||||
for len(boundaries) < 200 && boundary <= 16*gb {
|
||||
boundaries = append(boundaries, boundary)
|
||||
boundary += increment
|
||||
if boundary >= 4*mb {
|
||||
increment *= 2
|
||||
}
|
||||
}
|
||||
return boundaries
|
||||
}
|
||||
|
||||
func createHistogramView(name string, boundaries []float64) metric.View {
|
||||
return metric.NewView(metric.Instrument{
|
||||
Name: name,
|
||||
Kind: metric.InstrumentKindHistogram,
|
||||
}, metric.Stream{
|
||||
Name: name,
|
||||
Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries},
|
||||
})
|
||||
}
|
||||
|
||||
func metricFormatter(m metricdata.Metrics) string {
|
||||
return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/")
|
||||
}
|
||||
862
vendor/cloud.google.com/go/storage/grpc_reader.go
generated
vendored
Normal file
862
vendor/cloud.google.com/go/storage/grpc_reader.go
generated
vendored
Normal file
@@ -0,0 +1,862 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/mem"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Below is the legacy implementation of gRPC downloads using the ReadObject API.
|
||||
// It's used by gRPC if the experimental option WithGRPCBidiReads was not passed.
|
||||
// TODO: once BidiReadObject is in GA, remove this implementation.
|
||||
|
||||
// Custom codec to be used for unmarshaling ReadObjectResponse messages.
|
||||
// This is used to avoid a copy of object data in proto.Unmarshal.
|
||||
type bytesCodecReadObject struct {
|
||||
}
|
||||
|
||||
var _ encoding.CodecV2 = bytesCodecReadObject{}
|
||||
|
||||
// Marshal is used to encode messages to send for bytesCodecReadObject. Since we are only
|
||||
// using this to send ReadObjectRequest messages we don't need to recycle buffers
|
||||
// here.
|
||||
func (bytesCodecReadObject) Marshal(v any) (mem.BufferSlice, error) {
|
||||
vv, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
|
||||
}
|
||||
var data mem.BufferSlice
|
||||
buf, err := proto.Marshal(vv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data = append(data, mem.SliceBuffer(buf))
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Unmarshal is used for data received for ReadObjectResponse. We want to preserve
|
||||
// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
|
||||
func (bytesCodecReadObject) Unmarshal(data mem.BufferSlice, v any) error {
|
||||
switch v := v.(type) {
|
||||
case *mem.BufferSlice:
|
||||
*v = data
|
||||
// Pick up a reference to the data so that it is not freed while decoding.
|
||||
data.Ref()
|
||||
return nil
|
||||
case proto.Message:
|
||||
buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
|
||||
return proto.Unmarshal(buf.ReadOnlyData(), v)
|
||||
default:
|
||||
return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (bytesCodecReadObject) Name() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// NewRangeReaderReadObject is the legacy (non-bidi) implementation of reads.
|
||||
func (c *grpcStorageClient) NewRangeReaderReadObject(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReaderReadObject")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
s.gax = append(s.gax, gax.WithGRPCOptions(
|
||||
grpc.ForceCodecV2(bytesCodecReadObject{}),
|
||||
))
|
||||
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
|
||||
b := bucketResourceName(globalProjectAlias, params.bucket)
|
||||
req := &storagepb.ReadObjectRequest{
|
||||
Bucket: b,
|
||||
Object: params.object,
|
||||
CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
|
||||
}
|
||||
// The default is a negative value, which means latest.
|
||||
if params.gen >= 0 {
|
||||
req.Generation = params.gen
|
||||
}
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming
|
||||
// we have already read seen bytes.
|
||||
reopen := func(seen int64) (*readStreamResponseReadObject, context.CancelFunc, error) {
|
||||
// If the context has already expired, return immediately without making
|
||||
// we call.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cc, cancel := context.WithCancel(ctx)
|
||||
|
||||
req.ReadOffset = params.offset + seen
|
||||
|
||||
// Only set a ReadLimit if length is greater than zero, because <= 0 means
|
||||
// to read it all.
|
||||
if params.length > 0 {
|
||||
req.ReadLimit = params.length - seen
|
||||
}
|
||||
|
||||
if err := applyCondsProto("gRPCReadObjectReader.reopen", params.gen, params.conds, req); err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var stream storagepb.Storage_ReadObjectClient
|
||||
var err error
|
||||
var decoder *readObjectResponseDecoder
|
||||
|
||||
err = run(cc, func(ctx context.Context) error {
|
||||
stream, err = c.raw.ReadObject(ctx, req, s.gax...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Receive the message into databuf as a wire-encoded message so we can
|
||||
// use a custom decoder to avoid an extra copy at the protobuf layer.
|
||||
databufs := mem.BufferSlice{}
|
||||
err := stream.RecvMsg(&databufs)
|
||||
if err != nil {
|
||||
// NotFound types of errors show up on the Recv call, rather than the
|
||||
// initialization of the stream via ReadObject above.
|
||||
return formatObjectErr(err)
|
||||
}
|
||||
// Use a custom decoder that uses protobuf unmarshalling for all
|
||||
// fields except the object data. Object data is handled separately
|
||||
// to avoid a copy.
|
||||
decoder = &readObjectResponseDecoder{
|
||||
databufs: databufs,
|
||||
}
|
||||
err = decoder.readFullObjectResponse()
|
||||
return err
|
||||
}, s.retry, s.idempotent)
|
||||
if err != nil {
|
||||
// Close the stream context we just created to ensure we don't leak
|
||||
// resources.
|
||||
cancel()
|
||||
// Free any buffers.
|
||||
if decoder != nil && decoder.databufs != nil {
|
||||
decoder.databufs.Free()
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &readStreamResponseReadObject{stream, decoder}, cancel, nil
|
||||
}
|
||||
|
||||
res, cancel, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The first message was Recv'd on stream open, use it to populate the
|
||||
// object metadata.
|
||||
msg := res.decoder.msg
|
||||
obj := msg.GetMetadata()
|
||||
// This is the size of the entire object, even if only a range was requested.
|
||||
size := obj.GetSize()
|
||||
|
||||
// Only support checksums when reading an entire object, not a range.
|
||||
var (
|
||||
wantCRC uint32
|
||||
checkCRC bool
|
||||
)
|
||||
if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil {
|
||||
if params.offset == 0 && params.length < 0 {
|
||||
checkCRC = true
|
||||
}
|
||||
wantCRC = checksums.GetCrc32C()
|
||||
}
|
||||
|
||||
metadata := obj.GetMetadata()
|
||||
r = &Reader{
|
||||
Attrs: ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: obj.GetContentType(),
|
||||
ContentEncoding: obj.GetContentEncoding(),
|
||||
CacheControl: obj.GetCacheControl(),
|
||||
LastModified: obj.GetUpdateTime().AsTime(),
|
||||
Metageneration: obj.GetMetageneration(),
|
||||
Generation: obj.GetGeneration(),
|
||||
CRC32C: wantCRC,
|
||||
},
|
||||
objectMetadata: &metadata,
|
||||
reader: &gRPCReadObjectReader{
|
||||
stream: res.stream,
|
||||
reopen: reopen,
|
||||
cancel: cancel,
|
||||
size: size,
|
||||
// Preserve the decoder to read out object data when Read/WriteTo is called.
|
||||
currMsg: res.decoder,
|
||||
settings: s,
|
||||
zeroRange: params.length == 0,
|
||||
wantCRC: wantCRC,
|
||||
checkCRC: checkCRC,
|
||||
},
|
||||
checkCRC: checkCRC,
|
||||
}
|
||||
|
||||
cr := msg.GetContentRange()
|
||||
if cr != nil {
|
||||
r.Attrs.StartOffset = cr.GetStart()
|
||||
r.remain = cr.GetEnd() - cr.GetStart()
|
||||
} else {
|
||||
r.remain = size
|
||||
}
|
||||
|
||||
// For a zero-length request, explicitly close the stream and set remaining
|
||||
// bytes to zero.
|
||||
if params.length == 0 {
|
||||
r.remain = 0
|
||||
r.reader.Close()
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
type readStreamResponseReadObject struct {
|
||||
stream storagepb.Storage_ReadObjectClient
|
||||
decoder *readObjectResponseDecoder
|
||||
}
|
||||
|
||||
type gRPCReadObjectReader struct {
|
||||
seen, size int64
|
||||
zeroRange bool
|
||||
stream storagepb.Storage_ReadObjectClient
|
||||
reopen func(seen int64) (*readStreamResponseReadObject, context.CancelFunc, error)
|
||||
leftovers []byte
|
||||
currMsg *readObjectResponseDecoder // decoder for the current message
|
||||
cancel context.CancelFunc
|
||||
settings *settings
|
||||
checkCRC bool // should we check the CRC?
|
||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
}
|
||||
|
||||
// Update the running CRC with the data in the slice, if CRC checking was enabled.
|
||||
func (r *gRPCReadObjectReader) updateCRC(b []byte) {
|
||||
if r.checkCRC {
|
||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b)
|
||||
}
|
||||
}
|
||||
|
||||
// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled.
|
||||
func (r *gRPCReadObjectReader) runCRCCheck() error {
|
||||
if r.checkCRC && r.gotCRC != r.wantCRC {
|
||||
return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads bytes into the user's buffer from an open gRPC stream,
// implementing io.Reader. It first drains the currently buffered message,
// then Recvs the next one (recv handles retry/reopen); once all expected
// bytes have been seen it validates the CRC (if enabled) and returns io.EOF.
func (r *gRPCReadObjectReader) Read(p []byte) (int, error) {
	// The entire object has been read by this reader, check the checksum if
	// necessary and return EOF.
	if r.size == r.seen || r.zeroRange {
		if err := r.runCRCCheck(); err != nil {
			return 0, err
		}
		return 0, io.EOF
	}

	// No stream to read from, either never initialized or Close was called.
	// Note: There is a potential concurrency issue if multiple routines are
	// using the same reader. One encounters an error and the stream is closed
	// and then reopened while the other routine attempts to read from it.
	if r.stream == nil {
		return 0, fmt.Errorf("storage: reader has been closed")
	}

	var n int

	// If there is data remaining in the current message, return what was
	// available to conform to the Reader
	// interface: https://pkg.go.dev/io#Reader.
	if !r.currMsg.done {
		n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
			r.updateCRC(b)
		})
		r.seen += int64(n)
		return n, nil
	}

	// Attempt to Recv the next message on the stream.
	// This will update r.currMsg with the decoder for the new message.
	err := r.recv()
	if err != nil {
		return 0, err
	}

	// TODO: Determine if we need to capture incremental CRC32C for this
	// chunk. The Object CRC32C checksum is captured when directed to read
	// the entire Object. If directed to read a range, we may need to
	// calculate the range's checksum for verification if the checksum is
	// present in the response here.
	// TODO: Figure out if we need to support decompressive transcoding
	// https://cloud.google.com/storage/docs/transcoding.

	n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
		r.updateCRC(b)
	})
	r.seen += int64(n)
	return n, nil
}
|
||||
|
||||
// WriteTo writes all the data requested by the Reader into w, implementing
// io.WriterTo. It first drains any message already buffered in the decoder,
// then loops Recv'ing messages (recv handles retries and stream reopening)
// until the stream reports io.EOF, at which point the CRC is verified if
// checking is enabled. It returns the number of bytes written by this call.
func (r *gRPCReadObjectReader) WriteTo(w io.Writer) (int64, error) {
	// The entire object has been read by this reader, check the checksum if
	// necessary and return nil.
	if r.size == r.seen || r.zeroRange {
		if err := r.runCRCCheck(); err != nil {
			return 0, err
		}
		return 0, nil
	}

	// No stream to read from, either never initialized or Close was called.
	// Note: There is a potential concurrency issue if multiple routines are
	// using the same reader. One encounters an error and the stream is closed
	// and then reopened while the other routine attempts to read from it.
	if r.stream == nil {
		return 0, fmt.Errorf("storage: reader has been closed")
	}

	// Track bytes already seen before this call, so that only the bytes
	// written by this invocation are reported.
	var alreadySeen = r.seen

	// Write any already received message to the stream. There will be some leftovers from the
	// original NewRangeReaderReadObject call.
	if r.currMsg != nil && !r.currMsg.done {
		written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
			r.updateCRC(b)
		})
		r.seen += int64(written)
		r.currMsg = nil
		if err != nil {
			return r.seen - alreadySeen, err
		}
	}

	// Loop and receive additional messages until the entire data is written.
	for {
		// Attempt to receive the next message on the stream.
		// Will terminate with io.EOF once data has all come through.
		// recv() handles stream reopening and retry logic so no need for retries here.
		err := r.recv()
		if err != nil {
			if err == io.EOF {
				// We are done; check the checksum if necessary and return.
				err = r.runCRCCheck()
			}
			return r.seen - alreadySeen, err
		}

		// TODO: Determine if we need to capture incremental CRC32C for this
		// chunk. The Object CRC32C checksum is captured when directed to read
		// the entire Object. If directed to read a range, we may need to
		// calculate the range's checksum for verification if the checksum is
		// present in the response here.
		// TODO: Figure out if we need to support decompressive transcoding
		// https://cloud.google.com/storage/docs/transcoding.
		written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
			r.updateCRC(b)
		})
		r.seen += int64(written)
		if err != nil {
			return r.seen - alreadySeen, err
		}
	}
}
|
||||
|
||||
// Close cancels the read stream's context in order for it to be closed and
|
||||
// collected, and frees any currently in use buffers.
|
||||
func (r *gRPCReadObjectReader) Close() error {
|
||||
if r.cancel != nil {
|
||||
r.cancel()
|
||||
}
|
||||
r.stream = nil
|
||||
r.currMsg = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// recv attempts to Recv the next message on the stream and extract the object
// data that it contains. In the event that a retryable error is encountered,
// the stream will be closed, reopened, and RecvMsg again.
// This will attempt to Recv until one of the following is true:
//
// * Recv is successful
// * A non-retryable error is encountered
// * The Reader's context is canceled
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
func (r *gRPCReadObjectReader) recv() error {
	// Receive the raw message bytes without unmarshaling, so the object data
	// can later be aliased rather than copied.
	databufs := mem.BufferSlice{}
	err := r.stream.RecvMsg(&databufs)

	if err != nil && r.settings.retry.runShouldRetry(err) {
		// This will "close" the existing stream and immediately attempt to
		// reopen the stream, but will backoff if further attempts are necessary.
		// Reopening the stream Recvs the first message, so if retrying is
		// successful, r.currMsg will be updated to include the new data.
		return r.reopenStream()
	}

	if err != nil {
		return err
	}

	// Decode the metadata fields eagerly; the object data itself is only
	// tracked as offsets into databufs by the decoder.
	r.currMsg = &readObjectResponseDecoder{databufs: databufs}
	return r.currMsg.readFullObjectResponse()
}
|
||||
|
||||
// ReadObjectResponse field and subfield numbers.
//
// These mirror the protobuf field numbers of the ReadObjectResponse message
// (and its ChecksummedData submessage) and are used by the hand-rolled
// decoder below to identify fields without fully unmarshaling the message.
const (
	checksummedDataFieldReadObject        = protowire.Number(1)
	checksummedDataContentFieldReadObject = protowire.Number(1) // within ChecksummedData
	checksummedDataCRC32CFieldReadObject  = protowire.Number(2) // within ChecksummedData
	objectChecksumsFieldReadObject        = protowire.Number(2)
	contentRangeFieldReadObject           = protowire.Number(3)
	metadataFieldReadObject               = protowire.Number(4)
)
|
||||
|
||||
// readObjectResponseDecoder is a wrapper on the raw message, used to decode one message
// without copying object data. It also has methods to write out the resulting object
// data to the user application.
type readObjectResponseDecoder struct {
	databufs mem.BufferSlice // raw bytes of the message being processed
	// Decoding offsets
	off     uint64 // offset in the message relative to the data as a whole
	currBuf int    // index of the current buffer being processed
	currOff uint64 // offset in the current buffer
	// Processed data
	msg         *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated
	dataOffsets bufferSliceOffsetsReadObject  // offsets of the object data in the message.
	done        bool                          // true if the data has been completely read.
}
|
||||
|
||||
// bufferSliceOffsetsReadObject locates a byte range (the object data) within
// a mem.BufferSlice and tracks read progress through that range.
type bufferSliceOffsetsReadObject struct {
	startBuf, endBuf int    // indices of start and end buffers of object data in the msg
	startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
	currBuf          int    // index of current buffer being read out to the user application.
	currOff          uint64 // offset of read in current buffer.
}
|
||||
|
||||
// peek ahead 10 bytes from the current offset in the databufs. This will return a
|
||||
// slice of the current buffer if the bytes are all in one buffer, but will copy
|
||||
// the bytes into a new buffer if the distance is split across buffers. Use this
|
||||
// to allow protowire methods to be used to parse tags & fixed values.
|
||||
// The max length of a varint tag is 10 bytes, see
|
||||
// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
|
||||
// are shorter.
|
||||
func (d *readObjectResponseDecoder) peek() []byte {
|
||||
b := d.databufs[d.currBuf].ReadOnlyData()
|
||||
// Check if the tag will fit in the current buffer. If not, copy the next 10
|
||||
// bytes into a new buffer to ensure that we can read the tag correctly
|
||||
// without it being divided between buffers.
|
||||
tagBuf := b[d.currOff:]
|
||||
remainingInBuf := len(tagBuf)
|
||||
// If we have less than 10 bytes remaining and are not in the final buffer,
|
||||
// copy up to 10 bytes ahead from the next buffer.
|
||||
if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
|
||||
tagBuf = d.copyNextBytes(10)
|
||||
}
|
||||
return tagBuf
|
||||
}
|
||||
|
||||
// copyNextBytes copies up to the next n bytes into a new buffer, or fewer if
// fewer bytes remain in the buffers overall. Does not advance offsets.
func (d *readObjectResponseDecoder) copyNextBytes(n int) []byte {
	remaining := n
	// Clamp to the number of bytes actually left in the message.
	if r := d.databufs.Len() - int(d.off); r < remaining {
		remaining = r
	}
	// Work on local copies so the decoder's position is left untouched.
	currBuf := d.currBuf
	currOff := d.currOff
	var buf []byte
	for remaining > 0 {
		b := d.databufs[currBuf].ReadOnlyData()
		remainingInCurr := len(b[currOff:])
		if remainingInCurr < remaining {
			// Take the rest of this buffer and continue into the next one.
			buf = append(buf, b[currOff:]...)
			remaining -= remainingInCurr
			currBuf++
			currOff = 0
		} else {
			// The remainder fits entirely inside the current buffer.
			buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
			remaining = 0
		}
	}
	return buf
}
|
||||
|
||||
// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we
|
||||
// go past the end of the data.
|
||||
func (d *readObjectResponseDecoder) advanceOffset(n uint64) error {
|
||||
remaining := n
|
||||
for remaining > 0 {
|
||||
remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
|
||||
if remainingInCurr <= remaining {
|
||||
remaining -= remainingInCurr
|
||||
d.currBuf++
|
||||
d.currOff = 0
|
||||
} else {
|
||||
d.currOff += remaining
|
||||
remaining = 0
|
||||
}
|
||||
}
|
||||
// If we have advanced past the end of the buffers, something went wrong.
|
||||
if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
|
||||
return errors.New("decoding: truncated message, cannot advance offset")
|
||||
}
|
||||
d.off += n
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// readAndUpdateCRC copies object data from the message into p and returns the
// number of bytes copied. The data offsets are incremented in the message.
// The updateCRC function is called on the copied bytes. Once the last byte of
// the message has been consumed, d.done is set and the underlying buffers are
// freed.
func (d *readObjectResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
	// For a completely empty message, just return 0
	if len(d.databufs) == 0 {
		return 0
	}
	databuf := d.databufs[d.dataOffsets.currBuf]
	startOff := d.dataOffsets.currOff
	// Slice out the remaining object data in the current buffer; in the final
	// buffer the data may end before the buffer does.
	var b []byte
	if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
		b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
	} else {
		b = databuf.ReadOnlyData()[startOff:]
	}
	n := copy(p, b)
	updateCRC(b[:n])
	d.dataOffsets.currOff += uint64(n)

	// We've read all the data from this message. Free the underlying buffers.
	if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
		d.done = true
		d.databufs.Free()
	}
	// We are at the end of the current buffer
	if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
		d.dataOffsets.currOff = 0
		d.dataOffsets.currBuf++
	}
	return n
}
|
||||
|
||||
// writeToAndUpdateCRC writes all remaining object data in the message to w,
// returning the number of bytes written and any write error. The updateCRC
// function is called on each chunk, and the underlying buffers are freed once
// the message has been fully consumed.
func (d *readObjectResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
	// For a completely empty message, just return 0
	if len(d.databufs) == 0 {
		return 0, nil
	}
	var written int64
	for !d.done {
		databuf := d.databufs[d.dataOffsets.currBuf]
		startOff := d.dataOffsets.currOff
		// In the final buffer the object data may end before the buffer does.
		var b []byte
		if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
			b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
		} else {
			b = databuf.ReadOnlyData()[startOff:]
		}
		var n int
		// Write all remaining data from the current buffer
		n, err := w.Write(b)
		written += int64(n)
		updateCRC(b)
		if err != nil {
			return written, err
		}
		d.dataOffsets.currOff = 0
		// We've read all the data from this message.
		if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
			d.done = true
			d.databufs.Free()
		} else {
			d.dataOffsets.currBuf++
		}
	}
	return written, nil
}
|
||||
|
||||
// Consume the next available tag in the input data and return the field number and type.
|
||||
// Advances the relevant offsets in the data.
|
||||
func (d *readObjectResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
|
||||
tagBuf := d.peek()
|
||||
|
||||
// Consume the next tag. This will tell us which field is next in the
|
||||
// buffer, its type, and how much space it takes up.
|
||||
fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
|
||||
if tagLength < 0 {
|
||||
return 0, 0, protowire.ParseError(tagLength)
|
||||
}
|
||||
// Update the offsets and current buffer depending on the tag length.
|
||||
if err := d.advanceOffset(uint64(tagLength)); err != nil {
|
||||
return 0, 0, fmt.Errorf("consuming tag: %w", err)
|
||||
}
|
||||
return fieldNum, fieldType, nil
|
||||
}
|
||||
|
||||
// Consume a varint that represents the length of a bytes field. Return the length of
|
||||
// the data, and advance the offsets by the length of the varint.
|
||||
func (d *readObjectResponseDecoder) consumeVarint() (uint64, error) {
|
||||
tagBuf := d.peek()
|
||||
|
||||
// Consume the next tag. This will tell us which field is next in the
|
||||
// buffer, its type, and how much space it takes up.
|
||||
dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
|
||||
if tagLength < 0 {
|
||||
return 0, protowire.ParseError(tagLength)
|
||||
}
|
||||
|
||||
// Update the offsets and current buffer depending on the tag length.
|
||||
d.advanceOffset(uint64(tagLength))
|
||||
return dataLength, nil
|
||||
}
|
||||
|
||||
func (d *readObjectResponseDecoder) consumeFixed32() (uint32, error) {
|
||||
valueBuf := d.peek()
|
||||
|
||||
// Consume the next tag. This will tell us which field is next in the
|
||||
// buffer, its type, and how much space it takes up.
|
||||
value, tagLength := protowire.ConsumeFixed32(valueBuf)
|
||||
if tagLength < 0 {
|
||||
return 0, protowire.ParseError(tagLength)
|
||||
}
|
||||
|
||||
// Update the offsets and current buffer depending on the tag length.
|
||||
d.advanceOffset(uint64(tagLength))
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func (d *readObjectResponseDecoder) consumeFixed64() (uint64, error) {
|
||||
valueBuf := d.peek()
|
||||
|
||||
// Consume the next tag. This will tell us which field is next in the
|
||||
// buffer, its type, and how much space it takes up.
|
||||
value, tagLength := protowire.ConsumeFixed64(valueBuf)
|
||||
if tagLength < 0 {
|
||||
return 0, protowire.ParseError(tagLength)
|
||||
}
|
||||
|
||||
// Update the offsets and current buffer depending on the tag length.
|
||||
d.advanceOffset(uint64(tagLength))
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// consumeFieldValue consumes (skips) the value of a field of the given wire
// type without returning anything. This is used to skip any values which are
// not going to be used.
func (d *readObjectResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
	// Reimplement protowire.ConsumeFieldValue without the extra case for
	// groups (which are complicated and not a thing in proto3).
	var err error
	switch fieldType {
	case protowire.VarintType:
		_, err = d.consumeVarint()
	case protowire.Fixed32Type:
		_, err = d.consumeFixed32()
	case protowire.Fixed64Type:
		_, err = d.consumeFixed64()
	case protowire.BytesType:
		_, err = d.consumeBytes()
	default:
		return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
	}
	if err != nil {
		return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
	}

	return nil
}
|
||||
|
||||
// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
|
||||
// and an error.
|
||||
func (d *readObjectResponseDecoder) consumeBytes() (bufferSliceOffsetsReadObject, error) {
|
||||
// m is the length of the data past the tag.
|
||||
m, err := d.consumeVarint()
|
||||
if err != nil {
|
||||
return bufferSliceOffsetsReadObject{}, fmt.Errorf("consuming bytes field: %w", err)
|
||||
}
|
||||
offsets := bufferSliceOffsetsReadObject{
|
||||
startBuf: d.currBuf,
|
||||
startOff: d.currOff,
|
||||
currBuf: d.currBuf,
|
||||
currOff: d.currOff,
|
||||
}
|
||||
|
||||
// Advance offsets to lengths of bytes field and capture where we end.
|
||||
d.advanceOffset(m)
|
||||
offsets.endBuf = d.currBuf
|
||||
offsets.endOff = d.currOff
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
// Consume a bytes field from the input and copy into a new buffer if
|
||||
// necessary (if the data is split across buffers in databuf). This can be
|
||||
// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
|
||||
// except object data).
|
||||
func (d *readObjectResponseDecoder) consumeBytesCopy() ([]byte, error) {
|
||||
// m is the length of the bytes data.
|
||||
m, err := d.consumeVarint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("consuming varint: %w", err)
|
||||
}
|
||||
// Copy the data into a buffer and advance the offset
|
||||
b := d.copyNextBytes(int(m))
|
||||
if err := d.advanceOffset(m); err != nil {
|
||||
return nil, fmt.Errorf("advancing offset: %w", err)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
// wire-encoded message buffer b, or an error if the message is invalid.
// This must be used on the first recv of an object as it may contain all fields
// of ReadObjectResponse, and we use or pass on those fields to the user.
// This function is essentially identical to proto.Unmarshal, except it aliases
// the data in the input []byte. If the proto library adds a feature to
// Unmarshal that does that, this function can be dropped.
func (d *readObjectResponseDecoder) readFullObjectResponse() error {
	msg := &storagepb.ReadObjectResponse{}

	// Loop over the entire message, extracting fields as we go. This does not
	// handle field concatenation, in which the contents of a single field
	// are split across multiple protobuf tags.
	for d.off < uint64(d.databufs.Len()) {
		fieldNum, fieldType, err := d.consumeTag()
		if err != nil {
			return fmt.Errorf("consuming next tag: %w", err)
		}

		// Unmarshal the field according to its type. Only fields that are not
		// nil will be present.
		switch {
		case fieldNum == checksummedDataFieldReadObject && fieldType == protowire.BytesType:
			// The ChecksummedData field was found. Initialize the struct.
			msg.ChecksummedData = &storagepb.ChecksummedData{}

			bytesFieldLen, err := d.consumeVarint()
			if err != nil {
				return fmt.Errorf("consuming bytes: %w", err)
			}

			// Walk the submessage field by field; the content bytes are only
			// recorded as offsets (d.dataOffsets), never copied.
			var contentEndOff = d.off + bytesFieldLen
			for d.off < contentEndOff {
				gotNum, gotTyp, err := d.consumeTag()
				if err != nil {
					return fmt.Errorf("consuming checksummedData tag: %w", err)
				}

				switch {
				case gotNum == checksummedDataContentFieldReadObject && gotTyp == protowire.BytesType:
					// Get the offsets of the content bytes.
					d.dataOffsets, err = d.consumeBytes()
					if err != nil {
						return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err)
					}
				case gotNum == checksummedDataCRC32CFieldReadObject && gotTyp == protowire.Fixed32Type:
					v, err := d.consumeFixed32()
					if err != nil {
						return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err)
					}
					msg.ChecksummedData.Crc32C = &v
				default:
					err := d.consumeFieldValue(gotNum, gotTyp)
					if err != nil {
						return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err)
					}
				}
			}
		case fieldNum == objectChecksumsFieldReadObject && fieldType == protowire.BytesType:
			// The field was found. Initialize the struct.
			msg.ObjectChecksums = &storagepb.ObjectChecksums{}
			// Consume the bytes and copy them into a single buffer if they are split across buffers.
			buf, err := d.consumeBytesCopy()
			if err != nil {
				return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %w", err)
			}
			// Unmarshal.
			if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil {
				return err
			}
		case fieldNum == contentRangeFieldReadObject && fieldType == protowire.BytesType:
			msg.ContentRange = &storagepb.ContentRange{}
			buf, err := d.consumeBytesCopy()
			if err != nil {
				return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %w", err)
			}
			if err := proto.Unmarshal(buf, msg.ContentRange); err != nil {
				return err
			}
		case fieldNum == metadataFieldReadObject && fieldType == protowire.BytesType:
			msg.Metadata = &storagepb.Object{}

			buf, err := d.consumeBytesCopy()
			if err != nil {
				return fmt.Errorf("invalid ReadObjectResponse.Metadata: %w", err)
			}

			if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
				return err
			}
		default:
			// Unknown field: skip its value entirely.
			err := d.consumeFieldValue(fieldNum, fieldType)
			if err != nil {
				return fmt.Errorf("invalid field in ReadObjectResponse: %w", err)
			}
		}
	}
	d.msg = msg
	return nil
}
|
||||
|
||||
// reopenStream "closes" the existing stream and attempts to reopen a stream and
|
||||
// sets the Reader's stream and cancelStream properties in the process.
|
||||
func (r *gRPCReadObjectReader) reopenStream() error {
|
||||
// Close existing stream and initialize new stream with updated offset.
|
||||
r.Close()
|
||||
|
||||
res, cancel, err := r.reopen(r.seen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.stream = res.stream
|
||||
r.currMsg = res.decoder
|
||||
r.cancel = cancel
|
||||
return nil
|
||||
}
|
||||
40
vendor/cloud.google.com/go/storage/grpc_reader_multi_range.go
generated
vendored
Normal file
40
vendor/cloud.google.com/go/storage/grpc_reader_multi_range.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import "sync"
|
||||
|
||||
// readIDGenerator hands out unique, monotonically increasing read IDs for
// multi-range reads, starting at 1. Call Next to obtain the next ID; it is
// safe for concurrent use.
type readIDGenerator struct {
	initOnce sync.Once
	nextID   chan int64 // holds the next ID to hand out; do not use this field directly
}

// init creates the single-slot channel that serializes access to the counter
// and seeds it with the first ID.
func (g *readIDGenerator) init() {
	g.nextID = make(chan int64, 1)
	g.nextID <- 1
}

// Next returns the next read ID. It initializes the readIDGenerator if needed.
func (g *readIDGenerator) Next() int64 {
	g.initOnce.Do(g.init)

	// Receiving drains the slot, giving this goroutine exclusive ownership of
	// the counter until the incremented value is sent back.
	id := <-g.nextID
	g.nextID <- id + 1
	return id
}
|
||||
1028
vendor/cloud.google.com/go/storage/grpc_writer.go
generated
vendored
Normal file
1028
vendor/cloud.google.com/go/storage/grpc_writer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
22
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
22
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
@@ -20,7 +20,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
@@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
||||
//
|
||||
// Options such as UserProjectForHMACKeys can be used to set the
|
||||
// userProject to be billed against for operations.
|
||||
// Note: gRPC is not supported.
|
||||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
@@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
|
||||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
||||
// Only inactive HMAC keys can be deleted.
|
||||
// After deletion, a key cannot be used to authenticate requests.
|
||||
// Note: gRPC is not supported.
|
||||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
@@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro
|
||||
return hmKey, nil
|
||||
}
|
||||
|
||||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
|
||||
if pbmd == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &HMACKey{
|
||||
AccessID: pbmd.GetAccessId(),
|
||||
ID: pbmd.GetId(),
|
||||
State: HMACState(pbmd.GetState()),
|
||||
ProjectID: pbmd.GetProject(),
|
||||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
|
||||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
|
||||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
|
||||
}
|
||||
}
|
||||
|
||||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
||||
// Note: gRPC is not supported.
|
||||
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
if projectID == "" {
|
||||
return nil, errors.New("storage: expecting a non-blank projectID")
|
||||
@@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct {
|
||||
}
|
||||
|
||||
// Update mutates the HMACKey referred to by accessID.
|
||||
// Note: gRPC is not supported.
|
||||
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
if au.State != Active && au.State != Inactive {
|
||||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
||||
@@ -237,6 +224,7 @@ type HMACKeysIterator struct {
|
||||
// ListHMACKeys returns an iterator for listing HMACKeys.
|
||||
//
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
// Note: gRPC is not supported.
|
||||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
|
||||
294
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
294
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
@@ -21,7 +21,7 @@ import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -30,30 +30,31 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/auth"
|
||||
"cloud.google.com/go/iam/apiv1/iampb"
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"github.com/google/uuid"
|
||||
"github.com/googleapis/gax-go/v2/callctx"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/api/transport"
|
||||
htransport "google.golang.org/api/transport/http"
|
||||
)
|
||||
|
||||
// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
|
||||
// storageClient interface.
|
||||
type httpStorageClient struct {
|
||||
creds *google.Credentials
|
||||
hc *http.Client
|
||||
xmlHost string
|
||||
raw *raw.Service
|
||||
scheme string
|
||||
settings *settings
|
||||
config *storageConfig
|
||||
creds *auth.Credentials
|
||||
hc *http.Client
|
||||
xmlHost string
|
||||
raw *raw.Service
|
||||
scheme string
|
||||
settings *settings
|
||||
config *storageConfig
|
||||
dynamicReadReqStallTimeout *bucketDelayManager
|
||||
}
|
||||
|
||||
// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON
|
||||
@@ -63,7 +64,7 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
|
||||
o := s.clientOption
|
||||
config := newStorageConfig(o...)
|
||||
|
||||
var creds *google.Credentials
|
||||
var creds *auth.Credentials
|
||||
// In general, it is recommended to use raw.NewService instead of htransport.NewClient
|
||||
// since raw.NewService configures the correct default endpoints when initializing the
|
||||
// internal http client. However, in our case, "NewRangeReader" in reader.go needs to
|
||||
@@ -81,10 +82,10 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
|
||||
)
|
||||
// Don't error out here. The user may have passed in their own HTTP
|
||||
// client which does not auth with ADC or other common conventions.
|
||||
c, err := transport.Creds(ctx, o...)
|
||||
c, err := internaloption.AuthCreds(ctx, o)
|
||||
if err == nil {
|
||||
creds = c
|
||||
o = append(o, internaloption.WithCredentials(creds))
|
||||
o = append(o, option.WithAuthCredentials(creds))
|
||||
}
|
||||
} else {
|
||||
var hostURL *url.URL
|
||||
@@ -128,14 +129,29 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
|
||||
return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
|
||||
}
|
||||
|
||||
var bd *bucketDelayManager
|
||||
if config.readStallTimeoutConfig != nil {
|
||||
drrstConfig := config.readStallTimeoutConfig
|
||||
bd, err = newBucketDelayManager(
|
||||
drrstConfig.TargetPercentile,
|
||||
getDynamicReadReqIncreaseRateFromEnv(),
|
||||
getDynamicReadReqInitialTimeoutSecFromEnv(drrstConfig.Min),
|
||||
drrstConfig.Min,
|
||||
defaultDynamicReqdReqMaxTimeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating dynamic-delay: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &httpStorageClient{
|
||||
creds: creds,
|
||||
hc: hc,
|
||||
xmlHost: u.Host,
|
||||
raw: rawService,
|
||||
scheme: u.Scheme,
|
||||
settings: s,
|
||||
config: &config,
|
||||
creds: creds,
|
||||
hc: hc,
|
||||
xmlHost: u.Host,
|
||||
raw: rawService,
|
||||
scheme: u.Scheme,
|
||||
settings: s,
|
||||
config: &config,
|
||||
dynamicReadReqStallTimeout: bd,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -270,12 +286,8 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds
|
||||
return err
|
||||
}, s.retry, s.idempotent)
|
||||
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrBucketNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, formatBucketError(err)
|
||||
}
|
||||
return newBucket(resp)
|
||||
}
|
||||
@@ -331,6 +343,10 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
||||
it.query = *q
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
var err error
|
||||
// Add trace span around List API call within the fetch.
|
||||
ctx, _ = startSpan(ctx, "httpStorageClient.ObjectsListCall")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
req := c.raw.Objects.List(bucket)
|
||||
if it.query.SoftDeleted {
|
||||
req.SoftDeleted(it.query.SoftDeleted)
|
||||
@@ -359,17 +375,12 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Objects
|
||||
var err error
|
||||
err = run(it.ctx, func(ctx context.Context) error {
|
||||
resp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent)
|
||||
if err != nil {
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
err = ErrBucketNotExist
|
||||
}
|
||||
return "", err
|
||||
return "", formatBucketError(err)
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
it.items = append(it.items, newObject(item))
|
||||
@@ -399,11 +410,7 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str
|
||||
req.UserProject(s.userProject)
|
||||
}
|
||||
err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent)
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
return err
|
||||
return formatObjectErr(err)
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
@@ -428,12 +435,8 @@ func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectPara
|
||||
obj, err = req.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent)
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, formatObjectErr(err)
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
|
||||
@@ -538,12 +541,8 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent)
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, formatObjectErr(err)
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
|
||||
@@ -568,9 +567,32 @@ func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreOb
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent)
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
if err != nil {
|
||||
return nil, formatObjectErr(err)
|
||||
}
|
||||
return newObject(obj), err
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := c.raw.Objects.Move(params.bucket, params.srcObject, params.dstObject).Context(ctx)
|
||||
if err := applyConds("MoveObjectDestination", defaultGen, params.dstConds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applySourceConds("MoveObjectSource", defaultGen, params.srcConds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
req.UserProject(s.userProject)
|
||||
}
|
||||
if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent)
|
||||
if err != nil {
|
||||
return nil, formatObjectErr(err)
|
||||
}
|
||||
return newObject(obj), err
|
||||
}
|
||||
@@ -758,7 +780,7 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec
|
||||
retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil {
|
||||
return nil, err
|
||||
return nil, formatObjectErr(err)
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
|
||||
@@ -780,7 +802,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
|
||||
if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil {
|
||||
if err := applySourceConds("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
@@ -805,7 +827,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
|
||||
retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil {
|
||||
return nil, err
|
||||
return nil, formatObjectErr(err)
|
||||
}
|
||||
|
||||
r := &rewriteObjectResponse{
|
||||
@@ -819,6 +841,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// NewMultiRangeDownloader is not supported by http client.
|
||||
func (c *httpStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) {
|
||||
return nil, errMethodNotSupported
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
@@ -832,6 +859,7 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) {
|
||||
requestID := uuid.New()
|
||||
u := &url.URL{
|
||||
Scheme: c.scheme,
|
||||
Host: c.xmlHost,
|
||||
@@ -857,15 +885,49 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
|
||||
|
||||
reopen := readerReopen(ctx, req.Header, params, s,
|
||||
func(ctx context.Context) (*http.Response, error) {
|
||||
// Set custom headers passed in via the context. This is only required for XML;
|
||||
// for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
|
||||
ctxHeaders := callctx.HeadersFromContext(ctx)
|
||||
for k, vals := range ctxHeaders {
|
||||
for _, v := range vals {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
setHeadersFromCtx(ctx, req.Header)
|
||||
|
||||
if c.dynamicReadReqStallTimeout == nil {
|
||||
return c.hc.Do(req.WithContext(ctx))
|
||||
}
|
||||
return c.hc.Do(req.WithContext(ctx))
|
||||
|
||||
cancelCtx, cancel := context.WithCancel(ctx)
|
||||
var (
|
||||
res *http.Response
|
||||
err error
|
||||
)
|
||||
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
reqStartTime := time.Now()
|
||||
res, err = c.hc.Do(req.WithContext(cancelCtx))
|
||||
if err == nil {
|
||||
reqLatency := time.Since(reqStartTime)
|
||||
c.dynamicReadReqStallTimeout.update(params.bucket, reqLatency)
|
||||
} else if errors.Is(err, context.Canceled) {
|
||||
// context.Canceled means operation took more than current dynamicTimeout,
|
||||
// hence should be increased.
|
||||
c.dynamicReadReqStallTimeout.increase(params.bucket)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Wait until stall timeout or request is successful.
|
||||
stallTimeout := c.dynamicReadReqStallTimeout.getValue(params.bucket)
|
||||
timer := time.After(stallTimeout)
|
||||
select {
|
||||
case <-timer:
|
||||
log.Printf("[%s] stalled read-req cancelled after %fs", requestID, stallTimeout.Seconds())
|
||||
cancel()
|
||||
<-done
|
||||
if res != nil && res.Body != nil {
|
||||
res.Body.Close()
|
||||
}
|
||||
return res, context.DeadlineExceeded
|
||||
case <-done:
|
||||
cancel = nil
|
||||
}
|
||||
return res, err
|
||||
},
|
||||
func() error { return setConditionsHeaders(req.Header, params.conds) },
|
||||
func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) })
|
||||
@@ -901,7 +963,19 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR
|
||||
return parseReadResponse(res, params, reopen)
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
|
||||
type httpInternalWriter struct {
|
||||
*io.PipeWriter
|
||||
}
|
||||
|
||||
func (hiw httpInternalWriter) Flush() (int64, error) {
|
||||
return 0, errors.New("Writer.Flush is only supported for gRPC-based clients")
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (internalWriter, error) {
|
||||
if params.append {
|
||||
return nil, errors.New("storage: append not supported on HTTP Client; use gRPC")
|
||||
}
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
errorf := params.setError
|
||||
setObj := params.setObj
|
||||
@@ -917,6 +991,9 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
|
||||
if params.chunkRetryDeadline != 0 {
|
||||
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline))
|
||||
}
|
||||
if params.chunkTransferTimeout != 0 {
|
||||
mediaOpts = append(mediaOpts, googleapi.ChunkTransferTimeout(params.chunkTransferTimeout))
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
|
||||
@@ -987,7 +1064,7 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
|
||||
setObj(newObject(resp))
|
||||
}()
|
||||
|
||||
return pw, nil
|
||||
return httpInternalWriter{pw}, nil
|
||||
}
|
||||
|
||||
// IAM methods.
|
||||
@@ -1186,9 +1263,6 @@ func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, a
|
||||
// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket,
|
||||
// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets.
|
||||
func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Notifications.List(bucket)
|
||||
if s.userProject != "" {
|
||||
@@ -1206,9 +1280,6 @@ func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Notifications.Insert(bucket, toRawNotification(n))
|
||||
if s.userProject != "" {
|
||||
@@ -1226,9 +1297,6 @@ func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket strin
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Notifications.Delete(bucket, id)
|
||||
if s.userProject != "" {
|
||||
@@ -1333,13 +1401,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
|
||||
err = run(ctx, func(ctx context.Context) error {
|
||||
res, err = doDownload(ctx)
|
||||
if err != nil {
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) {
|
||||
if e.Code == http.StatusNotFound {
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
}
|
||||
return err
|
||||
return formatObjectErr(err)
|
||||
}
|
||||
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
@@ -1348,7 +1410,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(res.Body)
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return &googleapi.Error{
|
||||
Code: res.StatusCode,
|
||||
@@ -1372,7 +1434,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
|
||||
// https://cloud.google.com/storage/docs/transcoding#range,
|
||||
// thus we have to manually move the body forward by seen bytes.
|
||||
if decompressiveTranscoding(res) && seen > 0 {
|
||||
_, _ = io.CopyN(ioutil.Discard, res.Body, seen)
|
||||
_, _ = io.CopyN(io.Discard, res.Body, seen)
|
||||
}
|
||||
|
||||
// If a generation hasn't been specified, and this is the first response we get, let's record the
|
||||
@@ -1422,18 +1484,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
|
||||
}
|
||||
} else {
|
||||
size = res.ContentLength
|
||||
// Check the CRC iff all of the following hold:
|
||||
// - We asked for content (length != 0).
|
||||
// - We got all the content (status != PartialContent).
|
||||
// - The server sent a CRC header.
|
||||
// - The Go http stack did not uncompress the file.
|
||||
// - We were not served compressed data that was uncompressed on download.
|
||||
// The problem with the last two cases is that the CRC will not match -- GCS
|
||||
// computes it on the compressed contents, but we compute it on the
|
||||
// uncompressed contents.
|
||||
if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
|
||||
crc, checkCRC = parseCRC32c(res)
|
||||
}
|
||||
}
|
||||
|
||||
// Check the CRC iff all of the following hold:
|
||||
// - We asked for content (length != 0).
|
||||
// - We got all the content (status != PartialContent).
|
||||
// - The server sent a CRC header.
|
||||
// - The Go http stack did not uncompress the file.
|
||||
// - We were not served compressed data that was uncompressed on download.
|
||||
// The problem with the last two cases is that the CRC will not match -- GCS
|
||||
// computes it on the compressed contents, but we compute it on the
|
||||
// uncompressed contents.
|
||||
crc, checkCRC = parseCRC32c(res)
|
||||
if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) {
|
||||
checkCRC = false
|
||||
}
|
||||
|
||||
remain := res.ContentLength
|
||||
@@ -1461,6 +1525,14 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
|
||||
}
|
||||
}
|
||||
|
||||
metadata := map[string]string{}
|
||||
for key, values := range res.Header {
|
||||
if len(values) > 0 && strings.HasPrefix(key, "X-Goog-Meta-") {
|
||||
key := key[len("X-Goog-Meta-"):]
|
||||
metadata[key] = values[0]
|
||||
}
|
||||
}
|
||||
|
||||
attrs := ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: res.Header.Get("Content-Type"),
|
||||
@@ -1470,12 +1542,15 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
|
||||
StartOffset: startOffset,
|
||||
Generation: params.gen,
|
||||
Metageneration: metaGen,
|
||||
CRC32C: crc,
|
||||
Decompressed: res.Uncompressed || uncompressedByServer(res),
|
||||
}
|
||||
return &Reader{
|
||||
Attrs: attrs,
|
||||
size: size,
|
||||
remain: remain,
|
||||
checkCRC: checkCRC,
|
||||
Attrs: attrs,
|
||||
objectMetadata: &metadata,
|
||||
size: size,
|
||||
remain: remain,
|
||||
checkCRC: checkCRC,
|
||||
reader: &httpReader{
|
||||
reopen: reopen,
|
||||
body: body,
|
||||
@@ -1484,3 +1559,30 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// setHeadersFromCtx sets custom headers passed in via the context on the header,
|
||||
// replacing any header with the same key (which avoids duplicating invocation headers).
|
||||
// This is only required for XML; for gRPC & JSON requests this is handled in
|
||||
// the GAPIC and Apiary layers respectively.
|
||||
func setHeadersFromCtx(ctx context.Context, header http.Header) {
|
||||
ctxHeaders := callctx.HeadersFromContext(ctx)
|
||||
for k, vals := range ctxHeaders {
|
||||
// Merge x-goog-api-client values into a single space-separated value.
|
||||
if strings.EqualFold(k, xGoogHeaderKey) {
|
||||
alreadySetValues := header.Values(xGoogHeaderKey)
|
||||
vals = append(vals, alreadySetValues...)
|
||||
|
||||
if len(vals) > 0 {
|
||||
xGoogHeader := vals[0]
|
||||
for _, v := range vals[1:] {
|
||||
xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
|
||||
}
|
||||
header.Set(k, xGoogHeader)
|
||||
}
|
||||
} else {
|
||||
for _, v := range vals {
|
||||
header.Set(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
13
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
13
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
@@ -19,7 +19,6 @@ import (
|
||||
|
||||
"cloud.google.com/go/iam"
|
||||
"cloud.google.com/go/iam/apiv1/iampb"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/genproto/googleapis/type/expr"
|
||||
)
|
||||
@@ -45,16 +44,16 @@ func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy,
|
||||
}
|
||||
|
||||
func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "storage.IAM.Get")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
||||
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
|
||||
}
|
||||
|
||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "storage.IAM.Set")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
isIdempotent := len(p.Etag) > 0
|
||||
o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
|
||||
@@ -62,8 +61,8 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (
|
||||
}
|
||||
|
||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "storage.IAM.Test")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
||||
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
|
||||
|
||||
100
vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
generated
vendored
100
vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Google LLC
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -41,7 +41,7 @@ type BucketIterator struct {
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *BucketIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
@@ -68,100 +68,6 @@ func (it *BucketIterator) takeBuf() interface{} {
|
||||
return b
|
||||
}
|
||||
|
||||
// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata.
|
||||
type HmacKeyMetadataIterator struct {
|
||||
items []*storagepb.HmacKeyMetadata
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) {
|
||||
var item *storagepb.HmacKeyMetadata
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *HmacKeyMetadataIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *HmacKeyMetadataIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig.
|
||||
type NotificationConfigIterator struct {
|
||||
items []*storagepb.NotificationConfig
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) {
|
||||
var item *storagepb.NotificationConfig
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *NotificationConfigIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *NotificationConfigIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// ObjectIterator manages a stream of *storagepb.Object.
|
||||
type ObjectIterator struct {
|
||||
items []*storagepb.Object
|
||||
@@ -182,7 +88,7 @@ type ObjectIterator struct {
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
|
||||
func (it *ObjectIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
38
vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
generated
vendored
Normal file
38
vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
//go:build go1.23
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"iter"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
"github.com/googleapis/gax-go/v2/iterator"
|
||||
)
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
|
||||
// All returns an iterator. If an error is returned by the iterator, the
|
||||
// iterator will stop after that iteration.
|
||||
func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] {
|
||||
return iterator.RangeAdapter(it.Next)
|
||||
}
|
||||
65
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
generated
vendored
65
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// Copyright 2024 Google LLC
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -17,19 +17,7 @@
|
||||
// Package storage is an auto-generated package for the
|
||||
// Cloud Storage API.
|
||||
//
|
||||
// Stop. This folder is likely not what you are looking for. This folder
|
||||
// contains protocol buffer definitions for an API only accessible to select
|
||||
// customers. Customers not participating should not depend on this file.
|
||||
// Please contact Google Cloud sales if you are interested. Unless told
|
||||
// otherwise by a Google Cloud representative, do not use or otherwise rely
|
||||
// on any of the contents of this folder. If you would like to use Cloud
|
||||
// Storage, please consult our official documentation (at
|
||||
// https://cloud.google.com/storage/docs/apis) for details on our XML and
|
||||
// JSON APIs, or else consider one of our client libraries (at
|
||||
// https://cloud.google.com/storage/docs/reference/libraries). This API
|
||||
// defined in this folder is unreleased and may shut off, break, or fail at
|
||||
// any time for any users who are not registered as a part of a private
|
||||
// preview program.
|
||||
// Lets you store and retrieve potentially-large, immutable data objects.
|
||||
//
|
||||
// # General documentation
|
||||
//
|
||||
@@ -47,6 +35,7 @@
|
||||
//
|
||||
// To get started with this package, create a client.
|
||||
//
|
||||
// // go get cloud.google.com/go/storage/internal/apiv2@latest
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
@@ -65,25 +54,14 @@
|
||||
//
|
||||
// # Using the Client
|
||||
//
|
||||
// The following is an example of making an API call with the newly created client.
|
||||
// The following is an example of making an API call with the newly created client, mentioned above.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
// // - It may require correct/in-range values for request initialization.
|
||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
// stream, err := c.BidiWriteObject(ctx)
|
||||
// stream, err := c.BidiReadObject(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// go func() {
|
||||
// reqs := []*storagepb.BidiWriteObjectRequest{
|
||||
// reqs := []*storagepb.BidiReadObjectRequest{
|
||||
// // TODO: Create requests.
|
||||
// }
|
||||
// for _, req := range reqs {
|
||||
@@ -119,34 +97,3 @@
|
||||
// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
|
||||
// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
|
||||
package storage // import "cloud.google.com/go/storage/internal/apiv2"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// For more information on implementing a client constructor hook, see
|
||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
||||
type clientHookParams struct{}
|
||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
||||
|
||||
var versionClient string
|
||||
|
||||
func getVersionClient() string {
|
||||
if versionClient == "" {
|
||||
return "UNKNOWN"
|
||||
}
|
||||
return versionClient
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write",
|
||||
}
|
||||
}
|
||||
|
||||
60
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
generated
vendored
60
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
generated
vendored
@@ -10,6 +10,11 @@
|
||||
"grpc": {
|
||||
"libraryClient": "Client",
|
||||
"rpcs": {
|
||||
"BidiReadObject": {
|
||||
"methods": [
|
||||
"BidiReadObject"
|
||||
]
|
||||
},
|
||||
"BidiWriteObject": {
|
||||
"methods": [
|
||||
"BidiWriteObject"
|
||||
@@ -30,31 +35,11 @@
|
||||
"CreateBucket"
|
||||
]
|
||||
},
|
||||
"CreateHmacKey": {
|
||||
"methods": [
|
||||
"CreateHmacKey"
|
||||
]
|
||||
},
|
||||
"CreateNotificationConfig": {
|
||||
"methods": [
|
||||
"CreateNotificationConfig"
|
||||
]
|
||||
},
|
||||
"DeleteBucket": {
|
||||
"methods": [
|
||||
"DeleteBucket"
|
||||
]
|
||||
},
|
||||
"DeleteHmacKey": {
|
||||
"methods": [
|
||||
"DeleteHmacKey"
|
||||
]
|
||||
},
|
||||
"DeleteNotificationConfig": {
|
||||
"methods": [
|
||||
"DeleteNotificationConfig"
|
||||
]
|
||||
},
|
||||
"DeleteObject": {
|
||||
"methods": [
|
||||
"DeleteObject"
|
||||
@@ -65,46 +50,21 @@
|
||||
"GetBucket"
|
||||
]
|
||||
},
|
||||
"GetHmacKey": {
|
||||
"methods": [
|
||||
"GetHmacKey"
|
||||
]
|
||||
},
|
||||
"GetIamPolicy": {
|
||||
"methods": [
|
||||
"GetIamPolicy"
|
||||
]
|
||||
},
|
||||
"GetNotificationConfig": {
|
||||
"methods": [
|
||||
"GetNotificationConfig"
|
||||
]
|
||||
},
|
||||
"GetObject": {
|
||||
"methods": [
|
||||
"GetObject"
|
||||
]
|
||||
},
|
||||
"GetServiceAccount": {
|
||||
"methods": [
|
||||
"GetServiceAccount"
|
||||
]
|
||||
},
|
||||
"ListBuckets": {
|
||||
"methods": [
|
||||
"ListBuckets"
|
||||
]
|
||||
},
|
||||
"ListHmacKeys": {
|
||||
"methods": [
|
||||
"ListHmacKeys"
|
||||
]
|
||||
},
|
||||
"ListNotificationConfigs": {
|
||||
"methods": [
|
||||
"ListNotificationConfigs"
|
||||
]
|
||||
},
|
||||
"ListObjects": {
|
||||
"methods": [
|
||||
"ListObjects"
|
||||
@@ -115,6 +75,11 @@
|
||||
"LockBucketRetentionPolicy"
|
||||
]
|
||||
},
|
||||
"MoveObject": {
|
||||
"methods": [
|
||||
"MoveObject"
|
||||
]
|
||||
},
|
||||
"QueryWriteStatus": {
|
||||
"methods": [
|
||||
"QueryWriteStatus"
|
||||
@@ -155,11 +120,6 @@
|
||||
"UpdateBucket"
|
||||
]
|
||||
},
|
||||
"UpdateHmacKey": {
|
||||
"methods": [
|
||||
"UpdateHmacKey"
|
||||
]
|
||||
},
|
||||
"UpdateObject": {
|
||||
"methods": [
|
||||
"UpdateObject"
|
||||
|
||||
69
vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go
generated
vendored
Normal file
69
vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/googleapis/gax-go/v2/internallog/grpclog"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const serviceName = "storage.googleapis.com"
|
||||
|
||||
var protoVersion = fmt.Sprintf("1.%d", protoimpl.MaxVersion)
|
||||
|
||||
// For more information on implementing a client constructor hook, see
|
||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
||||
type clientHookParams struct{}
|
||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
||||
|
||||
var versionClient string
|
||||
|
||||
func getVersionClient() string {
|
||||
if versionClient == "" {
|
||||
return "UNKNOWN"
|
||||
}
|
||||
return versionClient
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write",
|
||||
}
|
||||
}
|
||||
|
||||
func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
|
||||
var zero O
|
||||
logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
|
||||
resp, err := fn(ctx, req, opts...)
|
||||
if err != nil {
|
||||
return zero, err
|
||||
}
|
||||
logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
|
||||
return resp, err
|
||||
}
|
||||
724
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
generated
vendored
724
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
generated
vendored
File diff suppressed because it is too large
Load Diff
10060
vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
generated
vendored
10060
vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
41
vendor/cloud.google.com/go/storage/internal/experimental.go
generated
vendored
Normal file
41
vendor/cloud.google.com/go/storage/internal/experimental.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// All options in this package are experimental.
|
||||
|
||||
package internal
|
||||
|
||||
var (
|
||||
// WithMetricInterval is a function which is implemented by storage package.
|
||||
// It sets how often to emit metrics when using NewPeriodicReader and must be
|
||||
// greater than 1 minute.
|
||||
WithMetricInterval any // func (*time.Duration) option.ClientOption
|
||||
|
||||
// WithMetricExporter is a function which is implemented by storage package.
|
||||
// Set an alternate client-side metric Exporter to emit metrics through.
|
||||
WithMetricExporter any // func (*metric.Exporter) option.ClientOption
|
||||
|
||||
// WithReadStallTimeout is a function which is implemented by storage package.
|
||||
// It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption.
|
||||
WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption
|
||||
|
||||
// WithGRPCBidiReads is a function which is implemented by the storage package.
|
||||
// It sets the gRPC client to use the BidiReadObject API for downloads.
|
||||
WithGRPCBidiReads any // func() option.ClientOption
|
||||
|
||||
// WithZonalBucketAPIs is a function which is implemented by the storage package.
|
||||
// It sets the gRPC client to use the BidiReadObject API for downloads and
|
||||
// appendable object semantics by default for uploads.
|
||||
WithZonalBucketAPIs any // func() option.ClientOption
|
||||
)
|
||||
2
vendor/cloud.google.com/go/storage/internal/version.go
generated
vendored
2
vendor/cloud.google.com/go/storage/internal/version.go
generated
vendored
@@ -15,4 +15,4 @@
|
||||
package internal
|
||||
|
||||
// Version is the current tagged release of the library.
|
||||
const Version = "1.43.0"
|
||||
const Version = "1.56.0"
|
||||
|
||||
97
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
97
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
@@ -21,7 +21,10 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
"cloud.google.com/go/internal/version"
|
||||
@@ -38,10 +41,25 @@ var defaultRetry *retryConfig = &retryConfig{}
|
||||
var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version)
|
||||
|
||||
const (
|
||||
xGoogHeaderKey = "x-goog-api-client"
|
||||
idempotencyHeaderKey = "x-goog-gcs-idempotency-token"
|
||||
xGoogHeaderKey = "x-goog-api-client"
|
||||
idempotencyHeaderKey = "x-goog-gcs-idempotency-token"
|
||||
cookieHeaderKey = "cookie"
|
||||
directpathCookieHeaderKey = "x-directpath-tracing-cookie"
|
||||
)
|
||||
|
||||
var (
|
||||
cookieHeader = sync.OnceValue(func() string {
|
||||
return os.Getenv("GOOGLE_SDK_GO_TRACING_COOKIE")
|
||||
})
|
||||
)
|
||||
|
||||
func (r *retryConfig) runShouldRetry(err error) bool {
|
||||
if r == nil || r.shouldRetry == nil {
|
||||
return ShouldRetry(err)
|
||||
}
|
||||
return r.shouldRetry(err)
|
||||
}
|
||||
|
||||
// run determines whether a retry is necessary based on the config and
|
||||
// idempotency information. It then calls the function with or without retries
|
||||
// as appropriate, using the configured settings.
|
||||
@@ -62,19 +80,41 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
|
||||
bo.Initial = retry.backoff.Initial
|
||||
bo.Max = retry.backoff.Max
|
||||
}
|
||||
var errorFunc func(err error) bool = ShouldRetry
|
||||
if retry.shouldRetry != nil {
|
||||
errorFunc = retry.shouldRetry
|
||||
|
||||
var quitAfterTimer *time.Timer
|
||||
if retry.maxRetryDuration != 0 {
|
||||
quitAfterTimer = time.NewTimer(retry.maxRetryDuration)
|
||||
defer quitAfterTimer.Stop()
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
return internal.Retry(ctx, bo, func() (stop bool, err error) {
|
||||
if retry.maxRetryDuration != 0 {
|
||||
select {
|
||||
case <-quitAfterTimer.C:
|
||||
if lastErr == nil {
|
||||
return true, fmt.Errorf("storage: request not sent, choose a larger value for the retry deadline (currently set to %s)", retry.maxRetryDuration)
|
||||
}
|
||||
return true, fmt.Errorf("storage: retry deadline of %s reached after %v attempts; last error: %w", retry.maxRetryDuration, attempts, lastErr)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts)
|
||||
err = call(ctxWithHeaders)
|
||||
if err != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts {
|
||||
return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err)
|
||||
lastErr = call(ctxWithHeaders)
|
||||
if lastErr != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts {
|
||||
return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, lastErr)
|
||||
}
|
||||
attempts++
|
||||
return !errorFunc(err), err
|
||||
retryable := retry.runShouldRetry(lastErr)
|
||||
// Explicitly check context cancellation so that we can distinguish between a
|
||||
// DEADLINE_EXCEEDED error from the server and a user-set context deadline.
|
||||
// Unfortunately gRPC will codes.DeadlineExceeded (which may be retryable if it's
|
||||
// sent by the server) in both cases.
|
||||
if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) {
|
||||
retryable = false
|
||||
}
|
||||
return !retryable, lastErr
|
||||
})
|
||||
}
|
||||
|
||||
@@ -84,22 +124,14 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int
|
||||
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
|
||||
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
|
||||
|
||||
// TODO: remove this once the respective transport packages merge xGoogHeader.
|
||||
// Also remove gl-go at that time, as it will be repeated.
|
||||
hdrs := callctx.HeadersFromContext(ctx)
|
||||
for _, v := range hdrs[xGoogHeaderKey] {
|
||||
xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
|
||||
}
|
||||
|
||||
if hdrs[xGoogHeaderKey] != nil {
|
||||
// Replace the key instead of adding it, if there was anything to merge with.
|
||||
hdrs[xGoogHeaderKey] = []string{xGoogHeader}
|
||||
} else {
|
||||
// TODO: keep this line when removing the above code.
|
||||
ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
|
||||
}
|
||||
|
||||
ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
|
||||
ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID)
|
||||
|
||||
if c := cookieHeader(); c != "" {
|
||||
ctx = callctx.SetHeaders(ctx, cookieHeaderKey, c)
|
||||
ctx = callctx.SetHeaders(ctx, directpathCookieHeaderKey, c)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
@@ -138,14 +170,27 @@ func ShouldRetry(err error) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// TODO: remove when https://github.com/golang/go/issues/53472 is resolved.
|
||||
// We don't want to retry io.EOF errors, since these can indicate normal
|
||||
// functioning terminations such as internally in the case of Reader and
|
||||
// externally in the case of iterator methods. However, the linked bug
|
||||
// requires us to retry the EOFs that it causes, which should be wrapped
|
||||
// in net or url errors.
|
||||
if errors.Is(err, io.EOF) {
|
||||
return true
|
||||
}
|
||||
case *net.DNSError:
|
||||
if e.IsTemporary {
|
||||
return true
|
||||
}
|
||||
case interface{ Temporary() bool }:
|
||||
if e.Temporary() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC.
|
||||
// UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC.
|
||||
if st, ok := status.FromError(err); ok {
|
||||
if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal {
|
||||
if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
49
vendor/cloud.google.com/go/storage/notifications.go
generated
vendored
49
vendor/cloud.google.com/go/storage/notifications.go
generated
vendored
@@ -20,8 +20,6 @@ import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
@@ -92,30 +90,6 @@ func toNotification(rn *raw.Notification) *Notification {
|
||||
return n
|
||||
}
|
||||
|
||||
func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification {
|
||||
n := &Notification{
|
||||
ID: pbn.GetName(),
|
||||
EventTypes: pbn.GetEventTypes(),
|
||||
ObjectNamePrefix: pbn.GetObjectNamePrefix(),
|
||||
CustomAttributes: pbn.GetCustomAttributes(),
|
||||
PayloadFormat: pbn.GetPayloadFormat(),
|
||||
}
|
||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
|
||||
return n
|
||||
}
|
||||
|
||||
func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
|
||||
return &storagepb.NotificationConfig{
|
||||
Name: n.ID,
|
||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
||||
n.TopicProjectID, n.TopicID),
|
||||
EventTypes: n.EventTypes,
|
||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
||||
CustomAttributes: n.CustomAttributes,
|
||||
PayloadFormat: n.PayloadFormat,
|
||||
}
|
||||
}
|
||||
|
||||
var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
|
||||
|
||||
// parseNotificationTopic extracts the project and topic IDs from from the full
|
||||
@@ -144,9 +118,10 @@ func toRawNotification(n *Notification) *raw.Notification {
|
||||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
||||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
||||
// returned Notification's ID can be used to refer to it.
|
||||
// Note: gRPC is not supported.
|
||||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.AddNotification")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
if n.ID != "" {
|
||||
return nil, errors.New("storage: AddNotification: ID must not be set")
|
||||
@@ -165,9 +140,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
|
||||
|
||||
// Notifications returns all the Notifications configured for this bucket, as a map
|
||||
// indexed by notification ID.
|
||||
// Note: gRPC is not supported.
|
||||
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.Notifications")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
||||
n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
|
||||
@@ -182,18 +158,11 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
||||
return m
|
||||
}
|
||||
|
||||
func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification {
|
||||
m := map[string]*Notification{}
|
||||
for _, n := range ns {
|
||||
m[n.Name] = toNotificationFromProto(n)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// DeleteNotification deletes the notification with the given ID.
|
||||
// Note: gRPC is not supported.
|
||||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Bucket.DeleteNotification")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
|
||||
|
||||
198
vendor/cloud.google.com/go/storage/option.go
generated
vendored
198
vendor/cloud.google.com/go/storage/option.go
generated
vendored
@@ -15,15 +15,76 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/storage/experimental"
|
||||
storageinternal "cloud.google.com/go/storage/internal"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
)
|
||||
|
||||
// storageConfig contains the Storage client option configuration that can be
|
||||
const (
|
||||
dynamicReadReqIncreaseRateEnv = "DYNAMIC_READ_REQ_INCREASE_RATE"
|
||||
dynamicReadReqInitialTimeoutEnv = "DYNAMIC_READ_REQ_INITIAL_TIMEOUT"
|
||||
defaultDynamicReadReqIncreaseRate = 15.0
|
||||
defaultDynamicReqdReqMaxTimeout = 1 * time.Hour
|
||||
defaultDynamicReadReqMinTimeout = 500 * time.Millisecond
|
||||
defaultTargetPercentile = 0.99
|
||||
)
|
||||
|
||||
func init() {
|
||||
// initialize experimental options
|
||||
storageinternal.WithMetricExporter = withMetricExporter
|
||||
storageinternal.WithMetricInterval = withMetricInterval
|
||||
storageinternal.WithReadStallTimeout = withReadStallTimeout
|
||||
storageinternal.WithGRPCBidiReads = withGRPCBidiReads
|
||||
storageinternal.WithZonalBucketAPIs = withZonalBucketAPIs
|
||||
}
|
||||
|
||||
// getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable.
|
||||
// It returns defaultDynamicReadReqIncreaseRate if env is not set or the set value is invalid.
|
||||
func getDynamicReadReqIncreaseRateFromEnv() float64 {
|
||||
increaseRate := os.Getenv(dynamicReadReqIncreaseRateEnv)
|
||||
if increaseRate == "" {
|
||||
return defaultDynamicReadReqIncreaseRate
|
||||
}
|
||||
|
||||
val, err := strconv.ParseFloat(increaseRate, 64)
|
||||
if err != nil {
|
||||
return defaultDynamicReadReqIncreaseRate
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// getDynamicReadReqInitialTimeoutSecFromEnv returns the value set in the env variable.
|
||||
// It returns the passed defaultVal if env is not set or the set value is invalid.
|
||||
func getDynamicReadReqInitialTimeoutSecFromEnv(defaultVal time.Duration) time.Duration {
|
||||
initialTimeout := os.Getenv(dynamicReadReqInitialTimeoutEnv)
|
||||
if initialTimeout == "" {
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
val, err := time.ParseDuration(initialTimeout)
|
||||
if err != nil {
|
||||
return defaultVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// set through storageClientOptions.
|
||||
type storageConfig struct {
|
||||
useJSONforReads bool
|
||||
readAPIWasSet bool
|
||||
useJSONforReads bool
|
||||
readAPIWasSet bool
|
||||
disableClientMetrics bool
|
||||
metricExporter *metric.Exporter
|
||||
metricInterval time.Duration
|
||||
manualReader *metric.ManualReader
|
||||
readStallTimeoutConfig *experimental.ReadStallTimeoutConfig
|
||||
grpcBidiReads bool
|
||||
grpcAppendableUploads bool
|
||||
}
|
||||
|
||||
// newStorageConfig generates a new storageConfig with all the given
|
||||
@@ -78,3 +139,134 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) {
|
||||
c.useJSONforReads = w.useJSON
|
||||
c.readAPIWasSet = true
|
||||
}
|
||||
|
||||
type withDisabledClientMetrics struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
disabledClientMetrics bool
|
||||
}
|
||||
|
||||
// WithDisabledClientMetrics is an option that may be passed to [NewClient].
|
||||
// gRPC metrics are enabled by default in the GCS client and will export the
|
||||
// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
|
||||
// [Google Cloud Monitoring]. The option is used to disable metrics.
|
||||
// Google Cloud Support can use this information to more quickly diagnose
|
||||
// problems related to GCS and gRPC.
|
||||
// Sending this data does not incur any billing charges, and requires minimal
|
||||
// CPU (a single RPC every few minutes) or memory (a few KiB to batch the
|
||||
// telemetry).
|
||||
//
|
||||
// The default is to enable client metrics. To opt-out of metrics collected use
|
||||
// this option.
|
||||
//
|
||||
// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
|
||||
// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
|
||||
// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
|
||||
func WithDisabledClientMetrics() option.ClientOption {
|
||||
return &withDisabledClientMetrics{disabledClientMetrics: true}
|
||||
}
|
||||
|
||||
func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) {
|
||||
c.disableClientMetrics = w.disabledClientMetrics
|
||||
}
|
||||
|
||||
type withMeterOptions struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
// set sampling interval
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
func withMetricInterval(interval time.Duration) option.ClientOption {
|
||||
return &withMeterOptions{interval: interval}
|
||||
}
|
||||
|
||||
func (w *withMeterOptions) ApplyStorageOpt(c *storageConfig) {
|
||||
c.metricInterval = w.interval
|
||||
}
|
||||
|
||||
type withMetricExporterConfig struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
// exporter override
|
||||
metricExporter *metric.Exporter
|
||||
}
|
||||
|
||||
func withMetricExporter(ex *metric.Exporter) option.ClientOption {
|
||||
return &withMetricExporterConfig{metricExporter: ex}
|
||||
}
|
||||
|
||||
func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) {
|
||||
c.metricExporter = w.metricExporter
|
||||
}
|
||||
|
||||
type withTestMetricReaderConfig struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
// reader override
|
||||
metricReader *metric.ManualReader
|
||||
}
|
||||
|
||||
func withTestMetricReader(ex *metric.ManualReader) option.ClientOption {
|
||||
return &withTestMetricReaderConfig{metricReader: ex}
|
||||
}
|
||||
|
||||
func (w *withTestMetricReaderConfig) ApplyStorageOpt(c *storageConfig) {
|
||||
c.manualReader = w.metricReader
|
||||
}
|
||||
|
||||
// WithReadStallTimeout is an option that may be passed to [NewClient].
|
||||
// It enables the client to retry the stalled read request, happens as part of
|
||||
// storage.Reader creation. As the name suggest, timeout is adjusted dynamically
|
||||
// based on past observed read-req latencies.
|
||||
//
|
||||
// This is only supported for the read operation and that too for http(XML) client.
|
||||
// Grpc read-operation will be supported soon.
|
||||
func withReadStallTimeout(rstc *experimental.ReadStallTimeoutConfig) option.ClientOption {
|
||||
// TODO (raj-prince): To keep separate dynamicDelay instance for different BucketHandle.
|
||||
// Currently, dynamicTimeout is kept at the client and hence shared across all the
|
||||
// BucketHandle, which is not the ideal state. As latency depends on location of VM
|
||||
// and Bucket, and read latency of different buckets may lie in different range.
|
||||
// Hence having a separate dynamicTimeout instance at BucketHandle level will
|
||||
// be better
|
||||
if rstc.Min == time.Duration(0) {
|
||||
rstc.Min = defaultDynamicReadReqMinTimeout
|
||||
}
|
||||
if rstc.TargetPercentile == 0 {
|
||||
rstc.TargetPercentile = defaultTargetPercentile
|
||||
}
|
||||
return &withReadStallTimeoutConfig{
|
||||
readStallTimeoutConfig: rstc,
|
||||
}
|
||||
}
|
||||
|
||||
type withReadStallTimeoutConfig struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
readStallTimeoutConfig *experimental.ReadStallTimeoutConfig
|
||||
}
|
||||
|
||||
func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) {
|
||||
config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig
|
||||
}
|
||||
|
||||
func withGRPCBidiReads() option.ClientOption {
|
||||
return &withGRPCBidiReadsConfig{}
|
||||
}
|
||||
|
||||
type withGRPCBidiReadsConfig struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
}
|
||||
|
||||
func (w *withGRPCBidiReadsConfig) ApplyStorageOpt(config *storageConfig) {
|
||||
config.grpcBidiReads = true
|
||||
}
|
||||
|
||||
func withZonalBucketAPIs() option.ClientOption {
|
||||
return &withZonalBucketAPIsConfig{}
|
||||
}
|
||||
|
||||
type withZonalBucketAPIsConfig struct {
|
||||
internaloption.EmbeddableAdapter
|
||||
}
|
||||
|
||||
func (w *withZonalBucketAPIsConfig) ApplyStorageOpt(config *storageConfig) {
|
||||
// Use both appendable upload semantics and bidi reads.
|
||||
config.grpcAppendableUploads = true
|
||||
config.grpcBidiReads = true
|
||||
}
|
||||
|
||||
187
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
187
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
@@ -19,9 +19,9 @@ import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
@@ -36,6 +36,7 @@ var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
// Each field is read-only.
|
||||
type ReaderObjectAttrs struct {
|
||||
// Size is the length of the object's content.
|
||||
// Size may be out of date for unfinalized objects.
|
||||
Size int64
|
||||
|
||||
// StartOffset is the byte offset within the object
|
||||
@@ -65,6 +66,19 @@ type ReaderObjectAttrs struct {
|
||||
// meaningful in the context of a particular generation of a
|
||||
// particular object.
|
||||
Metageneration int64
|
||||
|
||||
// CRC32C is the CRC32 checksum of the entire object's content using the
|
||||
// Castagnoli93 polynomial, if available.
|
||||
CRC32C uint32
|
||||
|
||||
// Decompressed is true if the object is stored as a gzip file and was
|
||||
// decompressed when read.
|
||||
// Objects are automatically decompressed if the object's metadata property
|
||||
// "Content-Encoding" is set to "gzip" or satisfies decompressive
|
||||
// transcoding as per https://cloud.google.com/storage/docs/transcoding.
|
||||
//
|
||||
// To prevent decompression on reads, use [ObjectHandle.ReadCompressed].
|
||||
Decompressed bool
|
||||
}
|
||||
|
||||
// NewReader creates a new Reader to read the contents of the
|
||||
@@ -91,7 +105,8 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
|
||||
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
|
||||
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
|
||||
// that file will be served back whole, regardless of the requested range as
|
||||
// Google Cloud Storage dictates.
|
||||
// Google Cloud Storage dictates. If decompressive transcoding occurs,
|
||||
// [Reader.Attrs.Decompressed] will be true.
|
||||
//
|
||||
// By default, reads are made using the Cloud Storage XML API. We recommend
|
||||
// using the JSON API instead, which can be done by setting [WithJSONReads]
|
||||
@@ -126,6 +141,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
||||
encryptionKey: o.encryptionKey,
|
||||
conds: o.conds,
|
||||
readCompressed: o.readCompressed,
|
||||
handle: &o.readHandle,
|
||||
}
|
||||
|
||||
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
|
||||
@@ -141,6 +157,51 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// NewMultiRangeDownloader creates a multi-range reader for an object.
|
||||
// Must be called on a gRPC client created using [NewGRPCClient].
|
||||
//
|
||||
// This uses the gRPC-specific bi-directional read API, which is in private
|
||||
// preview; please contact your account manager if interested. The option
|
||||
// [experimental.WithGRPCBidiReads] or [experimental.WithZonalBucketAPIs]
|
||||
// must be selected in order to use this API.
|
||||
func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiRangeDownloader, err error) {
|
||||
// This span covers the life of the reader. It is closed via the context
|
||||
// in Reader.Close.
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.MultiRangeDownloader")
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if o.conds != nil {
|
||||
if err := o.conds.validate("NewMultiRangeDownloader"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
|
||||
params := &newMultiRangeDownloaderParams{
|
||||
bucket: o.bucket,
|
||||
conds: o.conds,
|
||||
encryptionKey: o.encryptionKey,
|
||||
gen: o.gen,
|
||||
object: o.object,
|
||||
handle: &o.readHandle,
|
||||
}
|
||||
|
||||
r, err := o.c.tc.NewMultiRangeDownloader(ctx, params, opts...)
|
||||
|
||||
// Pass the context so that the span can be closed in MultiRangeDownloader.Close(), or close the
|
||||
// span now if there is an error.
|
||||
if err == nil {
|
||||
r.ctx = ctx
|
||||
} else {
|
||||
trace.EndSpan(ctx, err)
|
||||
}
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
// decompressiveTranscoding returns true if the request was served decompressed
|
||||
// and different than its original storage form. This happens when the "Content-Encoding"
|
||||
// header is "gzip".
|
||||
@@ -199,7 +260,7 @@ func setConditionsHeaders(headers http.Header, conds *Conditions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
||||
var emptyBody = io.NopCloser(strings.NewReader(""))
|
||||
|
||||
// Reader reads a Cloud Storage object.
|
||||
// It implements io.Reader.
|
||||
@@ -208,12 +269,17 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
||||
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
|
||||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
||||
type Reader struct {
|
||||
Attrs ReaderObjectAttrs
|
||||
Attrs ReaderObjectAttrs
|
||||
objectMetadata *map[string]string
|
||||
|
||||
seen, remain, size int64
|
||||
checkCRC bool // Did we check the CRC? This is now only used by tests.
|
||||
|
||||
reader io.ReadCloser
|
||||
ctx context.Context
|
||||
reader io.ReadCloser
|
||||
ctx context.Context
|
||||
mu sync.Mutex
|
||||
handle *ReadHandle
|
||||
unfinalized bool
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
@@ -246,6 +312,7 @@ func (r *Reader) WriteTo(w io.Writer) (int64, error) {
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
||||
// Size may be out of date for a Reader to an unfinalized object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.Size.
|
||||
func (r *Reader) Size() int64 {
|
||||
@@ -253,7 +320,11 @@ func (r *Reader) Size() int64 {
|
||||
}
|
||||
|
||||
// Remain returns the number of bytes left to read, or -1 if unknown.
|
||||
// Unfinalized objects will return -1.
|
||||
func (r *Reader) Remain() int64 {
|
||||
if r.unfinalized {
|
||||
return -1
|
||||
}
|
||||
return r.remain
|
||||
}
|
||||
|
||||
@@ -284,3 +355,107 @@ func (r *Reader) CacheControl() string {
|
||||
func (r *Reader) LastModified() (time.Time, error) {
|
||||
return r.Attrs.LastModified, nil
|
||||
}
|
||||
|
||||
// Metadata returns user-provided metadata, in key/value pairs.
|
||||
//
|
||||
// It can be nil if no metadata is present, or if the client uses the JSON
|
||||
// API for downloads. Only the XML and gRPC APIs support getting
|
||||
// custom metadata via the Reader; for JSON make a separate call to
|
||||
// ObjectHandle.Attrs.
|
||||
func (r *Reader) Metadata() map[string]string {
|
||||
if r.objectMetadata != nil {
|
||||
return *r.objectMetadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadHandle returns the read handle associated with an object.
|
||||
// ReadHandle will be periodically refreshed.
|
||||
//
|
||||
// ReadHandle requires the gRPC-specific bi-directional read API, which is in
|
||||
// private preview; please contact your account manager if interested.
|
||||
// Note that this only valid for gRPC and only with zonal buckets.
|
||||
func (r *Reader) ReadHandle() ReadHandle {
|
||||
if r.handle == nil {
|
||||
r.handle = &ReadHandle{}
|
||||
}
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
return (*r.handle)
|
||||
}
|
||||
|
||||
// MultiRangeDownloader reads a Cloud Storage object.
|
||||
//
|
||||
// Typically, a MultiRangeDownloader opens a stream to which we can add
|
||||
// different ranges to read from the object.
|
||||
//
|
||||
// This API is currently in preview and is not yet available for general use.
|
||||
type MultiRangeDownloader struct {
|
||||
Attrs ReaderObjectAttrs
|
||||
reader multiRangeDownloader
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
type multiRangeDownloader interface {
|
||||
add(output io.Writer, offset, limit int64, callback func(int64, int64, error))
|
||||
wait()
|
||||
close() error
|
||||
getHandle() []byte
|
||||
error() error
|
||||
}
|
||||
|
||||
// Add adds a new range to MultiRangeDownloader.
|
||||
//
|
||||
// The offset for the first byte to return in the read, relative to the start
|
||||
// of the object.
|
||||
//
|
||||
// A negative offset value will be interpreted as the number of bytes from the
|
||||
// end of the object to be returned. Requesting a negative offset with magnitude
|
||||
// larger than the size of the object will return the entire object. An offset
|
||||
// larger than the size of the object will result in an OutOfRange error.
|
||||
//
|
||||
// A limit of zero indicates that there is no limit, and a negative limit will
|
||||
// cause an error.
|
||||
//
|
||||
// This will initiate the read range but is non-blocking; call callback to
|
||||
// process the result. Add is thread-safe and can be called simultaneously
|
||||
// from different goroutines.
|
||||
//
|
||||
// Callback will be called with the offset, length of data read, and error
|
||||
// of the read. Note that the length of the data read may be less than the
|
||||
// requested length if the end of the object is reached.
|
||||
func (mrd *MultiRangeDownloader) Add(output io.Writer, offset, length int64, callback func(int64, int64, error)) {
|
||||
mrd.reader.add(output, offset, length, callback)
|
||||
}
|
||||
|
||||
// Close the MultiRangeDownloader. It must be called when done reading.
|
||||
// Adding new ranges after this has been called will cause an error.
|
||||
//
|
||||
// This will immediately close the stream and can result in a
|
||||
// "stream closed early" error if a response for a range is still not processed.
|
||||
// Call [MultiRangeDownloader.Wait] to avoid this error.
|
||||
func (mrd *MultiRangeDownloader) Close() error {
|
||||
err := mrd.reader.close()
|
||||
trace.EndSpan(mrd.ctx, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for all the responses to process on the stream.
|
||||
// Adding new ranges after this has been called will cause an error.
|
||||
// Wait will wait for all callbacks to finish.
|
||||
func (mrd *MultiRangeDownloader) Wait() {
|
||||
mrd.reader.wait()
|
||||
}
|
||||
|
||||
// GetHandle returns the read handle. This can be used to further speed up the
|
||||
// follow up read if the same object is read through a different stream.
|
||||
func (mrd *MultiRangeDownloader) GetHandle() []byte {
|
||||
return mrd.reader.getHandle()
|
||||
}
|
||||
|
||||
// Error returns an error if the MultiRangeDownloader is in a permanent failure
|
||||
// state. It returns a nil error if the MultiRangeDownloader is open and can be
|
||||
// used.
|
||||
func (mrd *MultiRangeDownloader) Error() error {
|
||||
return mrd.reader.error()
|
||||
}
|
||||
|
||||
386
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
386
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
@@ -38,18 +38,24 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"cloud.google.com/go/auth"
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal"
|
||||
"cloud.google.com/go/storage/internal/apiv2/storagepb"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/api/transport"
|
||||
htransport "google.golang.org/api/transport/http"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/experimental/stats"
|
||||
"google.golang.org/grpc/stats/opentelemetry"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||
@@ -60,15 +66,17 @@ import (
|
||||
var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true}
|
||||
|
||||
var (
|
||||
// ErrBucketNotExist indicates that the bucket does not exist.
|
||||
// ErrBucketNotExist indicates that the bucket does not exist. It should be
|
||||
// checked for using [errors.Is] instead of direct equality.
|
||||
ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
|
||||
// ErrObjectNotExist indicates that the object does not exist.
|
||||
// ErrObjectNotExist indicates that the object does not exist. It should be
|
||||
// checked for using [errors.Is] instead of direct equality.
|
||||
ErrObjectNotExist = errors.New("storage: object doesn't exist")
|
||||
// errMethodNotSupported indicates that the method called is not currently supported by the client.
|
||||
// TODO: Export this error when launching the transport-agnostic client.
|
||||
errMethodNotSupported = errors.New("storage: method is not currently supported")
|
||||
// errMethodNotValid indicates that given HTTP method is not valid.
|
||||
errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
|
||||
// errSignedURLMethodNotValid indicates that given HTTP method is not valid.
|
||||
errSignedURLMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
|
||||
)
|
||||
|
||||
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version)
|
||||
@@ -112,11 +120,23 @@ type Client struct {
|
||||
// xmlHost is the default host used for XML requests.
|
||||
xmlHost string
|
||||
// May be nil.
|
||||
creds *google.Credentials
|
||||
creds *auth.Credentials
|
||||
retry *retryConfig
|
||||
|
||||
// tc is the transport-agnostic client implemented with either gRPC or HTTP.
|
||||
tc storageClient
|
||||
|
||||
// Option to use gRRPC appendable upload API was set.
|
||||
grpcAppendableUploads bool
|
||||
}
|
||||
|
||||
// credsJSON returns the raw JSON of the Client's creds and true, or an empty slice
|
||||
// and false if no credentials JSON is available.
|
||||
func (c Client) credsJSON() ([]byte, bool) {
|
||||
if c.creds != nil && len(c.creds.JSON()) > 0 {
|
||||
return c.creds.JSON(), true
|
||||
}
|
||||
return []byte{}, false
|
||||
}
|
||||
|
||||
// NewClient creates a new Google Cloud Storage client using the HTTP transport.
|
||||
@@ -129,7 +149,7 @@ type Client struct {
|
||||
// You may configure the client by passing in options from the [google.golang.org/api/option]
|
||||
// package. You may also use options defined in this package, such as [WithJSONReads].
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
var creds *google.Credentials
|
||||
var creds *auth.Credentials
|
||||
|
||||
// In general, it is recommended to use raw.NewService instead of htransport.NewClient
|
||||
// since raw.NewService configures the correct default endpoints when initializing the
|
||||
@@ -145,14 +165,15 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
||||
opts = append(opts, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"),
|
||||
internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"),
|
||||
internaloption.WithDefaultUniverseDomain("googleapis.com"),
|
||||
internaloption.EnableNewAuthLibrary(),
|
||||
)
|
||||
|
||||
// Don't error out here. The user may have passed in their own HTTP
|
||||
// client which does not auth with ADC or other common conventions.
|
||||
c, err := transport.Creds(ctx, opts...)
|
||||
c, err := internaloption.AuthCreds(ctx, opts)
|
||||
if err == nil {
|
||||
creds = c
|
||||
opts = append(opts, internaloption.WithCredentials(creds))
|
||||
opts = append(opts, option.WithAuthCredentials(creds))
|
||||
}
|
||||
} else {
|
||||
var hostURL *url.URL
|
||||
@@ -214,14 +235,11 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
||||
|
||||
// NewGRPCClient creates a new Storage client using the gRPC transport and API.
|
||||
// Client methods which have not been implemented in gRPC will return an error.
|
||||
// In particular, methods for Cloud Pub/Sub notifications are not supported.
|
||||
// In particular, methods for Cloud Pub/Sub notifications, Service Account HMAC
|
||||
// keys, and ServiceAccount are not supported.
|
||||
// Using a non-default universe domain is also not supported with the Storage
|
||||
// gRPC client.
|
||||
//
|
||||
// The storage gRPC API is still in preview and not yet publicly available.
|
||||
// If you would like to use the API, please first contact your GCP account rep to
|
||||
// request access. The API may be subject to breaking changes.
|
||||
//
|
||||
// Clients should be reused instead of created as needed. The methods of Client
|
||||
// are safe for concurrent use by multiple goroutines.
|
||||
//
|
||||
@@ -232,8 +250,66 @@ func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Client{
|
||||
tc: tc,
|
||||
grpcAppendableUploads: tc.config.grpcAppendableUploads,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &Client{tc: tc}, nil
|
||||
// CheckDirectConnectivitySupported checks if gRPC direct connectivity
|
||||
// is available for a specific bucket from the environment where the client
|
||||
// is running. A `nil` error represents Direct Connectivity was detected.
|
||||
// Direct connectivity is expected to be available when running from inside
|
||||
// GCP and connecting to a bucket in the same region.
|
||||
//
|
||||
// Experimental helper that's subject to change.
|
||||
//
|
||||
// You can pass in [option.ClientOption] you plan on passing to [NewGRPCClient]
|
||||
func CheckDirectConnectivitySupported(ctx context.Context, bucket string, opts ...option.ClientOption) error {
|
||||
view := metric.NewView(
|
||||
metric.Instrument{
|
||||
Name: "grpc.client.attempt.duration",
|
||||
Kind: metric.InstrumentKindHistogram,
|
||||
},
|
||||
metric.Stream{AttributeFilter: attribute.NewAllowKeysFilter("grpc.lb.locality")},
|
||||
)
|
||||
mr := metric.NewManualReader()
|
||||
provider := metric.NewMeterProvider(metric.WithReader(mr), metric.WithView(view))
|
||||
// Provider handles shutting down ManualReader
|
||||
defer provider.Shutdown(ctx)
|
||||
mo := opentelemetry.MetricsOptions{
|
||||
MeterProvider: provider,
|
||||
Metrics: stats.NewMetrics("grpc.client.attempt.duration"),
|
||||
OptionalLabels: []string{"grpc.lb.locality"},
|
||||
}
|
||||
combinedOpts := append(opts, WithDisabledClientMetrics(), option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})))
|
||||
client, err := NewGRPCClient(ctx, combinedOpts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage.NewGRPCClient: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
if _, err = client.Bucket(bucket).Attrs(ctx); err != nil {
|
||||
return fmt.Errorf("Bucket.Attrs: %w", err)
|
||||
}
|
||||
// Call manual reader to collect metric
|
||||
rm := metricdata.ResourceMetrics{}
|
||||
if err = mr.Collect(context.Background(), &rm); err != nil {
|
||||
return fmt.Errorf("ManualReader.Collect: %w", err)
|
||||
}
|
||||
for _, sm := range rm.ScopeMetrics {
|
||||
for _, m := range sm.Metrics {
|
||||
if m.Name == "grpc.client.attempt.duration" {
|
||||
hist := m.Data.(metricdata.Histogram[float64])
|
||||
for _, d := range hist.DataPoints {
|
||||
v, present := d.Attributes.Value("grpc.lb.locality")
|
||||
if present && v.AsString() != "" && v.AsString() != "{}" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.New("storage: direct connectivity not detected")
|
||||
}
|
||||
|
||||
// Close closes the Client.
|
||||
@@ -631,7 +707,7 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error {
|
||||
}
|
||||
opts.Method = strings.ToUpper(opts.Method)
|
||||
if _, ok := signedURLMethods[opts.Method]; !ok {
|
||||
return errMethodNotValid
|
||||
return errSignedURLMethodNotValid
|
||||
}
|
||||
if opts.Expires.IsZero() {
|
||||
return errors.New("storage: missing required expires option")
|
||||
@@ -879,6 +955,9 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
|
||||
return u.String(), nil
|
||||
}
|
||||
|
||||
// ReadHandle associated with the object. This is periodically refreshed.
|
||||
type ReadHandle []byte
|
||||
|
||||
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
|
||||
// Use BucketHandle.Object to get a handle.
|
||||
type ObjectHandle struct {
|
||||
@@ -894,6 +973,23 @@ type ObjectHandle struct {
|
||||
retry *retryConfig
|
||||
overrideRetention *bool
|
||||
softDeleted bool
|
||||
readHandle ReadHandle
|
||||
}
|
||||
|
||||
// ReadHandle returns a new ObjectHandle that uses the ReadHandle to open the objects.
|
||||
//
|
||||
// Objects that have already been opened can be opened an additional time,
|
||||
// using a read handle returned in the response, at lower latency.
|
||||
// This produces the exact same object and generation and does not check if
|
||||
// the generation is still the newest one.
|
||||
// Note that this will be a noop unless it's set on a gRPC client on buckets with
|
||||
// bi-directional read API access.
|
||||
// Also note that you can get a ReadHandle only via calling reader.ReadHandle() on a
|
||||
// previous read of the same object.
|
||||
func (o *ObjectHandle) ReadHandle(r ReadHandle) *ObjectHandle {
|
||||
o2 := *o
|
||||
o2.readHandle = r
|
||||
return &o2
|
||||
}
|
||||
|
||||
// ACL provides access to the object's access control list.
|
||||
@@ -941,8 +1037,8 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
|
||||
// Attrs returns meta information about the object.
|
||||
// ErrObjectNotExist will be returned if the object is not found.
|
||||
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Object.Attrs")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
@@ -955,8 +1051,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
|
||||
// ObjectAttrsToUpdate docs for details on treatment of zero values.
|
||||
// ErrObjectNotExist will be returned if the object is not found.
|
||||
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
ctx, _ = startSpan(ctx, "Object.Update")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
@@ -1025,7 +1121,9 @@ type ObjectAttrsToUpdate struct {
|
||||
}
|
||||
|
||||
// Delete deletes the single specified object.
|
||||
func (o *ObjectHandle) Delete(ctx context.Context) error {
|
||||
func (o *ObjectHandle) Delete(ctx context.Context) (err error) {
|
||||
ctx, _ = startSpan(ctx, "Object.Delete")
|
||||
defer func() { endSpan(ctx, err) }()
|
||||
if err := o.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1098,6 +1196,38 @@ func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*Obje
|
||||
}, sOpts...)
|
||||
}
|
||||
|
||||
// Move changes the name of the object to the destination name.
|
||||
// It can only be used to rename an object within the same bucket. The
|
||||
// bucket must have [HierarchicalNamespace] enabled to use this method.
|
||||
//
|
||||
// Any preconditions set on the ObjectHandle will be applied for the source
|
||||
// object. Set preconditions on the destination object using
|
||||
// [MoveObjectDestination.Conditions].
|
||||
//
|
||||
// This API is in preview and is not yet publicly available.
|
||||
func (o *ObjectHandle) Move(ctx context.Context, destination MoveObjectDestination) (*ObjectAttrs, error) {
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sOpts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
return o.c.tc.MoveObject(ctx, &moveObjectParams{
|
||||
bucket: o.bucket,
|
||||
srcObject: o.object,
|
||||
dstObject: destination.Object,
|
||||
srcConds: o.conds,
|
||||
dstConds: destination.Conditions,
|
||||
encryptionKey: o.encryptionKey,
|
||||
}, sOpts...)
|
||||
}
|
||||
|
||||
// MoveObjectDestination provides the destination object name and (optional) preconditions
|
||||
// for [ObjectHandle.Move].
|
||||
type MoveObjectDestination struct {
|
||||
Object string
|
||||
Conditions *Conditions
|
||||
}
|
||||
|
||||
// NewWriter returns a storage Writer that writes to the GCS object
|
||||
// associated with this ObjectHandle.
|
||||
//
|
||||
@@ -1124,9 +1254,87 @@ func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
|
||||
donec: make(chan struct{}),
|
||||
ObjectAttrs: ObjectAttrs{Name: o.object},
|
||||
ChunkSize: googleapi.DefaultUploadChunkSize,
|
||||
Append: o.c.grpcAppendableUploads,
|
||||
}
|
||||
}
|
||||
|
||||
// NewWriterFromAppendableObject opens a new Writer to an object which has been
|
||||
// partially flushed to GCS, but not finalized. It returns the Writer as well
|
||||
// as the current end offset of the object. All bytes written will be appended
|
||||
// continuing from the offset.
|
||||
//
|
||||
// Generation must be set on the ObjectHandle or an error will be returned.
|
||||
//
|
||||
// Writer fields such as ChunkSize or ChunkRetryDuration can be set only
|
||||
// by setting the equivalent field in [AppendableWriterOpts]. Attributes set
|
||||
// on the returned Writer will not be honored since the stream to GCS has
|
||||
// already been opened. Some fields such as ObjectAttrs and checksums cannot
|
||||
// be set on a takeover for append.
|
||||
//
|
||||
// It is the caller's responsibility to call Close when writing is complete to
|
||||
// close the stream.
|
||||
// Calling Close or Flush is necessary to sync any data in the pipe to GCS.
|
||||
//
|
||||
// The returned Writer is not safe to use across multiple go routines. In
|
||||
// addition, if you attempt to append to the same object from multiple
|
||||
// Writers at the same time, an error will be returned on Flush or Close.
|
||||
//
|
||||
// NewWriterFromAppendableObject is supported only for gRPC clients and only for
|
||||
// objects which were created append semantics and not finalized.
|
||||
// This feature is in preview and is not yet available for general use.
|
||||
func (o *ObjectHandle) NewWriterFromAppendableObject(ctx context.Context, opts *AppendableWriterOpts) (*Writer, int64, error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer")
|
||||
if o.gen < 0 {
|
||||
return nil, 0, errors.New("storage: ObjectHandle.Generation must be set to use NewWriterFromAppendableObject")
|
||||
}
|
||||
w := &Writer{
|
||||
ctx: ctx,
|
||||
o: o,
|
||||
donec: make(chan struct{}),
|
||||
ObjectAttrs: ObjectAttrs{Name: o.object},
|
||||
Append: true,
|
||||
}
|
||||
opts.apply(w)
|
||||
if w.ChunkSize == 0 {
|
||||
w.ChunkSize = googleapi.DefaultUploadChunkSize
|
||||
}
|
||||
err := w.openWriter()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return w, w.takeoverOffset, nil
|
||||
}
|
||||
|
||||
// AppendableWriterOpts provides options to set on a Writer initialized
|
||||
// by [NewWriterFromAppendableObject]. Writer options must be set via this
|
||||
// struct rather than being modified on the returned Writer. All Writer
|
||||
// fields not present in this struct cannot be set when taking over an
|
||||
// appendable object.
|
||||
//
|
||||
// AppendableWriterOpts is supported only for gRPC clients and only for
|
||||
// objects which were created append semantics and not finalized.
|
||||
// This feature is in preview and is not yet available for general use.
|
||||
type AppendableWriterOpts struct {
|
||||
// ChunkSize: See Writer.ChunkSize.
|
||||
ChunkSize int
|
||||
// ChunkRetryDeadline: See Writer.ChunkRetryDeadline.
|
||||
ChunkRetryDeadline time.Duration
|
||||
// ProgressFunc: See Writer.ProgressFunc.
|
||||
ProgressFunc func(int64)
|
||||
// FinalizeOnClose: See Writer.FinalizeOnClose.
|
||||
FinalizeOnClose bool
|
||||
}
|
||||
|
||||
func (opts *AppendableWriterOpts) apply(w *Writer) {
|
||||
if opts == nil {
|
||||
return
|
||||
}
|
||||
w.ChunkRetryDeadline = opts.ChunkRetryDeadline
|
||||
w.ProgressFunc = opts.ProgressFunc
|
||||
w.ChunkSize = opts.ChunkSize
|
||||
w.FinalizeOnClose = opts.FinalizeOnClose
|
||||
}
|
||||
|
||||
func (o *ObjectHandle) validate() error {
|
||||
if o.bucket == "" {
|
||||
return errors.New("storage: bucket name is empty")
|
||||
@@ -1218,6 +1426,7 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
|
||||
Acl: toProtoObjectACL(o.ACL),
|
||||
Metadata: o.Metadata,
|
||||
CreateTime: toProtoTimestamp(o.Created),
|
||||
FinalizeTime: toProtoTimestamp(o.Finalized),
|
||||
CustomTime: toProtoTimestamp(o.CustomTime),
|
||||
DeleteTime: toProtoTimestamp(o.Deleted),
|
||||
RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime),
|
||||
@@ -1380,6 +1589,10 @@ type ObjectAttrs struct {
|
||||
// Created is the time the object was created. This field is read-only.
|
||||
Created time.Time
|
||||
|
||||
// Finalized is the time the object contents were finalized. This may differ
|
||||
// from Created for appendable objects. This field is read-only.
|
||||
Finalized time.Time
|
||||
|
||||
// Deleted is the time the object was deleted.
|
||||
// If not deleted, it is the zero value. This field is read-only.
|
||||
Deleted time.Time
|
||||
@@ -1544,6 +1757,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
|
||||
CustomerKeySHA256: sha256,
|
||||
KMSKeyName: o.KmsKeyName,
|
||||
Created: convertTime(o.TimeCreated),
|
||||
Finalized: convertTime(o.TimeFinalized),
|
||||
Deleted: convertTime(o.TimeDeleted),
|
||||
Updated: convertTime(o.Updated),
|
||||
Etag: o.Etag,
|
||||
@@ -1583,6 +1797,7 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
|
||||
CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()),
|
||||
KMSKeyName: o.GetKmsKey(),
|
||||
Created: convertProtoTime(o.GetCreateTime()),
|
||||
Finalized: convertProtoTime(o.GetFinalizeTime()),
|
||||
Deleted: convertProtoTime(o.GetDeleteTime()),
|
||||
Updated: convertProtoTime(o.GetUpdateTime()),
|
||||
CustomTime: convertProtoTime(o.GetCustomTime()),
|
||||
@@ -1695,7 +1910,6 @@ type Query struct {
|
||||
|
||||
// IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of
|
||||
// prefixes returned by the query. Only applicable if Delimiter is set to /.
|
||||
// IncludeFoldersAsPrefixes is not yet implemented in the gRPC API.
|
||||
IncludeFoldersAsPrefixes bool
|
||||
|
||||
// SoftDeleted indicates whether to list soft-deleted objects.
|
||||
@@ -1731,6 +1945,7 @@ var attrToFieldMap = map[string]string{
|
||||
"CustomerKeySHA256": "customerEncryption",
|
||||
"KMSKeyName": "kmsKeyName",
|
||||
"Created": "timeCreated",
|
||||
"Finalized": "timeFinalized",
|
||||
"Deleted": "timeDeleted",
|
||||
"Updated": "updated",
|
||||
"Etag": "etag",
|
||||
@@ -1759,6 +1974,7 @@ var attrToProtoFieldMap = map[string]string{
|
||||
"Deleted": "delete_time",
|
||||
"ContentType": "content_type",
|
||||
"Created": "create_time",
|
||||
"Finalized": "finalize_time",
|
||||
"CRC32C": "checksums.crc32c",
|
||||
"MD5": "checksums.md5_hash",
|
||||
"Updated": "update_time",
|
||||
@@ -1998,56 +2214,91 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
|
||||
// applySourceConds modifies the provided call using the conditions in conds.
|
||||
// call is something that quacks like a *raw.WhateverCall.
|
||||
// This is specifically for calls like Rewrite and Move which have a source and destination
|
||||
// object.
|
||||
func applySourceConds(method string, gen int64, conds *Conditions, call interface{}) error {
|
||||
cval := reflect.ValueOf(call)
|
||||
if gen >= 0 {
|
||||
call.SourceGeneration(gen)
|
||||
if !setSourceGeneration(cval, gen) {
|
||||
return fmt.Errorf("storage: %s: source generation not supported", method)
|
||||
}
|
||||
}
|
||||
if conds == nil {
|
||||
return nil
|
||||
}
|
||||
if err := conds.validate("CopyTo source"); err != nil {
|
||||
if err := conds.validate(method); err != nil {
|
||||
return err
|
||||
}
|
||||
switch {
|
||||
case conds.GenerationMatch != 0:
|
||||
call.IfSourceGenerationMatch(conds.GenerationMatch)
|
||||
if !setIfSourceGenerationMatch(cval, conds.GenerationMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method)
|
||||
}
|
||||
case conds.GenerationNotMatch != 0:
|
||||
call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
|
||||
if !setIfSourceGenerationNotMatch(cval, conds.GenerationNotMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceGenerationNotMatch not supported", method)
|
||||
}
|
||||
case conds.DoesNotExist:
|
||||
call.IfSourceGenerationMatch(0)
|
||||
if !setIfSourceGenerationMatch(cval, int64(0)) {
|
||||
return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case conds.MetagenerationMatch != 0:
|
||||
call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
|
||||
if !setIfSourceMetagenerationMatch(cval, conds.MetagenerationMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceMetagenerationMatch not supported", method)
|
||||
}
|
||||
case conds.MetagenerationNotMatch != 0:
|
||||
call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
|
||||
if !setIfSourceMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error {
|
||||
// applySourceCondsProto validates and attempts to set the conditions on a protobuf
|
||||
// message using protobuf reflection. This is specifically for RPCs which have separate
|
||||
// preconditions for source and destination objects (e.g. Rewrite and Move).
|
||||
func applySourceCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error {
|
||||
rmsg := msg.ProtoReflect()
|
||||
|
||||
if gen >= 0 {
|
||||
call.SourceGeneration = gen
|
||||
if !setConditionProtoField(rmsg, "source_generation", gen) {
|
||||
return fmt.Errorf("storage: %s: generation not supported", method)
|
||||
}
|
||||
}
|
||||
if conds == nil {
|
||||
return nil
|
||||
}
|
||||
if err := conds.validate("CopyTo source"); err != nil {
|
||||
if err := conds.validate(method); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case conds.GenerationMatch != 0:
|
||||
call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch)
|
||||
if !setConditionProtoField(rmsg, "if_source_generation_match", conds.GenerationMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method)
|
||||
}
|
||||
case conds.GenerationNotMatch != 0:
|
||||
call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
|
||||
if !setConditionProtoField(rmsg, "if_source_generation_not_match", conds.GenerationNotMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceGenerationNotMatch not supported", method)
|
||||
}
|
||||
case conds.DoesNotExist:
|
||||
call.IfSourceGenerationMatch = proto.Int64(0)
|
||||
if !setConditionProtoField(rmsg, "if_source_generation_match", int64(0)) {
|
||||
return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case conds.MetagenerationMatch != 0:
|
||||
call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
|
||||
if !setConditionProtoField(rmsg, "if_source_metageneration_match", conds.MetagenerationMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceMetagenerationMatch not supported", method)
|
||||
}
|
||||
case conds.MetagenerationNotMatch != 0:
|
||||
call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
|
||||
if !setConditionProtoField(rmsg, "if_source_metageneration_not_match", conds.MetagenerationNotMatch) {
|
||||
return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -2086,6 +2337,27 @@ func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
|
||||
return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value)
|
||||
}
|
||||
|
||||
// More methods to set source object precondition fields (used by Rewrite and Move APIs).
|
||||
func setSourceGeneration(cval reflect.Value, value interface{}) bool {
|
||||
return setCondition(cval.MethodByName("SourceGeneration"), value)
|
||||
}
|
||||
|
||||
func setIfSourceGenerationMatch(cval reflect.Value, value interface{}) bool {
|
||||
return setCondition(cval.MethodByName("IfSourceGenerationMatch"), value)
|
||||
}
|
||||
|
||||
func setIfSourceGenerationNotMatch(cval reflect.Value, value interface{}) bool {
|
||||
return setCondition(cval.MethodByName("IfSourceGenerationNotMatch"), value)
|
||||
}
|
||||
|
||||
func setIfSourceMetagenerationMatch(cval reflect.Value, value interface{}) bool {
|
||||
return setCondition(cval.MethodByName("IfSourceMetagenerationMatch"), value)
|
||||
}
|
||||
|
||||
func setIfSourceMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
|
||||
return setCondition(cval.MethodByName("IfSourceMetagenerationNotMatch"), value)
|
||||
}
|
||||
|
||||
func setCondition(setter reflect.Value, value interface{}) bool {
|
||||
if setter.IsValid() {
|
||||
setter.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||
@@ -2255,6 +2527,10 @@ type retryConfig struct {
|
||||
policy RetryPolicy
|
||||
shouldRetry func(err error) bool
|
||||
maxAttempts *int
|
||||
// maxRetryDuration, if set, specifies a deadline after which the request
|
||||
// will no longer be retried. A value of 0 allows infinite retries.
|
||||
// maxRetryDuration is currently only set by Writer.ChunkRetryDeadline.
|
||||
maxRetryDuration time.Duration
|
||||
}
|
||||
|
||||
func (r *retryConfig) clone() *retryConfig {
|
||||
@@ -2272,10 +2548,11 @@ func (r *retryConfig) clone() *retryConfig {
|
||||
}
|
||||
|
||||
return &retryConfig{
|
||||
backoff: bo,
|
||||
policy: r.policy,
|
||||
shouldRetry: r.shouldRetry,
|
||||
maxAttempts: r.maxAttempts,
|
||||
backoff: bo,
|
||||
policy: r.policy,
|
||||
shouldRetry: r.shouldRetry,
|
||||
maxAttempts: r.maxAttempts,
|
||||
maxRetryDuration: r.maxRetryDuration,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2350,6 +2627,7 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec
|
||||
}
|
||||
|
||||
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
|
||||
// Note: gRPC is not supported.
|
||||
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
|
||||
o := makeStorageOpts(true, c.retry, "")
|
||||
return c.tc.GetServiceAccount(ctx, projectID, o...)
|
||||
@@ -2448,3 +2726,25 @@ func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Mess
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatObjectErr checks if the provided error is NotFound and if so, wraps
|
||||
// it in an ErrObjectNotExist error. If not, formatObjectErr has no effect.
|
||||
func formatObjectErr(err error) error {
|
||||
var e *googleapi.Error
|
||||
if s, ok := status.FromError(err); (ok && s.Code() == codes.NotFound) ||
|
||||
(errors.As(err, &e) && e.Code == http.StatusNotFound) {
|
||||
return fmt.Errorf("%w: %w", ErrObjectNotExist, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// formatBucketError checks if the provided error is NotFound and if so, wraps
|
||||
// it in an ErrBucketNotExist error. If not, formatBucketError has no effect.
|
||||
func formatBucketError(err error) error {
|
||||
var e *googleapi.Error
|
||||
if s, ok := status.FromError(err); (ok && s.Code() == codes.NotFound) ||
|
||||
(errors.As(err, &e) && e.Code == http.StatusNotFound) {
|
||||
return fmt.Errorf("%w: %w", ErrBucketNotExist, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
98
vendor/cloud.google.com/go/storage/trace.go
generated
vendored
Normal file
98
vendor/cloud.google.com/go/storage/trace.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
// Copyright 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
internalTrace "cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
otelcodes "go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
storageOtelTracingDevVar = "GO_STORAGE_DEV_OTEL_TRACING"
|
||||
defaultTracerName = "cloud.google.com/go/storage"
|
||||
gcpClientRepo = "googleapis/google-cloud-go"
|
||||
gcpClientArtifact = "cloud.google.com/go/storage"
|
||||
)
|
||||
|
||||
// isOTelTracingDevEnabled checks the development flag until experimental feature is launched.
|
||||
// TODO: Remove development flag upon experimental launch.
|
||||
func isOTelTracingDevEnabled() bool {
|
||||
return os.Getenv(storageOtelTracingDevVar) == "true"
|
||||
}
|
||||
|
||||
func tracer() trace.Tracer {
|
||||
return otel.Tracer(defaultTracerName, trace.WithInstrumentationVersion(internal.Version))
|
||||
}
|
||||
|
||||
// startSpan creates a span and a context.Context containing the newly-created span.
|
||||
// If the context.Context provided in `ctx` contains a span then the newly-created
|
||||
// span will be a child of that span, otherwise it will be a root span.
|
||||
func startSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
|
||||
name = appendPackageName(name)
|
||||
// TODO: Remove internalTrace upon experimental launch.
|
||||
if !isOTelTracingDevEnabled() {
|
||||
ctx = internalTrace.StartSpan(ctx, name)
|
||||
return ctx, nil
|
||||
}
|
||||
opts = append(opts, getCommonTraceOptions()...)
|
||||
ctx, span := tracer().Start(ctx, name, opts...)
|
||||
return ctx, span
|
||||
}
|
||||
|
||||
// endSpan retrieves the current span from ctx and completes the span.
|
||||
// If an error occurs, the error is recorded as an exception span event for this span,
|
||||
// and the span status is set in the form of a code and a description.
|
||||
func endSpan(ctx context.Context, err error) {
|
||||
// TODO: Remove internalTrace upon experimental launch.
|
||||
if !isOTelTracingDevEnabled() {
|
||||
internalTrace.EndSpan(ctx, err)
|
||||
} else {
|
||||
span := trace.SpanFromContext(ctx)
|
||||
if err != nil {
|
||||
span.SetStatus(otelcodes.Error, err.Error())
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}
|
||||
}
|
||||
|
||||
// getCommonTraceOptions makes a SpanStartOption with common attributes.
|
||||
func getCommonTraceOptions() []trace.SpanStartOption {
|
||||
opts := []trace.SpanStartOption{
|
||||
trace.WithAttributes(getCommonAttributes()...),
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// getCommonAttributes includes the common attributes used for Cloud Trace adoption tracking.
|
||||
func getCommonAttributes() []attribute.KeyValue {
|
||||
return []attribute.KeyValue{
|
||||
attribute.String("gcp.client.version", internal.Version),
|
||||
attribute.String("gcp.client.repo", gcpClientRepo),
|
||||
attribute.String("gcp.client.artifact", gcpClientArtifact),
|
||||
}
|
||||
}
|
||||
|
||||
func appendPackageName(spanName string) string {
|
||||
return fmt.Sprintf("%s.%s", gcpClientArtifact, spanName)
|
||||
}
|
||||
164
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
164
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
@@ -26,6 +26,16 @@ import (
|
||||
"cloud.google.com/go/internal/trace"
|
||||
)
|
||||
|
||||
// Interface internalWriter wraps low-level implementations which may vary
|
||||
// across client types.
|
||||
type internalWriter interface {
|
||||
io.WriteCloser
|
||||
Flush() (int64, error)
|
||||
// CloseWithError terminates the write operation and sets its status.
|
||||
// Note that CloseWithError always returns nil.
|
||||
CloseWithError(error) error
|
||||
}
|
||||
|
||||
// A Writer writes a Cloud Storage object.
|
||||
type Writer struct {
|
||||
// ObjectAttrs are optional attributes to set on the object. Any attributes
|
||||
@@ -77,22 +87,58 @@ type Writer struct {
|
||||
// For uploads of larger files, the Writer will attempt to retry if the
|
||||
// request to upload a particular chunk fails with a transient error.
|
||||
// If a single chunk has been attempting to upload for longer than this
|
||||
// deadline and the request fails, it will no longer be retried, and the error
|
||||
// will be returned to the caller. This is only applicable for files which are
|
||||
// large enough to require a multi-chunk resumable upload. The default value
|
||||
// is 32s. Users may want to pick a longer deadline if they are using larger
|
||||
// values for ChunkSize or if they expect to have a slow or unreliable
|
||||
// internet connection.
|
||||
// deadline and the request fails, it will no longer be retried, and the
|
||||
// error will be returned to the caller. This is only applicable for files
|
||||
// which are large enough to require a multi-chunk resumable upload. The
|
||||
// default value is 32s. Users may want to pick a longer deadline if they
|
||||
// are using larger values for ChunkSize or if they expect to have a slow or
|
||||
// unreliable internet connection.
|
||||
//
|
||||
// To set a deadline on the entire upload, use context timeout or
|
||||
// cancellation.
|
||||
ChunkRetryDeadline time.Duration
|
||||
|
||||
// ChunkTransferTimeout sets a per-chunk request timeout for resumable uploads.
|
||||
//
|
||||
// For resumable uploads, the Writer will terminate the request and attempt
|
||||
// a retry if the request to upload a particular chunk stalls for longer than
|
||||
// this duration. Retries may continue until the ChunkRetryDeadline is reached.
|
||||
//
|
||||
// ChunkTransferTimeout is not applicable to uploads made using a gRPC client.
|
||||
//
|
||||
// The default value is no timeout.
|
||||
ChunkTransferTimeout time.Duration
|
||||
|
||||
// ForceEmptyContentType is an optional parameter that is used to disable
|
||||
// auto-detection of Content-Type. By default, if a blank Content-Type
|
||||
// is provided, then gax.DetermineContentType is called to sniff the type.
|
||||
ForceEmptyContentType bool
|
||||
|
||||
// Append is a parameter to indicate whether the writer should use appendable
|
||||
// object semantics for the new object generation. Appendable objects are
|
||||
// visible on the first Write() call, and can be appended to until they are
|
||||
// finalized. If Writer.FinalizeOnClose is set to true, the object is finalized
|
||||
// when Writer.Close() is called; otherwise, the object is left unfinalized
|
||||
// and can be appended to later.
|
||||
//
|
||||
// Defaults to false unless the experiemental WithZonalBucketAPIs option was
|
||||
// set.
|
||||
//
|
||||
// Append is only supported for gRPC. This feature is in preview and is not
|
||||
// yet available for general use.
|
||||
Append bool
|
||||
|
||||
// FinalizeOnClose indicates whether the Writer should finalize an object when
|
||||
// closing the write stream. This only applies to Writers where Append is
|
||||
// true, since append semantics allow a prefix of the object to be durable and
|
||||
// readable. By default, objects written with Append semantics will not be
|
||||
// finalized, which means they can be appended to later. If Append is set
|
||||
// to false, this parameter will be ignored; non-appendable objects will
|
||||
// always be finalized when Writer.Close returns without error.
|
||||
//
|
||||
// This feature is in preview and is not yet available for general use.
|
||||
FinalizeOnClose bool
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a large write
|
||||
// operation. If ProgressFunc is not nil and writing requires multiple
|
||||
// calls to the underlying service (see
|
||||
@@ -107,13 +153,15 @@ type Writer struct {
|
||||
o *ObjectHandle
|
||||
|
||||
opened bool
|
||||
pw *io.PipeWriter
|
||||
closed bool
|
||||
iw internalWriter
|
||||
|
||||
donec chan struct{} // closed after err and obj are set.
|
||||
obj *ObjectAttrs
|
||||
|
||||
mu sync.Mutex
|
||||
err error
|
||||
mu sync.Mutex
|
||||
err error
|
||||
takeoverOffset int64 // offset from which the writer started appending to the object.
|
||||
}
|
||||
|
||||
// Write appends to w. It implements the io.Writer interface.
|
||||
@@ -137,7 +185,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n, err = w.pw.Write(p)
|
||||
n, err = w.iw.Write(p)
|
||||
if err != nil {
|
||||
w.mu.Lock()
|
||||
werr := w.err
|
||||
@@ -152,6 +200,49 @@ func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Flush syncs all bytes currently in the Writer's buffer to Cloud Storage.
|
||||
// It returns the offset of bytes that have been currently synced to
|
||||
// Cloud Storage and an error.
|
||||
//
|
||||
// If Flush is never called, Writer will sync data automatically every
|
||||
// [Writer.ChunkSize] bytes and on [Writer.Close].
|
||||
//
|
||||
// [Writer.ProgressFunc] will be called on Flush if present.
|
||||
//
|
||||
// Do not call Flush concurrently with Write or Close. A single Writer is not
|
||||
// safe for unsynchronized use across threads.
|
||||
//
|
||||
// Note that calling Flush very early (before 512 bytes) may interfere with
|
||||
// automatic content sniffing in the Writer.
|
||||
//
|
||||
// Flush is supported only on gRPC clients where [Writer.Append] is set
|
||||
// to true. This feature is in preview and is not yet available for general use.
|
||||
func (w *Writer) Flush() (int64, error) {
|
||||
// Return error if Append is not true.
|
||||
if !w.Append {
|
||||
return 0, errors.New("storage: Flush not supported unless client uses gRPC and Append is set to true")
|
||||
}
|
||||
if w.closed {
|
||||
return 0, errors.New("storage: Flush called on closed Writer")
|
||||
}
|
||||
// Return error if already in error state.
|
||||
w.mu.Lock()
|
||||
werr := w.err
|
||||
w.mu.Unlock()
|
||||
if werr != nil {
|
||||
return 0, werr
|
||||
}
|
||||
// If Flush called before any bytes written, it should start the upload
|
||||
// at zero bytes. This will make the object visible with zero length data.
|
||||
if !w.opened {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
return w.iw.Flush()
|
||||
}
|
||||
|
||||
// Close completes the write operation and flushes any buffered data.
|
||||
// If Close doesn't return an error, metadata about the written object
|
||||
// can be retrieved by calling Attrs.
|
||||
@@ -162,12 +253,12 @@ func (w *Writer) Close() error {
|
||||
}
|
||||
}
|
||||
|
||||
// Closing either the read or write causes the entire pipe to close.
|
||||
if err := w.pw.Close(); err != nil {
|
||||
if err := w.iw.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-w.donec
|
||||
w.closed = true
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
trace.EndSpan(w.ctx, w.err)
|
||||
@@ -178,31 +269,41 @@ func (w *Writer) openWriter() (err error) {
|
||||
if err := w.validateWriteAttrs(); err != nil {
|
||||
return err
|
||||
}
|
||||
if w.o.gen != defaultGen {
|
||||
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
|
||||
if w.o.gen != defaultGen && !w.Append {
|
||||
return fmt.Errorf("storage: generation supported on Writer for appendable objects only, got %v", w.o.gen)
|
||||
}
|
||||
|
||||
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
|
||||
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist)
|
||||
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
|
||||
params := &openWriterParams{
|
||||
ctx: w.ctx,
|
||||
chunkSize: w.ChunkSize,
|
||||
chunkRetryDeadline: w.ChunkRetryDeadline,
|
||||
bucket: w.o.bucket,
|
||||
attrs: &w.ObjectAttrs,
|
||||
conds: w.o.conds,
|
||||
encryptionKey: w.o.encryptionKey,
|
||||
sendCRC32C: w.SendCRC32C,
|
||||
donec: w.donec,
|
||||
setError: w.error,
|
||||
progress: w.progress,
|
||||
setObj: func(o *ObjectAttrs) { w.obj = o },
|
||||
ctx: w.ctx,
|
||||
chunkSize: w.ChunkSize,
|
||||
chunkRetryDeadline: w.ChunkRetryDeadline,
|
||||
chunkTransferTimeout: w.ChunkTransferTimeout,
|
||||
bucket: w.o.bucket,
|
||||
attrs: &w.ObjectAttrs,
|
||||
conds: w.o.conds,
|
||||
appendGen: w.o.gen,
|
||||
encryptionKey: w.o.encryptionKey,
|
||||
sendCRC32C: w.SendCRC32C,
|
||||
append: w.Append,
|
||||
finalizeOnClose: w.FinalizeOnClose,
|
||||
donec: w.donec,
|
||||
setError: w.error,
|
||||
progress: w.progress,
|
||||
setObj: func(o *ObjectAttrs) { w.obj = o },
|
||||
setSize: func(n int64) {
|
||||
if w.obj != nil {
|
||||
w.obj.Size = n
|
||||
}
|
||||
},
|
||||
setTakeoverOffset: func(n int64) { w.takeoverOffset = n },
|
||||
forceEmptyContentType: w.ForceEmptyContentType,
|
||||
}
|
||||
if err := w.ctx.Err(); err != nil {
|
||||
return err // short-circuit
|
||||
}
|
||||
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
|
||||
w.iw, err = w.o.c.tc.OpenWriter(params, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -223,7 +324,6 @@ func (w *Writer) monitorCancel() {
|
||||
w.err = werr
|
||||
w.mu.Unlock()
|
||||
|
||||
// Closing either the read or write causes the entire pipe to close.
|
||||
w.CloseWithError(werr)
|
||||
case <-w.donec:
|
||||
}
|
||||
@@ -237,7 +337,7 @@ func (w *Writer) CloseWithError(err error) error {
|
||||
if !w.opened {
|
||||
return nil
|
||||
}
|
||||
return w.pw.CloseWithError(err)
|
||||
return w.iw.CloseWithError(err)
|
||||
}
|
||||
|
||||
// Attrs returns metadata about a successfully-written object.
|
||||
@@ -266,9 +366,9 @@ func (w *Writer) validateWriteAttrs() error {
|
||||
}
|
||||
|
||||
// progress is a convenience wrapper that reports write progress to the Writer
|
||||
// ProgressFunc if it is set and progress is non-zero.
|
||||
// ProgressFunc if it is set.
|
||||
func (w *Writer) progress(p int64) {
|
||||
if w.ProgressFunc != nil && p != 0 {
|
||||
if w.ProgressFunc != nil {
|
||||
w.ProgressFunc(p)
|
||||
}
|
||||
}
|
||||
|
||||
202
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
generated
vendored
Normal file
202
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
3
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
generated
vendored
Normal file
3
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# GCP Resource detection library
|
||||
|
||||
This is a library intended to be used by Upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments.
|
||||
78
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
generated
vendored
Normal file
78
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gcp
|
||||
|
||||
import "context"
|
||||
|
||||
const (
|
||||
// See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules
|
||||
// for the environment variables available in GAE environments.
|
||||
gaeServiceEnv = "GAE_SERVICE"
|
||||
gaeVersionEnv = "GAE_VERSION"
|
||||
gaeInstanceEnv = "GAE_INSTANCE"
|
||||
gaeEnv = "GAE_ENV"
|
||||
gaeStandard = "standard"
|
||||
)
|
||||
|
||||
func (d *Detector) onAppEngineStandard() bool {
|
||||
// See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables.
|
||||
env, found := d.os.LookupEnv(gaeEnv)
|
||||
return found && env == gaeStandard
|
||||
}
|
||||
|
||||
func (d *Detector) onAppEngine() bool {
|
||||
_, found := d.os.LookupEnv(gaeServiceEnv)
|
||||
return found
|
||||
}
|
||||
|
||||
// AppEngineServiceName returns the service name of the app engine service.
|
||||
func (d *Detector) AppEngineServiceName() (string, error) {
|
||||
if name, found := d.os.LookupEnv(gaeServiceEnv); found {
|
||||
return name, nil
|
||||
}
|
||||
return "", errEnvVarNotFound
|
||||
}
|
||||
|
||||
// AppEngineServiceVersion returns the service version of the app engine service.
|
||||
func (d *Detector) AppEngineServiceVersion() (string, error) {
|
||||
if version, found := d.os.LookupEnv(gaeVersionEnv); found {
|
||||
return version, nil
|
||||
}
|
||||
return "", errEnvVarNotFound
|
||||
}
|
||||
|
||||
// AppEngineServiceInstance returns the service instance of the app engine service.
|
||||
func (d *Detector) AppEngineServiceInstance() (string, error) {
|
||||
if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found {
|
||||
return instanceID, nil
|
||||
}
|
||||
return "", errEnvVarNotFound
|
||||
}
|
||||
|
||||
// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running.
|
||||
func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) {
|
||||
// The GCE metadata server is available on App Engine Flex.
|
||||
return d.GCEAvailabilityZoneAndRegion()
|
||||
}
|
||||
|
||||
// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in.
|
||||
func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) {
|
||||
return d.metadata.ZoneWithContext(context.TODO())
|
||||
}
|
||||
|
||||
// AppEngineStandardCloudRegion returns the region the app engine service is running in.
|
||||
func (d *Detector) AppEngineStandardCloudRegion() (string, error) {
|
||||
return d.FaaSCloudRegion()
|
||||
}
|
||||
55
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
generated
vendored
Normal file
55
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2024 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gcp
|
||||
|
||||
const (
|
||||
bmsProjectIDEnv = "BMS_PROJECT_ID"
|
||||
bmsRegionEnv = "BMS_REGION"
|
||||
bmsInstanceIDEnv = "BMS_INSTANCE_ID"
|
||||
)
|
||||
|
||||
// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying
|
||||
// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables.
|
||||
// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs
|
||||
func (d *Detector) onBareMetalSolution() bool {
|
||||
projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv)
|
||||
region, regionExists := d.os.LookupEnv(bmsRegionEnv)
|
||||
instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv)
|
||||
return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != ""
|
||||
}
|
||||
|
||||
// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable.
|
||||
func (d *Detector) BareMetalSolutionInstanceID() (string, error) {
|
||||
if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found {
|
||||
return instanceID, nil
|
||||
}
|
||||
return "", errEnvVarNotFound
|
||||
}
|
||||
|
||||
// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable.
|
||||
func (d *Detector) BareMetalSolutionCloudRegion() (string, error) {
|
||||
if region, found := d.os.LookupEnv(bmsRegionEnv); found {
|
||||
return region, nil
|
||||
}
|
||||
return "", errEnvVarNotFound
|
||||
}
|
||||
|
||||
// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable.
|
||||
func (d *Detector) BareMetalSolutionProjectID() (string, error) {
|
||||
if project, found := d.os.LookupEnv(bmsProjectIDEnv); found {
|
||||
return project, nil
|
||||
}
|
||||
return "", errEnvVarNotFound
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user