Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 16:59:40 +03:00)

Compare commits: v1.110.16 ... weakpointe (9 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e9261be945 | |
| | 16a75129be | |
| | 68bdb5e4d3 | |
| | 4360d10962 | |
| | ce9c868f59 | |
| | 212ce1baf0 | |
| | 1a091e5831 | |
| | bac186fc65 | |
| | 15ce9e5e49 | |
Makefile (9)
```diff
@@ -12,6 +12,7 @@ PKG_TAG := $(BUILDINFO_TAG)
 endif
 
 EXTRA_DOCKER_TAG_SUFFIX ?=
+EXTRA_GO_BUILD_TAGS ?=
 
 GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(DATEINFO_TAG)-$(BUILDINFO_TAG)'
 TAR_OWNERSHIP ?= --owner=1000 --group=1000
@@ -470,16 +471,16 @@ vendor-update:
 	go mod vendor
 
 app-local:
-	CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=1 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
 
 app-local-pure:
-	CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=0 go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-pure$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
 
 app-local-goos-goarch:
-	CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=$(CGO_ENABLED) GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-$(GOOS)-$(GOARCH)$(RACE) $(PKG_PREFIX)/app/$(APP_NAME)
 
 app-local-windows-goarch:
-	CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
+	CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build $(RACE) -ldflags "$(GO_BUILDINFO)" -tags "$(EXTRA_GO_BUILD_TAGS)" -o bin/$(APP_NAME)-windows-$(GOARCH)$(RACE).exe $(PKG_PREFIX)/app/$(APP_NAME)
 
 quicktemplate-gen: install-qtc
 	qtc
```
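The new `EXTRA_GO_BUILD_TAGS` variable defaults to empty and is simply forwarded to the `-tags` list of every local `go build` invocation, so callers can opt into extra build tags without editing the Makefile. A minimal sketch of exercising it from the command line (the `disable_grpc_modules` tag comes from the vmbackup/vmrestore Makefiles further down; treating `app-local` as overridable from the command line is an assumption about the top-level Makefile):

```sh
# Build a local binary with an extra build tag passed through EXTRA_GO_BUILD_TAGS.
make app-local APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=disable_grpc_modules

# Leaving the variable unset keeps the previous behavior (no extra tags).
make app-local APP_NAME=vmbackup
```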
```diff
@@ -437,7 +437,7 @@ func TestRecordingRuleExec_Negative(t *testing.T) {
 
 	_, err = rr.exec(context.TODO(), time.Now(), 0)
 	if err != nil {
-		t.Fatalf("cannot execute recroding rule: %s", err)
+		t.Fatalf("cannot execute recording rule: %s", err)
 	}
 }
```
```diff
@@ -1,106 +1,110 @@
 # All these commands must run from repository root.
 
+# special tag to reduce resulting binary size
+# See this issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
+VMBACKUP_GO_BUILD_TAGS=disable_grpc_modules
+
 vmbackup:
-	APP_NAME=vmbackup $(MAKE) app-local
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local
 
 vmbackup-race:
-	APP_NAME=vmbackup RACE=-race $(MAKE) app-local
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) RACE=-race $(MAKE) app-local
 
 vmbackup-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker
 
 vmbackup-pure-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-pure
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-pure
 
 vmbackup-linux-amd64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-amd64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-amd64
 
 vmbackup-linux-arm-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm
 
 vmbackup-linux-arm64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-arm64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm64
 
 vmbackup-linux-ppc64le-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-ppc64le
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-ppc64le
 
 vmbackup-linux-386-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-linux-386
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386
 
 vmbackup-darwin-amd64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-amd64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64
 
 vmbackup-darwin-arm64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-darwin-arm64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-arm64
 
 vmbackup-freebsd-amd64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-freebsd-amd64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-freebsd-amd64
 
 vmbackup-openbsd-amd64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-openbsd-amd64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-openbsd-amd64
 
 vmbackup-windows-amd64-prod:
-	APP_NAME=vmbackup $(MAKE) app-via-docker-windows-amd64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-via-docker-windows-amd64
 
 package-vmbackup:
-	APP_NAME=vmbackup $(MAKE) package-via-docker
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker
 
 package-vmbackup-pure:
-	APP_NAME=vmbackup $(MAKE) package-via-docker-pure
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-pure
 
 package-vmbackup-amd64:
-	APP_NAME=vmbackup $(MAKE) package-via-docker-amd64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-amd64
 
 package-vmbackup-arm:
-	APP_NAME=vmbackup $(MAKE) package-via-docker-arm
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm
 
 package-vmbackup-arm64:
-	APP_NAME=vmbackup $(MAKE) package-via-docker-arm64
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm64
 
 package-vmbackup-ppc64le:
-	APP_NAME=vmbackup $(MAKE) package-via-docker-ppc64le
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-ppc64le
 
 package-vmbackup-386:
-	APP_NAME=vmbackup $(MAKE) package-via-docker-386
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) package-via-docker-386
 
 publish-vmbackup:
-	APP_NAME=vmbackup $(MAKE) publish-via-docker
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) publish-via-docker
 
 vmbackup-linux-amd64:
-	APP_NAME=vmbackup CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmbackup-linux-arm:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
 
 vmbackup-linux-arm64:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
 
 vmbackup-linux-ppc64le:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
 
 vmbackup-linux-s390x:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
 
 vmbackup-linux-loong64:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
 
 vmbackup-linux-386:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
 
 vmbackup-darwin-amd64:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmbackup-darwin-arm64:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
 
 vmbackup-freebsd-amd64:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmbackup-openbsd-amd64:
-	APP_NAME=vmbackup CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmbackup-windows-amd64:
-	GOARCH=amd64 APP_NAME=vmbackup $(MAKE) app-local-windows-goarch
+	GOARCH=amd64 APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local-windows-goarch
 
 vmbackup-pure:
-	APP_NAME=vmbackup $(MAKE) app-local-pure
+	APP_NAME=vmbackup EXTRA_GO_BUILD_TAGS=$(VMBACKUP_GO_BUILD_TAGS) $(MAKE) app-local-pure
```
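With `VMBACKUP_GO_BUILD_TAGS=disable_grpc_modules` wired into every vmbackup target, the gRPC-related modules referenced in issue 8008 are left out of the resulting binary. A rough sketch of checking the size effect locally; the temporary path and the idea of rebuilding through the generic `app-local` target (where `EXTRA_GO_BUILD_TAGS` stays at its empty default) are illustrative assumptions, not part of the commit:

```sh
# Build vmbackup via its dedicated target; disable_grpc_modules is applied automatically.
make vmbackup
cp bin/vmbackup /tmp/vmbackup-with-tag

# For comparison, rebuild the same app through the generic target without the extra tag.
make app-local APP_NAME=vmbackup
ls -lh /tmp/vmbackup-with-tag bin/vmbackup
```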
```diff
@@ -1,106 +1,110 @@
 # All these commands must run from repository root.
 
+# special tag to reduce resulting binary size
+# See this issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
+VMRESTORE_GO_BUILD_TAGS=disable_grpc_modules
+
 vmrestore:
-	APP_NAME=vmrestore $(MAKE) app-local
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local
 
 vmrestore-race:
-	APP_NAME=vmrestore RACE=-race $(MAKE) app-local
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) RACE=-race $(MAKE) app-local
 
 vmrestore-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker
 
 vmrestore-pure-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-pure
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-pure
 
 vmrestore-linux-amd64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-linux-amd64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-amd64
 
 vmrestore-linux-arm-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm
 
 vmrestore-linux-arm64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-linux-arm64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-arm64
 
 vmrestore-linux-ppc64le-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-linux-ppc64le
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-ppc64le
 
 vmrestore-linux-386-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-linux-386
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-linux-386
 
 vmrestore-darwin-amd64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-amd64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-amd64
 
 vmrestore-darwin-arm64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-darwin-arm64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-darwin-arm64
 
 vmrestore-freebsd-amd64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-freebsd-amd64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-freebsd-amd64
 
 vmrestore-openbsd-amd64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-openbsd-amd64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-openbsd-amd64
 
 vmrestore-windows-amd64-prod:
-	APP_NAME=vmrestore $(MAKE) app-via-docker-windows-amd64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-via-docker-windows-amd64
 
 package-vmrestore:
-	APP_NAME=vmrestore $(MAKE) package-via-docker
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker
 
 package-vmrestore-pure:
-	APP_NAME=vmrestore $(MAKE) package-via-docker-pure
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-pure
 
 package-vmrestore-amd64:
-	APP_NAME=vmrestore $(MAKE) package-via-docker-amd64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-amd64
 
 package-vmrestore-arm:
-	APP_NAME=vmrestore $(MAKE) package-via-docker-arm
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm
 
 package-vmrestore-arm64:
-	APP_NAME=vmrestore $(MAKE) package-via-docker-arm64
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-arm64
 
 package-vmrestore-ppc64le:
-	APP_NAME=vmrestore $(MAKE) package-via-docker-ppc64le
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-ppc64le
 
 package-vmrestore-386:
-	APP_NAME=vmrestore $(MAKE) package-via-docker-386
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) package-via-docker-386
 
 publish-vmrestore:
-	APP_NAME=vmrestore $(MAKE) publish-via-docker
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) publish-via-docker
 
 vmrestore-linux-amd64:
-	APP_NAME=vmrestore CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmrestore-linux-arm:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch
 
 vmrestore-linux-arm64:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch
 
 vmrestore-linux-ppc64le:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch
 
 vmrestore-linux-s390x:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch
 
 vmrestore-linux-loong64:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=loong64 $(MAKE) app-local-goos-goarch
 
 vmrestore-linux-386:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch
 
 vmrestore-darwin-amd64:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmrestore-darwin-arm64:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch
 
 vmrestore-freebsd-amd64:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmrestore-openbsd-amd64:
-	APP_NAME=vmrestore CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
 
 vmrestore-windows-amd64:
-	GOARCH=amd64 APP_NAME=vmrestore $(MAKE) app-local-windows-goarch
+	GOARCH=amd64 APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local-windows-goarch
 
 vmrestore-pure:
-	APP_NAME=vmrestore $(MAKE) app-local-pure
+	APP_NAME=vmrestore EXTRA_GO_BUILD_TAGS=$(VMRESTORE_GO_BUILD_TAGS) $(MAKE) app-local-pure
```
```diff
@@ -10,7 +10,7 @@ func TestParseIntervalSuccess(t *testing.T) {
 		t.Helper()
 		interval, err := parseInterval(s)
 		if err != nil {
-			t.Fatalf("unexpected error in parseInterva(%q): %s", s, err)
+			t.Fatalf("unexpected error in parseInterval(%q): %s", s, err)
 		}
 		if interval != intervalExpected {
 			t.Fatalf("unexpected result for parseInterval(%q); got %d; want %d", s, interval, intervalExpected)
```
```diff
@@ -17,7 +17,7 @@ func TestScanStringSuccess(t *testing.T) {
 			t.Fatalf("unexpected string scanned from %s; got %s; want %s", s, result, sExpected)
 		}
 		if !strings.HasPrefix(s, result) {
-			t.Fatalf("invalid prefix for scanne string %s: %s", s, result)
+			t.Fatalf("invalid prefix for scanned string %s: %s", s, result)
 		}
 	}
 	f(`""`, `""`)
```
```diff
@@ -210,7 +210,7 @@ func (p *parser) parseMetricExprOrFuncCall() (Expr, error) {
 		}
 		return fe, nil
 	default:
-		// Metric epxression or bool expression or None.
+		// Metric expression or bool expression or None.
 		if isBool(ident) {
 			be := &BoolExpr{
 				B: strings.EqualFold(ident, "true"),
```
```diff
@@ -269,7 +269,7 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
 	}
 
 	// Slow path - spin up multiple local workers for parallel data processing.
-	// Do not use global workers pool, since it increases inter-CPU memory ping-poing,
+	// Do not use global workers pool, since it increases inter-CPU memory ping-pong,
 	// which reduces the scalability on systems with many CPU cores.
 
 	// Prepare the work for workers.
```
```diff
@@ -485,7 +485,7 @@ func (pts *packedTimeseries) unpackTo(dst []*sortBlock, tbf *tmpBlocksFile, tr s
 	}
 
 	// Slow path - spin up multiple local workers for parallel data unpacking.
-	// Do not use global workers pool, since it increases inter-CPU memory ping-poing,
+	// Do not use global workers pool, since it increases inter-CPU memory ping-pong,
 	// which reduces the scalability on systems with many CPU cores.
 
 	// Prepare the work for workers.
```
```diff
@@ -135,7 +135,7 @@ func (tbf *tmpBlocksFile) WriteBlockRefData(b []byte) (tmpBlockAddr, error) {
 	return addr, nil
 }
 
-// Len() returnt tbf size in bytes.
+// Len() return tbf size in bytes.
 func (tbf *tmpBlocksFile) Len() uint64 {
 	return tbf.offset
 }
```
```diff
@@ -188,7 +188,7 @@ func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOp
 		rightValues := right[i].Values
 		dstValues := dst[i].Values
 		if len(leftValues) != len(rightValues) || len(leftValues) != len(dstValues) {
-			logger.Panicf("BUG: len(leftVaues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
+			logger.Panicf("BUG: len(leftValues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
 				len(leftValues), len(rightValues), len(dstValues))
 		}
 		for j, a := range leftValues {
```
```diff
@@ -55,7 +55,7 @@ func TestValidateMaxPointsPerSeriesFailure(t *testing.T) {
 	f := func(start, end, step int64, maxPoints int) {
 		t.Helper()
 		if err := ValidateMaxPointsPerSeries(start, end, step, maxPoints); err == nil {
-			t.Fatalf("expecint non-nil error for ValidateMaxPointsPerSeries(start=%d, end=%d, step=%d, maxPoints=%d)", start, end, step, maxPoints)
+			t.Fatalf("expecting non-nil error for ValidateMaxPointsPerSeries(start=%d, end=%d, step=%d, maxPoints=%d)", start, end, step, maxPoints)
 		}
 	}
 	// zero step
```
```diff
@@ -173,7 +173,7 @@ func (tc *TestCase) MustStartVmagent(instance string, flags []string, promScrape
 // vminsert, and one vmselect.
 //
 // Both Vmsingle and Vmcluster implement the PrometheusWriteQuerier used in
-// business logic tests to abstract out the infrasture.
+// business logic tests to abstract out the infrastructure.
 //
 // This type is not suitable for infrastructure tests where custom cluster
 // setups are often required.
```
```diff
@@ -17,7 +17,7 @@ func TestClusterMultilevelSelect(t *testing.T) {
 	//
 	// vmselect (L2) -> vmselect (L1) -> vmstorage <- vminsert
 	//
-	// vmisert writes data into vmstorage.
+	// vminsert writes data into vmstorage.
 	// vmselect (L2) reads that data via vmselect (L1).
 
 	vmstorage := tc.MustStartVmstorage("vmstorage", []string{
```
```diff
@@ -49,7 +49,7 @@ func StartVminsert(instance string, flags []string, cli *Client, output io.Write
 		graphiteListenAddrRE,
 		openTSDBListenAddrRE,
 	}
-	// Add storateNode REs to block until vminsert establishes connections with
+	// Add storageNode REs to block until vminsert establishes connections with
 	// all storage nodes. The extracted values are unused.
 	for _, sn := range storageNodes(flags) {
 		logRecord := fmt.Sprintf("successfully dialed -storageNode=\"%s\"", sn)
```
```diff
@@ -34,28 +34,28 @@
 # for details
 tsbs: tsbs-build tsbs-generate-data tsbs-load-data tsbs-generate-queries tsbs-run-queries
 
-TSBS_SCALE := 100000
-TSBS_END := $(shell date -u +%Y-%m-%dT00:00:00Z)
-TSBS_START := $(shell \
+TSBS_SCALE ?= 100000
+TSBS_END ?= $(shell date -u +%Y-%m-%dT00:00:00Z)
+TSBS_START ?= $(shell \
 	NOW=$$(date -u +%s); \
 	START=$$((NOW - 86400)); \
 	date -u -d "@$$START" +%Y-%m-%dT00:00:00Z 2>/dev/null || \
 	date -u -r $$START +%Y-%m-%dT00:00:00Z 2>/dev/null \
 )
-TSBS_STEP := 80s
-TSBS_QUERIES := 1000
-TSBS_WORKERS := 4
+TSBS_STEP ?= 80s
+TSBS_QUERIES ?= 1000
+TSBS_WORKERS ?= 4
 TSBS_DATA_FILE := /tmp/tsbs-data-$(TSBS_SCALE)-$(TSBS_START)-$(TSBS_END)-$(TSBS_STEP).gz
 TSBS_QUERY_FILE := /tmp/tsbs-queries-$(TSBS_SCALE)-$(TSBS_START)-$(TSBS_END)-$(TSBS_QUERIES).gz
 # For cluster setup use http://vminsert:8480/insert/0/influx/write
-TSBS_WRITE_URLS := http://localhost:8428/write
+TSBS_WRITE_URLS ?= http://localhost:8428/write
 # For cluster setup use http://vmselect:8481/select/0/prometheus
-TSBS_READ_URLS := http://localhost:8428
-TSBS_METRICS_URL := http://localhost:8428/metrics
+TSBS_READ_URLS ?= http://localhost:8428
+TSBS_METRICS_URL ?= http://localhost:8428/metrics
 
 # Build TSBS tools
 tsbs-build:
-	test -d /tmp/tsbs || (git clone https://github.com/timescale/tsbs.git /tmp/tsbs && \
+	test -d /tmp/tsbs/cmd/tsbs_run_queries_victoriametrics || (git clone https://github.com/timescale/tsbs.git /tmp/tsbs && \
 	cd /tmp/tsbs/cmd/tsbs_generate_data && GOBIN=/tmp/tsbs/bin go install && \
 	cd /tmp/tsbs/cmd/tsbs_generate_queries && GOBIN=/tmp/tsbs/bin go install && \
 	cd /tmp/tsbs/cmd/tsbs_load_victoriametrics && GOBIN=/tmp/tsbs/bin go install && \
```
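Switching these variables from `:=` to `?=` lets them be overridden from the environment instead of requiring edits to the benchmark Makefile. A sketch of such a run, assuming it is invoked from the directory containing this Makefile; the cluster URLs are taken from the comments above, while the scale and worker values are arbitrary examples:

```sh
# Environment variables now take precedence over the ?= defaults,
# e.g. to point the benchmark at a cluster setup with a larger scale:
TSBS_SCALE=200000 TSBS_WORKERS=8 \
TSBS_WRITE_URLS=http://vminsert:8480/insert/0/influx/write \
TSBS_READ_URLS=http://vmselect:8481/select/0/prometheus \
make tsbs
```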
```diff
@@ -43,7 +43,7 @@ app-via-docker: package-builder
 		$(BUILDER_IMAGE) \
 		go build $(RACE) -trimpath -buildvcs=false \
 			-ldflags "-extldflags '-static' $(GO_BUILDINFO)" \
-			-tags 'netgo osusergo musl' \
+			-tags 'netgo osusergo musl $(EXTRA_GO_BUILD_TAGS)' \
 			-o bin/$(APP_NAME)$(APP_SUFFIX)-prod $(PKG_PREFIX)/app/$(APP_NAME)
 
 app-via-docker-windows: package-builder
@@ -58,7 +58,7 @@ app-via-docker-windows: package-builder
 		$(BUILDER_IMAGE) \
 		go build $(RACE) -trimpath -buildvcs=false \
 			-ldflags "-s -w -extldflags '-static' $(GO_BUILDINFO)" \
-			-tags 'netgo osusergo' \
+			-tags 'netgo osusergo $(EXTRA_GO_BUILD_TAGS)' \
 			-o bin/$(APP_NAME)-windows$(APP_SUFFIX)-prod.exe $(PKG_PREFIX)/app/$(APP_NAME)
 
 package-via-docker: package-base
```
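For the docker-based prod builds, `EXTRA_GO_BUILD_TAGS` is appended to the existing `-tags` list rather than replacing it. Roughly, a `make vmbackup-prod` run would then invoke something like the command below inside the builder image; the ldflags placeholder and output path are simplified, only the effective `-tags` value is the point here:

```sh
# Approximate effective command for APP_NAME=vmbackup with EXTRA_GO_BUILD_TAGS=disable_grpc_modules:
go build -trimpath -buildvcs=false \
  -ldflags "-extldflags '-static' <buildinfo flags>" \
  -tags 'netgo osusergo musl disable_grpc_modules' \
  -o bin/vmbackup-prod ./app/vmbackup
```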
```diff
@@ -3,7 +3,7 @@ services:
   # It scrapes targets defined in --promscrape.config
   # And forward them to --remoteWrite.url
   vmagent:
-    image: victoriametrics/vmagent:v1.123.0
+    image: victoriametrics/vmagent:v1.124.0
     depends_on:
       - "vmauth"
     ports:
@@ -35,14 +35,14 @@ services:
   # vmstorage shards. Each shard receives 1/N of all metrics sent to vminserts,
   # where N is number of vmstorages (2 in this case).
   vmstorage-1:
-    image: victoriametrics/vmstorage:v1.123.0-cluster
+    image: victoriametrics/vmstorage:v1.124.0-cluster
     volumes:
       - strgdata-1:/storage
     command:
       - "--storageDataPath=/storage"
     restart: always
   vmstorage-2:
-    image: victoriametrics/vmstorage:v1.123.0-cluster
+    image: victoriametrics/vmstorage:v1.124.0-cluster
     volumes:
       - strgdata-2:/storage
     command:
@@ -52,7 +52,7 @@ services:
   # vminsert is ingestion frontend. It receives metrics pushed by vmagent,
   # pre-process them and distributes across configured vmstorage shards.
   vminsert-1:
-    image: victoriametrics/vminsert:v1.123.0-cluster
+    image: victoriametrics/vminsert:v1.124.0-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"
@@ -61,7 +61,7 @@ services:
       - "--storageNode=vmstorage-2:8400"
     restart: always
   vminsert-2:
-    image: victoriametrics/vminsert:v1.123.0-cluster
+    image: victoriametrics/vminsert:v1.124.0-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"
@@ -73,7 +73,7 @@ services:
   # vmselect is a query fronted. It serves read queries in MetricsQL or PromQL.
   # vmselect collects results from configured `--storageNode` shards.
   vmselect-1:
-    image: victoriametrics/vmselect:v1.123.0-cluster
+    image: victoriametrics/vmselect:v1.124.0-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"
@@ -83,7 +83,7 @@ services:
       - "--vmalert.proxyURL=http://vmalert:8880"
     restart: always
   vmselect-2:
-    image: victoriametrics/vmselect:v1.123.0-cluster
+    image: victoriametrics/vmselect:v1.124.0-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"
@@ -98,7 +98,7 @@ services:
   # read requests from Grafana, vmui, vmalert among vmselects.
   # It can be used as an authentication proxy.
   vmauth:
-    image: victoriametrics/vmauth:v1.123.0
+    image: victoriametrics/vmauth:v1.124.0
     depends_on:
       - "vmselect-1"
       - "vmselect-2"
@@ -112,7 +112,7 @@ services:
 
   # vmalert executes alerting and recording rules
   vmalert:
-    image: victoriametrics/vmalert:v1.123.0
+    image: victoriametrics/vmalert:v1.124.0
     depends_on:
       - "vmauth"
     ports:
```
```diff
@@ -3,7 +3,7 @@ services:
   # It scrapes targets defined in --promscrape.config
   # And forward them to --remoteWrite.url
   vmagent:
-    image: victoriametrics/vmagent:v1.123.0
+    image: victoriametrics/vmagent:v1.124.0
     depends_on:
       - "victoriametrics"
     ports:
@@ -18,7 +18,7 @@ services:
   # VictoriaMetrics instance, a single process responsible for
   # storing metrics and serve read requests.
   victoriametrics:
-    image: victoriametrics/victoria-metrics:v1.123.0
+    image: victoriametrics/victoria-metrics:v1.124.0
     ports:
       - 8428:8428
       - 8089:8089
@@ -54,7 +54,7 @@ services:
 
   # vmalert executes alerting and recording rules
   vmalert:
-    image: victoriametrics/vmalert:v1.123.0
+    image: victoriametrics/vmalert:v1.124.0
     depends_on:
       - "victoriametrics"
       - "alertmanager"
```
```diff
@@ -100,7 +100,7 @@ groups:
           summary: "Churn rate is more than 10% on \"{{ $labels.instance }}\" for the last 15m"
           description: "VM constantly creates new time series on \"{{ $labels.instance }}\".\n
             This effect is known as Churn Rate.\n
-            High Churn Rate tightly connected with database performance and may
+            High Churn Rate is tightly connected with database performance and may
             result in unexpected OOM's or slow queries."
 
       - alert: TooHighChurnRate24h
@@ -117,7 +117,7 @@ groups:
           description: "The number of created new time series over last 24h is 3x times higher than
             current number of active series on \"{{ $labels.instance }}\".\n
             This effect is known as Churn Rate.\n
-            High Churn Rate tightly connected with database performance and may
+            High Churn Rate is tightly connected with database performance and may
             result in unexpected OOM's or slow queries."
 
       - alert: TooHighSlowInsertsRate
@@ -135,4 +135,4 @@ groups:
           summary: "Percentage of slow inserts is more than 5% on \"{{ $labels.instance }}\" for the last 15m"
           description: "High rate of slow inserts on \"{{ $labels.instance }}\" may be a sign of resource exhaustion
             for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series.
-            See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183"
+            See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183"
```
```diff
@@ -1,6 +1,6 @@
 services:
   vmagent:
-    image: victoriametrics/vmagent:v1.123.0
+    image: victoriametrics/vmagent:v1.124.0
     depends_on:
       - "victoriametrics"
     ports:
@@ -14,7 +14,7 @@ services:
     restart: always
 
   victoriametrics:
-    image: victoriametrics/victoria-metrics:v1.123.0
+    image: victoriametrics/victoria-metrics:v1.124.0
     ports:
       - 8428:8428
     volumes:
@@ -40,7 +40,7 @@ services:
     restart: always
 
   vmalert:
-    image: victoriametrics/vmalert:v1.123.0
+    image: victoriametrics/vmalert:v1.124.0
     depends_on:
       - "victoriametrics"
     ports:
@@ -59,7 +59,7 @@ services:
       - '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr": },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
     restart: always
   vmanomaly:
-    image: victoriametrics/vmanomaly:v1.25.2
+    image: victoriametrics/vmanomaly:v1.25.3
     depends_on:
       - "victoriametrics"
     ports:
```
```diff
@@ -1005,7 +1005,7 @@
           "refId": "A"
         }
       ],
-      "title": "Anoamlies: Read Latency",
+      "title": "Anomalies: Read Latency",
       "type": "state-timeline"
     },
     {
```
````diff
@@ -4,12 +4,12 @@ Benchmark compares VictoriaLogs with ELK stack and Grafana Loki.
 
 Benchmark is based on:
 
-- Logs from this repository - https://github.com/logpai/loghub
+- Logs from this repository - [https://github.com/logpai/loghub](https://github.com/logpai/loghub)
 - [logs generator](./generator)
 
 For ELK suite it uses:
 
-- filebeat - https://www.elastic.co/beats/filebeat
+- filebeat - [https://www.elastic.co/beats/filebeat](https://www.elastic.co/beats/filebeat)
 - elastic + kibana
 
 For Grafana Loki suite it uses:
@@ -24,7 +24,7 @@ For Grafana Loki suite it uses:
 
 - VictoriaLogs instance
 - vmsingle - port forwarded to `localhost:8428` to see UI
-- exporters for system metris
+- exporters for system metrics
 
 ELK suite uses [docker-compose-elk.yml](./docker-compose-elk.yml) with the following services:
 
@@ -54,7 +54,7 @@ Each filebeat than writes logs to elastic and VictoriaLogs via elasticsearch-com
 1. Download and unarchive logs by running:
 
    ```shell
-   cd source_logs
+   cd source_logs
    bash download.sh
    ```
 
@@ -74,11 +74,11 @@ Unarchived logs size per file for reference:
 13G hadoop-*.log
 ```
 
-2. (optional) If needed, adjust amount of logs sent by generator by modifying `-outputRateLimitItems` and
+1. (optional) If needed, adjust amount of logs sent by generator by modifying `-outputRateLimitItems` and
    `outputRateLimitPeriod` parameters in [docker-compose.yml](./docker-compose.yml). By default, it is configured to
    send 10000 logs per second.
 
-3. (optional) Build victoria-logs image and adjust `image` parameter in [docker-compose.yml](./docker-compose.yml):
+1. (optional) Build victoria-logs image and adjust `image` parameter in [docker-compose.yml](./docker-compose.yml):
 
    ```shell
    make package-victoria-logs
@@ -95,26 +95,27 @@ output.elasticsearch:
   hosts: [ "http://vlogs:9428/insert/elasticsearch/" ]
 ```
 
-4. Choose a suite to run.
+1. Choose a suite to run.
 
    In order to run ELK suite use the following command:
-   ```
+
+   ```sh
    make docker-up-elk
    ```
 
   In order to run Loki suite use the following command:
-   ```
+
+   ```sh
    make docker-up-loki
   ```
 
-5. Navigate to `http://localhost:3000/` to see Grafana dashboards with resource usage
+1. Navigate to `http://localhost:3000/` to see Grafana dashboards with resource usage
    comparison.
 
   Navigate to `http://localhost:3000/d/hkm6P6_4z/elastic-vs-vlogs` to see ELK suite results.
 
  Navigate to `http://localhost:3000/d/hkm6P6_4y/loki-vs-vlogs` to see Loki suite results.
 
 
 Example results vs ELK:
 
 
````
```diff
@@ -14,6 +14,19 @@ aliases:
 ---
 Please find the changelog for VictoriaMetrics Anomaly Detection below.
 
+## v1.25.3
+Released: 2025-08-19
+
+- FEATURE: Added forecasting capabilities to the [`ProphetModel`](https://docs.victoriametrics.com/anomaly-detection/components/models/#prophet) this allows users to generate *future* (point-wise and interval) predictions with offsets defined by `forecast_at` argument (e.g. `['1d', '1w']`) at *current* timestamp and store these in respective series, e.g. `yhat_1d`, `yhat_lower_1d`, `yhat_upper_1d`, etc. This feature is particularly useful for scenarios where future predictions are needed, such as capacity planning or trend analysis. See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#forecasting) for more details.
+
+- IMPROVEMENT: Added `logger_levels` argument to `settings` [config section](https://docs.victoriametrics.com/anomaly-detection/components/settings/#logger-levels) to allow setting specific log levels for individual components. Useful for debugging specific components. For example, `logger_levels: { "reader.vm": "DEBUG" }` will set the log level for the `VmReader` component to `DEBUG`, while leaving other components at their default log levels. Also is supported in [hot reload](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) mode, allowing for dynamic log level changes without service restarts.
+
+- IMPROVEMENT: Added logging of URLs used for querying VictoriaMetrics TSDB in [`VmReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) to ease the debugging of incomplete data retrieval, incorrect endpoints, or misconfigured tenant IDs. The URLs are logged at the `DEBUG` level, so you can control their verbosity using the `--loggerLevelComponents` argument with `reader.vm=DEBUG` or `reader=DEBUG` to see the URLs in the logs.
+
+- IMPROVEMENT: Added `offset` [argument](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) to `VmReader` on reader and query levels to allow for flexible time offset adjustments in the reader. Useful for correcting for data collection delays. The `offset` can be specified as a string (e.g., "15s", "-20s") and will be applied to all queries processed by the reader. See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#using-offsets) for more details.
+
+- BUGFIX: Resolved the issue where symlink-ed configuration files were not properly processed by [hot reload](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) mechanism, leading to the service not picking up changes made to the original files. Now it properly resolves symlinks and reloads the configuration when the original file is modified.
+
 ## v1.25.2
 Released: 2025-07-30
```
````diff
@@ -54,6 +54,25 @@ Respective config is defined in a [`reader`](https://docs.victoriametrics.com/an
 ## Handling noisy input data
 `vmanomaly` operates on data fetched from VictoriaMetrics using [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) queries, so the initial data quality can be fine-tuned with aggregation, grouping, and filtering to reduce noise and improve anomaly detection accuracy.
 
+## Using offsets
+`vmanomaly` supports {{% available_from "v1.25.3" anomaly %}} the use of offsets in the [`reader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader) section to adjust the time range of the data being queried. This can be particularly useful for correcting for data collection delays or other timing issues. It can be also defined or overridden on [per-query basis](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters).
+
+For example, if you want to query data with a 60-second delay (e.g. data collection happened 1 sec ago, however, timestamps written to VictoriaMetrics are 60 seconds in the past), you can set the `offset` argument to `-60s` in the reader section:
+
+```yaml
+reader:
+  class: 'vm'
+  datasource_url: 'http://localhost:8428'
+  sampling_period: '10s'
+  offset: '-60s'
+  queries:
+    vmb:
+      expr: 'avg(vm_blocks)'
+    cpu_custom_offset:
+      expr: 'avg(rate(vm_cpu_usage[5m]))'
+      offset: '-30s' # this will override the global offset for this query only
+```
+
 ## Handling timezones
 
 `vmanomaly` supports timezone-aware anomaly detection {{% available_from "v1.18.0" anomaly %}} through a `tz` argument, available both at the [reader level](https://docs.victoriametrics.com/anomaly-detection/components/reader#vm-reader) and at the [query level](https://docs.victoriametrics.com/anomaly-detection/components/reader/#per-query-parameters).
````
````diff
@@ -179,6 +198,22 @@ While `vmanomaly` detects anomalies and produces scores, it *does not directly g
 
 <img src="https://docs.victoriametrics.com/anomaly-detection/guides/guide-vmanomaly-vmalert/guide-vmanomaly-vmalert_overview.webp" alt="node_exporter_example_diagram" style="width:60%"/>
 
+Once anomaly scores are written back to VictoriaMetrics, you can use [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions subset in `vmalert` to define alerting rules based on these scores. Reasonable defaults are `anomaly_score > 1`:
+
+```yaml
+groups:
+  - name: vmanomaly_alerts
+    rules:
+      - alert: HighAnomalyScore
+        expr: anomaly_score > 1 # or similar expressions, like `min(anomaly_score{...}) by (...) > 1`
+        for: 5m
+        labels:
+          severity: warning
+        annotations:
+          summary: "Anomaly score > 1 for {{ $labels.for }} query"
+          description: "Anomaly score is {{ $value }} for query {{ $labels.for }}. Value: {{ $value }}."
+```
+
 ## Preventing alert fatigue
 Produced anomaly scores are designed in such a way that values from 0.0 to 1.0 indicate non-anomalous data, while a value greater than 1.0 is generally classified as an anomaly. However, there are no perfect models for anomaly detection, that's why reasonable defaults expressions like `anomaly_score > 1` may not work 100% of the time. However, anomaly scores, produced by `vmanomaly` are written back as metrics to VictoriaMetrics, where tools like [`vmalert`](https://docs.victoriametrics.com/victoriametrics/vmalert/) can use [MetricsQL](https://docs.victoriametrics.com/victoriametrics/metricsql/) expressions to fine-tune alerting thresholds and conditions, balancing between avoiding [false negatives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-negative) and reducing [false positives](https://victoriametrics.com/blog/victoriametrics-anomaly-detection-handbook-chapter-1/#false-positive).
````
````diff
@@ -228,6 +263,117 @@ writer:
 
 Configuration above will produce N intervals of full length (`fit_window`=14d + `fit_every`=1h) until `to_iso` timestamp is reached to run N consecutive `fit` calls to train models; Then these models will be used to produce `M = [fit_every / sampling_frequency]` infer datapoints for `fit_every` range at the end of each such interval, imitating M consecutive calls of `infer_every` in `PeriodicScheduler` [config](https://docs.victoriametrics.com/anomaly-detection/components/scheduler#periodic-scheduler). These datapoints then will be written back to VictoriaMetrics TSDB, defined in `writer` [section](https://docs.victoriametrics.com/anomaly-detection/components/writer#vm-writer) for further visualization (i.e. in VMUI or Grafana)
 
+## Forecasting
+
+Not intended for forecasting in its core, `vmanomaly` can still be used to produce forecasts using [ProphetModel](https://docs.victoriametrics.com/anomaly-detection/components/models#prophet) {{% available_from "v1.25.3" anomaly %}}, which can be helpful in scenarios like capacity planning, resource allocation, or trend analysis, if the underlying data is complex and can't be handled by inline MetricsQL queries, including [predict_linear](https://docs.victoriametrics.com/victoriametrics/metricsql/#predict_linear).
+
+> However, please note that this mode should be used with care, as the model will produce `yhat_{h}` (and probably `yhat_lower_{h}`, and `yhat_upper_{h}`) time series **for each timeseries returned by input queries and for each forecasting horizon specified in `forecast_at` argument, which can lead to a significant increase in the number of active timeseries in VictoriaMetrics TSDB**.
+
+Here's an example of how to produce forecasts using `vmanomaly` and combine it with the regular model, e.g. to estimate daily outcomes for a disk usage metric:
+
+```yaml
+# https://docs.victoriametrics.com/anomaly-detection/components/scheduler/#periodic-scheduler
+schedulers:
+  periodic_5m: # this scheduler will be used to produce anomaly scores each 5 minutes using "regular" simple model
+    class: 'periodic'
+    fit_every: '30d'
+    fit_window: '3d'
+    infer_every: '5m'
+  periodic_forecast: # this scheduler will be used to produce forecasts each 24h using "daily" model
+    class: 'periodic'
+    fit_every: '7d'
+    fit_window: '730d' # to fit the model on 2 years of data to account for seasonality and holidays
+    infer_every: '24h'
+# https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader
+reader:
+  class: 'vm'
+  datasource_url: 'http://play.victoriametrics.com'
+  tenant_id: '0:0'
+  sampling_period: '5m'
+  # other reader params ...
+  queries:
+    disk_usage_perc_5m:
+      expr: |
+        max_over_time(
+          1 - (node_filesystem_avail_bytes{mountpoint="/",fstype!="rootfs"}
+          /
+          node_filesystem_size_bytes{mountpoint="/",fstype!="rootfs"}),
+          1h
+        )
+      data_range: [0, 1]
+      # step: '1m' # default will be inherited from sampling_period
+    disk_usage_perc_1d:
+      expr: |
+        max_over_time(
+          1 - (node_filesystem_avail_bytes{mountpoint="/",fstype!="rootfs"}
+          /
+          node_filesystem_size_bytes{mountpoint="/",fstype!="rootfs"}),
+          24h
+        )
+      step: '1d' # override default step to 1d, as we want to produce daily forecasts
+      data_range: [0, 1]
+# https://docs.victoriametrics.com/anomaly-detection/components/models/
+models:
+  quantile_5m:
+    class: 'quantile_online' # online model, which updates itself each infer call
+    queries: ['disk_usage_perc_5m']
+    schedulers: ['periodic_5m']
+    clip_predictions: True
+    detection_direction: 'above_expected' # as we are interested in spikes in capacity planning
+    quantiles: [0.25, 0.5, 0.75] # to produce median and upper quartiles
+    iqr_threshold: 2.0
+
+  prophet_1d:
+    class: 'prophet'
+    queries: ['disk_usage_perc_1d']
+    schedulers: ['periodic_forecast']
+    clip_predictions: True
+    detection_direction: 'above_expected' # as we are interested in spikes in capacity planning
+    forecast_at: ['3d', '7d'] # this will produce forecasts for 3 and 7 days ahead
+    provide_series: ['yhat', 'yhat_upper'] # to write forecasts back to VictoriaMetrics, omitting `yhat_lower` as it is not needed in this example
+    # other model params, yearly_seasonality may stay
+    # https://facebook.github.io/prophet/docs/quick_start#python-api
+    args:
+      interval_width: 0.98 # see https://facebook.github.io/prophet/docs/uncertainty_intervals
+      country_holidays: 'US'
+# https://docs.victoriametrics.com/anomaly-detection/components/writer/#vm-writer
+writer:
+  class: 'vm'
+  datasource_url: '{your_victoriametrics_url_for_writing}'
+  # tenant_id: '0:0' # or your tenant ID if using clustered VictoriaMetrics
+  # other writer params ...
+  # https://docs.victoriametrics.com/anomaly-detection/components/writer/#metrics-formatting
+  metric_format:
+    __name__: $VAR
+    for: $QUERY_KEY
+```
+
+Then, respective alerts can be configured in [`vmalert`](https://docs.victoriametrics.com/victoriametrics/vmalert/) to notify disk exhaustion risks, e.g. if the forecasted disk usage exceeds 90% in the next 3 days:
+
+```yaml
+groups:
+  - name: disk_usage_alerts
+    rules:
+      - alert: DiskUsageHigh
+        expr: |
+          yhat_7d{for="disk_usage_perc_1d"} > 0.9
+        for: 24h
+        labels:
+          severity: critical
+        annotations:
+          summary: "Disk usage is forecasted to exceed 90% in the next 3 days"
+          description: "Disk usage is forecasted to exceed 90% in the next 3 days for instance {{ $labels.instance }}. Forecasted value: {{ $value }}."
+      - alert: DiskUsageCritical
+        expr: |
+          yhat_3d{for="disk_usage_perc_1d"} > 0.95
+        for: 24h
+        labels:
+          severity: critical
+        annotations:
+          summary: "Disk usage is forecasted to exceed 95% in the next 3 days"
+          description: "Disk usage is forecasted to exceed 95% in the next 3 days for instance {{ $labels.instance }}. Forecasted value: {{ $value }}."
+```
+
 ## Resource consumption of vmanomaly
 `vmanomaly` itself is a lightweight service, resource usage is primarily dependent on [scheduling](https://docs.victoriametrics.com/anomaly-detection/components/scheduler) (how often and on what data to fit/infer your models), [# and size of timeseries returned by your queries](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader), and the complexity of the employed [models](https://docs.victoriametrics.com/anomaly-detection/components/models). Its resource usage is directly related to these factors, making it adaptable to various operational scales. Various optimizations are available to balance between RAM usage, processing speed, and model capacity. These options are described in the sections below.
````
````diff
@@ -243,7 +389,7 @@ services:
   # ...
   vmanomaly:
     container_name: vmanomaly
-    image: victoriametrics/vmanomaly:v1.25.2
+    image: victoriametrics/vmanomaly:v1.25.3
     # ...
     ports:
       - "8490:8490"
@@ -456,7 +602,7 @@ options:
 Here’s an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
 
 ```sh
-docker pull victoriametrics/vmanomaly:v1.25.2 && docker image tag victoriametrics/vmanomaly:v1.25.2 vmanomaly
+docker pull victoriametrics/vmanomaly:v1.25.3 && docker image tag victoriametrics/vmanomaly:v1.25.3 vmanomaly
 ```
 
 ```sh
````
````diff
@@ -121,13 +121,13 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
 1. Pull Docker image:
 
    ```sh
-   docker pull victoriametrics/vmanomaly:v1.25.2
+   docker pull victoriametrics/vmanomaly:v1.25.3
    ```
 
 2. (Optional step) tag the `vmanomaly` Docker image:
 
    ```sh
-   docker image tag victoriametrics/vmanomaly:v1.25.2 vmanomaly
+   docker image tag victoriametrics/vmanomaly:v1.25.3 vmanomaly
   ```
 
 3. Start the `vmanomaly` Docker container with a *license file*, use the command below.
@@ -163,7 +163,7 @@ docker run -it --user 1000:1000 \
 services:
   # ...
   vmanomaly:
-    image: victoriametrics/vmanomaly:v1.25.2
+    image: victoriametrics/vmanomaly:v1.25.3
     volumes:
       $YOUR_LICENSE_FILE_PATH:/license
       $YOUR_CONFIG_FILE_PATH:/config.yml
````
```diff
@@ -220,6 +220,14 @@ settings:
   n_workers: 4 # number of workers to run workload in parallel, set to 0 or negative number to use all available CPU cores
   anomaly_score_outside_data_range: 5.0 # default anomaly score for anomalies outside expected data range
   restore_state: True # restore state from previous run, available since v1.24.0
+  # https://docs.victoriametrics.com/anomaly-detection/components/settings/#logger-levels
+  # to override service-global logger levels, use the `logger_levels` section
+  logger_levels:
+    # vmanomaly: info
+    # scheduler: info
+    # reader: info
+    # writer: info
+    model.prophet: warning
 
 schedulers:
   1d_1m:
@@ -299,6 +307,9 @@ For optimal service behavior, consider the following tweaks when configuring `vm
 - Set up [anomaly score dashboard](https://docs.victoriametrics.com/anomaly-detection/presets/#grafana-dashboard) to visualize the results of anomaly detection.
 - Set up [self-monitoring dashboard](https://docs.victoriametrics.com/anomaly-detection/self-monitoring/) to monitor the health of `vmanomaly` service and its components.
 
+**Logging**:
+- Tune logging levels in the `settings.logger_levels` [section](https://docs.victoriametrics.com/anomaly-detection/components/settings/#logger-levels) to control the verbosity of logs. This can help in debugging and monitoring the service behavior, as well as in disabling excessive logging for production environments.
+
 ## Check also
 
 Please refer to the following links for a deeper understanding of Anomaly Detection and `vmanomaly`:
```
@@ -652,7 +652,7 @@ models:
|
||||
|
||||
> `ProphetModel` is a [univariate](#univariate-models), [non-rolling](#non-rolling-models), [offline](#offline-models) model.
|
||||
|
||||
> {{% available_from "v1.18.2" anomaly %}} the format for `tz_seasonalities` has been updated to enhance flexibility. Previously, it accepted a list of strings (e.g., `['hod', 'minute']`). Now, it follows the same structure as custom seasonalities defined in the `seasonalities` argument (e.g., `{"name": "hod", "fourier_order": 5, "mode": "additive"}`). This change is backward-compatible, so older configurations will be automatically converted to the new format using default values.
|
||||
> {{% available_from "v1.25.3" anomaly %}} Producing forecasts for future timestamps is now supported. To enable this, set the `forecast_at` argument to a list of relative future offsets (e.g., `['1h', '1d']`). The model will then generate forecasts for these future timestamps, which can be useful for planning and resource allocation. Output series are affected by [provide_series](#provide-series) argument, which need to include at least `yhat` for point-wise forecasts (and `yhat_lower` or/and `yhat_upper` for respective confidence intervals). See the example below for more details.
|
||||
|
||||
*Parameters specific for vmanomaly*:
|
||||
|
||||
@@ -661,7 +661,11 @@ models:
|
||||
- `scale`{{% available_from "v1.18.0" anomaly %}} (float): Is used to adjust the margins between `yhat` and [`yhat_lower`, `yhat_upper`]. New margin = `|yhat_* - yhat_lower| * scale`. Defaults to 1 (no scaling is applied). See `scale`[common arg](https://docs.victoriametrics.com/anomaly-detection/components/models/#scale) section for detailed instructions and 2-sided option.
|
||||
- `tz_aware`{{% available_from "v1.18.0" anomaly %}} (bool): Enables handling of timezone-aware timestamps. Default is `False`. Should be used with `tz_seasonalities` and `tz_use_cyclical_encoding` parameters.
|
||||
- `tz_seasonalities`{{% available_from "v1.18.0" anomaly %}} (list[dict]): Specifies timezone-aware seasonal components. Requires `tz_aware=True`. Supported options include `minute`, `hod` (hour of day), `dow` (day of week), and `month` (month of year). {{% available_from "v1.18.2" anomaly %}} users can configure additional parameters for each seasonality, such as `fourier_order`, `prior_scale`, and `mode`. For more details, please refer to the **Timezone-unaware** configuration example below.
|
||||
> {{% available_from "v1.18.2" anomaly %}} the format for `tz_seasonalities` has been updated to enhance flexibility. Previously, it accepted a list of strings (e.g., `['hod', 'minute']`). Now, it follows the same structure as custom seasonalities defined in the `seasonalities` argument (e.g., `{"name": "hod", "fourier_order": 5, "mode": "additive"}`). This change is backward-compatible, so older configurations will be automatically converted to the new format using default values.
|
||||
- `tz_use_cyclical_encoding`{{% available_from "v1.18.0" anomaly %}} (bool): If set to `True`, applies [cyclical encoding technique](https://www.kaggle.com/code/avanwyk/encoding-cyclical-features-for-deep-learning) to timezone-aware seasonalities. Should be used with `tz_aware=True` and `tz_seasonalities`.
|
||||
- `forecast_at`{{% available_from "v1.25.3" anomaly %}} (list[str]): Specifies future relative offsets for which forecasts should be generated (e.g., `['1h', '1d']`). Works similarly to [predict_linear](https://docs.victoriametrics.com/victoriametrics/metricsql/#predict_linear) in MetricsQL, but with more flexibility and seasonality support - produced series will have *the same timestamp* as the other [output](#vmanomaly-output) series, but with the forecasted value for the *future timestamp*. Defaults to `[]` (empty list, meaning no future forecasts are produced). If set, `provide_series` must include at least `yhat` for point-wise forecasts (and `yhat_lower` and/or `yhat_upper` for the respective confidence intervals). For example, if `forecast_at` is set to `['1h', '1d']`, the model will produce forecasts for both the next hour and the next day, and these series can be accessed as `yhat_1h`, `yhat_lower_1h`, `yhat_upper_1h`, `yhat_1d`, `yhat_lower_1d`, and `yhat_upper_1d` in the output, respectively. See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#forecasting) for more details.
|
||||
|
||||
> `forecast_at` parameter can lead to **significant increase in active timeseries** if you have a lot of time series returned by your queries, as it will produce additional series for each of the future timestamps specified in `forecast_at` (optionally multiplied by 1-3 if interval forecasts are included). For example, if you have 1000 time series returned by your query and set `forecast_at` to `[1h, 1d, 1w]`, and `provide_series` includes `yhat_lower` and `yhat_upper`, it will produce 1000 (series) * 3 (intervals) * 3 (predictions, point + interval) = 9000 additional timeseries. Consider using it only on small subset of metrics (e.g. grouped by `host` or `region`) to avoid this issue, as it also **proportionally (to the number of `forecast_at` elements) increases the timings of inference calls**.
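
A minimal sketch of a model section with future forecasts enabled; the model alias, the query name and the exact `provide_series` set below are illustrative placeholders, not required values:

```yaml
models:
  prophet_with_forecasts:  # arbitrary model alias
    class: 'prophet'
    queries: ['ingestion_rate']  # assumed to be a query alias defined in the reader section
    provide_series: ['anomaly_score', 'yhat', 'yhat_lower', 'yhat_upper']
    forecast_at: ['1h', '1d']  # additionally produces yhat_1h, yhat_1d and the respective interval series
```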
> Apart from standard [`vmanomaly` output](#vmanomaly-output), Prophet model can provide additional metrics.
|
||||
|
||||
@@ -1308,7 +1312,7 @@ monitoring:
|
||||
Let's pull the docker image for `vmanomaly`:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.25.2
|
||||
docker pull victoriametrics/vmanomaly:v1.25.3
|
||||
```
|
||||
|
||||
Now we can run the docker container putting as volumes both config and model file:
|
||||
@@ -1322,7 +1326,7 @@ docker run -it \
|
||||
-v $(PWD)/license:/license \
|
||||
-v $(PWD)/custom_model.py:/vmanomaly/model/custom.py \
|
||||
-v $(PWD)/custom.yaml:/config.yaml \
|
||||
victoriametrics/vmanomaly:v1.25.2 /config.yaml \
|
||||
victoriametrics/vmanomaly:v1.25.3 /config.yaml \
|
||||
--licenseFile=/license
|
||||
```
|
||||
|
||||
|
||||
@@ -85,14 +85,18 @@ There is change{{% available_from "v1.13.0" anomaly %}} of [`queries`](https://d
|
||||
|
||||
> The recommended approach for using per-query `tenant_id`s is to set both `reader.tenant_id` and `writer.tenant_id` to `multitenant`. See [this section](https://docs.victoriametrics.com/anomaly-detection/components/writer/#multitenancy-support) for more details. Configurations where `reader.tenant_id` equals `writer.tenant_id` and is not `multitenant` are also considered safe, provided there is a single, DISTINCT `tenant_id` defined in the reader (either at the reader level or the query level, if set).
|
||||
|
||||
- `offset` {{% available_from "v1.25.3" anomaly %}} (string): This optional argument allows specifying a time offset for the query, which can be useful for adjusting the query time range to account for data collection delays or other timing issues. The offset is specified as a string (e.g., "15s", "-20s") and is applied to the query time range. Valid resolutions are `ms`, `s`, `m`, `h`, `d` (milliseconds, seconds, minutes, hours, days). If not set, defaults to `0s` (0). See [FAQ](https://docs.victoriametrics.com/anomaly-detection/faq/#using-offsets) for more details.
|
||||
|
||||
### Per-query config example
|
||||
```yaml
|
||||
reader:
|
||||
class: 'vm'
|
||||
sampling_period: '1m'
|
||||
datasource_url: 'https://play.victoriametrics.com/' # source victoriametrics/prometheus
|
||||
max_points_per_query: 10000
|
||||
data_range: [0, 'inf']
|
||||
tenant_id: 'multitenant'
|
||||
offset: '0s' # optional, defaults to 0s if not set
|
||||
# other reader params ...
|
||||
queries:
|
||||
ingestion_rate_t1:
|
||||
@@ -109,6 +113,7 @@ reader:
|
||||
max_points_per_query: 5000 # overrides reader-level value of 10000 for `ingestion_rate` query
|
||||
tz: 'America/New_York' # to override reader-wise `tz`
|
||||
tenant_id: '2:0' # overriding tenant_id to isolate data
|
||||
offset: '-15s' # to override reader-wise `offset` and query data 15 seconds earlier to account for data collection delays
|
||||
```
|
||||
|
||||
### Config parameters
|
||||
@@ -395,10 +400,24 @@ Optional argument{{% available_from "v1.18.0" anomaly %}} specifies the [IANA](h
|
||||
Optional argument{{% available_from "v1.18.1" anomaly %}} allows defining **valid** data ranges for input of all the queries in `queries`. Defaults to `["-inf", "inf"]` if not set and can be overridden on a [per-query basis](#per-query-parameters).
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
|
||||
<span style="white-space: nowrap;">`offset`</span>
|
||||
</td>
|
||||
<td>
|
||||
|
||||
`60s`
|
||||
</td>
|
||||
<td>
|
||||
Optional argument{{% available_from "v1.25.3" anomaly %}} allows specifying a time offset for all queries in `queries`. Defaults to `0s` (0) if not set and can be overridden on a [per-query basis](#per-query-parameters).
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
Config file example:
|
||||
<br>
|
||||
Config section example:
|
||||
|
||||
```yaml
|
||||
reader:
|
||||
@@ -407,6 +426,7 @@ reader:
|
||||
tenant_id: '0:0'
|
||||
tz: 'America/New_York'
|
||||
data_range: [1, 'inf'] # reader-level
|
||||
offset: '0s' # reader-level
|
||||
queries:
|
||||
ingestion_rate:
|
||||
expr: 'sum(rate(vm_rows_inserted_total[5m])) by (type) > 0'
|
||||
@@ -414,6 +434,7 @@ reader:
|
||||
data_range: [0, 'inf'] # if set, overrides reader-level data_range
|
||||
tz: 'Australia/Sydney' # if set, overrides reader-level tz
|
||||
# tenant_id: '1:0' # if set, overrides reader-level tenant_id
|
||||
# offset: '-15s' # if set, overrides reader-level offset
|
||||
sampling_period: '1m'
|
||||
query_from_last_seen_timestamp: True # false by default
|
||||
latency_offset: '1ms'
|
||||
|
||||
@@ -305,3 +305,27 @@ reader: # can be partially reused, because its class and datasource URL are unc
|
||||
This means that the service upon restart:
|
||||
1. Won't restore the state of `zscore_online` model, because its `z_threshold` argument **has changed**, retraining from scratch is needed on the last `fit_window` = 24 hours of data for `q1`, `q2` and `q3` (as model's `queries` arg is not set so it defaults to all queries found in the reader).
|
||||
2. Will **partially** restore the state of `prophet` model, because its class and schedulers are unchanged, but **only instances trained on timeseries returned by `q1` query**. New fit/infer jobs will be set for new query `q3`. The old query `q2` artifacts will be dropped upon restart - all respective models and data for (`prophet`, `q2`) combination will be removed from the database file and from the disk.
|
||||
|
||||
|
||||
### Logger Levels

{{% available_from "v1.25.3" anomaly %}} `vmanomaly` service supports per-component logger levels, allowing you to control the verbosity of logs for each component independently. This can be useful for debugging or monitoring specific components without overwhelming the logs with information from other components. Prefixes are also supported, allowing you to set the logger level for all components sharing a specific prefix.

The logger levels can be set in the `settings` section of the config file under the `logger_levels` key, where each key is a component name or prefix and the value is the desired logger level. The available logger levels are: `debug`, `info`, `warning`, `error`, and `critical`.

> Best used in combination with [hot-reload](https://docs.victoriametrics.com/anomaly-detection/components/#hot-reload) to change the logger levels *on-the-fly* without restarting the service, via a short-circuit config check that doesn't even trigger the state restoration logic.

Here's an example configuration that sets the logger level for the `reader.vm` component to `debug` and for all components under the `model` prefix to `warning`, while the `--loggerLevel` [command line argument](https://docs.victoriametrics.com/anomaly-detection/quickstart/#command-line-arguments) sets the default logger level to `INFO` for all the other components, unless overridden by the config:

> If a component's entry is commented out and the config is hot-reloaded, its logger level is set back to the value of the `--loggerLevel` command line argument, which defaults to `info` if not specified.

```yaml
|
||||
settings:
|
||||
n_workers: 4
|
||||
restore_state: True # enables state restoration
|
||||
logger_levels:
|
||||
reader.vm: debug # affects only VmReader logs
|
||||
model: warning # applies to all components with 'model' prefix, such as 'model.zscore_online', 'model.prophet', etc.
|
||||
# once commented out in hot-reload mode, will use the default logger level set by --loggerLevel command line argument
|
||||
# monitoring.push: critical
|
||||
```
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
- To use *vmanomaly*, part of the enterprise package, a license key is required. Obtain your key [here](https://victoriametrics.com/products/enterprise/trial/) for this tutorial or for enterprise use.
|
||||
- In the tutorial, we'll be using the following VictoriaMetrics components:
|
||||
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) (v1.123.0)
|
||||
- [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/) (v1.123.0)
|
||||
- [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) (v1.123.0)
|
||||
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/) (v1.124.0)
|
||||
- [vmalert](https://docs.victoriametrics.com/victoriametrics/vmalert/) (v1.124.0)
|
||||
- [vmagent](https://docs.victoriametrics.com/victoriametrics/vmagent/) (v1.124.0)
|
||||
- [Grafana](https://grafana.com/) (v.10.2.1)
|
||||
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/)
|
||||
- [Node exporter](https://github.com/prometheus/node_exporter#node-exporter) (v1.7.0) and [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (v0.27.0)
|
||||
@@ -315,7 +315,7 @@ Let's wrap it all up together into the `docker-compose.yml` file.
|
||||
services:
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -332,7 +332,7 @@ services:
|
||||
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
@@ -365,7 +365,7 @@ services:
|
||||
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.123.0
|
||||
image: victoriametrics/vmalert:v1.124.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
@@ -387,7 +387,7 @@ services:
|
||||
restart: always
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.25.2
|
||||
image: victoriametrics/vmanomaly:v1.25.3
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
||||
@@ -241,27 +241,27 @@ services:
|
||||
- grafana_data:/var/lib/grafana/
|
||||
|
||||
vmsingle:
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
command:
|
||||
- -httpListenAddr=0.0.0.0:8429
|
||||
|
||||
vmstorage:
|
||||
image: victoriametrics/vmstorage:v1.123.0-cluster
|
||||
image: victoriametrics/vmstorage:v1.124.0-cluster
|
||||
|
||||
vminsert:
|
||||
image: victoriametrics/vminsert:v1.123.0-cluster
|
||||
image: victoriametrics/vminsert:v1.124.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8400
|
||||
- -httpListenAddr=0.0.0.0:8480
|
||||
|
||||
vmselect:
|
||||
image: victoriametrics/vmselect:v1.123.0-cluster
|
||||
image: victoriametrics/vmselect:v1.124.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8401
|
||||
- -httpListenAddr=0.0.0.0:8481
|
||||
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
command:
|
||||
@@ -270,7 +270,7 @@ services:
|
||||
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
|
||||
|
||||
vmgateway-cluster:
|
||||
image: victoriametrics/vmgateway:v1.123.0-enterprise
|
||||
image: victoriametrics/vmgateway:v1.124.0-enterprise
|
||||
ports:
|
||||
- 8431:8431
|
||||
volumes:
|
||||
@@ -286,7 +286,7 @@ services:
|
||||
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
|
||||
|
||||
vmgateway-single:
|
||||
image: victoriametrics/vmgateway:v1.123.0-enterprise
|
||||
image: victoriametrics/vmgateway:v1.124.0-enterprise
|
||||
ports:
|
||||
- 8432:8431
|
||||
volumes:
|
||||
@@ -397,7 +397,7 @@ Once iDP configuration is done, vmagent configuration needs to be updated to use
|
||||
|
||||
```yaml
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.123.0
|
||||
image: victoriametrics/vmagent:v1.124.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
- ./vmagent-client-secret:/etc/vmagent/oauth2-client-secret
|
||||
|
||||
@@ -27,5 +27,5 @@ to [the latest available releases](https://docs.victoriametrics.com/victoriametr
|
||||
|
||||
## Currently supported LTS release lines
|
||||
|
||||
- v1.122.x - the latest one is [v1.122.1 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.1)
|
||||
- v1.110.x - the latest one is [v1.110.15 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.15)
|
||||
- v1.122.x - the latest one is [v1.122.2 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.122.2)
|
||||
- v1.110.x - the latest one is [v1.110.16 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.16)
|
||||
|
||||
@@ -57,9 +57,9 @@ and performing [regular upgrades](https://docs.victoriametrics.com/victoriametri
|
||||
Download the newest available [VictoriaMetrics release](https://docs.victoriametrics.com/victoriametrics/changelog/)
|
||||
from [DockerHub](https://hub.docker.com/r/victoriametrics/victoria-metrics) or [Quay](https://quay.io/repository/victoriametrics/victoria-metrics?tab=tags):
|
||||
```sh
|
||||
docker pull victoriametrics/victoria-metrics:v1.123.0
|
||||
docker pull victoriametrics/victoria-metrics:v1.124.0
|
||||
docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 \
|
||||
victoriametrics/victoria-metrics:v1.123.0 --selfScrapeInterval=5s -storageDataPath=victoria-metrics-data
|
||||
victoriametrics/victoria-metrics:v1.124.0 --selfScrapeInterval=5s -storageDataPath=victoria-metrics-data
|
||||
```
|
||||
_For Enterprise images see [this link](https://docs.victoriametrics.com/victoriametrics/enterprise/#docker-images)._
|
||||
|
||||
|
||||
@@ -1,3 +1,9 @@
---
build:
  list: never
  publishResources: false
  render: never
---
VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and time series database.
See [case studies for VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/casestudies/).

@@ -1,3 +1,9 @@
---
build:
  list: never
  publishResources: false
  render: never
---
The following `tip` changes can be tested by building VictoriaMetrics components from the latest commits according to the following docs:

* [How to build single-node VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-build-from-sources)

@@ -89,7 +89,7 @@ VictoriaMetrics Enterprise components are available in the following forms:
|
||||
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
|
||||
|
||||
Binary releases of VictoriaMetrics Enterprise are available [at the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz`.
|
||||
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz`.
|
||||
|
||||
In order to run binary release of VictoriaMetrics Enterprise component, please download the `*-enterprise.tar.gz` archive for your OS and architecture
|
||||
from the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) and unpack it. Then run the unpacked binary.
|
||||
@@ -107,8 +107,8 @@ For example, the following command runs VictoriaMetrics Enterprise binary with t
|
||||
obtained at [this page](https://victoriametrics.com/products/enterprise/trial/):
|
||||
|
||||
```sh
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz
|
||||
tar -xzf victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz
|
||||
tar -xzf victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz
|
||||
./victoria-metrics-prod -license=BASE64_ENCODED_LICENSE_KEY
|
||||
```
|
||||
|
||||
@@ -123,7 +123,7 @@ Alternatively, VictoriaMetrics Enterprise license can be stored in the file and
|
||||
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
|
||||
|
||||
Docker images for VictoriaMetrics Enterprise are available at VictoriaMetrics [Docker Hub](https://hub.docker.com/u/victoriametrics) and [Quay](https://quay.io/organization/victoriametrics).
|
||||
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.123.0-enterprise`.
|
||||
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.124.0-enterprise`.
|
||||
|
||||
In order to run Docker image of VictoriaMetrics Enterprise component, it is required to provide the license key via command-line
|
||||
flag as described [here](#binary-releases).
|
||||
@@ -133,13 +133,13 @@ Enterprise license key can be obtained at [this page](https://victoriametrics.co
|
||||
For example, the following command runs VictoriaMetrics Enterprise Docker image with the specified license key:
|
||||
|
||||
```sh
|
||||
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.123.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
|
||||
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.124.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
|
||||
```
|
||||
|
||||
Alternatively, the license code can be stored in the file and then referred via `-licenseFile` command-line flag:
|
||||
|
||||
```sh
|
||||
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.123.0-enterprise -licenseFile=/path/to/vm-license
|
||||
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.124.0-enterprise -licenseFile=/path/to/vm-license
|
||||
```
|
||||
|
||||
Example docker-compose configuration:
|
||||
@@ -148,7 +148,7 @@ version: "3.5"
|
||||
services:
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.123.0
|
||||
image: victoriametrics/victoria-metrics:v1.124.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
@@ -180,7 +180,7 @@ is used to provide key in plain-text:
|
||||
```yaml
|
||||
server:
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
|
||||
license:
|
||||
key: {BASE64_ENCODED_LICENSE_KEY}
|
||||
@@ -191,7 +191,7 @@ In order to provide key via existing secret, the following values file is used:
|
||||
```yaml
|
||||
server:
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
|
||||
license:
|
||||
secret:
|
||||
@@ -240,7 +240,7 @@ spec:
|
||||
license:
|
||||
key: {BASE64_ENCODED_LICENSE_KEY}
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
```
|
||||
|
||||
In order to provide key via existing secret, the following custom resource is used:
|
||||
@@ -257,7 +257,7 @@ spec:
|
||||
name: vm-license
|
||||
key: license
|
||||
image:
|
||||
tag: v1.123.0-enterprise
|
||||
tag: v1.124.0-enterprise
|
||||
```
|
||||
|
||||
Example secret with license key:
|
||||
@@ -290,7 +290,7 @@ Builds are available for amd64 and arm64
|
||||
|
||||
Example archive:
|
||||
|
||||
`victoria-metrics-linux-amd64-v1.123.0-enterprise.tar.gz`
|
||||
`victoria-metrics-linux-amd64-v1.124.0-enterprise.tar.gz`
|
||||
|
||||
Includes:
|
||||
|
||||
@@ -299,7 +299,7 @@ Includes:
|
||||
|
||||
Example Docker image:
|
||||
|
||||
`victoriametrics/victoria-metrics:v1.123.0-enterprise-fips` – uses the FIPS-compatible binary and based on `scratch` image.
|
||||
`victoriametrics/victoria-metrics:v1.124.0-enterprise-fips` – uses the FIPS-compatible binary and based on `scratch` image.
|
||||
|
||||
## Monitoring license expiration
|
||||
|
||||
|
||||
@@ -36,8 +36,8 @@ scrape_configs:
|
||||
After you created the `scrape.yaml` file, download and unpack [single-node VictoriaMetrics](https://docs.victoriametrics.com/) to the same directory:
|
||||
|
||||
```
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
```
|
||||
|
||||
Then start VictoriaMetrics and instruct it to scrape targets defined in `scrape.yaml` and save scraped metrics
|
||||
@@ -152,8 +152,8 @@ Then start [single-node VictoriaMetrics](https://docs.victoriametrics.com/) acco
|
||||
|
||||
```yaml
|
||||
# Download and unpack single-node VictoriaMetrics
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.123.0.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.124.0.tar.gz
|
||||
|
||||
# Run single-node VictoriaMetrics with the given scrape.yaml
|
||||
./victoria-metrics-prod -promscrape.config=scrape.yaml
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
build:
|
||||
list: never
|
||||
publishResources: false
|
||||
render: never
|
||||
---
|
||||
VictoriaMetrics command-line tool (**vmctl**) provides the following migration modes:
|
||||
- [Prometheus](https://docs.victoriametrics.com/victoriametrics/vmctl/prometheus/) to VictoriaMetrics via [snapshot](https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot)
|
||||
- [InfluxDB](https://docs.victoriametrics.com/victoriametrics/vmctl/influxdb/) to VictoriaMetrics
|
||||
@@ -26,9 +32,9 @@ vmctl command-line tool is available as:
|
||||
|
||||
Download and unpack vmctl:
|
||||
```sh
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.123.0/vmutils-darwin-arm64-v1.123.0.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.124.0/vmutils-darwin-arm64-v1.124.0.tar.gz
|
||||
|
||||
tar xzf vmutils-darwin-arm64-v1.123.0.tar.gz
|
||||
tar xzf vmutils-darwin-arm64-v1.124.0.tar.gz
|
||||
```
|
||||
|
||||
Once binary is unpacked, see the full list of supported modes by running the following command:
|
||||
@@ -352,4 +358,4 @@ Moved to [vmctl/victoriametrics](https://docs.victoriametrics.com/victoriametric
|
||||
|
||||
###### Tuning
|
||||
|
||||
Moved to [vmctl#migration-tips](https://docs.victoriametrics.com/victoriametrics/vmctl#migration-tips).
|
||||
Moved to [vmctl#migration-tips](https://docs.victoriametrics.com/victoriametrics/vmctl#migration-tips).
|
||||
|
||||
go.mod
@@ -2,12 +2,6 @@ module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
go 1.25.0
|
||||
|
||||
// This is needed in order to avoid vmbackup and vmrestore binary size increase by 20MB
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
//
|
||||
// TODO: remove this entry after https://github.com/googleapis/google-cloud-go/issues/11448 is fixed
|
||||
replace cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0
|
||||
|
||||
// Pin AWS libraries to version before 2025-01-15
|
||||
// Release notes: https://github.com/aws/aws-sdk-go-v2/releases/tag/release-2025-01-15
|
||||
// This version enabled request and response checksum verification by default which
|
||||
@@ -66,13 +60,18 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.4 // indirect
|
||||
cloud.google.com/go/auth v0.16.3 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
|
||||
@@ -92,11 +91,15 @@ require (
|
||||
github.com/aws/smithy-go v1.22.5 // indirect
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dennwc/varint v1.0.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
@@ -127,6 +130,7 @@ require (
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.131.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.131.0 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
@@ -136,9 +140,11 @@ require (
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/component v1.37.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.37.0 // indirect
|
||||
@@ -151,6 +157,7 @@ require (
|
||||
go.opentelemetry.io/collector/processor v1.37.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
|
||||
@@ -158,6 +165,7 @@ require (
|
||||
go.opentelemetry.io/otel/log v0.13.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/goleak v1.3.0 // indirect
|
||||
|
||||
go.sum
@@ -1,3 +1,5 @@
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.121.4 h1:cVvUiY0sX0xwyxPwdSU2KsF9knOVmtRyAMt8xou0iTs=
|
||||
cloud.google.com/go v0.121.4/go.mod h1:XEBchUiHFJbz4lKBZwYBDHV/rSyfFktk737TLDU089s=
|
||||
cloud.google.com/go/auth v0.16.3 h1:kabzoQ9/bobUmnseYnBO6qQG7q4a/CffFRlJSxv2wCc=
|
||||
@@ -8,10 +10,16 @@ cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeO
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
|
||||
cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
|
||||
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
|
||||
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
|
||||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
|
||||
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
|
||||
cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI=
|
||||
cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 h1:Hr5FTipp7SL07o2FvoVOX9HRiRH3CR3Mj8pxqCcdD5A=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
@@ -34,8 +42,16 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJ
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20250728123024-98593029b5aa h1:qTB0QsUpBe/WzXQKcALj3Ossizb2daUHXmaVoWFdVlE=
|
||||
github.com/VictoriaMetrics/VictoriaLogs v0.0.0-20250728123024-98593029b5aa/go.mod h1:jeov7Un2x4Dpxw2Qn2MWa0kbwNn1Gc2Iw+8gvPqGsZk=
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
|
||||
@@ -131,8 +147,11 @@ github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8E
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
|
||||
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
||||
@@ -145,6 +164,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
|
||||
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -340,6 +361,8 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBw
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -376,6 +399,8 @@ github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAz
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
|
||||
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/component v1.37.0 h1:yc5X0WhZwlpJ+W8Sg1fpRRjiUu3nByLe1wVOKWWRWRQ=
|
||||
@@ -416,6 +441,8 @@ go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCu
|
||||
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 h1:FGre0nZh5BSw7G73VpT3xs38HchsfPsa2aZtMp0NPOs=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0/go.mod h1:X2PYPViI2wTPIMIOBjG17KNybTzsrATnvPJ02kkz7LM=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 h1:rbRJ8BBoVMsQShESYZ0FkvcITu8X8QNwJogcLUmDNNw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0/go.mod h1:ru6KHrNtNHxM4nD/vd6QrLVWgKhxPYgblq4VAtNawTQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0 h1:wCeciVlAfb5DC8MQl/DlmAv/FVPNpQgFvI/71+hatuc=
|
||||
@@ -424,6 +451,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/X
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
|
||||
go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls=
|
||||
go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E=
|
||||
go.opentelemetry.io/otel/log/logtest v0.13.0 h1:xxaIcgoEEtnwdgj6D6Uo9K/Dynz9jqIxSDu2YObJ69Q=
|
||||
|
||||
@@ -44,7 +44,7 @@ type FS struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// envLoookupFunc is used for looking up environment variables in tests.
|
||||
// envLookupFunc is used for looking up environment variables in tests.
|
||||
envLookupFunc func(name string) (string, bool)
|
||||
}
|
||||
|
||||
|
||||
@@ -177,7 +177,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||
pathReal, err := filepath.EvalSymlinks(pathOrig)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) || strings.Contains(err.Error(), "no such file or directory") {
|
||||
// Remove symlink that points to nowere.
|
||||
// Remove symlink that points to nowhere.
|
||||
logger.Infof("removing broken symlink %q", pathOrig)
|
||||
if err := os.Remove(pathOrig); err != nil {
|
||||
return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
|
||||
|
||||
@@ -84,7 +84,7 @@ func TestByteBufferReadFrom(t *testing.T) {
|
||||
t.Fatalf("unexpected number of bytes read; got %d; want %d", n, 0)
|
||||
}
|
||||
if len(bb.B) != 0 {
|
||||
t.Fatalf("unexpejcted len(bb.B); got %d; want %d", len(bb.B), 0)
|
||||
t.Fatalf("unexpected len(bb.B); got %d; want %d", len(bb.B), 0)
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ func TestBuffer(t *testing.T) {
|
||||
|
||||
cbLen := cb.Len()
|
||||
if cbLen != totalSize {
|
||||
t.Fatalf("nexpected Buffer.Len value; got %d; want %d", cbLen, totalSize)
|
||||
t.Fatalf("unexpected Buffer.Len value; got %d; want %d", cbLen, totalSize)
|
||||
}
|
||||
|
||||
size := cb.SizeBytes()
|
||||
|
||||
@@ -33,7 +33,7 @@ func (rh *ConsistentHash) GetNodeIdx(h uint64, excludeIdxs []int) int {
|
||||
|
||||
if len(excludeIdxs) == len(rh.nodeHashes) {
|
||||
// All the nodes are excluded. Treat this case as no nodes are excluded.
|
||||
// This is better from load-balacning PoV than selecting some static node.
|
||||
// This is better from load-balancing PoV than selecting some static node.
|
||||
excludeIdxs = nil
|
||||
}
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestConsistentHash(t *testing.T) {
|
||||
}
|
||||
maxIndexMismatches := float64(len(keys)) / float64(len(nodes))
|
||||
if float64(indexMismatches) > maxIndexMismatches {
|
||||
t.Fatalf("too many index mismtaches after excluding a node; got %d; want no more than %f", indexMismatches, maxIndexMismatches)
|
||||
t.Fatalf("too many index mismatches after excluding a node; got %d; want no more than %f", indexMismatches, maxIndexMismatches)
|
||||
}
|
||||
expectedPerIdxCount = float64(len(keys)) / float64(len(nodes)-1)
|
||||
for i, perIdxCount := range perIdxCounts {
|
||||
|
||||
@@ -83,7 +83,7 @@ func UnmarshalInt64(src []byte) int64 {
|
||||
return v
|
||||
}
|
||||
|
||||
// MarshalVarInt64 appends marshalsed v to dst and returns the result.
|
||||
// MarshalVarInt64 appends marshaled v to dst and returns the result.
|
||||
func MarshalVarInt64(dst []byte, v int64) []byte {
|
||||
u := uint64((v << 1) ^ (v >> 63))
|
||||
|
||||
@@ -578,7 +578,7 @@ type Uint64s struct {
|
||||
var uint64sPool sync.Pool
|
||||
|
||||
// GetUint32s returns an uint32 slice with the given size.
|
||||
// The slize contents isn't initialized - it may contain garbage.
|
||||
// The slice contents isn't initialized - it may contain garbage.
|
||||
func GetUint32s(size int) *Uint32s {
|
||||
v := uint32sPool.Get()
|
||||
if v == nil {
|
||||
|
||||
@@ -94,7 +94,7 @@ func IsPartiallyRemovedDir(dirPath string) bool {
|
||||
des := MustReadDir(dirPath)
|
||||
if len(des) == 0 {
|
||||
// Delete empty dirs too, since they may appear when the unclean shutdown happens after the deleteDirFilename is deleted,
|
||||
// but before the directory is deleted istelf.
|
||||
// but before the directory is deleted itself.
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ type server struct {
|
||||
// In such cases the caller must serve the request.
|
||||
type RequestHandler func(w http.ResponseWriter, r *http.Request) bool
|
||||
|
||||
// ServeOptions defiens optional parameters for http server
|
||||
// ServeOptions defines optional parameters for http server
|
||||
type ServeOptions struct {
|
||||
// UseProxyProtocol if is set to true for the corresponding addr, then the incoming connections are accepted via proxy protocol.
|
||||
// See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
|
||||
|
||||
@@ -104,7 +104,7 @@ func readProxyProto(r io.Reader) (net.Addr, error) {
|
||||
// Read the protocol block itself
|
||||
bb.B = bytesutil.ResizeNoCopyMayOverallocate(bb.B, blockLen)
|
||||
if _, err := io.ReadFull(r, bb.B); err != nil {
|
||||
return nil, fmt.Errorf("cannot read proxy protocol block with the lehgth %d bytes: %w", blockLen, err)
|
||||
return nil, fmt.Errorf("cannot read proxy protocol block with the length %d bytes: %w", blockLen, err)
|
||||
}
|
||||
switch command {
|
||||
case 0:
|
||||
|
||||
@@ -72,7 +72,7 @@ func MustOpenFastQueue(path, name string, maxInmemoryBlocks int, maxPendingBytes
|
||||
return fq
|
||||
}
|
||||
|
||||
// IsPersistentQueueDisabled returns true if persistend queue at fq is disabled.
|
||||
// IsPersistentQueueDisabled returns true if persistent queue at fq is disabled.
|
||||
func (fq *FastQueue) IsPersistentQueueDisabled() bool {
|
||||
return fq.isPQDisabled
|
||||
}
|
||||
|
||||
@@ -663,7 +663,7 @@ func (opts *Options) NewConfig() (*Config, error) {
|
||||
}
|
||||
if opts.OAuth2 != nil {
|
||||
if actx.getAuthHeader != nil {
|
||||
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth, `bearer_token` and `ouath2`")
|
||||
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth, `bearer_token` and `oauth2`")
|
||||
}
|
||||
if err := actx.initFromOAuth2Config(baseDir, opts.OAuth2); err != nil {
|
||||
return nil, fmt.Errorf("cannot initialize oauth2: %w", err)
|
||||
|
||||
@@ -231,7 +231,7 @@ scrape_configs:
|
||||
`
|
||||
var cfg Config
|
||||
if err := cfg.parseData([]byte(data), "sss"); err != nil {
|
||||
t.Fatalf("cannot parase data: %s", err)
|
||||
t.Fatalf("cannot parse data: %s", err)
|
||||
}
|
||||
sws := cfg.getStaticScrapeWork()
|
||||
swsExpected := []*ScrapeWork{
|
||||
@@ -304,7 +304,7 @@ scrape_configs:
|
||||
`
|
||||
var cfg Config
|
||||
if err := cfg.parseData([]byte(data), "sss"); err != nil {
|
||||
t.Fatalf("cannot parase data: %s", err)
|
||||
t.Fatalf("cannot parse data: %s", err)
|
||||
}
|
||||
sws := cfg.getStaticScrapeWork()
|
||||
swsExpected := []*ScrapeWork{{
|
||||
@@ -330,7 +330,7 @@ scrape_configs:
|
||||
`
|
||||
var cfg Config
|
||||
if err := cfg.parseData([]byte(data), "sss"); err != nil {
|
||||
t.Fatalf("cannot parase data: %s", err)
|
||||
t.Fatalf("cannot parse data: %s", err)
|
||||
}
|
||||
sws := cfg.getFileSDScrapeWork(nil)
|
||||
if !equalStaticConfigForScrapeWorks(sws, sws) {
|
||||
@@ -1193,7 +1193,7 @@ scrape_configs:
|
||||
scrape_configs:
|
||||
- job_name: path wo slash
|
||||
enable_compression: false
|
||||
static_configs:
|
||||
static_configs:
|
||||
- targets: ["foo.bar:1234"]
|
||||
relabel_configs:
|
||||
- replacement: metricspath
|
||||
|
||||
@@ -179,7 +179,7 @@ func getRefreshTokenFunc(sdc *SDConfig, ac, proxyAC *promauth.Config, env *cloud
|
||||
return nil, fmt.Errorf("missing `client_id` config option for `authentication_method: OAuth`")
|
||||
}
|
||||
if sdc.ClientSecret.String() == "" {
|
||||
return nil, fmt.Errorf("missing `client_secrect` config option for `authentication_method: OAuth`")
|
||||
return nil, fmt.Errorf("missing `client_secret` config option for `authentication_method: OAuth`")
|
||||
}
|
||||
q := url.Values{
|
||||
"grant_type": []string{"client_credentials"},
|
||||
|
||||
@@ -55,7 +55,7 @@ type InstancesResponse struct {
|
||||
NextPageToken string `xml:"nextToken"`
|
||||
}
|
||||
|
||||
// ReservationSet represetns ReservationSet from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html
|
||||
// ReservationSet represents ReservationSet from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html
|
||||
type ReservationSet struct {
|
||||
Items []Reservation `xml:"item"`
|
||||
}
|
||||
|
||||
@@ -76,7 +76,7 @@ type Tag struct {
|
||||
Content string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
// DataCenterInfo -eureka datacentre metadata
|
||||
// DataCenterInfo -eureka datacenter metadata
|
||||
type DataCenterInfo struct {
|
||||
Name string `xml:"name"`
|
||||
Metadata MetaData `xml:"metadata"`
|
||||
|
||||
@@ -963,7 +963,7 @@ func (uw *urlWatcher) maybeUpdateDependedScrapeWorksLocked() {
|
||||
continue
|
||||
}
|
||||
if attachNodeMetadata && role == "node" && (uwx.role == "pod" || uwx.role == "endpoints" || uwx.role == "endpointslice") {
|
||||
// pod, endpoints and enpointslices objects depend on node objects if attachNodeMetadata is set
|
||||
// pod, endpoints and endpointslices objects depend on node objects if attachNodeMetadata is set
|
||||
uwx.needRecreateScrapeWorks = true
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ func appendThreeStrings(dst []byte, a, b, c string) []byte {
|
||||
return dst
|
||||
}
|
||||
|
||||
// OwnerReference represents OwnerReferense from k8s API.
|
||||
// OwnerReference represents OwnerReference from k8s API.
|
||||
//
|
||||
// See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#ownerreference-v1-meta
|
||||
type OwnerReference struct {
|
||||
|
||||
@@ -46,7 +46,7 @@ func (sdc *SDConfig) role() string {
|
||||
return sdc.Role
|
||||
}
|
||||
|
||||
// AttachMetadata represents `attach_metadata` option at `kuberentes_sd_config`.
|
||||
// AttachMetadata represents `attach_metadata` option at `kubernetes_sd_config`.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
|
||||
type AttachMetadata struct {
|
||||
|
||||
@@ -105,7 +105,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||
|
||||
func getAPIServerPath(serverURL string) (string, string, error) {
|
||||
if serverURL == "" {
|
||||
return "", "", fmt.Errorf("missing servier url")
|
||||
return "", "", fmt.Errorf("missing server url")
|
||||
}
|
||||
if !strings.Contains(serverURL, "://") {
|
||||
serverURL = "http://" + serverURL
|
||||
|
||||
lib/promutil/labelscompressorv2_test.go (new file)
@@ -0,0 +1,57 @@
package promutil

import (
	"reflect"
	"runtime"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)

func TestLabelsCompressorV2(t *testing.T) {
	lc := NewLabelsCompressorV2()

	labels1 := []prompb.Label{
		{Name: "label1", Value: "value1"},
		{Name: "label2", Value: "value2"},
		{Name: "label3", Value: "value3"},
	}
	labels2 := []prompb.Label{
		{Name: "label3", Value: "value3"},
		{Name: "label4", Value: "value4"},
		{Name: "label5", Value: "value5"},
	}

	compressed1 := lc.Compress(labels1)
	compressed2 := lc.Compress(labels2)

	// Both keys are still reachable, so no interned labels may be dropped yet.
	runtime.GC()
	cleaned := lc.Cleanup()
	if cleaned != 0 {
		t.Fatalf("lc.Cleanup() should've cleaned zero unused labels, got %d", cleaned)
	}

	decompressed1 := compressed1.Decompress()
	if !reflect.DeepEqual(labels1, decompressed1) {
		t.Fatalf("decompressed labels1 do not match original: got %+v, want %+v", decompressed1, labels1)
	}

	// Drop the first key: label1 and label2 become unreferenced, while label3 is still used by compressed2.
	compressed1 = Key{}
	runtime.GC()
	cleaned = lc.Cleanup()
	if cleaned != 2 {
		t.Fatalf("lc.Cleanup() should've cleaned two unused labels, got %d", cleaned)
	}

	decompressed2 := compressed2.Decompress()
	if !reflect.DeepEqual(labels2, decompressed2) {
		t.Fatalf("decompressed labels2 do not match original: got %+v, want %+v", decompressed2, labels2)
	}

	// Drop the second key: the remaining three interned labels become unreferenced.
	compressed2 = Key{}
	runtime.GC()
	cleaned = lc.Cleanup()
	if cleaned != 3 {
		t.Fatalf("lc.Cleanup() should've cleaned three unused labels, got %d", cleaned)
	}
}
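Since the implementation relies on the standard `weak` package, the test requires the Go version declared in `go.mod`; it can be run in isolation with `go test -run TestLabelsCompressorV2 ./lib/promutil/`.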
lib/promutil/labelscomressorv2.go (new file)
@@ -0,0 +1,102 @@
package promutil

import (
	"log"
	"sync"
	"time"
	"weak"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)

// Key is a compressed reference to a set of labels interned by LabelsCompressorV2.
//
// As long as a Key is reachable, the labels it references cannot be dropped by Cleanup.
type Key struct {
	labelRefs []labelRef
}

// Decompress returns a copy of the labels referenced by k.
func (k Key) Decompress() []prompb.Label {
	res := make([]prompb.Label, 0, len(k.labelRefs))
	for i := range k.labelRefs {
		res = append(res, cloneLabel(*k.labelRefs[i].label))
	}

	return res
}

// labelRef holds a strong pointer to an interned label, keeping it alive while the owning Key is reachable.
type labelRef struct {
	label *prompb.Label
}

// LabelsCompressorV2 interns labels and tracks them via weak pointers,
// so labels no longer referenced by any Key can be reclaimed by the garbage collector.
type LabelsCompressorV2 struct {
	mux    sync.Mutex
	labels map[prompb.Label]weak.Pointer[prompb.Label]
}

// NewLabelsCompressorV2 returns a compressor and starts a background goroutine,
// which periodically drops map entries for labels already reclaimed by the garbage collector.
func NewLabelsCompressorV2() *LabelsCompressorV2 {
	lc := &LabelsCompressorV2{
		labels: make(map[prompb.Label]weak.Pointer[prompb.Label]),
	}

	go lc.cleanup()

	return lc
}

// Compress interns the given labels and returns a Key referencing them.
func (lc *LabelsCompressorV2) Compress(labels []prompb.Label) Key {
	lc.mux.Lock()
	defer lc.mux.Unlock()

	labelRefs := make([]labelRef, 0, len(labels))
	for i := range labels {
		wl := lc.labels[labels[i]]
		l := wl.Value()
		if l == nil {
			// The label isn't interned yet, or its previous value has already been reclaimed.
			labelKey := cloneLabel(labels[i])
			labelVal := cloneLabel(labels[i])

			wl = weak.Make(&labelVal)
			lc.labels[labelKey] = wl

			l = wl.Value()
		}

		labelRefs = append(labelRefs, labelRef{
			label: l,
		})
	}

	return Key{
		labelRefs: labelRefs,
	}
}

// cleanup periodically removes map entries whose weakly-referenced labels have been reclaimed.
func (lc *LabelsCompressorV2) cleanup() {
	t := time.NewTicker(5 * time.Minute)
	defer t.Stop()

	for range t.C {
		lc.Cleanup()
	}
}

// Cleanup removes entries for labels that are no longer referenced by any Key
// and returns the number of removed entries.
func (lc *LabelsCompressorV2) Cleanup() int {
	lc.mux.Lock()
	defer lc.mux.Unlock()

	count := 0

	for l, wl := range lc.labels {
		if wl.Value() != nil {
			continue
		}

		log.Println(l)

		count++
		delete(lc.labels, l)
	}

	return count
}
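
A minimal usage sketch from a hypothetical caller's point of view; only the `promutil` and `prompb` identifiers come from the repository, while the package, function and variable names below are illustrative:

```go
package example

import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutil"
)

// dedupLabelSets keeps only compact Keys instead of full label slices.
func dedupLabelSets(series [][]prompb.Label) []promutil.Key {
	lc := promutil.NewLabelsCompressorV2()

	keys := make([]promutil.Key, 0, len(series))
	for _, labels := range series {
		keys = append(keys, lc.Compress(labels))
	}

	// The original labels can be restored on demand.
	for _, k := range keys {
		_ = k.Decompress()
	}

	// Once a Key becomes unreachable, the labels it referenced can be garbage-collected,
	// and a later Cleanup() call (or the background goroutine) drops the stale map entries.
	return keys
}
```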
@@ -165,7 +165,7 @@ func (uw *unmarshalWork) runCallback(rows []csvimport.Row) {
ctx.wg.Done()
}

// Unmarshal implements prototparserutil.UnmarshalWork
// Unmarshal implements protoparserutil.UnmarshalWork
func (uw *unmarshalWork) Unmarshal() {
uw.rows.Unmarshal(bytesutil.ToUnsafeString(uw.reqBuf), uw.cds)
rows := uw.rows.Rows

@@ -43,7 +43,7 @@ func (rs *Rows) Reset() {
rs.tagsPool = rs.tagsPool[:0]
}

// Unmarshal unmarshals grahite plaintext protocol rows from s.
// Unmarshal unmarshals graphite plaintext protocol rows from s.
//
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
//

@@ -10,7 +10,7 @@ func TestNextUnquotedChar(t *testing.T) {
t.Helper()
n := nextUnquotedChar(s, ch, noUnescape, true)
if n != nExpected {
t.Fatalf("unexpected n for nextUnqotedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
t.Fatalf("unexpected n for nextUnquotedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
}
}


@@ -42,7 +42,7 @@ func ProcessRequestBody(b []byte) ([]byte, error) {
}
totalLength := varIntLength + int(messageLength)
if totalLength > len(r.Data) {
return nil, fmt.Errorf("failed to parse OpenTelementry message: insufficient length of buffer")
return nil, fmt.Errorf("failed to parse OpenTelemetry message: insufficient length of buffer")
}
dst = append(dst, r.Data[varIntLength:totalLength]...)
r.Data = r.Data[totalLength:]

@@ -273,7 +273,7 @@ func compareValues(values, valuesExpected []float64) error {
return fmt.Errorf("expecting NaN at position #%d; got %v", i, v)
}
} else if v != vExpected {
return fmt.Errorf("unepxected value at position #%d; got %v; want %v", i, v, vExpected)
return fmt.Errorf("unexpected value at position #%d; got %v; want %v", i, v, vExpected)
}
}
return nil

@@ -15,7 +15,7 @@ type RateLimiter struct {
// perSecondLimit is the per-second limit of resources.
perSecondLimit int64

// stopCh is used for unbloking rate limiting.
// stopCh is used for unblocking rate limiting.
stopCh <-chan struct{}

// mu protects budget and deadline from concurrent access.

@@ -123,7 +123,7 @@ func (r *Regex) GetLiterals() []string {
return a
}

// String returns string represetnation for r
// String returns string representation for r
func (r *Regex) String() string {
return r.exprStr
}

@@ -282,7 +282,7 @@ func simplifyRegexpExt(sre *syntax.Regexp, keepBeginOp, keepEndOp bool) *syntax.
}
}
sre.Sub = subs
// Remove anchros from the beginning and the end of regexp, since they
// Remove anchors from the beginning and the end of regexp, since they
// will be added later.
if !keepBeginOp {
for len(sre.Sub) > 0 && sre.Sub[0].Op == syntax.OpBeginText {

@@ -720,7 +720,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, tim
}
}

// Check timerseriesCounters only for serial test.
// Check timeseriesCounters only for serial test.
// Concurrent test may create duplicate timeseries, so GetSeriesCount
// would return more timeseries than needed.
if !isConcurrent {
@@ -837,7 +837,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, tim
}
tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
if err != nil {
return fmt.Errorf("cannot search with multipel filters matching empty tags: %w", err)
return fmt.Errorf("cannot search with multiple filters matching empty tags: %w", err)
}
if !testHasTSID(tsidsFound, tsid) {
return fmt.Errorf("tsids is missing when matching multiple filters with empty tags tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)

@@ -407,7 +407,7 @@ func (mn *MetricName) String() string {
// Marshal appends marshaled mn to dst and returns the result.
//
// mn.sortTags must be called before calling this function
// in order to sort and de-duplcate tags.
// in order to sort and de-duplicate tags.
func (mn *MetricName) Marshal(dst []byte) []byte {
// Calculate the required size and pre-allocate space in dst
dstLen := len(dst)

@@ -193,7 +193,7 @@ func TestMetricNameRemoveTagsOn(t *testing.T) {
emptyMN.AddTag("key", "value")
emptyMN.RemoveTagsOn(nil)
if len(emptyMN.MetricGroup) != 0 || len(emptyMN.Tags) != 0 {
t.Fatalf("expecitng empty metric name got %s", &emptyMN)
t.Fatalf("expecting empty metric name got %s", &emptyMN)
}

var asIsMN MetricName
@@ -204,7 +204,7 @@ func TestMetricNameRemoveTagsOn(t *testing.T) {
expAsIsMN.MetricGroup = []byte("name")
expAsIsMN.AddTag("key", "value")
if !reflect.DeepEqual(expAsIsMN, asIsMN) {
t.Fatalf("expecitng %s got %s", &expAsIsMN, &asIsMN)
t.Fatalf("expecting %s got %s", &expAsIsMN, &asIsMN)
}

var mn MetricName
@@ -215,7 +215,7 @@ func TestMetricNameRemoveTagsOn(t *testing.T) {
var expMN MetricName
expMN.AddTag("baz", "qux")
if !reflect.DeepEqual(expMN.Tags, mn.Tags) || len(mn.MetricGroup) != len(expMN.MetricGroup) {
t.Fatalf("expecitng %s got %s", &expMN, &mn)
t.Fatalf("expecting %s got %s", &expMN, &mn)
}
}

@@ -232,7 +232,7 @@ func TestMetricNameRemoveTag(t *testing.T) {
var expMN MetricName
expMN.AddTag("baz", "qux")
if !reflect.DeepEqual(expMN.Tags, mn.Tags) || len(mn.MetricGroup) != len(expMN.MetricGroup) {
t.Fatalf("expecitng %s got %s", &expMN, &mn)
t.Fatalf("expecting %s got %s", &expMN, &mn)
}
}

@@ -245,6 +245,6 @@ func TestMetricNameRemoveTagsIgnoring(t *testing.T) {
var expMN MetricName
expMN.AddTag("baz", "qux")
if !reflect.DeepEqual(expMN.Tags, mn.Tags) || len(mn.MetricGroup) != len(expMN.MetricGroup) {
t.Fatalf("expecitng %s got %s", &expMN, &mn)
t.Fatalf("expecting %s got %s", &expMN, &mn)
}
}

@@ -187,7 +187,7 @@ func (mt *Tracker) cloneMetricNameLocked(metricName []byte) string {
idx := len(mt.metricNamesBuf)
n := len(metricName) + len(mt.metricNamesBuf)
if n > cap(mt.metricNamesBuf) {
// allocate a new slice instead of reallocting exist
// allocate a new slice instead of reallocating exist
// it saves memory and reduces GC pressure
mt.metricNamesBuf = make([]byte, 0, metricNameBufSize)
idx = 0

@@ -195,7 +195,7 @@ func testSearchInternal(s *Storage, tr TimeRange, mrs []MetricRow) error {
return fmt.Errorf("cannot add tag filter %q=%q: %w", "instance", ".*", err)
}

// Build extectedMrs.
// Build expectedMrs.
var expectedMrs []MetricRow
metricGroupRegexp := regexp.MustCompile(fmt.Sprintf("^%s$", metricGroupRe))
var mn MetricName

@@ -2563,11 +2563,11 @@ func TestStorageSearchMetricNames_TooManyTimeseries(t *testing.T) {
names, err := s.SearchMetricNames(nil, tfss, opts.tr, opts.maxMetrics, noDeadline)
gotErr := err != nil
if gotErr != opts.wantErr {
t.Errorf("SeachMetricNames(%v, %v, %d): unexpected error: got %v, want error to happen %v", []any{
t.Errorf("SearchMetricNames(%v, %v, %d): unexpected error: got %v, want error to happen %v", []any{
tfss, &opts.tr, opts.maxMetrics, err, opts.wantErr}...)
}
if got := len(names); got != opts.wantCount {
t.Errorf("SeachMetricNames(%v, %v, %d): unexpected metric name count: got %d, want %d", []any{
t.Errorf("SearchMetricNames(%v, %v, %d): unexpected metric name count: got %d, want %d", []any{
tfss, &opts.tr, opts.maxMetrics, got, opts.wantCount}...)
}
}
@@ -3897,7 +3897,7 @@ func TestStorageAddRows_currHourMetricIDs(t *testing.T) {
// testSearchMetricIDs returns metricIDs for the given tfss and tr.
//
// The returned metricIDs are sorted. The function panics in in case of error.
// The function is not a part of Storage beause it is currently used in unit
// The function is not a part of Storage because it is currently used in unit
// tests only.
func testSearchMetricIDs(s *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) []uint64 {
search := func(qt *querytracer.Tracer, idb *indexDB, tr TimeRange) ([]uint64, error) {
@@ -4345,7 +4345,7 @@ func testGenerateMetricRowBatches(opts *batchOptions) ([][]MetricRow, *counts) {
allTimeseries := len(names)
rowsAddedTotal := uint64(opts.numBatches * opts.numRowsPerBatch)

// When RegisterMetricNames() is called it only restisters the time series
// When RegisterMetricNames() is called it only registers the time series
// in IndexDB but no samples is written to the storage.
if opts.registerOnly {
rowsAddedTotal = 0

@@ -460,10 +460,10 @@ func (tb *table) historicalMergeWatcher() {
for _, ptw := range ptws {
if ptw.pt.name == currentPartitionName {
// Do not run force merge for the current month.
// For the current month, the samples are countinously
// For the current month, the samples are continuously
// deduplicated and retention filters applied by the background in-memory, small, and big part
// merge tasks. See:
// - partition.mergeParts() in paritiont.go and
// - partition.mergeParts() in partition.go and
// - Block.deduplicateSamplesDuringMerge() in block.go.
// - blockStreamMerger.getRetentionDeadline() in block_stream_merger.go
continue

@@ -418,7 +418,7 @@ func getCommonPrefix(ss []string) (string, []string) {
//
// commonPrefix must contain either {nsPrefixTagToMetricIDs} or {nsPrefixDateTagToMetricIDs, date}.
//
// If isNegaitve is true, then the tag filter matches all the values
// If isNegative is true, then the tag filter matches all the values
// except the given one.
//
// If isRegexp is true, then the value is interpreted as anchored regexp,

@@ -162,7 +162,7 @@ type Config struct {
// Interval is the interval between aggregations.
Interval string `yaml:"interval"`

// NoAlighFlushToInterval disables aligning of flushes to multiples of Interval.
// NoAlignFlushToInterval disables aligning of flushes to multiples of Interval.
// By default flushes are aligned to Interval.
//
// See also FlushOnShutdown.

0
streamaggr.yaml
Normal file
0
streamaggr.yaml
Normal file
2
vendor/cel.dev/expr/.bazelversion
vendored
Normal file
2
vendor/cel.dev/expr/.bazelversion
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
7.3.2
|
||||
# Keep this pinned version in parity with cel-go
|
||||
2
vendor/cel.dev/expr/.gitattributes
vendored
Normal file
2
vendor/cel.dev/expr/.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*.pb.go linguist-generated=true
|
||||
*.pb.go -diff -merge
|
||||
2
vendor/cel.dev/expr/.gitignore
vendored
Normal file
2
vendor/cel.dev/expr/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
bazel-*
|
||||
MODULE.bazel.lock
|
||||
34
vendor/cel.dev/expr/BUILD.bazel
vendored
Normal file
34
vendor/cel.dev/expr/BUILD.bazel
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
go_library(
|
||||
name = "expr",
|
||||
srcs = [
|
||||
"checked.pb.go",
|
||||
"eval.pb.go",
|
||||
"explain.pb.go",
|
||||
"syntax.pb.go",
|
||||
"value.pb.go",
|
||||
],
|
||||
importpath = "cel.dev/expr",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//reflect/protoreflect",
|
||||
"@org_golang_google_protobuf//runtime/protoimpl",
|
||||
"@org_golang_google_protobuf//types/known/anypb",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/emptypb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":expr",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
25
vendor/cel.dev/expr/CODE_OF_CONDUCT.md
vendored
Normal file
25
vendor/cel.dev/expr/CODE_OF_CONDUCT.md
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# Contributor Code of Conduct
|
||||
## Version 0.1.1 (adapted from 0.3b-angular)
|
||||
|
||||
As contributors and maintainers of the Common Expression Language
|
||||
(CEL) project, we pledge to respect everyone who contributes by
|
||||
posting issues, updating documentation, submitting pull requests,
|
||||
providing feedback in comments, and any other activities.
|
||||
|
||||
Communication through any of CEL's channels (GitHub, Gitter, IRC,
|
||||
mailing lists, Google+, Twitter, etc.) must be constructive and never
|
||||
resort to personal attacks, trolling, public or private harassment,
|
||||
insults, or other unprofessional conduct.
|
||||
|
||||
We promise to extend courtesy and respect to everyone involved in this
|
||||
project regardless of gender, gender identity, sexual orientation,
|
||||
disability, age, race, ethnicity, religion, or level of experience. We
|
||||
expect anyone contributing to the project to do the same.
|
||||
|
||||
If any member of the community violates this code of conduct, the
|
||||
maintainers of the CEL project may take action, removing issues,
|
||||
comments, and PRs or blocking accounts as deemed appropriate.
|
||||
|
||||
If you are subject to or witness unacceptable behavior, or have any
|
||||
other concerns, please email us at
|
||||
[cel-conduct@google.com](mailto:cel-conduct@google.com).
|
||||
32
vendor/cel.dev/expr/CONTRIBUTING.md
vendored
Normal file
32
vendor/cel.dev/expr/CONTRIBUTING.md
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# How to Contribute
|
||||
|
||||
We'd love to accept your patches and contributions to this project. There are a
|
||||
few guidelines you need to follow.
|
||||
|
||||
## Contributor License Agreement
|
||||
|
||||
Contributions to this project must be accompanied by a Contributor License
|
||||
Agreement. You (or your employer) retain the copyright to your contribution,
|
||||
this simply gives us permission to use and redistribute your contributions as
|
||||
part of the project. Head over to <https://cla.developers.google.com/> to see
|
||||
your current agreements on file or to sign a new one.
|
||||
|
||||
You generally only need to submit a CLA once, so if you've already submitted one
|
||||
(even if it was for a different project), you probably don't need to do it
|
||||
again.
|
||||
|
||||
## Code reviews
|
||||
|
||||
All submissions, including submissions by project members, require review. We
|
||||
use GitHub pull requests for this purpose. Consult
|
||||
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
|
||||
information on using pull requests.
|
||||
|
||||
## What to expect from maintainers
|
||||
|
||||
Expect maintainers to respond to new issues or pull requests within a week.
|
||||
For outstanding and ongoing issues and particularly for long-running
|
||||
pull requests, expect the maintainers to review within a week of a
|
||||
contributor asking for a new review. There is no commitment to resolution --
|
||||
merging or closing a pull request, or fixing or closing an issue -- because some
|
||||
issues will require more discussion than others.
|
||||
43
vendor/cel.dev/expr/GOVERNANCE.md
vendored
Normal file
43
vendor/cel.dev/expr/GOVERNANCE.md
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
# Project Governance
|
||||
|
||||
This document defines the governance process for the CEL language. CEL is
|
||||
Google-developed, but openly governed. Major contributors to the CEL
|
||||
specification and its corresponding implementations constitute the CEL
|
||||
Language Council. New members may be added by a unanimous vote of the
|
||||
Council.
|
||||
|
||||
The MAINTAINERS.md file lists the members of the CEL Language Council, and
|
||||
unofficially indicates the "areas of expertise" of each member with respect
|
||||
to the publicly available CEL repos.
|
||||
|
||||
## Code Changes
|
||||
|
||||
Code changes must follow the standard pull request (PR) model documented in the
|
||||
CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
|
||||
maintainer. The maintainer reserves the right to request that any feature
|
||||
request (FR) or PR be reviewed by the language council.
|
||||
|
||||
## Syntax and Semantic Changes
|
||||
|
||||
Syntactic and semantic changes must be reviewed by the CEL Language Council.
|
||||
Maintainers may also request language council review at their discretion.
|
||||
|
||||
The review process is as follows:
|
||||
|
||||
- Create a Feature Request in the CEL-Spec repo. The feature description will
|
||||
serve as an abstract for the detailed design document.
|
||||
- Co-develop a design document with the Language Council.
|
||||
- Once the proposer gives the design document approval, the document will be
|
||||
linked to the FR in the CEL-Spec repo and opened for comments to members of
|
||||
the cel-lang-discuss@googlegroups.com.
|
||||
- The Language Council will review the design doc at the next council meeting
|
||||
(once every three weeks) and the council decision included in the document.
|
||||
|
||||
If the proposal is approved, the spec will be updated by a maintainer (if
|
||||
applicable) and a rationale will be included in the CEL-Spec wiki to ensure
|
||||
future developers may follow CEL's growth and direction over time.
|
||||
|
||||
Approved proposals may be implemented by the proposer or by the maintainers as
|
||||
the parties see fit. At the discretion of the maintainer, changes from the
|
||||
approved design are permitted during implementation if they improve the user
|
||||
experience and clarity of the feature.
|
||||
202
vendor/cel.dev/expr/LICENSE
vendored
Normal file
202
vendor/cel.dev/expr/LICENSE
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
13
vendor/cel.dev/expr/MAINTAINERS.md
vendored
Normal file
13
vendor/cel.dev/expr/MAINTAINERS.md
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# CEL Language Council
|
||||
|
||||
| Name | Company | Area of Expertise |
|
||||
|-----------------|--------------|-------------------|
|
||||
| Alfred Fuller | Facebook | cel-cpp, cel-spec |
|
||||
| Jim Larson | Google | cel-go, cel-spec |
|
||||
| Matthais Blume | Google | cel-spec |
|
||||
| Tristan Swadell | Google | cel-go, cel-spec |
|
||||
|
||||
## Emeritus
|
||||
|
||||
* Sanjay Ghemawat (Google)
|
||||
* Wolfgang Grieskamp (Facebook)
|
||||
74
vendor/cel.dev/expr/MODULE.bazel
vendored
Normal file
74
vendor/cel.dev/expr/MODULE.bazel
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
module(
|
||||
name = "cel-spec",
|
||||
)
|
||||
|
||||
bazel_dep(
|
||||
name = "bazel_skylib",
|
||||
version = "1.7.1",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "gazelle",
|
||||
version = "0.39.1",
|
||||
repo_name = "bazel_gazelle",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis",
|
||||
version = "0.0.0-20241220-5e258e33.bcr.1",
|
||||
repo_name = "com_google_googleapis",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis-cc",
|
||||
version = "1.0.0",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis-java",
|
||||
version = "1.0.0",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "googleapis-go",
|
||||
version = "1.0.0",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "protobuf",
|
||||
version = "27.0",
|
||||
repo_name = "com_google_protobuf",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_cc",
|
||||
version = "0.0.17",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_go",
|
||||
version = "0.53.0",
|
||||
repo_name = "io_bazel_rules_go",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_java",
|
||||
version = "7.6.5",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_proto",
|
||||
version = "7.0.2",
|
||||
)
|
||||
bazel_dep(
|
||||
name = "rules_python",
|
||||
version = "0.35.0",
|
||||
)
|
||||
|
||||
### PYTHON ###
|
||||
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
|
||||
python.toolchain(
|
||||
ignore_root_user_error = True,
|
||||
python_version = "3.11",
|
||||
)
|
||||
|
||||
go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
|
||||
go_sdk.download(version = "1.22.0")
|
||||
|
||||
go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
|
||||
go_deps.from_file(go_mod = "//:go.mod")
|
||||
use_repo(
|
||||
go_deps,
|
||||
"org_golang_google_genproto_googleapis_rpc",
|
||||
"org_golang_google_protobuf",
|
||||
)
|
||||
71
vendor/cel.dev/expr/README.md
vendored
Normal file
71
vendor/cel.dev/expr/README.md
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
# Common Expression Language
|
||||
|
||||
The Common Expression Language (CEL) implements common semantics for expression
|
||||
evaluation, enabling different applications to more easily interoperate.
|
||||
|
||||
Key Applications
|
||||
|
||||
* Security policy: organizations have complex infrastructure and need common
|
||||
tooling to reason about the system as a whole
|
||||
* Protocols: expressions are a useful data type and require interoperability
|
||||
across programming languages and platforms.
|
||||
|
||||
|
||||
Guiding philosophy:
|
||||
|
||||
1. Keep it small & fast.
|
||||
* CEL evaluates in linear time, is mutation free, and not Turing-complete.
|
||||
This limitation is a feature of the language design, which allows the
|
||||
implementation to evaluate orders of magnitude faster than equivalently
|
||||
sandboxed JavaScript.
|
||||
2. Make it extensible.
|
||||
* CEL is designed to be embedded in applications, and allows for
|
||||
extensibility via its context which allows for functions and data to be
|
||||
provided by the software that embeds it.
|
||||
3. Developer-friendly.
|
||||
* The language is approachable to developers. The initial spec was based
|
||||
on the experience of developing Firebase Rules and usability testing
|
||||
many prior iterations.
|
||||
* The library itself and accompanying toolings should be easy to adopt by
|
||||
teams that seek to integrate CEL into their platforms.
|
||||
|
||||
The required components of a system that supports CEL are:
|
||||
|
||||
* The textual representation of an expression as written by a developer. It is
|
||||
of similar syntax to expressions in C/C++/Java/JavaScript
|
||||
* A representation of the program's abstract syntax tree (AST).
|
||||
* A compiler library that converts the textual representation to the binary
|
||||
representation. This can be done ahead of time (in the control plane) or
|
||||
just before evaluation (in the data plane).
|
||||
* A context containing one or more typed variables, often protobuf messages.
|
||||
Most use-cases will use `attribute_context.proto`
|
||||
* An evaluator library that takes the binary format in the context and
|
||||
produces a result, usually a Boolean.
|
||||
|
||||
For use cases which require persistence or cross-process communcation, it is
|
||||
highly recommended to serialize the type-checked expression as a protocol
|
||||
buffer. The CEL team will maintains canonical protocol buffers for ASTs and
|
||||
will keep these versions identical and wire-compatible in perpetuity:
|
||||
|
||||
* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
|
||||
* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
|
||||
|
||||
|
||||
Example of boolean conditions and object construction:
|
||||
|
||||
``` c
|
||||
// Condition
|
||||
account.balance >= transaction.withdrawal
|
||||
|| (account.overdraftProtection
|
||||
&& account.overdraftLimit >= transaction.withdrawal - account.balance)
|
||||
|
||||
// Object construction
|
||||
common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
|
||||
```
|
||||
|
||||
For more detail, see:
|
||||
|
||||
* [Introduction](doc/intro.md)
|
||||
* [Language Definition](doc/langdef.md)
|
||||
|
||||
Released under the [Apache License](LICENSE).
|
||||
145
vendor/cel.dev/expr/WORKSPACE
vendored
Normal file
145
vendor/cel.dev/expr/WORKSPACE
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "rules_proto",
|
||||
sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
|
||||
strip_prefix = "rules_proto-4.0.0-3.20.0",
|
||||
urls = [
|
||||
"https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# googleapis as of 09/16/2024
|
||||
http_archive(
|
||||
name = "com_google_googleapis",
|
||||
strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
|
||||
sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
|
||||
urls = [
|
||||
"https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# protobuf
|
||||
http_archive(
|
||||
name = "com_google_protobuf",
|
||||
sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
|
||||
strip_prefix = "protobuf-3.21.5",
|
||||
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
|
||||
)
|
||||
|
||||
# googletest
|
||||
http_archive(
|
||||
name = "com_google_googletest",
|
||||
urls = ["https://github.com/google/googletest/archive/master.zip"],
|
||||
strip_prefix = "googletest-master",
|
||||
)
|
||||
|
||||
# gflags
|
||||
http_archive(
|
||||
name = "com_github_gflags_gflags",
|
||||
sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
|
||||
strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
|
||||
"https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# glog
|
||||
http_archive(
|
||||
name = "com_google_glog",
|
||||
sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
|
||||
strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
|
||||
"https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# absl
|
||||
http_archive(
|
||||
name = "com_google_absl",
|
||||
strip_prefix = "abseil-cpp-master",
|
||||
urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
|
||||
)
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
|
||||
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
|
||||
load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
|
||||
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
|
||||
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
|
||||
|
||||
switched_rules_by_language(
|
||||
name = "com_google_googleapis_imports",
|
||||
cc = True,
|
||||
)
|
||||
|
||||
# Do *not* call *_dependencies(), etc, yet. See comment at the end.
|
||||
|
||||
# Generated Google APIs protos for Golang
|
||||
# Generated Google APIs protos for Golang 08/26/2024
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto_googleapis_api",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/genproto/googleapis/api",
|
||||
sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
|
||||
version = "v0.0.0-20240826202546-f6391c0de4c7",
|
||||
)
|
||||
|
||||
# Generated Google APIs protos for Golang 08/26/2024
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto_googleapis_rpc",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/genproto/googleapis/rpc",
|
||||
sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
|
||||
version = "v0.0.0-20240826202546-f6391c0de4c7",
|
||||
)
|
||||
|
||||
# gRPC deps
|
||||
go_repository(
|
||||
name = "org_golang_google_grpc",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/grpc",
|
||||
tag = "v1.49.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_net",
|
||||
importpath = "golang.org/x/net",
|
||||
sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
|
||||
version = "v0.0.0-20190311183353-d8887717615a",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_text",
|
||||
importpath = "golang.org/x/text",
|
||||
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
|
||||
version = "v0.3.2",
|
||||
)
|
||||
|
||||
# Run the dependencies at the end. These will silently try to import some
|
||||
# of the above repositories but at different versions, so ours must come first.
|
||||
go_rules_dependencies()
|
||||
go_register_toolchains(version = "1.19.1")
|
||||
gazelle_dependencies()
|
||||
rules_proto_dependencies()
|
||||
rules_proto_toolchains()
|
||||
protobuf_deps()
|
||||
0
vendor/cel.dev/expr/WORKSPACE.bzlmod
vendored
Normal file
0
vendor/cel.dev/expr/WORKSPACE.bzlmod
vendored
Normal file
1432
vendor/cel.dev/expr/checked.pb.go
generated
vendored
Normal file
1432
vendor/cel.dev/expr/checked.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
9
vendor/cel.dev/expr/cloudbuild.yaml
vendored
Normal file
9
vendor/cel.dev/expr/cloudbuild.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
steps:
|
||||
- name: 'gcr.io/cloud-builders/bazel:7.3.2'
|
||||
entrypoint: bazel
|
||||
args: ['build', '...']
|
||||
id: bazel-build
|
||||
waitFor: ['-']
|
||||
timeout: 15m
|
||||
options:
|
||||
machineType: 'N1_HIGHCPU_32'
|
||||
487
vendor/cel.dev/expr/eval.pb.go
generated
vendored
Normal file
487
vendor/cel.dev/expr/eval.pb.go
generated
vendored
Normal file
@@ -0,0 +1,487 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.3
|
||||
// protoc v5.27.1
|
||||
// source: cel/expr/eval.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type EvalState struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *EvalState) Reset() {
|
||||
*x = EvalState{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EvalState) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvalState) ProtoMessage() {}
|
||||
|
||||
func (x *EvalState) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
|
||||
func (*EvalState) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *EvalState) GetValues() []*ExprValue {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *EvalState) GetResults() []*EvalState_Result {
|
||||
if x != nil {
|
||||
return x.Results
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExprValue struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// Types that are valid to be assigned to Kind:
|
||||
//
|
||||
// *ExprValue_Value
|
||||
// *ExprValue_Error
|
||||
// *ExprValue_Unknown
|
||||
Kind isExprValue_Kind `protobuf_oneof:"kind"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExprValue) Reset() {
|
||||
*x = ExprValue{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExprValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExprValue) ProtoMessage() {}
|
||||
|
||||
func (x *ExprValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
|
||||
func (*ExprValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetKind() isExprValue_Kind {
|
||||
if x != nil {
|
||||
return x.Kind
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetValue() *Value {
|
||||
if x != nil {
|
||||
if x, ok := x.Kind.(*ExprValue_Value); ok {
|
||||
return x.Value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetError() *ErrorSet {
|
||||
if x != nil {
|
||||
if x, ok := x.Kind.(*ExprValue_Error); ok {
|
||||
return x.Error
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetUnknown() *UnknownSet {
|
||||
if x != nil {
|
||||
if x, ok := x.Kind.(*ExprValue_Unknown); ok {
|
||||
return x.Unknown
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isExprValue_Kind interface {
|
||||
isExprValue_Kind()
|
||||
}
|
||||
|
||||
type ExprValue_Value struct {
|
||||
Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ExprValue_Error struct {
|
||||
Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ExprValue_Unknown struct {
|
||||
Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ExprValue_Value) isExprValue_Kind() {}
|
||||
|
||||
func (*ExprValue_Error) isExprValue_Kind() {}
|
||||
|
||||
func (*ExprValue_Unknown) isExprValue_Kind() {}
|
||||
|
||||
type ErrorSet struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ErrorSet) Reset() {
|
||||
*x = ErrorSet{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ErrorSet) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ErrorSet) ProtoMessage() {}
|
||||
|
||||
func (x *ErrorSet) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
|
||||
func (*ErrorSet) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ErrorSet) GetErrors() []*Status {
|
||||
if x != nil {
|
||||
return x.Errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Status struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Status) Reset() {
|
||||
*x = Status{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *Status) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Status) ProtoMessage() {}
|
||||
|
||||
func (x *Status) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Status.ProtoReflect.Descriptor instead.
|
||||
func (*Status) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *Status) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Status) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Status) GetDetails() []*anypb.Any {
|
||||
if x != nil {
|
||||
return x.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type UnknownSet struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *UnknownSet) Reset() {
|
||||
*x = UnknownSet{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *UnknownSet) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UnknownSet) ProtoMessage() {}
|
||||
|
||||
func (x *UnknownSet) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
|
||||
func (*UnknownSet) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *UnknownSet) GetExprs() []int64 {
|
||||
if x != nil {
|
||||
return x.Exprs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EvalState_Result struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
|
||||
Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) Reset() {
|
||||
*x = EvalState_Result{}
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvalState_Result) ProtoMessage() {}
|
||||
|
||||
func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
|
||||
func (*EvalState_Result) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) GetExpr() int64 {
|
||||
if x != nil {
|
||||
return x.Expr
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) GetValue() int64 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_cel_expr_eval_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_eval_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
|
||||
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
|
||||
0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
|
||||
0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
|
||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
|
||||
0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
|
||||
0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
|
||||
0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
|
||||
0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
|
||||
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
|
||||
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
|
||||
0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
|
||||
0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
|
||||
0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
|
||||
0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
|
||||
0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
|
||||
0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
|
||||
0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
|
||||
0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
|
||||
0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
|
||||
0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
|
||||
0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
|
||||
0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}

var (
	file_cel_expr_eval_proto_rawDescOnce sync.Once
	file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
)

func file_cel_expr_eval_proto_rawDescGZIP() []byte {
	file_cel_expr_eval_proto_rawDescOnce.Do(func() {
		file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
	})
	return file_cel_expr_eval_proto_rawDescData
}

var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_cel_expr_eval_proto_goTypes = []any{
	(*EvalState)(nil),        // 0: cel.expr.EvalState
	(*ExprValue)(nil),        // 1: cel.expr.ExprValue
	(*ErrorSet)(nil),         // 2: cel.expr.ErrorSet
	(*Status)(nil),           // 3: cel.expr.Status
	(*UnknownSet)(nil),       // 4: cel.expr.UnknownSet
	(*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result
	(*Value)(nil),            // 6: cel.expr.Value
	(*anypb.Any)(nil),        // 7: google.protobuf.Any
}
var file_cel_expr_eval_proto_depIdxs = []int32{
	1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
	5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
	6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
	2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
	4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
	3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status
	7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any
	7, // [7:7] is the sub-list for method output_type
	7, // [7:7] is the sub-list for method input_type
	7, // [7:7] is the sub-list for extension type_name
	7, // [7:7] is the sub-list for extension extendee
	0, // [0:7] is the sub-list for field type_name
}

func init() { file_cel_expr_eval_proto_init() }
func file_cel_expr_eval_proto_init() {
	if File_cel_expr_eval_proto != nil {
		return
	}
	file_cel_expr_value_proto_init()
	file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{
		(*ExprValue_Value)(nil),
		(*ExprValue_Error)(nil),
		(*ExprValue_Unknown)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_cel_expr_eval_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_cel_expr_eval_proto_goTypes,
		DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
		MessageInfos:      file_cel_expr_eval_proto_msgTypes,
	}.Build()
	File_cel_expr_eval_proto = out.File
	file_cel_expr_eval_proto_rawDesc = nil
	file_cel_expr_eval_proto_goTypes = nil
	file_cel_expr_eval_proto_depIdxs = nil
}
236
vendor/cel.dev/expr/explain.pb.go
generated
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/explain.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Explain struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Explain) Reset() {
|
||||
*x = Explain{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Explain) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Explain) ProtoMessage() {}
|
||||
|
||||
func (x *Explain) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
|
||||
func (*Explain) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Explain) GetValues() []*Value {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Explain) GetExprSteps() []*Explain_ExprStep {
|
||||
if x != nil {
|
||||
return x.ExprSteps
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Explain_ExprStep struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) Reset() {
|
||||
*x = Explain_ExprStep{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Explain_ExprStep) ProtoMessage() {}
|
||||
|
||||
func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
|
||||
func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) GetId() int64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) GetValueIndex() int32 {
|
||||
if x != nil {
|
||||
return x.ValueIndex
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_cel_expr_explain_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_explain_proto_rawDesc = []byte{
|
||||
0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
|
||||
0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
|
||||
0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
|
||||
0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
|
||||
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
|
||||
0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
|
||||
0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
|
||||
0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
|
||||
0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_explain_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_explain_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_explain_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_explain_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_cel_expr_explain_proto_goTypes = []interface{}{
|
||||
(*Explain)(nil), // 0: cel.expr.Explain
|
||||
(*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
|
||||
(*Value)(nil), // 2: cel.expr.Value
|
||||
}
|
||||
var file_cel_expr_explain_proto_depIdxs = []int32{
|
||||
2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
|
||||
1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_explain_proto_init() }
|
||||
func file_cel_expr_explain_proto_init() {
|
||||
if File_cel_expr_explain_proto != nil {
|
||||
return
|
||||
}
|
||||
file_cel_expr_value_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Explain); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Explain_ExprStep); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_explain_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_explain_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_explain_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_explain_proto = out.File
|
||||
file_cel_expr_explain_proto_rawDesc = nil
|
||||
file_cel_expr_explain_proto_goTypes = nil
|
||||
file_cel_expr_explain_proto_depIdxs = nil
|
||||
}
|
||||
9
vendor/cel.dev/expr/regen_go_proto.sh
vendored
Normal file
@@ -0,0 +1,9 @@
#!/bin/sh
bazel build //proto/cel/expr/conformance/...
files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
for src in ${files[@]};
do
dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
echo "copying $dst"
$(cp $src $dst)
done
10
vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
vendored
Normal file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
bazel build //proto/cel/expr:all

rm -vf ./*.pb.go

files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
for src in "${files[@]}";
do
cp -v "${src}" ./
done
1633
vendor/cel.dev/expr/syntax.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large.
653
vendor/cel.dev/expr/value.pb.go
generated
vendored
Normal file
@@ -0,0 +1,653 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/value.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
structpb "google.golang.org/protobuf/types/known/structpb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Value struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Types that are assignable to Kind:
|
||||
//
|
||||
// *Value_NullValue
|
||||
// *Value_BoolValue
|
||||
// *Value_Int64Value
|
||||
// *Value_Uint64Value
|
||||
// *Value_DoubleValue
|
||||
// *Value_StringValue
|
||||
// *Value_BytesValue
|
||||
// *Value_EnumValue
|
||||
// *Value_ObjectValue
|
||||
// *Value_MapValue
|
||||
// *Value_ListValue
|
||||
// *Value_TypeValue
|
||||
Kind isValue_Kind `protobuf_oneof:"kind"`
|
||||
}
|
||||
|
||||
func (x *Value) Reset() {
|
||||
*x = Value{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Value) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Value) ProtoMessage() {}
|
||||
|
||||
func (x *Value) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Value.ProtoReflect.Descriptor instead.
|
||||
func (*Value) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *Value) GetKind() isValue_Kind {
|
||||
if m != nil {
|
||||
return m.Kind
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetNullValue() structpb.NullValue {
|
||||
if x, ok := x.GetKind().(*Value_NullValue); ok {
|
||||
return x.NullValue
|
||||
}
|
||||
return structpb.NullValue(0)
|
||||
}
|
||||
|
||||
func (x *Value) GetBoolValue() bool {
|
||||
if x, ok := x.GetKind().(*Value_BoolValue); ok {
|
||||
return x.BoolValue
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Value) GetInt64Value() int64 {
|
||||
if x, ok := x.GetKind().(*Value_Int64Value); ok {
|
||||
return x.Int64Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Value) GetUint64Value() uint64 {
|
||||
if x, ok := x.GetKind().(*Value_Uint64Value); ok {
|
||||
return x.Uint64Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Value) GetDoubleValue() float64 {
|
||||
if x, ok := x.GetKind().(*Value_DoubleValue); ok {
|
||||
return x.DoubleValue
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Value) GetStringValue() string {
|
||||
if x, ok := x.GetKind().(*Value_StringValue); ok {
|
||||
return x.StringValue
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Value) GetBytesValue() []byte {
|
||||
if x, ok := x.GetKind().(*Value_BytesValue); ok {
|
||||
return x.BytesValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetEnumValue() *EnumValue {
|
||||
if x, ok := x.GetKind().(*Value_EnumValue); ok {
|
||||
return x.EnumValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetObjectValue() *anypb.Any {
|
||||
if x, ok := x.GetKind().(*Value_ObjectValue); ok {
|
||||
return x.ObjectValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetMapValue() *MapValue {
|
||||
if x, ok := x.GetKind().(*Value_MapValue); ok {
|
||||
return x.MapValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetListValue() *ListValue {
|
||||
if x, ok := x.GetKind().(*Value_ListValue); ok {
|
||||
return x.ListValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetTypeValue() string {
|
||||
if x, ok := x.GetKind().(*Value_TypeValue); ok {
|
||||
return x.TypeValue
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type isValue_Kind interface {
|
||||
isValue_Kind()
|
||||
}
|
||||
|
||||
type Value_NullValue struct {
|
||||
NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
|
||||
}
|
||||
|
||||
type Value_BoolValue struct {
|
||||
BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_Int64Value struct {
|
||||
Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_Uint64Value struct {
|
||||
Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_DoubleValue struct {
|
||||
DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_StringValue struct {
|
||||
StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_BytesValue struct {
|
||||
BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_EnumValue struct {
|
||||
EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_ObjectValue struct {
|
||||
ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_MapValue struct {
|
||||
MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_ListValue struct {
|
||||
ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_TypeValue struct {
|
||||
TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*Value_NullValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_BoolValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_Int64Value) isValue_Kind() {}
|
||||
|
||||
func (*Value_Uint64Value) isValue_Kind() {}
|
||||
|
||||
func (*Value_DoubleValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_StringValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_BytesValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_EnumValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_ObjectValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_MapValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_ListValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_TypeValue) isValue_Kind() {}
|
||||
|
||||
type EnumValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnumValue) Reset() {
|
||||
*x = EnumValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnumValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnumValue) ProtoMessage() {}
|
||||
|
||||
func (x *EnumValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
|
||||
func (*EnumValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *EnumValue) GetType() string {
|
||||
if x != nil {
|
||||
return x.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *EnumValue) GetValue() int32 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ListValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListValue) Reset() {
|
||||
*x = ListValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListValue) ProtoMessage() {}
|
||||
|
||||
func (x *ListValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
|
||||
func (*ListValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ListValue) GetValues() []*Value {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type MapValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MapValue) Reset() {
|
||||
*x = MapValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MapValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MapValue) ProtoMessage() {}
|
||||
|
||||
func (x *MapValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
|
||||
func (*MapValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *MapValue) GetEntries() []*MapValue_Entry {
|
||||
if x != nil {
|
||||
return x.Entries
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type MapValue_Entry struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) Reset() {
|
||||
*x = MapValue_Entry{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MapValue_Entry) ProtoMessage() {}
|
||||
|
||||
func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
|
||||
func (*MapValue_Entry) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) GetKey() *Value {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) GetValue() *Value {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_cel_expr_value_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_value_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
|
||||
0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
|
||||
0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
|
||||
0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
|
||||
0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
|
||||
0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
|
||||
0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
|
||||
0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
|
||||
0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
|
||||
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
|
||||
0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
|
||||
0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
|
||||
0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
|
||||
0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
|
||||
0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
|
||||
0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
|
||||
0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
|
||||
0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
|
||||
0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_value_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_value_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_value_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_value_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_cel_expr_value_proto_goTypes = []interface{}{
|
||||
(*Value)(nil), // 0: cel.expr.Value
|
||||
(*EnumValue)(nil), // 1: cel.expr.EnumValue
|
||||
(*ListValue)(nil), // 2: cel.expr.ListValue
|
||||
(*MapValue)(nil), // 3: cel.expr.MapValue
|
||||
(*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
|
||||
(structpb.NullValue)(0), // 5: google.protobuf.NullValue
|
||||
(*anypb.Any)(nil), // 6: google.protobuf.Any
|
||||
}
|
||||
var file_cel_expr_value_proto_depIdxs = []int32{
|
||||
5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
|
||||
1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
|
||||
6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
|
||||
3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
|
||||
2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
|
||||
0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
|
||||
4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
|
||||
0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
|
||||
0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_value_proto_init() }
|
||||
func file_cel_expr_value_proto_init() {
|
||||
if File_cel_expr_value_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnumValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MapValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MapValue_Entry); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
|
||||
(*Value_NullValue)(nil),
|
||||
(*Value_BoolValue)(nil),
|
||||
(*Value_Int64Value)(nil),
|
||||
(*Value_Uint64Value)(nil),
|
||||
(*Value_DoubleValue)(nil),
|
||||
(*Value_StringValue)(nil),
|
||||
(*Value_BytesValue)(nil),
|
||||
(*Value_EnumValue)(nil),
|
||||
(*Value_ObjectValue)(nil),
|
||||
(*Value_MapValue)(nil),
|
||||
(*Value_ListValue)(nil),
|
||||
(*Value_TypeValue)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_value_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_value_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_value_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_value_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_value_proto = out.File
|
||||
file_cel_expr_value_proto_rawDesc = nil
|
||||
file_cel_expr_value_proto_goTypes = nil
|
||||
file_cel_expr_value_proto_depIdxs = nil
|
||||
}
Some files were not shown because too many files have changed in this diff.